├── .clang-format ├── .clang-tidy ├── .gitignore ├── .npmignore ├── .nvmrc ├── .travis.yml ├── API-CPP.md ├── API-JavaScript.md ├── CHANGELOG.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE.md ├── Makefile ├── README.md ├── bench └── bench-batch.js ├── bin ├── vtshave.js └── vtshaver-filters.js ├── binding.gyp ├── cloudformation ├── ci.template └── ci.template.js ├── codecov.yml ├── common.gypi ├── lib ├── index.js ├── styleToFilters.js └── vtshaver.js ├── mason-versions.ini ├── package-lock.json ├── package.json ├── scripts ├── clang-format.sh ├── clang-tidy.sh ├── coverage.sh ├── create_scheme.sh ├── generate_compile_commands.py ├── install_node.sh ├── leak_suppressions.txt ├── library.xcscheme ├── node.xcscheme ├── publish.sh └── sanitize.sh ├── src ├── filters.cpp ├── filters.hpp ├── shave.cpp ├── shave.hpp └── vtshaver.cpp ├── test ├── cli.test.js ├── fixtures │ ├── filters │ │ ├── bright-filter.json │ │ ├── expressions-filter.json │ │ ├── expressions-properties.json │ │ └── floating-filter.json │ ├── properties │ │ └── floating-filter.json │ ├── styles │ │ ├── bright-v9.json │ │ ├── cafe.json │ │ ├── expressions-legacy.json │ │ ├── expressions.json │ │ ├── floating-point-zoom.json │ │ ├── one-feature.json │ │ ├── properties.json │ │ └── water.json │ └── tiles │ │ ├── feature-single-point-no-id.mvt │ │ ├── invalid.mvt │ │ ├── sf_16_10465_25329.vector.pbf │ │ └── z16-housenum.mvt ├── mvtfixtures.test.js ├── propertyKeyValueFilter-Error.test.js ├── propertyKeyValueFilter.test.js ├── speed.js ├── styleToFilter-property.test.js ├── styleToFilter.test.js ├── temp.js └── vtshaver.test.js └── vendor └── nunicode ├── LICENSE ├── files.txt ├── include └── libnu │ ├── casemap.h │ ├── casemap_internal.h │ ├── config.h │ ├── defines.h │ ├── ducet.h │ ├── mph.h │ ├── strcoll.h │ ├── strcoll_internal.h │ ├── strings.h │ ├── udb.h │ ├── unaccent.h │ ├── utf8.h │ └── utf8_internal.h ├── src └── libnu │ ├── ducet.c │ ├── gen │ ├── _ducet.c │ ├── 
_ducet_switch.c │ ├── _tolower.c │ ├── _tounaccent.c │ └── _toupper.c │ ├── strcoll.c │ ├── strings.c │ ├── tolower.c │ ├── tounaccent.c │ ├── toupper.c │ └── utf8.c └── version.txt /.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | # Mapbox.Variant C/C+ style 3 | Language: Cpp 4 | AccessModifierOffset: -2 5 | AlignAfterOpenBracket: Align 6 | AlignConsecutiveAssignments: false 7 | AlignConsecutiveDeclarations: false 8 | AlignEscapedNewlinesLeft: false 9 | AlignOperands: true 10 | AlignTrailingComments: true 11 | AllowAllParametersOfDeclarationOnNextLine: true 12 | AllowShortBlocksOnASingleLine: false 13 | AllowShortCaseLabelsOnASingleLine: false 14 | AllowShortFunctionsOnASingleLine: All 15 | AllowShortIfStatementsOnASingleLine: true 16 | AllowShortLoopsOnASingleLine: true 17 | AlwaysBreakAfterDefinitionReturnType: None 18 | AlwaysBreakAfterReturnType: None 19 | AlwaysBreakBeforeMultilineStrings: false 20 | AlwaysBreakTemplateDeclarations: true 21 | BinPackArguments: true 22 | BinPackParameters: true 23 | BraceWrapping: 24 | AfterClass: true 25 | AfterControlStatement: true 26 | AfterEnum: true 27 | AfterFunction: true 28 | AfterNamespace: false 29 | AfterObjCDeclaration: true 30 | AfterStruct: true 31 | AfterUnion: true 32 | BeforeCatch: true 33 | BeforeElse: true 34 | IndentBraces: false 35 | BreakBeforeBinaryOperators: None 36 | BreakBeforeBraces: Attach 37 | BreakBeforeTernaryOperators: true 38 | BreakConstructorInitializersBeforeComma: false 39 | ColumnLimit: 0 40 | CommentPragmas: '^ IWYU pragma:' 41 | ConstructorInitializerAllOnOneLineOrOnePerLine: true 42 | ConstructorInitializerIndentWidth: 4 43 | ContinuationIndentWidth: 4 44 | Cpp11BracedListStyle: true 45 | DerivePointerAlignment: false 46 | DisableFormat: false 47 | ExperimentalAutoDetectBinPacking: false 48 | ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] 49 | IncludeCategories: 50 | - Regex: '^"(llvm|llvm-c|clang|clang-c)/' 51 | 
Priority: 2 52 | - Regex: '^(<|"(gtest|isl|json)/)' 53 | Priority: 3 54 | - Regex: '.*' 55 | Priority: 1 56 | IndentCaseLabels: false 57 | IndentWidth: 4 58 | IndentWrappedFunctionNames: false 59 | KeepEmptyLinesAtTheStartOfBlocks: true 60 | MacroBlockBegin: '' 61 | MacroBlockEnd: '' 62 | MaxEmptyLinesToKeep: 1 63 | NamespaceIndentation: None 64 | ObjCBlockIndentWidth: 2 65 | ObjCSpaceAfterProperty: false 66 | ObjCSpaceBeforeProtocolList: true 67 | PenaltyBreakBeforeFirstCallParameter: 19 68 | PenaltyBreakComment: 300 69 | PenaltyBreakFirstLessLess: 120 70 | PenaltyBreakString: 1000 71 | PenaltyExcessCharacter: 1000000 72 | PenaltyReturnTypeOnItsOwnLine: 60 73 | PointerAlignment: Left 74 | ReflowComments: true 75 | SortIncludes: true 76 | SpaceAfterCStyleCast: false 77 | SpaceBeforeAssignmentOperators: true 78 | SpaceBeforeParens: ControlStatements 79 | SpaceInEmptyParentheses: false 80 | SpacesBeforeTrailingComments: 1 81 | SpacesInAngles: false 82 | SpacesInContainerLiterals: true 83 | SpacesInCStyleCastParentheses: false 84 | SpacesInParentheses: false 85 | SpacesInSquareBrackets: false 86 | Standard: Cpp11 87 | TabWidth: 4 88 | UseTab: Never -------------------------------------------------------------------------------- /.clang-tidy: -------------------------------------------------------------------------------- 1 | --- 2 | Checks: '*,-fuchsia*,-llvm-header-guard,-google-readability-todo,-cppcoreguidelines-pro-type-reinterpret-cast,-cppcoreguidelines-owning-memory,-modernize-use-trailing-return-type,-readability-magic-numbers,-cppcoreguidelines-avoid-magic-numbers' 3 | WarningsAsErrors: '*,-modernize-avoid-c-arrays,-hicpp-avoid-c-arrays,-cppcoreguidelines-avoid-c-arrays' 4 | HeaderFilterRegex: '\/src\/' 5 | AnalyzeTemporaryDtors: false 6 | CheckOptions: 7 | - key: google-readability-braces-around-statements.ShortStatementLines 8 | value: '1' 9 | - key: google-readability-function-size.StatementThreshold 10 | value: '800' 11 | - key: 
google-readability-namespace-comments.ShortNamespaceLines 12 | value: '10' 13 | - key: google-readability-namespace-comments.SpacesBeforeComments 14 | value: '2' 15 | - key: modernize-loop-convert.MaxCopySize 16 | value: '16' 17 | - key: modernize-loop-convert.MinConfidence 18 | value: reasonable 19 | - key: modernize-loop-convert.NamingStyle 20 | value: CamelCase 21 | - key: modernize-pass-by-value.IncludeStyle 22 | value: llvm 23 | - key: modernize-replace-auto-ptr.IncludeStyle 24 | value: llvm 25 | - key: modernize-use-nullptr.NullMacros 26 | value: 'NULL' 27 | ... 28 | 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | lib/binding 2 | node_modules 3 | build 4 | mason_packages 5 | .DS_Store 6 | *tgz 7 | coverage 8 | .nyc_output 9 | .vscode 10 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | lib/binding 2 | bench 3 | codecov.yml 4 | node_modules 5 | build 6 | mason_packages 7 | .DS_Store 8 | *tgz 9 | scripts 10 | test 11 | docs 12 | .clang* 13 | .travis.yml 14 | cloudformation 15 | CONTRIBUTING.md 16 | CODE_OF_CONDUCT.md 17 | API-CPP.md 18 | API-JavaScript.md 19 | .vscode 20 | *.*~ 21 | .nyc_output 22 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | 16 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | dist: bionic 3 | 4 | # enable c++11/14 builds 5 | addons: 6 | apt: 7 | sources: [ 'ubuntu-toolchain-r-test' ] 8 | packages: [ 'libstdc++-5-dev' ] 9 | 10 | install: 11 | - node -v 12 | - which node 13 | - clang++ -v 14 | - which 
clang++ 15 | - make ${BUILDTYPE} 16 | - rm -rf mason_packages 17 | 18 | # *Here we run tests* 19 | # We prefer running tests in the 'before_script' section rather than 'script' to ensure fast failure. 20 | # Be aware that if you use the 'script' section it will continue running all commands in the section even if one line fails. 21 | # This is documented at https://docs.travis-ci.com/user/customizing-the-build#Breaking-the-Build 22 | # We don't want this behavior because otherwise we might risk publishing builds when the tests did not pass. 23 | # For this reason, we disable the 'script' section below, since we prefer using 'before_script'. 24 | before_script: 25 | - npm test 26 | 27 | script: 28 | # after successful tests, publish binaries if specified in commit message 29 | - ./scripts/publish.sh --toolset=${TOOLSET:-} --debug=$([ "${BUILDTYPE}" == 'debug' ] && echo "true" || echo "false") 30 | 31 | # the matrix allows you to specify different operating systems and environments to 32 | # run your tests and build binaries 33 | matrix: 34 | include: 35 | 36 | ## ** Builds that are published ** 37 | 38 | - os: linux 39 | env: BUILDTYPE=release TOOLSET=cfi CXXFLAGS="-fsanitize=cfi -fno-sanitize=cfi-derived-cast -fvisibility=hidden" LDFLAGS="-fsanitize=cfi -fno-sanitize=cfi-derived-cast" 40 | # linux publishable node release 41 | - os: linux 42 | env: BUILDTYPE=release 43 | # linux publishable node debug 44 | - os: linux 45 | env: BUILDTYPE=debug 46 | # osx publishable node release 47 | - os: osx 48 | osx_image: xcode12 49 | env: BUILDTYPE=release 50 | # osx publishable node asan 51 | - os: linux 52 | env: BUILDTYPE=debug TOOLSET=asan 53 | sudo: required 54 | # Overrides `install` to set up custom asan flags 55 | install: 56 | - make sanitize 57 | # Overrides `before_script` (tests are already run in `make sanitize`) 58 | before_script: true 59 | 60 | ## ** Builds that do not get published ** 61 | 62 | # g++ build (default builds all use clang++) 63 | - os: linux 64 | 
env: BUILDTYPE=debug CXX="g++-6" CC="gcc-6" LINK="g++-6" AR="ar" NM="nm" CXXFLAGS="-fext-numeric-literals" 65 | addons: 66 | apt: 67 | sources: 68 | - ubuntu-toolchain-r-test 69 | packages: 70 | - libstdc++-6-dev 71 | - g++-6 72 | # Overrides `install` to avoid initializing clang toolchain 73 | install: 74 | - make ${BUILDTYPE} 75 | # Overrides `script` to disable publishing 76 | script: true 77 | # Coverage build 78 | - os: linux 79 | env: BUILDTYPE=debug CXXFLAGS="--coverage" LDFLAGS="--coverage" 80 | # Overrides `script` to publish coverage data to codecov 81 | script: 82 | - export PATH=$(pwd)/mason_packages/.link/bin/:${PATH} 83 | - PATH=`echo $PATH | sed -e 's/:\.\/node_modules\/\.bin//'` # remove relative path ^^ 84 | - which llvm-cov 85 | - curl -S -f https://codecov.io/bash -o codecov 86 | - chmod +x codecov 87 | - ./codecov -x "llvm-cov gcov" -Z 88 | # Clang format build 89 | - os: linux 90 | env: CLANG_FORMAT 91 | # Overrides `install` to avoid initializing clang toolchain 92 | install: 93 | # Run the clang-format script. 
Any code formatting changes 94 | # will trigger the build to fail (idea here is to get us to pay attention 95 | # and get in the habit of running these locally before committing) 96 | - make format 97 | # Overrides `before_script`, no need to run tests 98 | before_script: true 99 | # Overrides `script` to disable publishing 100 | script: true 101 | # Clang tidy build 102 | - os: linux 103 | env: CLANG_TIDY 104 | # Overrides `install` to avoid initializing clang toolchain 105 | install: 106 | # First run the clang-tidy target 107 | # Any code formatting fixes automatically applied by clang-tidy 108 | # will trigger the build to fail (idea here is to get us to pay attention 109 | # and get in the habit of running these locally before committing) 110 | - make tidy 111 | # Overrides `before_script`, no need to run tests 112 | before_script: true 113 | # Overrides `script` to disable publishing 114 | script: true 115 | -------------------------------------------------------------------------------- /API-CPP.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### Table of Contents 4 | 5 | - [Filters](#filters) 6 | - [shave](#shave) 7 | 8 | ## Filters 9 | 10 | Takes optimized filter object from shaver.styleToFilters and returns c++ filters for shave. 
11 | 12 | **Parameters** 13 | 14 | - `filters` **[Object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)** the filter object from the `shaver.styleToFilters` 15 | 16 | **Examples** 17 | 18 | ```javascript 19 | var shaver = require('@mapbox/vtshaver'); 20 | var style = require('/path/to/style.json'); 21 | // get the filters object from `styleToFilters` 22 | var styleFilters = shaver.styleToFilters(style); 23 | // call the function to create filters 24 | var filters = new shaver.Filters(styleFilters); 25 | ``` 26 | 27 | ## shave 28 | 29 | Shave off unneeded layers and features, asynchronously 30 | 31 | **Parameters** 32 | 33 | - `buffer` **[Buffer](https://nodejs.org/api/buffer.html)** Vector Tile PBF 34 | - `options` **[Object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)** (optional, default `{}`) 35 | - `options.zoom` **[Number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)?** 36 | - `options.maxzoom` **[Number](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Number)?** 37 | - `options.compress` **[Object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)?** 38 | - `options.compress.type` **[String](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/String)** output a compressed shaved ['none'|'gzip'] 39 | - `callback` **[Function](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Statements/function)** from whence the shaven vector tile comes 40 | 41 | **Examples** 42 | 43 | ```javascript 44 | var shaver = require('@mapbox/vtshaver'); 45 | var fs = require('fs'); 46 | var buffer = fs.readFileSync('/path/to/vector-tile.mvt'); 47 | var style = require('/path/to/style.json'); 48 | var filters = new shaver.Filters(shaver.styleToFilters(style)); 49 | 50 | var options = { 51 | filters: filters, // required 52 | zoom: 14, // required 53 | maxzoom: 16, // optional 54 | 
compress: { // optional 55 | type: 'none' 56 | } 57 | }; 58 | 59 | shaver.shave(buffer, options, function(err, shavedTile) { 60 | if (err) throw err; 61 | console.log(shavedTile); // => vector tile buffer 62 | }); 63 | ``` 64 | -------------------------------------------------------------------------------- /API-JavaScript.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### Table of Contents 4 | 5 | - [styleToFilters](#styletofilters) 6 | 7 | ## styleToFilters 8 | 9 | Takes optimized filter object from shaver.styleToFilters and returns c++ filters for shave. 10 | 11 | **Parameters** 12 | 13 | - `style` **[Object](https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Object)** Mapbox GL Style JSON 14 | 15 | **Examples** 16 | 17 | ```javascript 18 | var shaver = require('@mapbox/vtshaver'); 19 | var style = require('/path/to/style.json'); 20 | var filters = shaver.styleToFilters(style); 21 | console.log(filters); 22 | // { 23 | // "poi_label": ["!=","maki","cafe"], 24 | // "road": ["==","class","path"], 25 | // "water": true, 26 | // ... 27 | // } 28 | ``` 29 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## v0.3.3 4 | - Replace no-op style filter expressions with `true` [#62](https://github.com/mapbox/vtshaver/pull/62). 5 | 6 | ## v0.3.2 7 | - Add missing symbol implementations for `downcase`, `upcase`, and `number-formatter` expressions. The implementation for `number-formatter` is a basic `to_string` and uses no config options. 
[#58](https://github.com/mapbox/vtshaver/pull/58) 8 | 9 | ## v0.3.1 10 | - Upgrade node to v16 11 | - Removes cxx11abi flag custom setting as it's the default for compilers building with newer libc++ 12 | - Upgrades mbgl-core to 1.6.0-cxx11abi rebuilt with new compiler to remove flag conflict 13 | - Upgrades node-pre-gyp, node-addon-api dependencies 14 | - Upgrades @mapbox/mvt-fixtures, aws-sdk, bytes, d3-queue, pbf dependencies 15 | 16 | ## v0.3.0 17 | - Support universal binaries by switching to `node-addon-api` 18 | - Binaries are now compiled with clang 10.x 19 | - `AsyncWorker` based implementation 20 | 21 | ## v0.2.1 22 | - New CLI called `vtshaver-filters` which parses a style, collapses all zoom and filter restrictions per source-layer, and outputs a json object the parsed metadata to be used for shaving. 23 | - Improvements to the `vtshave` CLI: now supporting compressed tiles as input and will output before and after bytes for the original and shaved tile. 24 | - Improvements to code coverage: now tracking both JS and C++ code. 25 | 26 | ## v0.2.0 27 | 28 | - Support key/value filter. (https://github.com/mapbox/vtshaver/issues/15) 29 | 30 | ## v0.1.3 31 | 32 | - Upgrade nan and node-pre-gyp 33 | - Don't depend directory on documentation, since it is so big. 
Require install directly (npm install -g documentation@4.0.0) 34 | 35 | ## v0.1.2 36 | 37 | * Reduced the package size 38 | * Upgraded to latest @mapbox/mvt-fixtures and @mapbox/mason-js 39 | 40 | ## v0.1.1 41 | 42 | * Fixed support for `zoom` in filter expressions (https://github.com/mapbox/vtshaver/pull/16) 43 | 44 | ## v0.1.0 45 | 46 | * It begins 47 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # global owners 2 | * @mapbox/tilesets-api 3 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of conduct 2 | 3 | Everyone is invited to participate in Mapbox’s open source projects and public discussions: we want to create a welcoming and friendly environment. Harassment of participants or other unethical and unprofessional behavior will not be tolerated in our spaces. The [Contributor Covenant](http://contributor-covenant.org) applies to all projects under the Mapbox organization and we ask that you please read [the full text](http://contributor-covenant.org/version/1/2/0/). 4 | 5 | You can learn more about our open source philosophy on [mapbox.com](https://www.mapbox.com/about/open/). -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Thanks for getting involved and contributing to the vtshaver :tada: Below are a few things to setup when submitting a PR. 4 | 5 | ## Code comments 6 | 7 | If adding new code, be sure to include relevant code comments. Code comments are a great way for others to learn from your code. This is especially true within the skeleton, since it is made for learning. 
8 | 9 | ## Update Documentation 10 | 11 | Be sure to update any documentation relevant to your change. This includes updating the [CHANGELOG.md](https://github.com/mapbox/vtshaver/blob/master/CHANGELOG.md). 12 | 13 | ## [Code Formatting](https://github.com/mapbox/node-cpp-skel/blob/8630d9f07f5ea78b5118c4ecb2fc2f4d310c9d72/docs/extended-tour.md#clang-tools) 14 | 15 | We use [this script](/scripts/clang-format.sh#L20) to install a consistent version of [`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) to format the code base. The format is automatically checked via a Travis CI build as well. Run the following script locally to ensure formatting is ready to merge: 16 | 17 | make format 18 | 19 | We also use [`clang-tidy`](https://clang.llvm.org/extra/clang-tidy/) as a C++ linter. Run the following command to lint and ensure your code is ready to merge: 20 | 21 | make tidy 22 | 23 | These commands are set from within [the Makefile](./Makefile). 24 | 25 | ## Releasing new version 26 | 27 | If you want release a new version of vtshaver: 28 | 29 | - [ ] All features are landed and tickets are closed for the milestone. 
30 | - [ ] All tests are passing on travis 31 | - [ ] Test coverage is good: same or increased 32 | - [ ] If anything has been added to `.npmignore`, then we run `make testpacked` to ensure tests pass 33 | - [ ] For any major new feature we've made a dev package and tested downstream in staging 34 | - [ ] A developer has bumped the version in the `package.json` in `master` 35 | - [ ] A developer has committed with `[publish binary]` in the commit message (this is necessary for a pre-release as well) 36 | - [ ] We've confirmed that the travis job with `[publish binary]` was fully 🍏 37 | - [ ] We've tagged a new git tag `git tag vX.X.X -a -m "vX.X.X"` and uploaded to github `git push --tags` 38 | - [ ] Update the `changelog.md` 39 | - [ ] Run npm pack and ensure that show only the intended files will be packaged and nothing unintended or accidental 40 | - [ ] Publish to npm repository: `mbx npm publish` 41 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2017, Mapbox 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # This Makefile serves a few purposes: 2 | # 3 | # 1. It provides an interface to iterate quickly while developing the C++ code in src/ 4 | # by typing `make` or `make debug`. To make iteration as fast as possible it calls out 5 | # directly to underlying build tools and skips running steps that appear to have already 6 | # been run (determined by the presence of a known file or directory). What `make` does is 7 | # the same as running `npm install --build-from-source` except that it is faster when 8 | # run a second time because it will skip re-running expensive steps. 9 | # Note: in rare cases (like if you hit `crtl-c` during an install) you might end up with 10 | # build deps only partially installed. In this case you should run `make distclean` to fully 11 | # restore your repo to is starting state and then running `make` again should start from 12 | # scratch, fixing any inconsistencies. 13 | # 14 | # 2. It provides a few commands that call out to external scripts like `make coverage` or 15 | # `make tidy`. These scripts can be called directly but this Makefile provides a more uniform 16 | # interface to call them. 
17 | # 18 | # To learn more about the build system see https://github.com/mapbox/node-cpp-skel/blob/master/docs/extended-tour.md#builds 19 | 20 | # Whether to turn compiler warnings into errors 21 | export WERROR ?= true 22 | 23 | # the default target. This line means that 24 | # just typing `make` will call `make release` 25 | default: release 26 | 27 | node_modules/node-addon-api: 28 | npm install --ignore-scripts 29 | 30 | mason_packages/headers: node_modules/node-addon-api 31 | node_modules/.bin/mason-js install 32 | 33 | mason_packages/.link/include: mason_packages/headers 34 | node_modules/.bin/mason-js link 35 | 36 | build-deps: mason_packages/.link/include 37 | 38 | release: build-deps 39 | V=1 ./node_modules/.bin/node-pre-gyp configure build --error_on_warnings=$(WERROR) --loglevel=error 40 | @echo "run 'make clean' for full rebuild" 41 | 42 | debug: mason_packages/.link/include 43 | V=1 ./node_modules/.bin/node-pre-gyp configure build --error_on_warnings=$(WERROR) --loglevel=error --debug 44 | @echo "run 'make clean' for full rebuild" 45 | 46 | coverage: build-deps 47 | ./scripts/coverage.sh 48 | 49 | tidy: build-deps 50 | ./scripts/clang-tidy.sh 51 | 52 | format: build-deps 53 | ./scripts/clang-format.sh 54 | 55 | sanitize: build-deps 56 | ./scripts/sanitize.sh 57 | 58 | clean: 59 | rm -rf lib/binding 60 | rm -rf build 61 | # remove remains from running 'make coverage' 62 | rm -f *.profraw 63 | rm -f *.profdata 64 | @echo "run 'make distclean' to also clear node_modules, mason_packages, and .mason directories" 65 | 66 | distclean: clean 67 | rm -rf node_modules 68 | rm -rf mason_packages 69 | 70 | # variable used in the `xcode` target below 71 | MODULE_NAME := $(shell node -e "console.log(require('./package.json').binary.module_name)") 72 | 73 | xcode: node_modules 74 | ./node_modules/.bin/node-pre-gyp configure -- -f xcode 75 | @# If you need more targets, e.g. 
to run other npm scripts, duplicate the last line and change NPM_ARGUMENT 76 | SCHEME_NAME="$(MODULE_NAME)" SCHEME_TYPE=library BLUEPRINT_NAME=$(MODULE_NAME) BUILDABLE_NAME=$(MODULE_NAME).node scripts/create_scheme.sh 77 | SCHEME_NAME="npm test" SCHEME_TYPE=node BLUEPRINT_NAME=$(MODULE_NAME) BUILDABLE_NAME=$(MODULE_NAME).node NODE_ARGUMENT="`npm bin tape`/tape test/*.test.js" scripts/create_scheme.sh 78 | 79 | open build/binding.xcodeproj 80 | 81 | testpack: 82 | rm -f ./*tgz 83 | npm pack 84 | 85 | testpacked: testpack 86 | rm -rf /tmp/package 87 | tar -xf *tgz --directory=/tmp/ 88 | du -h -d 0 /tmp/package 89 | cp -r test /tmp/package/ 90 | ln -s `pwd`/mason_packages /tmp/package/mason_packages 91 | (cd /tmp/package && make && make test) 92 | 93 | docs: 94 | npm install -g documentation@4.0.0 95 | npm run docs 96 | 97 | test: 98 | npm test 99 | 100 | .PHONY: test docs 101 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Vector Tile Shaver 2 | 3 | *Style-optimized vector tiles.* The shaver takes a **Mapbox Vector Tile** and a **Mapbox GL Style** and removes layers, features, and properties in the tile that are not used by the style to reduce the size of the tile. Read the feature release [blog post](https://blog.mapbox.com/style-optimized-vector-tiles-39868da81275) and the [api-documentation](https://www.mapbox.com/api-documentation/#retrieve-tiles) for more info. 
4 | 5 | [![Build Status](https://travis-ci.com/mapbox/vtshaver.svg?branch=master)](https://travis-ci.com/mapbox/vtshaver) 6 | [![codecov](https://codecov.io/gh/mapbox/vtshaver/branch/master/graph/badge.svg)](https://codecov.io/gh/mapbox/vtshaver) 7 | [![badge](https://mapbox.s3.amazonaws.com/cpp-assets/node-cpp-skel-badge_blue.svg)](https://github.com/mapbox/node-cpp-skel) 8 | 9 | ![shaved-bearded tile and unshaved-bearded tile](https://user-images.githubusercontent.com/1943001/37542004-e49656b6-2919-11e8-9635-db1b47fcd0fa.jpg) 10 | 11 | # Installation 12 | 13 | ```bash 14 | npm install @mapbox/vtshaver 15 | ``` 16 | 17 | If you want to install locally you can also do: 18 | 19 | ```bash 20 | git clone https://github.com/mapbox/vtshaver 21 | cd vtshaver 22 | npm install 23 | ``` 24 | 25 | # API Usage 26 | 27 | * [styleToFilters](API-JavaScript.md#styletofilters) 28 | * [Filters](API-CPP.md#filters) 29 | * [shave](API-CPP.md#shave) 30 | 31 | # CLI 32 | 33 | Shaver provides 2 command line tools: 34 | 35 | ## vtshave 36 | 37 | ``` 38 | vtshave [args] 39 | 40 | --tile: required: path to the input vector tile 41 | --style: required: path to a gl style to use to shave 42 | --zoom: required: the zoom level 43 | --maxzoom: optional: the maxzoom of a tileset relevant to the tile buffer being shaved 44 | --out: optional: pass a path if you want the shaved tile to be saved 45 | 46 | Will output a size comparison of how many bytes were shaved off the tile. 47 | 48 | Example: 49 | 50 | vtshave --tile tile.mvt --zoom 0 --maxzoom 16 --style style.json 51 | ``` 52 | 53 | ## vtshaver-filters 54 | 55 | ``` 56 | vtshaver-filters [args] 57 | 58 | --style: required: path to a gl style to parse 59 | --sources: optional: list of one or more sources (comma separated) to display in the output (default is all sources) 60 | --pretty: optional: whether to pretty print the output (default false). Pass '--pretty' to indent the JSON. 
61 | 62 | Will output a json object describing each of the source-layers and their parsed metadata to be used for shaving. 63 | 64 | Example: 65 | 66 | vtshaver-filters --style style.json > meta.json 67 | ``` 68 | 69 | # Develop 70 | 71 | Build binaries 72 | 73 | ``` 74 | make 75 | ``` 76 | 77 | For Mac M1 users, there are a couple of extra steps before building 78 | 79 | - Comment out linking instructions in your local binding.gyp as follows 80 | ``` 81 | # 'make_global_settings': [ 82 | # ['CXX', '<(module_root_dir)/mason_packages/.link/bin/clang++'], 83 | # ['CC', '<(module_root_dir)/mason_packages/.link/bin/clang'], 84 | # ['LINK', '<(module_root_dir)/mason_packages/.link/bin/clang++'], 85 | # ['AR', '<(module_root_dir)/mason_packages/.link/bin/llvm-ar'], 86 | # ['NM', '<(module_root_dir)/mason_packages/.link/bin/llvm-nm'] 87 | # ], 88 | ``` 89 | 90 | - Switch to x86_64 processor since arm64 has unresolved issues with the latest mbgl-core library 91 | ``` 92 | $ arch --x86_64 zsh 93 | ``` 94 | 95 | # Test 96 | 97 | ``` 98 | make test 99 | ``` 100 | 101 | Run bench test 102 | 103 | ``` 104 | node bench/bench-batch.js --iterations 50 --concurrency 10 105 | ``` 106 | 107 | Optionally combine with the `time` command 108 | 109 | # Docs 110 | 111 | Documentation is generated using Documentation.js `--polyglot` mode. 
"use strict";

var fs = require('fs');
var argv = require('minimist')(process.argv.slice(2));
var s = require('../lib/index.js');
var path = require('path');
var assert = require('assert');
var bytes = require('bytes');

// Validate required CLI arguments up front.
// BUG FIX: this identical check used to appear twice back-to-back; merged into one.
if (!argv.iterations || !argv.concurrency) {
    console.error('Please provide desired iterations and concurrency');
    console.error('Example: \n\tnode bench/bench-batch.js --iterations 50 --concurrency 10');
    console.error('Optional args: \n\t--mem (reports memory stats)');
    process.exit(1);
}

// Only gzip compression is supported; anything else falls back to gzip.
if (argv.compress) {
    if (argv.compress != 'gzip') {
        console.error("Whoops! We currently only support gzip compression. Falling back to using gzip.");
    }
    argv.compress = 'gzip';
} else {
    argv.compress = 'none';
}

// Skip benchmarking for debug/coverage builds: the numbers would be meaningless.
// BUG FIX: env var values are always strings, so compare against 'true'
// (the old `=== true` strict-equality against a boolean could never match).
if (process.env.NPM_FLAGS === '--debug' || process.env.COVERAGE === 'true') {
    console.log('# SKIP benchmark: tests are in debug or coverage mode');
    process.exit(0);
}

process.env.UV_THREADPOOL_SIZE = argv.concurrency;

var p = "node_modules/@mapbox/mvt-fixtures/real-world/chicago/";

// Get chicago tiles from real-world fixtures
fs.readdir(p, function(err, files) {
    if (err) throw err;
    start(files);
});

var start = function(files) {
    var track_mem = !!argv.mem;
    var style = require('../test/fixtures/styles/expressions.json');
    var filters = new s.Filters(s.styleToFilters(style));
    var options = {
        zoom: 13,
        filters: filters,
        compress: {
            type: argv.compress
        }
    };
    var d3_queue = require('d3-queue');
    var iterations = argv.iterations;
    var concurrency = argv.concurrency;
    // @springmeyer noticed a > 120 ops/s benefit to not passing a fixed queue size on OS X
    var queue = d3_queue.queue();
    var runs = 0;
    var memstats = {
        max_rss: 0,
        max_heap: 0,
        max_heap_total: 0
    };
    var tiles = files.map(function(file) {
        return fs.readFileSync(p + file);
    });

    // Shave a single tile, counting completed runs and (optionally) peak memory.
    function run(tile, cb) {
        s.shave(tile, options, function(err, shavedTile) {
            if (err) {
                return cb(err);
            }
            ++runs;
            // BUG FIX: sample memory on every 1000th run; the old `runs % 1000`
            // (truthy test) sampled on every run EXCEPT multiples of 1000.
            if (track_mem && runs % 1000 === 0) {
                var mem = process.memoryUsage();
                if (mem.rss > memstats.max_rss) memstats.max_rss = mem.rss;
                if (mem.heapTotal > memstats.max_heap_total) memstats.max_heap_total = mem.heapTotal;
                if (mem.heapUsed > memstats.max_heap) memstats.max_heap = mem.heapUsed;
            }
            return cb();
        });
    }

    console.log("Running benchmark...");
    var time = +(new Date());

    for (var i = 1; i <= iterations; i++) {
        tiles.forEach(function(tile) {
            queue.defer(run, tile);
        });
    }

    queue.awaitAll(function(error) {
        if (error) throw error;
        if (runs != iterations * tiles.length) {
            throw new Error("Error: did not run as expected");
        }
        // check rate
        time = +(new Date()) - time;

        // BUG FIX: `rate` used to be declared inside the else-branch but read
        // after it, so a 0ms elapsed time produced a bogus "Fail: rate(undefined..."
        // exit. Declare it here and only enforce the threshold when measurable.
        var rate;
        if (time == 0) {
            console.log("Warning: ms timer not high enough resolution to reliably track rate. Try more iterations");
        } else {
            // number of runs per second
            rate = runs / (time / 1000);
            console.log('Benchmark speed: ' + rate.toFixed(0) + ' runs/s (runs:' + runs + ' ms:' + time + ' )');

            if (track_mem) {
                console.log('Benchmark peak mem: ', bytes(memstats.max_rss), bytes(memstats.max_heap), bytes(memstats.max_heap_total));
            } else {
                console.log('Note: pass --mem to track memory usage');
            }
        }

        console.log("Benchmark iterations:", argv.iterations, "concurrency:", argv.concurrency);
        var min_rate = 1000;
        if (process.platform === 'darwin' && process.env.TRAVIS !== undefined) {
            min_rate = 1300;
        }
        if (rate !== undefined && rate <= min_rate) {
            console.error("Fail: rate(" + rate + " ops/s) <= min_rate(" + min_rate + ")");
            process.exit(-1);
        }
        if (rate !== undefined) {
            console.log("Success: rate(" + rate + " ops/s) > min_rate(" + min_rate + ")");
        }
        process.exit(0);
    });
};
25 | 26 | Example: 27 | 28 | vtshave --tile tile.mvt --zoom 0 --maxzoom 16 --style style.json 29 | 30 | ` 31 | 32 | function error(msg) { 33 | console.error(usage); 34 | console.error(msg); 35 | process.exit(1); 36 | } 37 | 38 | if (argv.tile == undefined || !fs.existsSync(argv.tile) ) { 39 | return error("please provide the path to a tile.mvt"); 40 | } 41 | 42 | if (argv.style== undefined || !fs.existsSync(argv.style)) { 43 | return error("must supply path to filters.json") 44 | } 45 | 46 | if (argv.zoom == undefined) { 47 | return error("please provide the zoom of the tile being shaved"); 48 | } 49 | 50 | var buffer = fs.readFileSync(argv.tile); 51 | var style_json = fs.readFileSync(argv.style); 52 | 53 | try { 54 | var filters = new shaver.Filters(shaver.styleToFilters(JSON.parse(style_json))); 55 | } catch (err) { 56 | console.error(err.message); 57 | process.exit(1); 58 | } 59 | 60 | var is_compressed = (buffer[0] === 0x1F && buffer[1] === 0x8B); 61 | var opts = { 62 | filters: filters, 63 | zoom: argv.zoom 64 | }; 65 | 66 | if (is_compressed) { 67 | opts.compress = {type: "gzip"}; 68 | } 69 | 70 | if (argv.maxzoom) opts.maxzoom = argv.maxzoom; 71 | 72 | shaver.shave(buffer, opts, function(err, shavedBuffer) { 73 | if (err) throw err.message; 74 | 75 | if (is_compressed) { 76 | console.log('Before (gzip):\n',bytes(buffer.length)); 77 | console.log('After (gzip):\n',bytes(shavedBuffer.length)); 78 | console.log('Savings (gzip):\n',(shavedBuffer.length/buffer.length*100).toFixed(2)+'%'); 79 | const og_decompressed = zlib.gunzipSync(buffer); 80 | const shaved_decompressed = zlib.gunzipSync(shavedBuffer); 81 | console.log('Before (raw):\n',bytes(og_decompressed.length)); 82 | console.log('After (raw):\n',bytes(shaved_decompressed.length)); 83 | console.log('Savings (raw):\n',(shaved_decompressed.length/og_decompressed.length*100).toFixed(2)+'%'); 84 | } else { 85 | const og_compressed = zlib.gzipSync(buffer); 86 | const shaved_compressed = 
zlib.gzipSync(shavedBuffer); 87 | console.log('Before (gzip):\n',bytes(og_compressed.length)); 88 | console.log('After (gzip):\n',bytes(shaved_compressed.length)); 89 | console.log('Savings (gzip):\n',(shaved_compressed.length/og_compressed.length*100).toFixed(2)+'%'); 90 | console.log('Before (raw):\n',bytes(buffer.length)); 91 | console.log('After (raw):\n',bytes(shavedBuffer.length)); 92 | console.log('Savings (raw):\n',(shavedBuffer.length/buffer.length*100).toFixed(2)+'%'); 93 | } 94 | 95 | if (argv.out != undefined) { 96 | fs.writeFileSync(argv.out,shavedBuffer); 97 | console.log('Wrote shaved tile to ' + argv.out); 98 | } 99 | }) 100 | -------------------------------------------------------------------------------- /bin/vtshaver-filters.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | "use strict"; 4 | 5 | const fs = require('fs'); 6 | const path = require('path'); 7 | const argv = require('minimist')(process.argv.slice(2)); 8 | const styleToFilters = require('../lib/styleToFilters.js'); 9 | 10 | const usage = `usage: 11 | 12 | vtshaver-filters [args] 13 | 14 | --style: required: path to a gl style to parse 15 | --sources: optional: list of one or more sources (comma separated) to display in the output (default is all sources) 16 | --pretty: optional: whether to pretty print the output (default false). Pass '--pretty' to indent the JSON. 17 | 18 | Will output a json object describing each of the source-layers and their parsed metadata to be used for shaving. 
19 | 20 | Example: 21 | 22 | vtshaver-filters --style style.json > meta.json 23 | 24 | ` 25 | 26 | function error(msg) { 27 | console.error(usage); 28 | console.error(msg); 29 | process.exit(1); 30 | } 31 | 32 | if (argv.style == undefined || !fs.existsSync(argv.style)) { 33 | return error("must supply path to style.json"); 34 | } 35 | 36 | try { 37 | const style_json = fs.readFileSync(argv.style); 38 | const meta = styleToFilters(JSON.parse(style_json)); 39 | let indent = 0; 40 | if (argv.pretty !== undefined) { 41 | indent = 4; 42 | } 43 | if (argv.sources !== undefined) { 44 | const sources = argv.sources.split(','); 45 | var limited_meta = {}; 46 | Object.keys(meta).forEach(function(k) { 47 | if (sources.includes(k)) { 48 | limited_meta[k] = meta[k]; 49 | } 50 | }); 51 | console.log(JSON.stringify(limited_meta,null,indent)); 52 | } else { 53 | console.log(JSON.stringify(meta,null,indent)); 54 | } 55 | 56 | } catch (err) { 57 | console.error(err.message); 58 | process.exit(1); 59 | } -------------------------------------------------------------------------------- /binding.gyp: -------------------------------------------------------------------------------- 1 | # This file inherits default targets for Node addons, see https://github.com/nodejs/node-gyp/blob/master/addon.gypi 2 | { 3 | # https://github.com/springmeyer/gyp/blob/master/test/make_global_settings/wrapper/wrapper.gyp 4 | 'make_global_settings': [ 5 | ['CXX', '<(module_root_dir)/mason_packages/.link/bin/clang++'], 6 | ['CC', '<(module_root_dir)/mason_packages/.link/bin/clang'], 7 | ['LINK', '<(module_root_dir)/mason_packages/.link/bin/clang++'], 8 | ['AR', '<(module_root_dir)/mason_packages/.link/bin/llvm-ar'], 9 | ['NM', '<(module_root_dir)/mason_packages/.link/bin/llvm-nm'] 10 | ], 11 | 'includes': [ 'common.gypi' ], 12 | 'variables': { # custom variables we use specific to this file 13 | 'error_on_warnings%':'true', # can be overriden by a command line variable because of the % sign using "WERROR" 
(defined in Makefile) 14 | # Use this variable to silence warnings from mason dependencies 15 | # It's a variable to make easy to pass to 16 | # cflags (linux) and xcode (mac) 17 | 'system_includes': [ 18 | "-isystem , std::allocator > const&) 99 | './mason_packages/.link/platform/default/src/mbgl/util/utf.cpp', 100 | # mbgl::platform::lowercase and mbgl::platform::upcase 101 | './mason_packages/.link/platform/default/src/mbgl/util/string_stdlib.cpp', 102 | './vendor/nunicode/src/libnu/ducet.c', 103 | './vendor/nunicode/src/libnu/strcoll.c', 104 | './vendor/nunicode/src/libnu/strings.c', 105 | './vendor/nunicode/src/libnu/tolower.c', 106 | './vendor/nunicode/src/libnu/tounaccent.c', 107 | './vendor/nunicode/src/libnu/toupper.c', 108 | './vendor/nunicode/src/libnu/utf8.c', 109 | # Bring in mbgl::platform::formatNumber 110 | './mason_packages/.link/platform/default/src/mbgl/i18n/number_format.cpp', 111 | ], 112 | # Not enabling eager binding because there are unused symbols declared but not defined (e.g., heatmap program) 113 | # 'ldflags': [ 114 | # '-Wl,-z,now' 115 | # ], 116 | "libraries": [ 117 | # static linking (combining): Take a lib and smoosh it into the thing you're building. 118 | # A portable file extension name. Build static lib (.a) then when you're linking, 119 | # you're smooshing it into your lib. Static lib is linked when we build a project, rather than at runtime. 120 | # But Dynamic lib is loaded at runtime. 
(.node is a type of dynamic lib cause it's loaded into node at runtime) 121 | "<(module_root_dir)/mason_packages/.link/lib/libmbgl-core.a" 122 | ], 123 | 'conditions': [ 124 | ['error_on_warnings == "true"', { 125 | 'cflags_cc' : [ '-Werror' ], 126 | 'xcode_settings': { 127 | 'OTHER_CPLUSPLUSFLAGS': [ '-Werror' ], 128 | 'OTHER_LDFLAGS': ['-framework Foundation'] 129 | } 130 | }] 131 | ], 132 | # Add to cpp glossary (or other doc in cpp repo) different types of binaries (.node, .a, static, dynamic (.so on linux and .dylib on osx)) 133 | # talk from cppcon by person from Apple, exploration of every builds systems in c++ are awful since theyre system-specific 134 | 'cflags': [ 135 | '<@(system_includes)', 136 | '<@(compiler_checks)' 137 | ], 138 | 'xcode_settings': { 139 | 'OTHER_LDFLAGS':[ 140 | '-framework Foundation' 141 | ], 142 | 'OTHER_CFLAGS': [ 143 | "-isystem <(module_root_dir)/vendor/nunicode/include" 144 | ], 145 | 'OTHER_CPLUSPLUSFLAGS': [ 146 | '<@(system_includes)', 147 | '<@(compiler_checks)' 148 | ], 149 | 'GCC_ENABLE_CPP_RTTI': 'YES', 150 | 'GCC_ENABLE_CPP_EXCEPTIONS': 'YES', 151 | 'MACOSX_DEPLOYMENT_TARGET':'10.11', 152 | 'CLANG_CXX_LIBRARY': 'libc++', 153 | 'CLANG_CXX_LANGUAGE_STANDARD':'c++14', 154 | 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0' 155 | } 156 | 157 | } 158 | ] 159 | } 160 | -------------------------------------------------------------------------------- /cloudformation/ci.template: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "user for publishing to s3://mapbox-node-binary/@mapbox/vtshaver", 4 | "Resources": { 5 | "User": { 6 | "Type": "AWS::IAM::User", 7 | "Properties": { 8 | "Policies": [ 9 | { 10 | "PolicyName": "list", 11 | "PolicyDocument": { 12 | "Statement": [ 13 | { 14 | "Action": [ 15 | "s3:ListBucket" 16 | ], 17 | "Effect": "Allow", 18 | "Resource": "arn:aws:s3:::mapbox-node-binary", 19 | "Condition": { 20 | 
"StringLike": { 21 | "s3:prefix": [ 22 | "@mapbox/vtshaver/*" 23 | ] 24 | } 25 | } 26 | } 27 | ] 28 | } 29 | }, 30 | { 31 | "PolicyName": "publish", 32 | "PolicyDocument": { 33 | "Statement": [ 34 | { 35 | "Action": [ 36 | "s3:DeleteObject", 37 | "s3:GetObject", 38 | "s3:GetObjectAcl", 39 | "s3:PutObject", 40 | "s3:PutObjectAcl" 41 | ], 42 | "Effect": "Allow", 43 | "Resource": "arn:aws:s3:::mapbox-node-binary/@mapbox/vtshaver/*" 44 | } 45 | ] 46 | } 47 | } 48 | ] 49 | } 50 | }, 51 | "AccessKey": { 52 | "Type": "AWS::IAM::AccessKey", 53 | "Properties": { 54 | "UserName": { 55 | "Ref": "User" 56 | } 57 | } 58 | } 59 | }, 60 | "Outputs": { 61 | "AccessKeyId": { 62 | "Value": { 63 | "Ref": "AccessKey" 64 | } 65 | }, 66 | "SecretAccessKey": { 67 | "Value": { 68 | "Fn::GetAtt": [ 69 | "AccessKey", 70 | "SecretAccessKey" 71 | ] 72 | } 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /cloudformation/ci.template.js: -------------------------------------------------------------------------------- 1 | var cf = require('@mapbox/cloudfriend'); 2 | var package_json = require('../package.json') 3 | 4 | module.exports = { 5 | AWSTemplateFormatVersion: '2010-09-09', 6 | Description: 'user for publishing to s3://mapbox-node-binary/' + package_json.name, 7 | Resources: { 8 | User: { 9 | Type: 'AWS::IAM::User', 10 | Properties: { 11 | Policies: [ 12 | { 13 | PolicyName: 'list', 14 | PolicyDocument: { 15 | Statement: [ 16 | { 17 | Action: ['s3:ListBucket'], 18 | Effect: 'Allow', 19 | Resource: 'arn:aws:s3:::mapbox-node-binary', 20 | Condition : { 21 | StringLike : { 22 | "s3:prefix": [ package_json.name + "/*"] 23 | } 24 | } 25 | } 26 | ] 27 | } 28 | }, 29 | { 30 | PolicyName: 'publish', 31 | PolicyDocument: { 32 | Statement: [ 33 | { 34 | Action: ['s3:DeleteObject', 's3:GetObject', 's3:GetObjectAcl', 's3:PutObject', 's3:PutObjectAcl'], 35 | Effect: 'Allow', 36 | Resource: 'arn:aws:s3:::mapbox-node-binary/' + package_json.name + 
'/*' 37 | } 38 | ] 39 | } 40 | } 41 | ] 42 | } 43 | }, 44 | AccessKey: { 45 | Type: 'AWS::IAM::AccessKey', 46 | Properties: { 47 | UserName: cf.ref('User') 48 | } 49 | } 50 | }, 51 | Outputs: { 52 | AccessKeyId: { 53 | Value: cf.ref('AccessKey') 54 | }, 55 | SecretAccessKey: { 56 | Value: cf.getAtt('AccessKey', 'SecretAccessKey') 57 | } 58 | } 59 | }; -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - "test" -------------------------------------------------------------------------------- /common.gypi: -------------------------------------------------------------------------------- 1 | { 2 | 'target_defaults': { 3 | 'default_configuration': 'Release', 4 | 'cflags_cc' : ['-std=c++14'], 5 | 'cflags_cc!': ['-std=gnu++0x','-std=gnu++1y', '-fno-rtti', '-fno-exceptions'], 6 | 'configurations': { 7 | 'Debug': { 8 | 'defines!': [ 9 | 'NDEBUG' 10 | ], 11 | 'cflags_cc!': [ 12 | '-O3', 13 | '-Os', 14 | '-DNDEBUG' 15 | ], 16 | 'xcode_settings': { 17 | 'OTHER_CPLUSPLUSFLAGS!': [ 18 | '-O3', 19 | '-Os', 20 | '-DDEBUG' 21 | ], 22 | 'GCC_OPTIMIZATION_LEVEL': '0', 23 | 'GCC_GENERATE_DEBUGGING_SYMBOLS': 'YES' 24 | } 25 | }, 26 | 'Release': { 27 | 'defines': [ 28 | 'NDEBUG' 29 | ], 30 | 'cflags': [ 31 | '-flto', '-fvisibility=hidden' 32 | ], 33 | 'ldflags': [ 34 | '-flto', 35 | '-fuse-ld=<(module_root_dir)/mason_packages/.link/bin/ld' 36 | ], 37 | 'xcode_settings': { 38 | 'OTHER_CPLUSPLUSFLAGS!': [ 39 | '-Os', 40 | '-O2' 41 | ], 42 | 'OTHER_LDFLAGS':[ '-flto' ], 43 | 'OTHER_CPLUSPLUSFLAGS': [ '-flto', '-fvisibility=hidden' ], 44 | 'GCC_OPTIMIZATION_LEVEL': '3', 45 | 'GCC_GENERATE_DEBUGGING_SYMBOLS': 'NO', 46 | 'DEAD_CODE_STRIPPING': 'YES', 47 | 'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES' 48 | } 49 | } 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /lib/index.js: 
// import {isExpression} from ;
let styleSpec = require('@mapbox/mapbox-gl-style-spec');

/**
 * Takes a Mapbox GL Style JSON and returns, per source-layer, the merged
 * filters, widened zoom range, and the list of feature properties the style
 * actually uses — the metadata shaver.Filters consumes.
 * @function styleToFilters
 * @param {Object} style - Mapbox GL Style JSON
 * @returns {Object} map of source-layer name -> {filters, minzoom, maxzoom, properties}
 * @example
 * var shaver = require('@mapbox/vtshaver');
 * var style = require('/path/to/style.json');
 * var filters = shaver.styleToFilters(style);
 * console.log(filters);
 * // {
 * //   "poi_label": ["!=","maki","cafe"],
 * //   "road": ["==","class","path"],
 * //   "water": true,
 * //   ...
 * // }
 */
function styleToFilters(style) {
    var layers = {};
    // Store layers and filters used in style
    if (style && style.layers) {
        for (var i = 0; i < style.layers.length; i++) {
            var layerName = style.layers[i]['source-layer'];
            if (layerName) {
                // if the layer already exists in our filters, update it
                if (layers[layerName]) {
                    // Widen the zoom range to cover both style layers.
                    var styleMin = style.layers[i].minzoom || 0;
                    var styleMax = style.layers[i].maxzoom || 22;
                    if (styleMin < layers[layerName].minzoom) layers[layerName].minzoom = styleMin;
                    if (styleMax > layers[layerName].maxzoom) layers[layerName].maxzoom = styleMax;
                    // Merge filters: any unfiltered style layer means keep everything.
                    if (layers[layerName].filters === true || !style.layers[i].filter) {
                        layers[layerName].filters = true;
                    } else {
                        let filter = replaceNoOpExpressions(style.layers[i].filter);
                        layers[layerName].filters.push(filter === 'noop' ? ['literal', true] : filter);
                    }
                } else {
                    // otherwise create the layer & filter array, with min/max zoom
                    layers[layerName] = {};
                    if (style.layers[i].filter) {
                        let filter = replaceNoOpExpressions(style.layers[i].filter);
                        layers[layerName].filters = ['any', filter === 'noop' ? ['literal', true] : filter]
                    } else {
                        layers[layerName].filters = true;
                    }
                    layers[layerName].minzoom = style.layers[i].minzoom || 0;
                    layers[layerName].maxzoom = style.layers[i].maxzoom || 22;
                }

                // Collect the used properties
                // 1. from paint and layout
                layers[layerName].properties = layers[layerName].properties || [];
                ['paint', 'layout'].forEach(item => {
                    let itemObject = style.layers[i][item];
                    itemObject && getPropertyFromLayoutAndPainter(itemObject, layers[layerName].properties);
                });
                // 2. from filter
                if (style.layers[i].filter) {
                    getPropertyFromFilter(style.layers[i].filter, layers[layerName].properties);
                }
            }
        }
    }

    // Remove duplicate properties; a literal `true` means "keep all properties".
    Object.keys(layers).forEach(layerId => {
        let properties = layers[layerId].properties;
        if (properties.indexOf(true) !== -1) {
            layers[layerId].properties = true;
        } else {
            let unique = {};
            properties.forEach(function(i) {
                if (!unique[i]) {
                    unique[i] = true;
                }
            });
            layers[layerId].properties = Object.keys(unique);
        }
    });
    return layers;
}

/**
 * Collect property names referenced by a layer `filter` (legacy or
 * expression syntax) into `properties`.
 * @param {Array} filter - filter value from a style layer
 * @param {Array} properties - accumulator, mutated in place
 */
function getPropertyFromFilter(filter, properties) {
    if (styleSpec.expression.isExpression(filter)) {
        getPropertyFromExpression(filter, properties);
    }

    // Warning: the code below should live in an `else` branch, but since
    // `isExpression` cannot reliably distinguish an expression from legacy
    // filter syntax it runs unconditionally. This may cost performance or
    // hide subtle bugs, so keep an eye on it.

    // else {
    let subFilter = [];
    for (let i = 0; i < filter.length; i++) {
        if (typeof filter[i] === 'object' && filter[i] instanceof Array) {
            subFilter.push(filter[i]);
        }
    }

    if (subFilter.length > 0) {
        // Combinator (e.g. ["all", f1, f2]): recurse into each sub-filter.
        subFilter.forEach(sfilter => {
            getPropertyFromFilter(sfilter, properties);
        })
    } else {
        // Leaf legacy filter like ["==", key, value]; `$`-prefixed keys
        // (e.g. "$type") are feature metadata, not feature properties.
        if (filter.length >= 3 && typeof filter[1] === 'string') {
            if (filter[1].indexOf('$') === -1) {
                properties.push(filter[1]);
            }
        }
    }
    // }
}

/**
 * Collect property names referenced by a `paint` or `layout` object
 * into `properties`.
 * @param {Object} propertyObj - a layer's paint or layout object (or a nested value)
 * @param {Array} properties - accumulator, mutated in place
 */
function getPropertyFromLayoutAndPainter(propertyObj, properties) {
    Object.keys(propertyObj).forEach(key => {
        let value = propertyObj[key];
        // TODO we still have other situations:
        // - special properties: `mapbox_clip_start`, `mapbox_clip_end`
        if (typeof value === 'string') {
            // if the value is a string, extract property names from `xx{PropertyName}xx`.
            // The /{[^}]+}/ig regex returns every {xxx} token:
            // eg 'a{hello}badfa' => ['{hello}']
            // eg 'a{hello}ba{world}dfa' => ['{hello}','{world}']
            let preProperties = value.match(/{[^}]+}/ig);
            preProperties && preProperties.forEach(item => {
                properties.push(item.slice(1, -1));
            });
        } else if (value && typeof value === 'object' && typeof value.property === 'string') {
            // - legacy functions with `property`
            properties.push(value.property);
        } else if (value && typeof value === 'object') {
            // BUG FIX: guard against null — `typeof null === 'object'`, so the old
            // code threw when reading `value.property` on a null paint/layout value.
            // Non-object primitives (numbers, booleans) are skipped, which matches
            // the old net effect (they never contributed properties).
            if (styleSpec.expression.isExpression(value)) {
                // TODO: implemented by ourselves in vtshaver; check with the
                // style-spec maintainers whether an official way to list used
                // properties exists, so this stays in sync with expression updates.
                getPropertyFromExpression(value, properties);
            } else {
                // otherwise continue walking nested objects/arrays
                getPropertyFromLayoutAndPainter(value, properties);
            }
        }
    })
}
/**
 * Replace filter sub-expressions that cannot be evaluated server-side
 * (`pitch`, `distance-from-center`) with the sentinel string 'noop'.
 * Inside an `any`/`all` combinator a 'noop' branch becomes `['literal', true]`
 * so the combinator keeps every feature that branch might have matched;
 * anywhere else the whole expression collapses to 'noop'.
 * @param {*} exp - expression (arrays are rewritten, everything else passes through)
 * @returns {*} rewritten expression, or the string 'noop'
 */
function replaceNoOpExpressions(exp) {
    if (!(exp instanceof Array)) return exp;

    var head = exp[0];
    if (head === 'pitch' || head === 'distance-from-center') return 'noop';

    // Rewrite children depth-first; non-array children pass through untouched.
    var rewritten = exp.map(function(child) {
        return child instanceof Array ? replaceNoOpExpressions(child) : child;
    });

    if (rewritten.indexOf('noop') === -1) return rewritten;

    // A 'noop' child surfaced: only `any`/`all` can absorb it as a literal true.
    if (head === 'any' || head === 'all') {
        return rewritten.map(function(child) {
            return child === 'noop' ? ['literal', true] : child;
        });
    }
    return 'noop';
}

/**
 * Walk an expression and record the feature properties it reads.
 * Recognizes ["get", name] / ["has", name] (only when no second object
 * argument redirects the lookup away from the feature), ["feature-state", name],
 * and ["properties"] — the last pushes `true`, meaning "keep all properties".
 * @param {*} exp - candidate expression (non-arrays are ignored)
 * @param {Array} properties - accumulator, mutated in place
 */
function getPropertyFromExpression(exp, properties) {
    if (!(exp instanceof Array)) return;

    var op = exp[0];
    if (op === 'get' || op === 'has') {
        if (typeof exp[1] === 'string' && !(exp[2] && typeof exp[2] === 'object')) {
            properties.push(exp[1]);
        }
    } else if (op === 'feature-state') {
        properties.push(exp[1]);
    } else if (op === 'properties') {
        properties.push(true);
    }

    // Recurse into every array-valued element, whatever the operator was.
    for (var i = 0; i < exp.length; i++) {
        if (exp[i] instanceof Array) {
            getPropertyFromExpression(exp[i], properties);
        }
    }
}
require('path'); 6 | var binding_path = binary.find(path.resolve(path.join(__dirname,'../package.json'))); 7 | var styleToFilters = require(__dirname + '/styleToFilters.js'); 8 | 9 | var VTSHAVER = module.exports = require(binding_path); 10 | VTSHAVER.styleToFilters = styleToFilters; 11 | VTSHAVER.version = require('../package.json').version; 12 | -------------------------------------------------------------------------------- /mason-versions.ini: -------------------------------------------------------------------------------- 1 | [headers] 2 | vtzero=1.1.0 3 | protozero=1.7.0 4 | gzip-hpp=0.1.0 5 | [compiled] 6 | clang++=10.0.0 7 | clang-tidy=10.0.0 8 | clang-format=10.0.0 9 | llvm-cov=10.0.0 10 | binutils=2.31 11 | mbgl-core=1.6.0-cxx11abi 12 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@mapbox/vtshaver", 3 | "version": "0.3.3", 4 | "description": "Creates style-optimized vector tiles", 5 | "main": "./lib/index.js", 6 | "repository": { 7 | "type": "git", 8 | "url": "git@github.com:mapbox/vtshaver.git" 9 | }, 10 | "scripts": { 11 | "test": "tape test/*.test.js", 12 | "install": "node-pre-gyp install --fallback-to-build", 13 | "docs": "npm run docs-cpp && npm run docs-js", 14 | "docs-cpp": "documentation build src/*.cpp --re --polyglot -f md -o API-CPP.md", 15 | "docs-js": "documentation build lib/styleToFilters.js -f md -o API-JavaScript.md", 16 | "build:dev": "make debug" 17 | }, 18 | "author": "Mapbox", 19 | "license": "ISC", 20 | "dependencies": { 21 | "@mapbox/mapbox-gl-style-spec": "^13.12.0", 22 | "@mapbox/node-pre-gyp": "^1.0.8", 23 | "node-addon-api": "^4.3.0" 24 | }, 25 | "bin": { 26 | "vtshave": "./bin/vtshave.js", 27 | "vtshaver-filters": "./bin/vtshaver-filters.js" 28 | }, 29 | "devDependencies": { 30 | "@mapbox/mason-js": "^0.1.5", 31 | "@mapbox/mvt-fixtures": "^3.7.0", 32 | "@mapbox/vector-tile": 
#!/usr/bin/env bash

set -eu
set -o pipefail

: '

Runs clang-format on the code in src/

Return `1` if there are files to be formatted, and automatically formats them.

Returns `0` if everything looks properly formatted.

'

PATH_TO_FORMAT_SCRIPT="$(pwd)/mason_packages/.link/bin/clang-format"

# Run clang-format on all cpp and hpp files in the /src directory.
# BUG FIX: parenthesize the -name alternation; without \( \) grouping,
# find's implicit -a binds tighter than -o, so `-type f` only applied to
# the '*.hpp' branch and '*.cpp' matched entries of any type.
find src/ -type f \( -name '*.hpp' -o -name '*.cpp' \) \
    | xargs -I{} ${PATH_TO_FORMAT_SCRIPT} -i -style=file {}

# Print list of modified files
dirty=$(git ls-files --modified src/)

if [[ $dirty ]]; then
    echo "The following files have been modified:"
    echo $dirty
    git diff
    exit 1
else
    exit 0
fi
12 | TODO: should also return non-zero if clang-tidy emits warnings 13 | or errors about things it cannot automatically fix. However I cannot 14 | figure out how to get this working yet as it seems that clang-tidy 15 | always returns 0 even on errors. 16 | ' 17 | 18 | PATH_TO_CLANG_TIDY_SCRIPT="$(pwd)/mason_packages/.link/share/run-clang-tidy.py" 19 | # make sure that run-clang-tidy.py can find the right clang-tidy 20 | export PATH=$(pwd)/mason_packages/.link/bin:${PATH} 21 | 22 | # build the compile_commands.json file if it does not exist 23 | if [[ ! -f build/compile_commands.json ]]; then 24 | # We need to clean otherwise when we make the project 25 | # will will not see all the compile commands 26 | make clean 27 | # Create the build directory to put the compile_commands in 28 | # We do this first to ensure it is there to start writing to 29 | # immediately (make make not create it right away) 30 | mkdir -p build 31 | # Run make, pipe the output to the generate_compile_commands.py 32 | # and drop them in a place that clang-tidy will automatically find them 33 | RESULT=0 34 | make | tee /tmp/make-node-cpp-skel-build-output.txt || RESULT=$? 35 | if [[ ${RESULT} != 0 ]]; then 36 | echo "Build failed, could not generate compile commands for clang-tidy, aborting!" 
37 | exit ${RESULT} 38 | else 39 | cat /tmp/make-node-cpp-skel-build-output.txt | scripts/generate_compile_commands.py > build/compile_commands.json 40 | fi 41 | 42 | fi 43 | 44 | # change into the build directory so that clang-tidy can find the files 45 | # at the right paths (since this is where the actual build happens) 46 | cd build 47 | ${PATH_TO_CLANG_TIDY_SCRIPT} -fix 48 | cd ../ 49 | 50 | # Print list of modified files 51 | dirty=$(git ls-files --modified src/) 52 | 53 | if [[ $dirty ]]; then 54 | echo "The following files have been modified:" 55 | echo $dirty 56 | git diff 57 | exit 1 58 | else 59 | exit 0 60 | fi 61 | -------------------------------------------------------------------------------- /scripts/coverage.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | set -o pipefail 5 | 6 | # http://clang.llvm.org/docs/UsersManual.html#profiling-with-instrumentation 7 | # https://www.bignerdranch.com/blog/weve-got-you-covered/ 8 | 9 | make clean 10 | export CXXFLAGS="-fprofile-instr-generate -fcoverage-mapping" 11 | export LDFLAGS="-fprofile-instr-generate" 12 | make debug 13 | rm -f *profraw 14 | rm -f *gcov 15 | rm -f *profdata 16 | LLVM_PROFILE_FILE="code-%p.profraw" npm test 17 | CXX_MODULE=$(./node_modules/.bin/node-pre-gyp reveal module --silent) 18 | export PATH=$(pwd)/mason_packages/.link/bin/:${PATH} 19 | llvm-profdata merge -output=code.profdata code-*.profraw 20 | llvm-cov report ${CXX_MODULE} -instr-profile=code.profdata -use-color 21 | llvm-cov show ${CXX_MODULE} -instr-profile=code.profdata src/*.cpp -path-equivalence -use-color -format html > /tmp/coverage.html 22 | echo "open /tmp/coverage.html for HTML version of this report" 23 | -------------------------------------------------------------------------------- /scripts/create_scheme.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | set 
-o pipefail 5 | 6 | CONTAINER=build/binding.xcodeproj 7 | OUTPUT="${CONTAINER}/xcshareddata/xcschemes/${SCHEME_NAME}.xcscheme" 8 | 9 | # Required ENV vars: 10 | # - SCHEME_TYPE: type of the scheme 11 | # - SCHEME_NAME: name of the scheme 12 | 13 | # Optional ENV vars: 14 | # - NODE_ARGUMENT (defaults to "") 15 | # - BUILDABLE_NAME (defaults ot SCHEME_NAME) 16 | # - BLUEPRINT_NAME (defaults ot SCHEME_NAME) 17 | 18 | 19 | # Try to reuse the existing Blueprint ID if the scheme already exists. 20 | if [ -f "${OUTPUT}" ]; then 21 | BLUEPRINT_ID=$(sed -n "s/[ \t]*BlueprintIdentifier *= *\"\([A-Z0-9]\{24\}\)\"/\\1/p" "${OUTPUT}" | head -1) 22 | fi 23 | 24 | NODE_ARGUMENT=${NODE_ARGUMENT:-} 25 | BLUEPRINT_ID=${BLUEPRINT_ID:-$(hexdump -n 12 -v -e '/1 "%02X"' /dev/urandom)} 26 | BUILDABLE_NAME=${BUILDABLE_NAME:-${SCHEME_NAME}} 27 | BLUEPRINT_NAME=${BLUEPRINT_NAME:-${SCHEME_NAME}} 28 | 29 | mkdir -p "${CONTAINER}/xcshareddata/xcschemes" 30 | 31 | sed "\ 32 | s#{{BLUEPRINT_ID}}#${BLUEPRINT_ID}#;\ 33 | s#{{BLUEPRINT_NAME}}#${BLUEPRINT_NAME}#;\ 34 | s#{{BUILDABLE_NAME}}#${BUILDABLE_NAME}#;\ 35 | s#{{CONTAINER}}#${CONTAINER}#;\ 36 | s#{{WORKING_DIRECTORY}}#$(pwd)#;\ 37 | s#{{NODE_PATH}}#$(dirname `which node`)#;\ 38 | s#{{NODE_ARGUMENT}}#${NODE_ARGUMENT}#" \ 39 | scripts/${SCHEME_TYPE}.xcscheme > "${OUTPUT}" 40 | -------------------------------------------------------------------------------- /scripts/generate_compile_commands.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import json 5 | import os 6 | import re 7 | 8 | # Script to generate compile_commands.json based on Makefile output 9 | # Works by accepting Makefile output from stdin, parsing it, and 10 | # turning into json records. These are then printed to stdout. 
11 | # More details on the compile_commands format at: 12 | # https://clang.llvm.org/docs/JSONCompilationDatabase.html 13 | # 14 | # Note: make must be run in verbose mode, e.g. V=1 make or VERBOSE=1 make 15 | # 16 | # Usage with node-cpp-skel: 17 | # 18 | # make | ./scripts/generate_compile_commands.py > build/compile_commands.json 19 | 20 | # These work for node-cpp-skel to detect the files being compiled 21 | # They may need to be modified if you adapt this to another tool 22 | matcher = re.compile('^(.*) (.+cpp)\n') 23 | build_dir = os.path.join(os.getcwd(),"build") 24 | TOKEN_DENOTING_COMPILED_FILE='NODE_GYP_MODULE_NAME' 25 | 26 | def generate(): 27 | compile_commands = [] 28 | for line in sys.stdin.readlines(): 29 | if TOKEN_DENOTING_COMPILED_FILE in line: 30 | match = matcher.match(line) 31 | if match and 'src/mbgl' not in match.group(2): 32 | compile_commands.append({ 33 | "directory": build_dir, 34 | "command": line.strip(), 35 | "file": os.path.normpath(os.path.join(build_dir,match.group(2))) 36 | }) 37 | print(json.dumps(compile_commands,indent=4)) 38 | 39 | if __name__ == '__main__': 40 | generate() 41 | -------------------------------------------------------------------------------- /scripts/install_node.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ ${1:-false} == 'false' ]]; then 4 | echo "Error: pass node version as first argument" 5 | exit 1 6 | fi 7 | 8 | NODE_VERSION=$1 9 | 10 | # if an existing nvm is already installed we need to unload it 11 | nvm unload || true 12 | 13 | # here we set up the node version on the fly based on the matrix value. 
14 | # This is done manually so that the build works the same on OS X 15 | rm -rf ./__nvm/ && git clone --depth 1 https://github.com/creationix/nvm.git ./__nvm 16 | source ./__nvm/nvm.sh 17 | nvm install ${NODE_VERSION} 18 | nvm use ${NODE_VERSION} 19 | node --version 20 | npm --version 21 | which node -------------------------------------------------------------------------------- /scripts/leak_suppressions.txt: -------------------------------------------------------------------------------- 1 | leak:__strdup 2 | leak:v8::internal 3 | leak:node::CreateEnvironment 4 | leak:protozero::pbf_writer::add_bytes 5 | -------------------------------------------------------------------------------- /scripts/library.xcscheme: -------------------------------------------------------------------------------- 1 | 2 | 5 | 8 | 9 | 15 | 21 | 22 | 23 | 24 | 25 | 30 | 31 | 32 | 33 | 34 | 35 | 45 | 46 | 52 | 53 | 54 | 55 | 56 | 57 | 63 | 64 | 70 | 71 | 72 | 73 | 75 | 76 | 79 | 80 | 81 | -------------------------------------------------------------------------------- /scripts/node.xcscheme: -------------------------------------------------------------------------------- 1 | 2 | 5 | 8 | 9 | 15 | 21 | 22 | 23 | 24 | 25 | 30 | 31 | 32 | 33 | 34 | 35 | 46 | 49 | 50 | 51 | 57 | 58 | 59 | 60 | 63 | 64 | 65 | 66 | 70 | 71 | 72 | 73 | 74 | 75 | 81 | 82 | 88 | 89 | 90 | 91 | 93 | 94 | 97 | 98 | 99 | -------------------------------------------------------------------------------- /scripts/publish.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | set -o pipefail 5 | 6 | export COMMIT_MESSAGE=$(git log --format=%B --no-merges -n 1 | tr -d '\n') 7 | 8 | # `is_pr_merge` is designed to detect if a gitsha represents a normal 9 | # push commit (to any branch) or whether it represents travis attempting 10 | # to merge between the origin and the upstream branch. 
11 | # For more details see: https://docs.travis-ci.com/user/pull-requests 12 | function is_pr_merge() { 13 | # Get the commit message via git log 14 | # This should always be the exactly the text the developer provided 15 | local COMMIT_LOG=${COMMIT_MESSAGE} 16 | 17 | # Get the commit message via git show 18 | # If the gitsha represents a merge then this will 19 | # look something like "Merge e3b1981 into 615d2a3" 20 | # Otherwise it will be the same as the "git log" output 21 | export COMMIT_SHOW=$(git show -s --format=%B | tr -d '\n') 22 | 23 | if [[ "${COMMIT_LOG}" != "${COMMIT_SHOW}" ]]; then 24 | echo true 25 | fi 26 | } 27 | 28 | # Detect if this commit represents a tag. This is useful 29 | # to detect if we are on a travis job that is running due to 30 | # "git tags --push". In this case we don't want to publish even 31 | # if [publish binary] is present since that should refer only to the 32 | # previously job that ran for that commit and not the tag made 33 | function is_tag_commit() { 34 | export COMMIT_MATCHES_KNOWN_TAG=$(git describe --exact-match $(git rev-parse HEAD) 2> /dev/null) 35 | if [[ ${COMMIT_MATCHES_KNOWN_TAG} ]]; then 36 | echo true 37 | fi 38 | } 39 | 40 | # `publish` is used to publish binaries to s3 via commit messages if: 41 | # - the commit message includes [publish binary] 42 | # - the commit message includes [republish binary] 43 | # - the commit is not a pr_merge (checked with `is_pr_merge` function) 44 | function publish() { 45 | echo "dumping binary meta..." 46 | ./node_modules/.bin/node-pre-gyp reveal --loglevel=error $@ 47 | 48 | echo "determining publishing status..." 
49 | 50 | if [[ $(is_pr_merge) ]]; then 51 | echo "Skipping publishing because this is a PR merge commit" 52 | elif [[ $(is_tag_commit) ]]; then 53 | echo "Skipping publishing because this is a tag" 54 | else 55 | echo "Commit message was: '${COMMIT_MESSAGE}'" 56 | 57 | if [[ ${COMMIT_MESSAGE} =~ "[publish binary]" ]]; then 58 | echo "Publishing" 59 | ./node_modules/.bin/node-pre-gyp package publish $@ 60 | elif [[ ${COMMIT_MESSAGE} =~ "[republish binary]" ]]; then 61 | echo "Re-Publishing" 62 | ./node_modules/.bin/node-pre-gyp package unpublish publish $@ 63 | else 64 | echo "Skipping publishing since we did not detect either [publish binary] or [republish binary] in commit message" 65 | fi 66 | fi 67 | } 68 | 69 | function usage() { 70 | >&2 echo "Usage" 71 | >&2 echo "" 72 | >&2 echo "$ ./scripts/publish.sh " 73 | >&2 echo "" 74 | >&2 echo "All args are forwarded to node-pre-gyp like --debug" 75 | >&2 echo "" 76 | exit 1 77 | } 78 | 79 | # https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash 80 | for i in "$@" 81 | do 82 | case $i in 83 | -h | --help) 84 | usage 85 | shift 86 | ;; 87 | *) 88 | ;; 89 | esac 90 | done 91 | 92 | publish $@ 93 | -------------------------------------------------------------------------------- /scripts/sanitize.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | set -o pipefail 5 | 6 | : ' 7 | 8 | Rebuilds the code with the sanitizers and runs the tests 9 | 10 | ' 11 | 12 | # See https://github.com/mapbox/node-cpp-skel/blob/master/docs/extended-tour.md#configuration-files 13 | 14 | make clean 15 | 16 | # https://github.com/google/sanitizers/wiki/AddressSanitizerAsDso 17 | SHARED_LIB_EXT=.so 18 | if [[ $(uname -s) == 'Darwin' ]]; then 19 | SHARED_LIB_EXT=.dylib 20 | fi 21 | 22 | export MASON_LLVM_RT_PRELOAD=$(pwd)/$(ls mason_packages/.link/lib/clang/*/lib/*/libclang_rt.asan*${SHARED_LIB_EXT}) 23 | 
SUPPRESSION_FILE="/tmp/leak_suppressions.txt" 24 | echo "leak:__strdup" > ${SUPPRESSION_FILE} 25 | echo "leak:v8::internal" >> ${SUPPRESSION_FILE} 26 | echo "leak:node::CreateEnvironment" >> ${SUPPRESSION_FILE} 27 | echo "leak:node::Start" >> ${SUPPRESSION_FILE} 28 | echo "leak:node::Init" >> ${SUPPRESSION_FILE} 29 | export ASAN_SYMBOLIZER_PATH=$(pwd)/mason_packages/.link/bin/llvm-symbolizer 30 | export MSAN_SYMBOLIZER_PATH=$(pwd)/mason_packages/.link/bin/llvm-symbolizer 31 | export UBSAN_OPTIONS=print_stacktrace=1 32 | export LSAN_OPTIONS=suppressions=${SUPPRESSION_FILE} 33 | export ASAN_OPTIONS=detect_leaks=1:symbolize=1:abort_on_error=1:detect_container_overflow=1:check_initialization_order=1:detect_stack_use_after_return=1 34 | export MASON_SANITIZE="-fsanitize=address,undefined,integer,leak -fno-sanitize=vptr,function" 35 | export MASON_SANITIZE_CXXFLAGS="${MASON_SANITIZE} -fno-sanitize=vptr,function -fsanitize-address-use-after-scope -fno-omit-frame-pointer -fno-common" 36 | export MASON_SANITIZE_LDFLAGS="${MASON_SANITIZE}" 37 | # Note: to build without stopping on errors remove the -fno-sanitize-recover=all flag 38 | # You might want to do this if there are multiple errors and you want to see them all before fixing 39 | export CXXFLAGS="${MASON_SANITIZE_CXXFLAGS} ${CXXFLAGS:-} -fno-sanitize-recover=all" 40 | export LDFLAGS="${MASON_SANITIZE_LDFLAGS} ${LDFLAGS:-}" 41 | make debug 42 | export ASAN_OPTIONS=fast_unwind_on_malloc=0:${ASAN_OPTIONS} 43 | if [[ $(uname -s) == 'Darwin' ]]; then 44 | # NOTE: we must call node directly here rather than `npm test` 45 | # because OS X blocks `DYLD_INSERT_LIBRARIES` being inherited by sub shells 46 | # If this is not done right we'll see 47 | # ==18464==ERROR: Interceptors are not working. This may be because AddressSanitizer is loaded too late (e.g. via dlopen). 
48 | # 49 | # See https://github.com/mapbox/node-cpp-skel/issues/122 50 | DYLD_INSERT_LIBRARIES=${MASON_LLVM_RT_PRELOAD} \ 51 | node node_modules/.bin/tape test/*test.js 52 | else 53 | LD_PRELOAD=${MASON_LLVM_RT_PRELOAD} \ 54 | npm test 55 | fi 56 | -------------------------------------------------------------------------------- /src/filters.cpp: -------------------------------------------------------------------------------- 1 | #include "filters.hpp" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | Napi::FunctionReference Filters::constructor; // NOLINT 12 | 13 | Napi::Object Filters::Initialize(Napi::Env env, Napi::Object exports) { 14 | Napi::Function func = DefineClass(env, "Filters", {InstanceMethod<&Filters::layers>("layers")}); 15 | constructor = Napi::Persistent(func); 16 | constructor.SuppressDestruct(); 17 | exports.Set("Filters", func); 18 | return exports; 19 | } 20 | 21 | /** 22 | * Takes optimized filter object from shaver.styleToFilters and returns c++ filters for shave. 
23 | * @class Filters 24 | * @param {Object} filters - the filter object from the `shaver.styleToFilters` 25 | * @example 26 | * var shaver = require('@mapbox/vtshaver'); 27 | * var style = require('/path/to/style.json'); 28 | * // get the filters object from `styleToFilters` 29 | * var styleFilters = shaver.styleToFilters(style); 30 | * // call the function to create filters 31 | * var filters = new shaver.Filters(styleFilters); 32 | */ 33 | 34 | Filters::Filters(Napi::CallbackInfo const& info) 35 | : Napi::ObjectWrap(info) { 36 | Napi::Env env = info.Env(); 37 | try { 38 | if (info.Length() >= 1) { 39 | Napi::Value filters_val = info[0]; 40 | if (!filters_val.IsObject()) { 41 | Napi::Error::New(env, "filters must be an object and cannot be null or undefined").ThrowAsJavaScriptException(); 42 | return; 43 | } 44 | Napi::Object filters_obj = filters_val.As(); 45 | Napi::Array layers = filters_obj.GetPropertyNames(); 46 | // Loop through each layer in the object and convert its filter to a mbgl::style::Filter 47 | std::uint32_t length = layers.Length(); 48 | for (std::uint32_t i = 0; i < length; ++i) { 49 | Napi::Value layer_key = layers.Get(i); 50 | if (layer_key.IsNull() || layer_key.IsUndefined()) { 51 | Napi::Error::New(env, "layer name must be a string and cannot be null or undefined").ThrowAsJavaScriptException(); 52 | return; 53 | } 54 | 55 | Napi::Value layer_val = filters_obj.Get(layer_key); 56 | 57 | if (!layer_val.IsObject() || layer_val.IsNull() || layer_val.IsUndefined()) { 58 | Napi::Error::New(env, "layer must be an object and cannot be null or undefined").ThrowAsJavaScriptException(); 59 | return; 60 | } 61 | auto layer = layer_val.As(); 62 | 63 | // set default 0/22 for min/max zooms 64 | // if they exist in the filter object, update the values here 65 | zoom_type minzoom = 0; 66 | zoom_type maxzoom = 22; 67 | if (layer.Has("minzoom")) { 68 | Napi::Value minzoom_val = layer.Get("minzoom"); 69 | if (!minzoom_val.IsNumber() || 
minzoom_val.As().DoubleValue() < 0) { 70 | Napi::Error::New(env, "Value for 'minzoom' must be a positive number.").ThrowAsJavaScriptException(); 71 | return; 72 | } 73 | minzoom = minzoom_val.As().DoubleValue(); 74 | } else { 75 | Napi::Error::New(env, "Filter must include a minzoom property.").ThrowAsJavaScriptException(); 76 | return; 77 | } 78 | if (layer.Has("maxzoom")) { 79 | Napi::Value maxzoom_val = layer.Get("maxzoom"); 80 | if (!maxzoom_val.IsNumber() || maxzoom_val.As().DoubleValue() < 0) { 81 | Napi::Error::New(env, "Value for 'maxzoom' must be a positive number.").ThrowAsJavaScriptException(); 82 | return; 83 | } 84 | maxzoom = maxzoom_val.As().DoubleValue(); 85 | } else { 86 | Napi::Error::New(env, "Filter must include a maxzoom property.").ThrowAsJavaScriptException(); 87 | return; 88 | } 89 | // handle filters array 90 | const Napi::Value layer_filter = layer.Get("filters"); 91 | // error handling in case filter value passed in from JS-world is somehow invalid 92 | if (layer_filter.IsNull() || layer_filter.IsUndefined()) { 93 | Napi::Error::New(env, "Filters is not properly constructed.").ThrowAsJavaScriptException(); 94 | return; 95 | } 96 | 97 | // Convert each filter array to an mbgl::style::Filter object 98 | mbgl::style::Filter filter; 99 | 100 | // NOTICE: If a layer is styled, but does not have a filter, the filter value will equal 101 | // true (see logic within lib/styleToFilters.js) 102 | // Ex: { water: true } 103 | // Because of this, we check for if the filter is an array or a boolean before converting to a mbgl Filter 104 | // If a boolean and is true, create a null/empty Filter object. 
105 | Napi::Object json = env.Global().Get("JSON").As(); 106 | Napi::Function stringify = json.Get("stringify").As(); 107 | 108 | if (layer_filter.IsArray()) { 109 | mbgl::style::conversion::Error filterError; 110 | std::string filter_str = stringify.Call(json, {layer_filter}).As(); 111 | auto optional_filter = mbgl::style::conversion::convertJSON(filter_str, filterError); 112 | if (!optional_filter) { 113 | if (filterError.message == "filter property must be a string") { 114 | Napi::TypeError::New(env, "Unable to create Filter object, ensure all filters are expression-based").ThrowAsJavaScriptException(); 115 | 116 | } else { 117 | Napi::TypeError::New(env, filterError.message.c_str()).ThrowAsJavaScriptException(); 118 | } 119 | return; 120 | } 121 | filter = *optional_filter; 122 | } else if (layer_filter.IsBoolean() && layer_filter.As()) { 123 | filter = mbgl::style::Filter{}; 124 | } else { 125 | Napi::TypeError::New(env, "invalid filter value, must be an array or a boolean").ThrowAsJavaScriptException(); 126 | return; 127 | } 128 | 129 | Napi::Value const layer_properties = layer.Get("properties"); 130 | if (layer_properties.IsNull() || layer_properties.IsUndefined()) { 131 | Napi::Error::New(env, "Property-Filters is not properly constructed.").ThrowAsJavaScriptException(); 132 | return; 133 | } 134 | 135 | // NOTICE: If a layer is styled, but does not have a property, the property value will equal [] 136 | // NOTICE: If a property is true, that means we need to keep all the properties 137 | filter_properties_type property; 138 | if (layer_properties.IsArray()) { 139 | auto propertyArray = layer_properties.As(); 140 | std::uint32_t propertiesLength = propertyArray.Length(); 141 | std::vector values; 142 | values.reserve(propertiesLength); 143 | for (std::uint32_t index = 0; index < propertiesLength; ++index) { 144 | Napi::Value property_value = propertyArray.Get(index); 145 | std::string value = property_value.As(); 146 | if (!value.empty()) { 147 | 
values.emplace_back(value); 148 | } 149 | } 150 | property.first = list; 151 | property.second = values; 152 | } else if (layer_properties.IsBoolean() && layer_properties.As()) { 153 | property.first = all; 154 | property.second = {}; 155 | } else { 156 | Napi::TypeError::New(env, "invalid filter value, must be an array or a boolean").ThrowAsJavaScriptException(); 157 | return; 158 | } 159 | std::string source_layer = layer_key.ToString(); 160 | add_filter(std::move(source_layer), std::move(filter), std::move(property), minzoom, maxzoom); 161 | } 162 | } 163 | } catch (std::exception const& ex) { 164 | Napi::TypeError::New(env, ex.what()).ThrowAsJavaScriptException(); 165 | } 166 | } 167 | 168 | Napi::Value Filters::layers(Napi::CallbackInfo const& info) { 169 | Napi::EscapableHandleScope scope(info.Env()); 170 | auto layers = Napi::Array::New(Env()); 171 | std::uint32_t idx = 0; 172 | for (auto const& lay : filters) { 173 | layers.Set(idx++, lay.first); 174 | } 175 | return scope.Escape(layers); 176 | } 177 | -------------------------------------------------------------------------------- /src/filters.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | // This class adheres to the rule of Zero 9 | // because we define no custom destructor or copy constructor 10 | class Filters : public Napi::ObjectWrap { 11 | public: 12 | using filter_value_type = mbgl::style::Filter; 13 | using filter_properties_types = enum { all, 14 | list }; 15 | using filter_properties_type = std::pair>; 16 | using filter_key_type = std::string; // TODO(danespringmeyer): convert to data_view 17 | using zoom_type = double; 18 | using filter_values_type = std::tuple; 19 | using filters_type = std::map; 20 | 21 | // ctor 22 | static Napi::FunctionReference constructor; 23 | // initializer 24 | static Napi::Object Initialize(Napi::Env env, Napi::Object exports); 25 | explicit 
Filters(Napi::CallbackInfo const& info); 26 | 27 | Napi::Value layers(Napi::CallbackInfo const& info); 28 | 29 | void add_filter(filter_key_type&& key, filter_value_type&& filter, filter_properties_type&& properties, zoom_type minzoom, zoom_type maxzoom) { 30 | // add a new key/value pair, with the value equaling a tuple 'filter_values_type' defined above 31 | filters.emplace(key, std::make_tuple(std::move(filter), std::move(properties), minzoom, maxzoom)); 32 | } 33 | 34 | auto get_filters() const -> filters_type const& { 35 | return filters; 36 | } 37 | 38 | private: 39 | filters_type filters{}; 40 | }; 41 | -------------------------------------------------------------------------------- /src/shave.hpp: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | // shave, custom async method 6 | Napi::Value shave(Napi::CallbackInfo const& info); 7 | -------------------------------------------------------------------------------- /src/vtshaver.cpp: -------------------------------------------------------------------------------- 1 | #include "filters.hpp" 2 | #include "shave.hpp" 3 | 4 | Napi::Object init(Napi::Env env, Napi::Object exports) { 5 | exports.Set(Napi::String::New(env, "shave"), Napi::Function::New(env, shave)); 6 | Filters::Initialize(env, exports); 7 | return exports; 8 | } 9 | 10 | NODE_API_MODULE(NODE_GYP_MODULE_NAME, init) // NOLINT 11 | -------------------------------------------------------------------------------- /test/cli.test.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | var test = require('tape'); 3 | var path = require('path'); 4 | var fs = require('fs'); 5 | var os = require('os'); 6 | var spawn = require('child_process').spawn; 7 | 8 | var vtshave_cli = path.resolve(__dirname, '..', 'bin', 'vtshave.js'); 9 | var vtshaver_filters_cli = path.resolve(__dirname, '..', 'bin', 'vtshaver-filters.js'); 10 | var tile = 
path.join(__dirname, 'fixtures', 'tiles', 'sf_16_10465_25329.vector.pbf'); 11 | var style = path.join(__dirname, 'fixtures', 'styles', 'bright-v9.json'); 12 | 13 | if (process.env.TOOLSET && process.env.TOOLSET === 'asan') { 14 | test('vtshave cli works - SKIPPED due to ASAN build', function(t) { t.end() }); 15 | } else { 16 | test('vtshave cli works', function(t) { 17 | var args = [vtshave_cli, '--tile', tile, '--style', style, '--zoom', 16]; 18 | spawn(process.execPath, args) 19 | .on('error', function(err) { t.ifError(err, 'no error'); }) 20 | .on('close', function(code) { 21 | t.equal(code, 0, 'exit 0'); 22 | t.end(); 23 | }); 24 | }); 25 | 26 | test('vtshaver-filters cli works', function(t) { 27 | var args = [vtshaver_filters_cli, '--style', style]; 28 | spawn(process.execPath, args) 29 | .on('error', function(err) { t.ifError(err, 'no error'); }) 30 | .on('close', function(code) { 31 | t.equal(code, 0, 'exit 0'); 32 | t.end(); 33 | }); 34 | }); 35 | 36 | test('vtshaver-filters cli works with --pretty and --sources', function(t) { 37 | var args = [vtshaver_filters_cli, '--style', style, '--pretty', '--sources', 'landuse_overlay,landuse']; 38 | spawn(process.execPath, args) 39 | .on('error', function(err) { t.ifError(err, 'no error'); }) 40 | .on('close', function(code) { 41 | t.equal(code, 0, 'exit 0'); 42 | t.end(); 43 | }) 44 | .stdout.on('data', function(data) { 45 | t.deepEqual(Object.keys(JSON.parse(data.toString())),[ 'landuse_overlay', 'landuse' ]); 46 | }) 47 | }); 48 | 49 | test('vtshaver-filters cli errors on invalid style arg', function(t) { 50 | var args = [vtshaver_filters_cli]; 51 | spawn(process.execPath, args) 52 | .on('error', function(err) { t.ifError(err, 'no error'); }) 53 | .on('close', function(code) { 54 | t.equal(code, 1, 'exit 1'); 55 | t.end(); 56 | }); 57 | }); 58 | 59 | test('vtshaver-filters cli errors on invalid style that cannot be parsed', function(t) { 60 | var args = [vtshaver_filters_cli, '--style', __dirname]; 61 | 
spawn(process.execPath, args) 62 | .on('error', function(err) { t.ifError(err, 'no error'); }) 63 | .on('close', function(code) { 64 | t.equal(code, 1, 'exit 1'); 65 | t.end(); 66 | }); 67 | }); 68 | 69 | } 70 | 71 | -------------------------------------------------------------------------------- /test/fixtures/filters/bright-filter.json: -------------------------------------------------------------------------------- 1 | {"landuse_overlay":{"filters":["any",["==","class","national_park"]],"minzoom":0,"maxzoom":22,"properties":["class"]},"landuse":{"filters":["any",["==","class","park"],["==","class","school"],["==","class","wood"]],"minzoom":0,"maxzoom":22,"properties":["class"]},"waterway":{"filters":["any",["all",["!=","class","river"],["!=","class","stream"],["!=","class","canal"]],["==","class","river"],["in","class","stream","canal"]],"minzoom":0,"maxzoom":22,"properties":["class"]},"water":{"filters":true,"minzoom":0,"maxzoom":22,"properties":[]},"aeroway":{"filters":["any",["==","$type","Polygon"],["all",["==","$type","LineString"],["==","type","runway"]],["all",["==","$type","LineString"],["==","type","taxiway"]]],"minzoom":11,"maxzoom":22,"properties":["type"]},"road":{"filters":["any",["all",["==","structure","tunnel"],["==","class","motorway_link"]],["all",["==","structure","tunnel"],["in","class","secondary","tertiary"]]],"minzoom":0,"maxzoom":22,"properties":["structure","class"]}} -------------------------------------------------------------------------------- /test/fixtures/filters/expressions-filter.json: -------------------------------------------------------------------------------- 1 | 
{"landcover":{"filters":["any",["step",["zoom"],true,7,["==","class","snow"]]],"minzoom":0,"maxzoom":22,"properties":["class"]},"road":{"filters":["any",["all",["==",["geometry-type"],"LineString"],["match",["get","structure"],"tunnel",true,false],["step",["zoom"],["any",["match",["get","class"],["street","street_limited","track"],true,false],["match",["get","type"],"primary_link",true,false]],14,["any",["match",["get","class"],["street","street_limited","track","service"],true,false],["match",["get","type"],["primary_link","secondary_link","tertiary_link"],true,false]]]],["all",["==","$type","LineString"],["all",["in","class","primary","secondary","tertiary"],["==","structure","tunnel"]]]],"minzoom":0,"maxzoom":22,"properties":["structure","class","type","street_limited","secondary_link"]},"building":{"filters":["any",["all",["!=","type","building:part"],["==","underground","false"]]],"minzoom":15,"maxzoom":22,"properties":["type","underground"]}} -------------------------------------------------------------------------------- /test/fixtures/filters/expressions-properties.json: -------------------------------------------------------------------------------- 1 | {"landuse":{"filters":true,"minzoom":0,"maxzoom":22,"properties":["p1","p2","p3","p4","p5"]},"water":{"filters":true,"minzoom":0,"maxzoom":22,"properties":true}} -------------------------------------------------------------------------------- /test/fixtures/filters/floating-filter.json: -------------------------------------------------------------------------------- 1 | {"landcover":{"filters":true,"minzoom":10.9999999999999,"maxzoom":11.0000000000001,"properties":[]}} -------------------------------------------------------------------------------- /test/fixtures/properties/floating-filter.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /test/fixtures/styles/bright-v9.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "name": "Bright", 4 | "layers": [ 5 | { 6 | "id": "landuse_overlay_national_park", 7 | "type": "fill", 8 | "source-layer": "landuse_overlay", 9 | "filter": [ 10 | "==", 11 | "class", 12 | "national_park" 13 | ] 14 | }, 15 | { 16 | "id": "landuse_park", 17 | "type": "fill", 18 | "source-layer": "landuse", 19 | "filter": [ 20 | "==", 21 | "class", 22 | "park" 23 | ] 24 | }, 25 | { 26 | "id": "landuse_school", 27 | "type": "fill", 28 | "source-layer": "landuse", 29 | "filter": [ 30 | "==", 31 | "class", 32 | "school" 33 | ] 34 | }, 35 | { 36 | "id": "landuse_wood", 37 | "type": "fill", 38 | "source-layer": "landuse", 39 | "filter": [ 40 | "==", 41 | "class", 42 | "wood" 43 | ] 44 | }, 45 | { 46 | "layout": { 47 | "line-cap": "round" 48 | }, 49 | "filter": [ 50 | "all", 51 | [ 52 | "!=", 53 | "class", 54 | "river" 55 | ], 56 | [ 57 | "!=", 58 | "class", 59 | "stream" 60 | ], 61 | [ 62 | "!=", 63 | "class", 64 | "canal" 65 | ] 66 | ], 67 | "type": "line", 68 | "id": "waterway", 69 | "source-layer": "waterway" 70 | }, 71 | { 72 | "layout": { 73 | "line-cap": "round" 74 | }, 75 | "filter": [ 76 | "==", 77 | "class", 78 | "river" 79 | ], 80 | "type": "line", 81 | "id": "waterway_river", 82 | "source-layer": "waterway" 83 | }, 84 | { 85 | "layout": { 86 | "line-cap": "round" 87 | }, 88 | "filter": [ 89 | "in", 90 | "class", 91 | "stream", 92 | "canal" 93 | ], 94 | "type": "line", 95 | "id": "waterway_stream_canal", 96 | "source-layer": "waterway" 97 | }, 98 | { 99 | "id": "water", 100 | "type": "fill", 101 | "source-layer": "water" 102 | }, 103 | { 104 | "id": "water_offset", 105 | "ref": "water" 106 | }, 107 | { 108 | "minzoom": 11, 109 | "filter": [ 110 | "==", 111 | "$type", 112 | "Polygon" 113 | ], 114 | "type": "fill", 115 | "id": "aeroway_fill", 116 | "source-layer": "aeroway" 117 | }, 118 | { 119 | "minzoom": 11, 120 | "filter": [ 121 | "all", 122 | [ 123 | 
"==", 124 | "$type", 125 | "LineString" 126 | ], 127 | [ 128 | "==", 129 | "type", 130 | "runway" 131 | ] 132 | ], 133 | "type": "line", 134 | "id": "aeroway_runway", 135 | "source-layer": "aeroway" 136 | }, 137 | { 138 | "interactive": true, 139 | "minzoom": 11, 140 | "filter": [ 141 | "all", 142 | [ 143 | "==", 144 | "$type", 145 | "LineString" 146 | ], 147 | [ 148 | "==", 149 | "type", 150 | "taxiway" 151 | ] 152 | ], 153 | "type": "line", 154 | "id": "aeroway_taxiway", 155 | "source-layer": "aeroway" 156 | }, 157 | { 158 | "interactive": true, 159 | "layout": { 160 | "line-join": "round", 161 | "visibility": "visible" 162 | }, 163 | "filter": [ 164 | "all", 165 | [ 166 | "==", 167 | "structure", 168 | "tunnel" 169 | ], 170 | [ 171 | "==", 172 | "class", 173 | "motorway_link" 174 | ] 175 | ], 176 | "type": "line", 177 | "id": "tunnel_motorway_link_casing", 178 | "source-layer": "road" 179 | }, 180 | { 181 | "interactive": true, 182 | "layout": { 183 | "line-join": "round" 184 | }, 185 | "filter": [ 186 | "all", 187 | [ 188 | "==", 189 | "structure", 190 | "tunnel" 191 | ], 192 | [ 193 | "in", 194 | "class", 195 | "secondary", 196 | "tertiary" 197 | ] 198 | ], 199 | "type": "line", 200 | "id": "tunnel_secondary_tertiary_casing", 201 | "source-layer": "road" 202 | } 203 | ] 204 | } -------------------------------------------------------------------------------- /test/fixtures/styles/cafe.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "name": "cafe", 4 | "center": [ 5 | -122.51238479904751, 6 | 37.77981694417855 7 | ], 8 | "zoom": 16.5252340340155, 9 | "bearing": 0, 10 | "pitch": 0, 11 | "layers": [ 12 | { 13 | "filter": ["==","maki","cafe"], 14 | "type": "symbol", 15 | "source": "composite", 16 | "id": "poi-scalerank1", 17 | "source-layer": "poi_label" 18 | }, 19 | { 20 | "filter": [ 21 | "all", 22 | ["==","maki","cafe"], 23 | ["<=", ["pitch"], 45], 24 | ["<=", ["distance-from-center"], 1] 25 | ], 
26 | "type": "symbol", 27 | "source": "composite", 28 | "id": "poi-landmarks", 29 | "source-layer": "poi_label" 30 | } 31 | ], 32 | "owner": "greta" 33 | } 34 | -------------------------------------------------------------------------------- /test/fixtures/styles/expressions-legacy.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "name": "Expressions Legacy", 4 | "layers": [ 5 | { 6 | "id": "landcover", 7 | "type": "fill", 8 | "source": "composite", 9 | "source-layer": "landcover", 10 | "filter": [ 11 | "step", 12 | ["zoom"], 13 | true, 14 | 7, 15 | [ "==", "class", "snow" ] 16 | ] 17 | }, 18 | { 19 | "id": "tunnel-minor-low", 20 | "type": "line", 21 | "source": "composite", 22 | "source-layer": "road", 23 | "filter": [ 24 | "all", 25 | ["==", ["geometry-type"], "LineString"], 26 | ["match", ["get", "structure"], "tunnel", true, false], 27 | ["step", ["zoom"], 28 | ["any", 29 | ["match", ["get", "class"], ["street", "street_limited", "track"], true, false], 30 | ["match", ["get", "type"], "primary_link", true, false] 31 | ], 32 | 14, 33 | ["any", 34 | ["match", ["get", "class"], ["street", "street_limited", "track", "service"], true, false], 35 | ["match", ["get", "type"], ["primary_link", "secondary_link", "tertiary_link"], true, false] 36 | ] 37 | ] 38 | ] 39 | }, 40 | { 41 | "id": "building", 42 | "type": "fill", 43 | "source": "composite", 44 | "source-layer": "building", 45 | "minzoom": 15, 46 | "filter": [ "all", [ "!=", "type", "building:part" ], [ "==", "underground", "false" ] ] 47 | }, 48 | { 49 | "id": "tunnel-primary-secondary-tertiary-case", 50 | "type": "line", 51 | "source": "composite", 52 | "source-layer": "road", 53 | "filter": [ 54 | "all", 55 | [ "==", "$type", "LineString" ], 56 | [ "all", 57 | [ "in", "class", "primary", "secondary", "tertiary" ], 58 | [ "==", "structure", "tunnel" ] ] 59 | ] 60 | } 61 | ] 62 | } 
-------------------------------------------------------------------------------- /test/fixtures/styles/expressions.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "name": "Expressions", 4 | "layers": [{ 5 | "id": "landuse", 6 | "type": "fill", 7 | "source": "composite", 8 | "source-layer": "landuse", 9 | "filter": ["match", ["get", "class"], 10 | ["airport", "cemetery", "hospital", "park", "pitch", "sand", "school"], true, false 11 | ], 12 | "paint": { 13 | "line-cap": "round", 14 | "expression-test4": ["feature-state", "underground4"], 15 | "expression-test3": ["==", ["feature-state", "underground"], "false"], 16 | "expression-test2": ["==", ["has", "underground"], "false"], 17 | "expression-test": ["==", ["get", "underground1"], "false"], 18 | "expression-test2-fake": ["==", ["has", "underground1", { "obj": 1 }], "false"], 19 | "expression-test5": ["==", ["get", "class"], "false"] 20 | } 21 | }, 22 | { 23 | "id": "water-shadow", 24 | "type": "fill", 25 | "source": "composite", 26 | "source-layer": "water", 27 | "layout": { 28 | "icon-image": "{maki}-{what}-{ever}", 29 | "expression-test4": ["==", ["properties"], "false"] 30 | } 31 | }, 32 | { 33 | "id": "building", 34 | "type": "fill", 35 | "source": "composite", 36 | "source-layer": "building", 37 | "minzoom": 15, 38 | "filter": ["all", ["!", ["match", ["get", "type"], "building:part", true, false]], 39 | ["==", ["get", "underground"], "false"] 40 | ] 41 | }, 42 | { 43 | "id": "road-pedestrian-polygon-fill", 44 | "type": "fill", 45 | "source": "composite", 46 | "source-layer": "road", 47 | "minzoom": 12, 48 | "filter": ["all", ["==", ["geometry-type"], "Polygon"], 49 | ["match", ["get", "structure"], 50 | ["none", "ford"], true, false 51 | ], 52 | ["match", ["get", "class"], 53 | ["path", "pedestrian"], true, false 54 | ] 55 | ], 56 | "paint": { 57 | "whatervey": "{oneway}" 58 | } 59 | }, 60 | { 61 | "id": "poi-label", 62 | "type": "symbol", 63 | 
"source": "composite", 64 | "source-layer": "poi_label", 65 | "filter": ["<=", ["number", ["get", "filterrank"]], 3], 66 | "paint": { 67 | "circle-radius": "{circle-radius}" 68 | }, 69 | "layout": { 70 | "text-field": "{name_zh}" 71 | } 72 | }, 73 | { "id": "water", "ref": "water-shadow", "paint": { "fill-color": "hsl(196, 80%, 70%)" } }, 74 | { 75 | "id": "housenum-label", 76 | "type": "symbol", 77 | "source": "composite", 78 | "source-layer": "housenum_label", 79 | "minzoom": 17, 80 | "layout": { 81 | "text-field": { 82 | "stops": [ 83 | [11, "{ref}"], 84 | [12, "{name_zh}"] 85 | ] 86 | } 87 | } 88 | } 89 | ] 90 | } -------------------------------------------------------------------------------- /test/fixtures/styles/floating-point-zoom.json: -------------------------------------------------------------------------------- 1 | { 2 | "layers": [ 3 | { 4 | "id": "landcover_style", 5 | "type": "fill", 6 | "source-layer": "landcover", 7 | "minzoom": 10.9999999999999, 8 | "maxzoom": 11.0000000000001 9 | } 10 | ] 11 | } -------------------------------------------------------------------------------- /test/fixtures/styles/one-feature.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "name": "One Feature", 4 | "layers": [ 5 | { 6 | "id": "park", 7 | "type": "fill", 8 | "source": "composite", 9 | "source-layer": "landuse", 10 | "filter": [ "==", "$id", 0 ] 11 | } 12 | ] 13 | } -------------------------------------------------------------------------------- /test/fixtures/styles/properties.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "name": "Expressions", 4 | "layers": [{ 5 | "id": "landuse", 6 | "type": "fill", 7 | "source-layer": "landuse", 8 | "layout": { 9 | "text-field": "{class}" 10 | } 11 | }, 12 | { 13 | "id": "water", 14 | "type": "fill", 15 | "source-layer": "water" 16 | }, 17 | { 18 | "id": "building", 19 | "type": "fill", 20 | 
"source-layer": "building" 21 | }, 22 | { 23 | "id": "road", 24 | "type": "fill", 25 | "source-layer": "road", 26 | "layout": { 27 | "text-field": "{type}" 28 | }, 29 | "paint": { 30 | "expression-test-fake1": ["oneway"], 31 | "expression-test-fake2": ["==", ["has", "structure", { "obj": 1 }], "false"], 32 | "expression-test": ["==", ["has", "type"], "false"] 33 | } 34 | }, 35 | { 36 | "id": "poi_label", 37 | "type": "fill", 38 | "source-layer": "poi_label", 39 | "layout": { 40 | "expression-all": ["==", ["properties"], "false"] 41 | } 42 | }, 43 | { 44 | "id": "road_label", 45 | "type": "fill", 46 | "source-layer": "road_label" 47 | }, 48 | { 49 | "id": "housenum_label", 50 | "type": "fill", 51 | "source-layer": "housenum_label" 52 | } 53 | ] 54 | } -------------------------------------------------------------------------------- /test/fixtures/styles/water.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "name": "Water", 4 | "center": [ 5 | 0, 6 | -1.4210854715202004e-13 7 | ], 8 | "zoom": 0.8407778102278685, 9 | "bearing": 0, 10 | "pitch": 0, 11 | "layers": [ 12 | { 13 | "id": "water", 14 | "paint": { 15 | "fill-color": "hsl(190, 51%, 55%)" 16 | }, 17 | "interactive": true, 18 | "layout": {}, 19 | "type": "fill", 20 | "source": "composite", 21 | "source-layer": "water" 22 | } 23 | ] 24 | } -------------------------------------------------------------------------------- /test/fixtures/tiles/feature-single-point-no-id.mvt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/vtshaver/9df32fc238c40ff5ba3506215e45ced4791ce11d/test/fixtures/tiles/feature-single-point-no-id.mvt -------------------------------------------------------------------------------- /test/fixtures/tiles/invalid.mvt: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mapbox/vtshaver/9df32fc238c40ff5ba3506215e45ced4791ce11d/test/fixtures/tiles/invalid.mvt -------------------------------------------------------------------------------- /test/fixtures/tiles/sf_16_10465_25329.vector.pbf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/vtshaver/9df32fc238c40ff5ba3506215e45ced4791ce11d/test/fixtures/tiles/sf_16_10465_25329.vector.pbf -------------------------------------------------------------------------------- /test/fixtures/tiles/z16-housenum.mvt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mapbox/vtshaver/9df32fc238c40ff5ba3506215e45ced4791ce11d/test/fixtures/tiles/z16-housenum.mvt -------------------------------------------------------------------------------- /test/mvtfixtures.test.js: -------------------------------------------------------------------------------- 1 | var test = require('tape'); 2 | var Shaver = require('../lib/index.js'); 3 | var fs = require('fs'); 4 | var vt = require('@mapbox/vector-tile').VectorTile; 5 | var pbf = require('pbf'); 6 | var fixtures = require('@mapbox/mvt-fixtures'); 7 | var SHOW_ERROR = process.env.SHOW_ERROR; 8 | 9 | var genericFilter = new Shaver.Filters(Shaver.styleToFilters({ 10 | layers: [{ 11 | "source-layer": "layer_name", 12 | filter: ["==", "string", "hello"] 13 | }] 14 | })); 15 | 16 | test('validator: layers successfully shaved, all value types', function(t) { 17 | var buffer = fixtures.get('038').buffer; 18 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 19 | layers: [{ 20 | "source-layer": "hello", 21 | "filter": ["==", "string_value", "ello"], 22 | "layout": { 23 | "allproperties": ["==", ["properties"], "false"] 24 | } 25 | }], 26 | 27 | })); 28 | 29 | Shaver.shave(buffer, { filters: filters, zoom: 0 }, function(err, shavedTile) { 30 | if (err) throw err; 31 | var postTile = new vt(new 
pbf(shavedTile)); 32 | t.ok(shavedTile); 33 | t.equals(Object.keys(postTile.layers).length, 1, 'shaved tile contains expected number of layers'); 34 | t.equals(shavedTile.length, 176, 'expected tile size after filtering'); 35 | t.end(); 36 | }); 37 | }); 38 | 39 | test('validator: layers successfully shaved, expression', function(t) { 40 | var buffer = fixtures.get('038').buffer; 41 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 42 | layers: [{ 43 | "source-layer": "hello", 44 | "filter": ["==", ["get", "string_value"], "ello"], 45 | "layout": { 46 | "allproperties": ["==", ["properties"], "false"] 47 | } 48 | }] 49 | })); 50 | 51 | Shaver.shave(buffer, { filters: filters, zoom: 0 }, function(err, shavedTile) { 52 | if (err) throw err; 53 | var postTile = new vt(new pbf(shavedTile)); 54 | t.ok(shavedTile); 55 | t.equals(Object.keys(postTile.layers).length, 1, 'shaved tile contains expected number of layers'); 56 | t.equals(shavedTile.length, 176, 'expected tile size after filtering'); 57 | t.end(); 58 | }); 59 | }); 60 | 61 | test('validator: layers successfully shaved, expression - getType', function(t) { 62 | var buffer = fixtures.get('021').buffer; 63 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 64 | layers: [{ 65 | "source-layer": "hello", 66 | "filter": ["==", ["geometry-type"], "LineString"], 67 | "layout": { 68 | "allproperties": ["==", ["properties"], "false"] 69 | } 70 | }] 71 | })); 72 | 73 | Shaver.shave(buffer, { filters: filters, zoom: 0 }, function(err, shavedTile) { 74 | if (err) throw err; 75 | var postTile = new vt(new pbf(shavedTile)); 76 | t.ok(shavedTile); 77 | t.equals(Object.keys(postTile.layers).length, 1, 'shaved tile contains expected number of layers'); 78 | t.equals(shavedTile.length, 56, 'expected tile size after filtering'); 79 | t.end(); 80 | }); 81 | }); 82 | 83 | test('validator: version 2 no name field in Layer', function(t) { 84 | var buffer = fixtures.get('014').buffer; 85 | Shaver.shave(buffer, { 
filters: genericFilter, zoom: 0 }, function(err, shavedTile) { 86 | t.ok(err); 87 | if (SHOW_ERROR) console.log(err); 88 | t.end(); 89 | }); 90 | }); 91 | 92 | test('validator: unknown field type in Layer', function(t) { 93 | var buffer = fixtures.get('016').buffer; 94 | 95 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 96 | layers: [{ 97 | "source-layer": "hello", 98 | filter: ["==", "id", "1"] 99 | }] 100 | })); 101 | 102 | Shaver.shave(buffer, { filters: filters, zoom: 0 }, function(err, shavedTile) { 103 | t.notOk(err); 104 | t.end(); 105 | }); 106 | }); 107 | 108 | test('validator: version 1 no name', function(t) { 109 | var buffer = fixtures.get('023').buffer; 110 | 111 | Shaver.shave(buffer, { filters: genericFilter, zoom: 0 }, function(err, shavedTile) { 112 | t.ok(err); 113 | if (SHOW_ERROR) console.log(err); 114 | t.end(); 115 | }); 116 | }); 117 | 118 | test('validator: odd number of tags in Feature', function(t) { 119 | var buffer = fixtures.get('005').buffer; 120 | 121 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 122 | layers: [{ 123 | "source-layer": "hello", 124 | filter: ["==", "string_value", "world"] 125 | }] 126 | })); 127 | 128 | Shaver.shave(buffer, { filters: filters, zoom: 0 }, function(err, shavedTile) { 129 | t.ok(err); 130 | if (SHOW_ERROR) console.log(err); 131 | t.end(); 132 | }); 133 | }); 134 | 135 | test('validator: invalid key or value as it does not appear in the layer', function(t) { 136 | var buffer = fixtures.get('042').buffer; 137 | 138 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 139 | layers: [{ 140 | "source-layer": "hello", 141 | filter: ["==", "string_value", "park"] 142 | }] 143 | })); 144 | 145 | Shaver.shave(buffer, { filters: filters, zoom: 0 }, function(err, shavedTile) { 146 | t.ok(err); 147 | if (SHOW_ERROR) console.log(err); 148 | t.end(); 149 | }); 150 | }); 151 | 152 | test('validator: Feature unknown geometry type', function(t) { 153 | var buffer = 
fixtures.get('006').buffer; 154 | 155 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 156 | layers: [{ 157 | "source-layer": "hello", 158 | "filter": ["==", "$id", 0] 159 | }] 160 | })); 161 | 162 | Shaver.shave(buffer, { filters: filters, zoom: 0 }, function(err, shavedTile) { 163 | t.ok(err); 164 | if (SHOW_ERROR) console.log(err); 165 | t.end(); 166 | }); 167 | }); 168 | 169 | test('validator: Feature unknown field type type', function(t) { 170 | var buffer = fixtures.get('041').buffer; 171 | 172 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 173 | layers: [{ 174 | "source-layer": "hello", 175 | filter: ["==", "string_value", "lake"] 176 | }] 177 | })); 178 | 179 | Shaver.shave(buffer, { filters: filters, zoom: 0 }, function(err, shavedTile) { 180 | t.ok(err); 181 | if (SHOW_ERROR) console.log(err); 182 | t.end(); 183 | }); 184 | }); -------------------------------------------------------------------------------- /test/propertyKeyValueFilter-Error.test.js: -------------------------------------------------------------------------------- 1 | var Shaver = require('../'); 2 | var fs = require('fs'); 3 | var vt = require('@mapbox/vector-tile').VectorTile; 4 | var pbf = require('pbf'); 5 | var test = require('tape'); 6 | var path = require('path'); 7 | var propertyrJSON = './fixtures/properties/floating-filter.json'; 8 | 9 | 10 | var sfTileBuffer = fs.readFileSync(__dirname + '/fixtures/tiles/sf_16_10465_25329.vector.pbf'); 11 | var z16HousenumBuffer = fs.readFileSync(__dirname + '/fixtures/tiles/z16-housenum.mvt'); 12 | var filter_obj = Shaver.styleToFilters(JSON.parse(fs.readFileSync('./test/fixtures/styles/properties.json').toString())); 13 | 14 | 15 | 16 | 17 | test('property key value filter size check', t => { 18 | let FilterObj = Shaver.styleToFilters({ 19 | "layers": [ 20 | { "id": "landuse", "source-layer": "landuse" } 21 | ] 22 | }); 23 | FilterObj.landuse.properties = null; 24 | // var filters = new Shaver.Filters(null); 25 | // 
FilterObj.landuse.properties = null 26 | 27 | try { 28 | var filters = new Shaver.Filters(FilterObj, (err) => { 29 | console.log(err); 30 | }); 31 | } catch (err) { 32 | t.ok(err); 33 | t.equal(err.message, 'Property-Filters is not properly constructed.', 'expected error message'); 34 | t.end(); 35 | } 36 | }); 37 | 38 | 39 | test('property key value filter size check', t => { 40 | let FilterObj = Shaver.styleToFilters({ 41 | "layers": [{ 42 | "id": "landuse", 43 | "source-layer": "landuse", 44 | layout: { "expression-test4": ["==", ["properties"], "false"] } 45 | }] 46 | }); 47 | 48 | try { 49 | FilterObj.landuse.properties = false; 50 | var filters = new Shaver.Filters(FilterObj, (err) => { 51 | console.log(err); 52 | }); 53 | } catch (err) { 54 | t.ok(err); 55 | t.equal(err.message, 'invalid filter value, must be an array or a boolean', 'expected error message'); 56 | t.end(); 57 | } 58 | }); -------------------------------------------------------------------------------- /test/propertyKeyValueFilter.test.js: -------------------------------------------------------------------------------- 1 | var Shaver = require('../'); 2 | var fs = require('fs'); 3 | var vt = require('@mapbox/vector-tile').VectorTile; 4 | var pbf = require('pbf'); 5 | var test = require('tape'); 6 | var path = require('path'); 7 | var propertyrJSON = './fixtures/properties/floating-filter.json'; 8 | 9 | 10 | var sfTileBuffer = fs.readFileSync(__dirname + '/fixtures/tiles/sf_16_10465_25329.vector.pbf'); 11 | var z16HousenumBuffer = fs.readFileSync(__dirname + '/fixtures/tiles/z16-housenum.mvt'); 12 | var filter_obj = Shaver.styleToFilters(JSON.parse(fs.readFileSync('./test/fixtures/styles/properties.json').toString())); 13 | 14 | 15 | 16 | // test expressin 17 | function vtinfo(buffer) { 18 | var tile = new vt(new pbf(buffer)); 19 | var layerInfo = {}; 20 | var info = { 21 | layers: [] 22 | }; 23 | Object.keys(tile.layers).forEach(function(k) { 24 | var lay = tile.layers[k]; 25 | let 
propertyKies = {}; 26 | for (var i = 0; i < lay.length; i++) { 27 | let features = lay.feature(i).toGeoJSON(0, 0, 0); 28 | Object.keys(features.properties).forEach(key => { 29 | propertyKies[key] = true; 30 | }); 31 | } 32 | 33 | layerInfo[k] = { 34 | features: lay.length, 35 | properties: JSON.stringify(Object.keys(propertyKies)) 36 | } 37 | }); 38 | return layerInfo; 39 | } 40 | 41 | 42 | test('property key value filter size check', t => { 43 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 44 | "layers": [ 45 | { "id": "landuse", "source-layer": "landuse" }, 46 | { "id": "water", "source-layer": "water" }, 47 | { "id": "building", "source-layer": "building" }, 48 | { "id": "road", "source-layer": "road" }, 49 | { "id": "poi_label", "source-layer": "poi_label" }, 50 | { "id": "road_label", "source-layer": "road_label" }, 51 | { "id": "housenum_label", "source-layer": "housenum_label" } 52 | ] 53 | })); 54 | Shaver.shave(sfTileBuffer, { filters, zoom: 14 }, function(err, shavedTile) { 55 | if (err) throw err; 56 | t.equals(sfTileBuffer.length, 7718, 'the size before shave of sf tile'); 57 | t.equals(shavedTile.length, 5514, 'the size after the shave of sf tile'); 58 | }); 59 | Shaver.shave(z16HousenumBuffer, { filters, zoom: 14 }, function(err, shavedTile) { 60 | if (err) throw err; 61 | t.equals(z16HousenumBuffer.length, 30607, 'the size before shave of z16 Housenum'); 62 | t.equals(shavedTile.length, 16780, 'the size after the shave of z16 Housenum'); 63 | }); 64 | t.end(); 65 | }); 66 | 67 | 68 | test('property key value filter', t => { 69 | var filters = new Shaver.Filters(filter_obj); 70 | Shaver.shave(sfTileBuffer, { filters, zoom: 14 }, function(err, shavedTile) { 71 | if (err) throw err; 72 | t.equals(sfTileBuffer.length, 7718, 'the size before shave in round2 test'); 73 | t.equals(shavedTile.length, 6609, 'the size after the shave in round2 test'); 74 | if (process.env.UPDATE) { 75 | fs.writeFileSync(path.resolve(__dirname, propertyrJSON), 
JSON.stringify(filters)); 76 | } 77 | t.deepEquals(filters, require(propertyrJSON), 'property key value filter correctly'); 78 | t.end(); 79 | }); 80 | }); -------------------------------------------------------------------------------- /test/speed.js: -------------------------------------------------------------------------------- 1 | var Shaver = require('../'); 2 | var fs = require('fs'); 3 | var vt = require('@mapbox/vector-tile').VectorTile; 4 | var pbf = require('pbf'); 5 | var test = require('tape'); 6 | var path = require('path'); 7 | var propertyrJSON = './fixtures/properties/floating-filter.json'; 8 | 9 | 10 | var sfTileBuffer = fs.readFileSync(__dirname + '/fixtures/tiles/sf_16_10465_25329.vector.pbf'); 11 | var z16HousenumBuffer = fs.readFileSync(__dirname + '/fixtures/tiles/z16-housenum.mvt'); 12 | // var filter_obj = Shaver.styleToFilters(JSON.parse(fs.readFileSync('./test/fixtures/styles/properties.json').toString())); 13 | 14 | 15 | 16 | var filter_obj = Shaver.styleToFilters({ 17 | "layers": [ 18 | { "id": "landuse", "source-layer": "landuse", filter: true, layout: { "expression-test5": ["==", ["get", "class"], "false"] } }, 19 | { "id": "water", "source-layer": "water", filter: true, layout: { "expression-test5": ["==", ["get", "class"], "false"] } }, 20 | { "id": "building", "source-layer": "building", filter: true, layout: { "expression-test5": ["==", ["get", "class"], "false"] } }, 21 | { "id": "road", "source-layer": "road", filter: true, layout: { "expression-test5": ["==", ["get", "class"], "false"] } }, 22 | { "id": "poi_label", "source-layer": "poi_label", filter: true, layout: { "expression-test5": ["==", ["get", "class"], "false"] } }, 23 | { "id": "road_label", "source-layer": "road_label", filter: true, layout: { "expression-test5": ["==", ["get", "class"], "false"] } }, 24 | { "id": "housenum_label", "source-layer": "housenum_label", filter: true, layout: { "expression-test5": ["==", ["get", "class"], "false"] } } 25 | ] 26 | }); 27 | 
var filters = new Shaver.Filters(filter_obj); 28 | for (var i = 0; i < 1000000; i++) { 29 | Shaver.shave(sfTileBuffer, { filters, zoom: 14 }, function(err, shavedTile) {}); 30 | } 31 | 32 | console.log('done') -------------------------------------------------------------------------------- /test/styleToFilter-property.test.js: -------------------------------------------------------------------------------- 1 | var fs = require('fs'); 2 | var path = require('path'); 3 | var test = require('tape'); 4 | var styleToFilter = require('../lib/styleToFilters.js'); 5 | var properties_result_expressions = './fixtures/filters/expressions-properties.json'; 6 | 7 | test('test get used properites from style.json', function(t) { 8 | var filters = styleToFilter({ 9 | "layers": [{ 10 | "source-layer": "landuse", 11 | "paint": { 12 | "exp-test1": ["==", ["get", "p1"], "false"], 13 | "exp-test1-fake": ["==", ["get", "p1-fake", { "obj": 1 }], "false"], 14 | "exp-test2": ["==", ["has", "p2"], "false"], 15 | "exp-test2-fake": ["==", ["has", "p2-fake", { "obj": 1 }], "false"], 16 | "exp-test3": ["==", ["feature-state", "p3"], "false"], 17 | "exp-test4": ["feature-state", "p4"], 18 | "exp-test5": { 19 | "property": "p5" 20 | }, 21 | } 22 | }, { 23 | "source-layer": "water", 24 | "paint": { 25 | "exp-test0": ["properties"], 26 | "exp-test1": ["==", ["get", "p1"], "false"], 27 | "exp-test1-fake": ["==", ["get", "p1-fake", { "obj": 1 }], "false"], 28 | "exp-test2": ["==", ["has", "p2"], "false"], 29 | "exp-test2-fake": ["==", ["has", "p2-fake", { "obj": 1 }], "false"], 30 | "exp-test3": ["==", ["feature-state", "p3"], "false"], 31 | "exp-test4": ["feature-state", "p4"], 32 | } 33 | }] 34 | }); 35 | // console.log('xxx', filters); 36 | if (process.env.UPDATE) { 37 | console.log('> UPDATING ' + properties_result_expressions); 38 | fs.writeFileSync(path.resolve(__dirname, properties_result_expressions), JSON.stringify(filters)); 39 | } 40 | t.deepEquals(filters, 
require(properties_result_expressions), 'expressions filter is extracted correctly'); 41 | 42 | t.end(); 43 | }); -------------------------------------------------------------------------------- /test/styleToFilter.test.js: -------------------------------------------------------------------------------- 1 | var fs = require('fs'); 2 | var path = require('path'); 3 | var test = require('tape'); 4 | var styleToFilter = require('../lib/styleToFilters.js'); 5 | 6 | var brightV9 = require('./fixtures/styles/bright-v9.json'); 7 | var floating = require('./fixtures/styles/floating-point-zoom.json'); 8 | var style_expressions_legacy = require('./fixtures/styles/expressions-legacy.json'); 9 | var filter_result_bright = './fixtures/filters/bright-filter.json'; 10 | var filter_result_floating = './fixtures/filters/floating-filter.json'; 11 | var filter_result_expressions = './fixtures/filters/expressions-filter.json'; 12 | 13 | test('error handling', function(t) { 14 | t.deepEqual(styleToFilter({}), {}, 'returns a plain object when given a plain object'); 15 | t.deepEqual(styleToFilter([]), {}, 'returns a plain object when given an array'); 16 | t.deepEqual(styleToFilter('hello'), {}, 'returns a plain object when given a string'); 17 | t.deepEqual(styleToFilter({ layers: [] }), {}, 'returns a plain object when given an empty style layers'); 18 | t.deepEqual(styleToFilter({ layers: 'lol no layers here' }), {}, 'returns a plain object when given snarky style layers'); 19 | t.end(); 20 | }); 21 | 22 | test('min/max zoom defaults are set if the do not exist', function(t) { 23 | t.deepEqual(styleToFilter({ 24 | layers: [{ 25 | 'source-layer': 'water' 26 | }] 27 | }), { water: { filters: true, minzoom: 0, maxzoom: 22, properties: [] } }, 'returns water:true for only water layer and includes min/max zoom'); 28 | t.end(); 29 | }); 30 | 31 | test('simple style layers', function(t) { 32 | t.deepEqual(styleToFilter({ layers: [{ arbitrary: 'layer' }] }), {}, 'skips any layers without 
source-layer key'); 33 | t.deepEqual(styleToFilter({ 34 | layers: [{ 35 | 'source-layer': 'water', 36 | minzoom: 10, 37 | maxzoom: 15 38 | }] 39 | }), { water: { filters: true, minzoom: 10, maxzoom: 15, properties: [] } }, 'returns water:true for only water layer and includes min/max zoom'); 40 | 41 | t.deepEqual(styleToFilter({ 42 | layers: [{ 43 | 'source-layer': 'water', 44 | filter: ['==', 'color', 'blue'] 45 | }] 46 | }), { water: { filters: ['any', ['==', 'color', 'blue']], minzoom: 0, maxzoom: 22, properties: ['color'] } }, 'returns water:filter for water layer with filter'); 47 | 48 | t.deepEqual(styleToFilter({ 49 | layers: [{ 50 | 'source-layer': 'water' 51 | }, 52 | { 53 | 'source-layer': 'water', 54 | filter: ['==', 'color', 'blue'] 55 | } 56 | ] 57 | }), { water: { filters: true, minzoom: 0, maxzoom: 22, properties: ['color'] } }, 'returns water:filter for multiple water layers, some with filters'); 58 | 59 | t.deepEqual(styleToFilter({ 60 | layers: [{ 61 | 'source-layer': 'water', 62 | filter: ['!=', 'color', 'blue'], 63 | minzoom: 10, 64 | maxzoom: 15 65 | }, 66 | { 67 | 'source-layer': 'water', 68 | filter: ['==', 'color', 'blue'], 69 | minzoom: 8, 70 | maxzoom: 16 71 | } 72 | ] 73 | }), { 74 | water: { 75 | filters: ['any', ['!=', 'color', 'blue'], 76 | ['==', 'color', 'blue'] 77 | ], 78 | minzoom: 8, 79 | maxzoom: 16, 80 | properties: ['color'] 81 | } 82 | }, 'returns water:filter for multiple water filters, and updates min/max zoom for smallest/largest values'); 83 | 84 | t.deepEqual(styleToFilter({ 85 | layers: [{ 86 | 'source-layer': 'water', 87 | filter: ['!=', 'color', 'blue'], 88 | minzoom: 10, 89 | maxzoom: 15 90 | }, 91 | { 92 | 'source-layer': 'water', 93 | filter: ['==', 'color', 'blue'] 94 | } 95 | ] 96 | }), { 97 | water: { 98 | filters: ['any', ['!=', 'color', 'blue'], 99 | ['==', 'color', 'blue'] 100 | ], 101 | minzoom: 0, 102 | maxzoom: 22, 103 | properties: ['color'] 104 | } 105 | }, 'returns water:filter for multiple water 
filters, and updates min/max zoom to 0 and 22 if one filter doesn\'t have zooms'); 106 | 107 | t.deepEqual(styleToFilter({ 108 | layers: [{ 109 | 'source-layer': 'water', 110 | filter: ['!=', 'color', 'blue'] 111 | }, 112 | { 113 | 'source-layer': 'water', 114 | filter: ['==', 'color', 'blue'] 115 | } 116 | ] 117 | }), { 118 | water: { 119 | filters: ['any', ['!=', 'color', 'blue'], 120 | ['==', 'color', 'blue'] 121 | ], 122 | minzoom: 0, 123 | maxzoom: 22, 124 | properties: ['color'] 125 | } 126 | }, 'returns water:filter for multiple water layers with filters'); 127 | 128 | t.deepEqual(styleToFilter({ 129 | layers: [{ 130 | 'source-layer': 'water', 131 | filter: [ 132 | 'all', 133 | [ 134 | 'case', 135 | ['>=', ['distance-from-center'], 5], // test no-op in case condition 136 | false, 137 | ['>=', ['pitch'], 45], 138 | false, 139 | true 140 | ], 141 | [ 142 | 'match', 143 | ['get', 'distance'], 144 | [1, 4, ['distance-from-center']], // test no-op in match value 145 | false, 146 | true 147 | ], 148 | [ 149 | 'coalesce', 150 | ['get', 'display'], 151 | ['>=', ['distance-from-center'], 3] // test no-op in coalesce 152 | ], 153 | [ 154 | 'any', 155 | ['boolean', false], 156 | ['>=', ['pitch'], 5] // test no-op in any 157 | ], 158 | [ 159 | 'all', 160 | ['boolean', true], 161 | ['<', ['pitch'], 5] // test no-op in all 162 | ], 163 | ['==', 'color', 'blue'] 164 | ] 165 | }, 166 | { 167 | 'source-layer': 'landcover', 168 | filter: [ 169 | '>=', 170 | ['distance-from-center'], 171 | [ 172 | 'case', 173 | ['==', 'color', 'blue'], 174 | 2, 175 | 4 176 | ] 177 | ] 178 | }, 179 | { 180 | 'source-layer': 'landuse_overlay', 181 | filter: [ 182 | 'case', 183 | ['<=', ['pitch'], 10], 184 | ['==', ['distance-from-center'], 4], // test no-op in value 185 | ['to-boolean', ['get', 'display']], 186 | true, 187 | false 188 | ] 189 | } 190 | ] 191 | }), { water: { filters: ['any', ['all', ['literal', true], ['literal', true], ['literal', true], ['any', ['boolean', false], ['literal', 
true]], ['all', ['boolean', true], ['literal', true]], ['==', 'color', 'blue']]], minzoom: 0, maxzoom: 22, properties: ['distance', 'display', 'color'] }, landcover: { filters: ['any', ['literal', true]], minzoom: 0, maxzoom: 22, properties: ['color'] }, landuse_overlay: { filters: ['any', ['literal', true]], minzoom: 0, maxzoom: 22, properties: ['display'] } }, 'returns right filters for no-op expressions'); 192 | 193 | t.end(); 194 | }); 195 | 196 | test('real-world style test', function(t) { 197 | var filters = styleToFilter(brightV9); 198 | if (process.env.UPDATE) { 199 | console.log('> UPDATING ' + filter_result_bright); 200 | fs.writeFileSync(path.resolve(__dirname, filter_result_bright), JSON.stringify(filters)); 201 | } 202 | t.deepEquals(filters, require(filter_result_bright), 'bright-v9 filter is extracted correctly'); 203 | 204 | t.end(); 205 | }); 206 | 207 | test('floating point zoom', function(t) { 208 | var filters = styleToFilter(floating); 209 | if (process.env.UPDATE) { 210 | console.log('> UPDATING ' + filter_result_floating); 211 | fs.writeFileSync(path.resolve(__dirname, filter_result_floating), JSON.stringify(filters)); 212 | } 213 | t.deepEquals(filters, require(filter_result_floating), 'floating-point filter is extracted correctly'); 214 | 215 | t.end(); 216 | }); 217 | 218 | // Technically will succeed, but will fail later when attempting to create 219 | // a GL Filter object per https://github.com/mapbox/mapbox-gl-native/pull/12065 220 | test('v8 streets style with legacy+expressions filter combo', function(t) { 221 | var filters = styleToFilter(style_expressions_legacy); 222 | if (process.env.UPDATE) { 223 | console.log('> UPDATING ' + filter_result_expressions); 224 | fs.writeFileSync(path.resolve(__dirname, filter_result_expressions), JSON.stringify(filters)); 225 | } 226 | t.deepEquals(filters, require(filter_result_expressions), 'expressions filter is extracted correctly'); 227 | 228 | t.end(); 229 | }); 230 | 
-------------------------------------------------------------------------------- /test/temp.js: -------------------------------------------------------------------------------- 1 | var Shaver = require('../'); 2 | var fs = require('fs'); 3 | var vt = require('@mapbox/vector-tile').VectorTile; 4 | var pbf = require('pbf'); 5 | var test = require('tape'); 6 | var path = require('path'); 7 | var propertyrJSON = './fixtures/properties/floating-filter.json'; 8 | 9 | 10 | var sfTileBuffer = fs.readFileSync(__dirname + '/fixtures/tiles/sf_16_10465_25329.vector.pbf'); 11 | var z16HousenumBuffer = fs.readFileSync(__dirname + '/fixtures/tiles/z16-housenum.mvt'); 12 | var filter_obj = Shaver.styleToFilters(JSON.parse(fs.readFileSync('./test/fixtures/styles/properties.json').toString())); 13 | 14 | 15 | 16 | // test expressin 17 | function vtinfo(buffer) { 18 | var tile = new vt(new pbf(buffer)); 19 | var layerInfo = {}; 20 | var info = { 21 | layers: [] 22 | }; 23 | Object.keys(tile.layers).forEach(function(k) { 24 | var lay = tile.layers[k]; 25 | let propertyKies = {}; 26 | for (var i = 0; i < lay.length; i++) { 27 | let features = lay.feature(i).toGeoJSON(0, 0, 0); 28 | Object.keys(features.properties).forEach(key => { 29 | propertyKies[key] = true; 30 | }); 31 | } 32 | 33 | layerInfo[k] = { 34 | features: lay.length, 35 | properties: JSON.stringify(Object.keys(propertyKies)) 36 | } 37 | }); 38 | return layerInfo; 39 | } 40 | 41 | 42 | test('property key value filter size check', t => { 43 | var filters = new Shaver.Filters(Shaver.styleToFilters({ 44 | "layers": [ 45 | { "id": "landuse", "source-layer": "landuse" }, 46 | { "id": "water", "source-layer": "water" }, 47 | { "id": "building", "source-layer": "building" }, 48 | { "id": "road", "source-layer": "road" }, 49 | { "id": "poi_label", "source-layer": "poi_label" }, 50 | { "id": "road_label", "source-layer": "road_label" }, 51 | { "id": "housenum_label", "source-layer": "housenum_label" } 52 | ] 53 | })); 54 | 
Shaver.shave(sfTileBuffer, { filters, zoom: 14 }, function(err, shavedTile) { 55 | if (err) throw err; 56 | t.equals(sfTileBuffer.length, 7718, 'the size before shave of sf tile'); 57 | t.equals(shavedTile.length, 5514, 'the size after the shave of sf tile'); 58 | }); 59 | Shaver.shave(z16HousenumBuffer, { filters, zoom: 14 }, function(err, shavedTile) { 60 | if (err) throw err; 61 | t.equals(z16HousenumBuffer.length, 30607, 'the size before shave of z16 Housenum'); 62 | t.equals(shavedTile.length, 16780, 'the size after the shave of z16 Housenum'); 63 | }); 64 | t.end(); 65 | }); 66 | 67 | 68 | test('property key value filter', t => { 69 | var filters = new Shaver.Filters(filter_obj); 70 | Shaver.shave(sfTileBuffer, { filters, zoom: 14 }, function(err, shavedTile) { 71 | if (err) throw err; 72 | t.equals(sfTileBuffer.length, 7718, 'the size before shave in round2 test'); 73 | t.equals(shavedTile.length, 6609, 'the size after the shave in round2 test'); 74 | if (process.env.UPDATE) { 75 | fs.writeFileSync(path.resolve(__dirname, propertyrJSON), JSON.stringify(filters)); 76 | } 77 | t.deepEquals(filters, require(propertyrJSON), 'property key value filter correctly'); 78 | t.end(); 79 | }); 80 | }); -------------------------------------------------------------------------------- /vendor/nunicode/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013 Aleksey Tulinov 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all 
copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /vendor/nunicode/files.txt: -------------------------------------------------------------------------------- 1 | include/libnu/casemap.h 2 | include/libnu/casemap_internal.h 3 | include/libnu/config.h 4 | include/libnu/defines.h 5 | include/libnu/ducet.h 6 | include/libnu/mph.h 7 | include/libnu/strcoll.h 8 | include/libnu/strcoll_internal.h 9 | include/libnu/strings.h 10 | include/libnu/udb.h 11 | include/libnu/unaccent.h 12 | include/libnu/utf8.h 13 | include/libnu/utf8_internal.h 14 | src/libnu/ducet.c 15 | src/libnu/strcoll.c 16 | src/libnu/strings.c 17 | src/libnu/tolower.c 18 | src/libnu/tounaccent.c 19 | src/libnu/toupper.c 20 | src/libnu/utf8.c 21 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/casemap.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_TOUPPER_H 2 | #define NU_TOUPPER_H 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #if defined (__cplusplus) || defined (c_plusplus) 12 | extern "C" { 13 | #endif 14 | 15 | /** 16 | * @example folding.c 17 | * @example special_casing.c 18 | */ 19 | 20 | /** Synonim to nu_casemap_read. It is recommended to use 21 | * nu_casemap_read instead. 
22 | */ 23 | #define NU_CASEMAP_DECODING_FUNCTION NU_UDB_DECODING_FUNCTION 24 | /** Read (decoding) function for use with transformation results of 25 | * casemapping functions. E.g. nu_casemap_read(nu_tolower(0x0041)); 26 | * will read first codepoint of 'A' transformed to lower case. 27 | */ 28 | #define nu_casemap_read (nu_udb_read) 29 | 30 | /** Casemap codepoint 31 | * 32 | * @ingroup transformations 33 | */ 34 | typedef nu_transformation_t nu_casemapping_t; 35 | 36 | #ifdef NU_WITH_TOUPPER 37 | 38 | /** Return uppercase value of codepoint. Uncoditional casemapping. 39 | * 40 | * @ingroup transformations 41 | * @param codepoint unicode codepoint 42 | * @return uppercase codepoint or 0 if mapping doesn't exist 43 | */ 44 | NU_EXPORT 45 | const char* nu_toupper(uint32_t codepoint); 46 | 47 | /** Return uppercase value of codepoint. Context-sensitivity is not 48 | * implemented internally, returned result is equal to calling nu_toupper() 49 | * on corresponding codepoint. 50 | * 51 | * @ingroup transformations_internal 52 | * @param encoded pointer to encoded string 53 | * @param limit memory limit of encoded string or NU_UNLIMITED 54 | * @param read read (decoding) function 55 | * @param u (optional) codepoint which was (or wasn't) transformed 56 | * @param transform output value of codepoint transformed into uppercase or 0 57 | * if mapping doesn't exist. Can't be NULL, supposed to be decoded with 58 | * nu_casemap_read 59 | * @param context not used 60 | * @return pointer to the next codepoint in string 61 | */ 62 | NU_EXPORT 63 | const char* _nu_toupper(const char *encoded, const char *limit, nu_read_iterator_t read, 64 | uint32_t *u, const char **transform, 65 | void *context); 66 | 67 | #endif /* NU_WITH_TOUPPER */ 68 | 69 | #ifdef NU_WITH_TOLOWER 70 | 71 | /** Return lowercase value of codepoint. Unconditional casemapping. 
72 | * 73 | * @ingroup transformations 74 | * @param codepoint unicode codepoint 75 | * @return lowercase codepoint or 0 if mapping doesn't exist 76 | */ 77 | NU_EXPORT 78 | const char* nu_tolower(uint32_t codepoint); 79 | 80 | /** Return lowercase value of codepoint. Will transform uppercase 81 | * Sigma ('Σ') into final sigma ('ς') if it occurs at string boundary or 82 | * followed by U+0000. Might require single read-ahead when 83 | * encountering Sigma. 84 | * 85 | * @ingroup transformations_internal 86 | * @param encoded pointer to encoded string 87 | * @param limit memory limit of encoded string or NU_UNLIMITED 88 | * @param read read (decoding) function 89 | * @param u (optional) codepoint which was (or wasn't) transformed 90 | * @param transform output value of codepoint transformed into lowercase or 0 91 | * if mapping doesn't exist. Can't be NULL, supposed to be decoded with 92 | * nu_casemap_read 93 | * @param context not used 94 | * @return pointer to the next codepoint in string 95 | */ 96 | NU_EXPORT 97 | const char* _nu_tolower(const char *encoded, const char *limit, nu_read_iterator_t read, 98 | uint32_t *u, const char **transform, 99 | void *context); 100 | 101 | #endif /* NU_WITH_TOLOWER */ 102 | 103 | #ifdef NU_WITH_TOFOLD 104 | 105 | /** Return value of codepoint with case differences eliminated 106 | * 107 | * @ingroup transformations 108 | * @param codepoint unicode codepoint 109 | * @return casefolded codepoint or 0 if mapping doesn't exist 110 | */ 111 | NU_EXPORT 112 | const char* nu_tofold(uint32_t codepoint); 113 | 114 | /** Return value of codepoint with case differences eliminated. 115 | * Context-sensitivity is not implemented internally, returned result is equal 116 | * to calling nu_tofold() on corresponding codepoint. 
117 | * 118 | * @ingroup transformations_internal 119 | * @param encoded pointer to encoded string 120 | * @param limit memory limit of encoded string or NU_UNLIMITED 121 | * @param read read (decoding) function 122 | * @param u (optional) codepoint which was (or wasn't) transformed 123 | * @param transform output value of casefolded codepoint or 0 124 | * if mapping doesn't exist. Can't be NULL, supposed to be decoded with 125 | * nu_casemap_read 126 | * @param context not used 127 | * @return pointer to the next codepoint in string 128 | */ 129 | NU_EXPORT 130 | const char* _nu_tofold(const char *encoded, const char *limit, nu_read_iterator_t read, 131 | uint32_t *u, const char **transform, 132 | void *context); 133 | 134 | #endif /* NU_WITH_TOFOLD */ 135 | 136 | #if defined (__cplusplus) || defined (c_plusplus) 137 | } 138 | #endif 139 | 140 | #endif /* NU_TOUPPER_H */ 141 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/casemap_internal.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_CASEMAP_INTERNAL_H 2 | #define NU_CASEMAP_INTERNAL_H 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | 9 | /** Casemap codepoint 10 | * 11 | * @ingroup transformations_internal 12 | */ 13 | static inline 14 | const char* _nu_to_something(uint32_t codepoint, 15 | const int16_t *G, size_t G_SIZE, 16 | const uint32_t *VALUES_C, const uint16_t *VALUES_I, const uint8_t *COMBINED) { 17 | 18 | return nu_udb_lookup(codepoint, G, G_SIZE, VALUES_C, VALUES_I, COMBINED); /* thin adapter: forwards the caller-supplied generated tables to nu_udb_lookup. NOTE(review): presumably the shared body behind nu_toupper/nu_tolower/etc. — confirm against src/libnu/toupper.c and tolower.c */ 19 | } 20 | 21 | #endif /* NU_CASEMAP_INTERNAL_H */ 22 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/config.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_BUILD_CONFIG_H 2 | #define NU_BUILD_CONFIG_H 3 | 4 | // Hardcoded defines for vendored copy 5 | #define NU_WITH_UTF8 6 | #define
NU_WITH_TOUPPER 7 | #define NU_WITH_TOLOWER 8 | #define NU_WITH_UNACCENT 9 | #define NU_WITH_Z_COLLATION 10 | 11 | /** @file config.h 12 | * 13 | * This file list available build options and provide some shortcuts, 14 | * like NU_WITH_UTF16 will enable NU_WITH_UTF16LE + NU_WITH_UTF16BE. 15 | * 16 | * At build time you might set either particular option or shortcut. Either 17 | * way you don't have to and shouldn't modify this file, just set build flags 18 | * at the environment. 19 | * 20 | * This file will also enable several dependencies for you: case-mapping 21 | * depends on NU_WITH_UDB, NU_UTF8_READER and so. 22 | */ 23 | 24 | /* Definitions not covered in this file which should be defined 25 | * externally. 26 | * 27 | * NU_BUILD_STATIC: will change functions visibility to "hidden" (GCC). 28 | * @see defines.h 29 | * 30 | * NU_DISABLE_CONTRACTIONS: disables forward-reading during collation, 31 | * only weights of a single codepoints will be compared (enabled in release build) 32 | */ 33 | 34 | /* Enable everything, see below for details on a specific option */ 35 | #ifdef NU_WITH_EVERYTHING 36 | # define NU_WITH_UTF8 37 | # define NU_WITH_CESU8 38 | # define NU_WITH_UTF16 39 | # define NU_WITH_UTF16HE 40 | # define NU_WITH_UTF32 41 | # define NU_WITH_UTF32HE 42 | # define NU_WITH_STRINGS 43 | # define NU_WITH_EXTRA 44 | # define NU_WITH_REVERSE_READ 45 | # define NU_WITH_VALIDATION 46 | # define NU_WITH_COLLATION 47 | # define NU_WITH_CASEMAP 48 | # define NU_WITH_UNACCENT 49 | #endif /* NU_WITH_EVERYTHING */ 50 | 51 | /* Enable UTF-8 decoding and encoding */ 52 | #ifdef NU_WITH_UTF8 53 | # define NU_WITH_UTF8_READER /* UTF-8 decoding functions */ 54 | # define NU_WITH_UTF8_WRITER /* UTF-8 encoding functions */ 55 | #endif /* NU_WITH_UTF8 */ 56 | 57 | /* Enable CESU-8 decoding and encoding */ 58 | #ifdef NU_WITH_CESU8 59 | # define NU_WITH_CESU8_READER 60 | # define NU_WITH_CESU8_WRITER 61 | #endif /* NU_WITH_CESU8 */ 62 | 63 | /* Enable UTF-16LE decoding and 
encoding */ 64 | #ifdef NU_WITH_UTF16LE 65 | # define NU_WITH_UTF16LE_READER 66 | # define NU_WITH_UTF16LE_WRITER 67 | #endif /* NU_WITH_UTF16LE */ 68 | 69 | /* Enable UTF-16BE decoding and encoding */ 70 | #ifdef NU_WITH_UTF16BE 71 | # define NU_WITH_UTF16BE_READER 72 | # define NU_WITH_UTF16BE_WRITER 73 | #endif /* NU_WITH_UTF16BE */ 74 | 75 | /* Enable UTF-16HE decoding and encoding */ 76 | #ifdef NU_WITH_UTF16HE 77 | # define NU_WITH_UTF16HE_READER 78 | # define NU_WITH_UTF16HE_WRITER 79 | #endif /* NU_WITH_UTF16HE */ 80 | 81 | /* Enable all UTF-16 options */ 82 | #ifdef NU_WITH_UTF16 83 | # define NU_WITH_UTF16_READER 84 | # define NU_WITH_UTF16_WRITER 85 | #endif /* NU_WITH_UTF16 */ 86 | 87 | /* Enable UTF-16LE and BE decoders of UTF-16 decoder is requested */ 88 | #ifdef NU_WITH_UTF16_READER 89 | # define NU_WITH_UTF16LE_READER 90 | # define NU_WITH_UTF16BE_READER 91 | #endif /* NU_WITH_UTF16_READER */ 92 | 93 | /* Enable UTF-16LE and BE encoders of UTF-16 encoder is requested */ 94 | #ifdef NU_WITH_UTF16_WRITER 95 | # define NU_WITH_UTF16LE_WRITER 96 | # define NU_WITH_UTF16BE_WRITER 97 | #endif /* NU_WITH_UTF16_WRITER */ 98 | 99 | /* Enable UTF-32LE decoding and encoding */ 100 | #ifdef NU_WITH_UTF32LE 101 | # define NU_WITH_UTF32LE_READER 102 | # define NU_WITH_UTF32LE_WRITER 103 | #endif /* NU_WITH_UTF32LE */ 104 | 105 | /* Enable UTF-32BE decoding and encoding */ 106 | #ifdef NU_WITH_UTF32BE 107 | # define NU_WITH_UTF32BE_READER 108 | # define NU_WITH_UTF32BE_WRITER 109 | #endif /* NU_WITH_UTF32BE */ 110 | 111 | /* Enable UTF-32HE decoding and encoding */ 112 | #ifdef NU_WITH_UTF32HE 113 | # define NU_WITH_UTF32HE_READER 114 | # define NU_WITH_UTF32HE_WRITER 115 | #endif /* NU_WITH_UTF32HE */ 116 | 117 | /* Enable all UTF-32 options */ 118 | #ifdef NU_WITH_UTF32 119 | # define NU_WITH_UTF32_READER 120 | # define NU_WITH_UTF32_WRITER 121 | #endif /* NU_WITH_UTF32 */ 122 | 123 | /* Enable UTF-32LE and BE decoders of UTF-32 decoder is requested */ 124 | 
#ifdef NU_WITH_UTF32_READER 125 | # define NU_WITH_UTF32LE_READER 126 | # define NU_WITH_UTF32BE_READER 127 | #endif /* NU_WITH_UTF32_READER */ 128 | 129 | /* Enable UTF-32LE and BE encoders of UTF-32 encoder is requested */ 130 | #ifdef NU_WITH_UTF32_WRITER 131 | # define NU_WITH_UTF32LE_WRITER 132 | # define NU_WITH_UTF32BE_WRITER 133 | #endif /* NU_WITH_UTF32_WRITER */ 134 | 135 | /* Shortcut for all string functions */ 136 | #ifdef NU_WITH_STRINGS 137 | # define NU_WITH_Z_STRINGS /* 0-terminated string functions */ 138 | # define NU_WITH_N_STRINGS /* unterminated string functions */ 139 | #endif /* NU_WITH_STRINGS */ 140 | 141 | /* Shortcut for extra string functions */ 142 | #ifdef NU_WITH_EXTRA 143 | # define NU_WITH_Z_EXTRA /* extra functions for 0-terminated strings */ 144 | # define NU_WITH_N_EXTRA /* extra functions for unterminated strings */ 145 | #endif /* NU_WITH_STRINGS */ 146 | 147 | /* Enable collation functions */ 148 | #ifdef NU_WITH_COLLATION 149 | # define NU_WITH_Z_COLLATION /* collation functions for 0-terminated strings */ 150 | # define NU_WITH_N_COLLATION /* collation functions for unterminated strings */ 151 | #endif /* NU_WITH_COLLATION */ 152 | 153 | /* Requirements for collation functions on 0-terminated strings */ 154 | #ifdef NU_WITH_Z_COLLATION 155 | # define NU_WITH_Z_STRINGS 156 | # define NU_WITH_TOUPPER /* nu_toupper() */ 157 | #endif 158 | 159 | /* Requirements for collation functions 160 | * on unterminated strings */ 161 | #ifdef NU_WITH_N_COLLATION 162 | # define NU_WITH_N_STRINGS 163 | # define NU_WITH_TOUPPER 164 | #endif 165 | 166 | /* Requirements for casemap functions */ 167 | #ifdef NU_WITH_CASEMAP 168 | # define NU_WITH_TOLOWER /* nu_tolower() */ 169 | # define NU_WITH_TOUPPER 170 | # define NU_WITH_TOFOLD 171 | #endif /* NU_WITH_CASEMAP */ 172 | 173 | /* More requirements for collation functions all collation functions depends 174 | * on NU_WITH_DUCET */ 175 | #if (defined NU_WITH_Z_COLLATION) || (defined 
NU_WITH_N_COLLATION) 176 | # ifndef NU_WITH_DUCET 177 | # define NU_WITH_DUCET 178 | # endif 179 | #endif 180 | 181 | /* All collation and casemapping functions depends on NU_WITH_UDB */ 182 | #if (defined NU_WITH_Z_COLLATION) || (defined NU_WITH_N_COLLATION) \ 183 | || (defined NU_WITH_TOLOWER) || (defined NU_WITH_TOUPPER) || (defined NU_WITH_TOFOLD) \ 184 | || (defined NU_WITH_UNACCENT) 185 | # ifndef NU_WITH_UDB 186 | # define NU_WITH_UDB /* nu_udb_* functions, pretty much internal stuff */ 187 | # endif /* NU_WITH_UDB */ 188 | #endif 189 | 190 | /* DUCET implementation depends on NU_WITH_UDB */ 191 | #ifdef NU_WITH_DUCET 192 | # define NU_WITH_UDB 193 | #endif /* NU_WITH_DUCET */ 194 | 195 | /* NU_WITH_UDB depends on NU_WITH_UTF8_READER because internal encoding 196 | * of UDB is UTF-8 */ 197 | #ifdef NU_WITH_UDB 198 | # define NU_WITH_UTF8_READER 199 | #endif /* NU_WITH_UDB */ 200 | 201 | #endif /* NU_BUILD_CONFIG_H */ 202 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/defines.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_DEFINES_H 2 | #define NU_DEFINES_H 3 | 4 | /** @file 5 | */ 6 | 7 | /** @defgroup defines Defines 8 | */ 9 | 10 | #ifndef NU_EXPORT 11 | 12 | # ifdef _WIN32 13 | # define NU_EXPORT __declspec(dllexport) 14 | 15 | # elif __GNUC__ >= 4 16 | # ifdef NU_BUILD_STATIC 17 | # define NU_EXPORT __attribute__ ((visibility ("hidden"))) 18 | # else 19 | # define NU_EXPORT __attribute__ ((visibility ("default"))) 20 | # endif 21 | 22 | # else 23 | # define NU_EXPORT 24 | # endif 25 | 26 | #endif /* NU_EXPORT */ 27 | 28 | /** Integer version of Unicode specification implemented. 900 == 9.0.0 29 | * 30 | * @ingroup defines 31 | */ 32 | #define NU_UNICODE_VERSION 1000 33 | /** Special limit value to unset limit on string. Used internally by nunicode. 
34 | * 35 | * @ingroup defines 36 | */ 37 | #define NU_UNLIMITED ((const void *)(-1)) 38 | 39 | #ifdef _MSC_VER 40 | #define ssize_t ptrdiff_t 41 | #endif 42 | 43 | #endif /* NU_DEFINES_H */ 44 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/ducet.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_DUCET_H 2 | #define NU_DUCET_H 3 | 4 | #include 5 | 6 | #include 7 | #include 8 | 9 | #if defined (__cplusplus) || defined (c_plusplus) 10 | extern "C" { 11 | #endif 12 | 13 | #ifdef NU_WITH_DUCET 14 | 15 | /** Get DUCET value of codepoint 16 | * 17 | * Normally, for unlisted codepoints, this function will return number greater 18 | * than max weight of listed codepoints, hence putting all unlisted codepoints 19 | * (not letters and not numbers) to the end of the sorted list (in codepoint 20 | * order). 21 | * 22 | * @ingroup udb 23 | * @param codepoint codepoint 24 | * @param weight previous weight for compound weight (not used here) 25 | * @param context pointer passed to nu_strcoll() 26 | * @return comparable weight of the codepoint 27 | */ 28 | NU_EXPORT 29 | int32_t nu_ducet_weight(uint32_t codepoint, int32_t *weight, void *context); 30 | 31 | #endif /* NU_WITH_DUCET */ 32 | 33 | #if defined (__cplusplus) || defined (c_plusplus) 34 | } 35 | #endif 36 | 37 | #endif /* NU_DUCET_H */ 38 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/mph.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_MPH_H 2 | #define NU_MPH_H 3 | 4 | /* Intentionally undocumented 5 | * 6 | * http://iswsa.acm.org/mphf/index.html 7 | */ 8 | 9 | #include 10 | #include 11 | 12 | #include 13 | 14 | #if defined (__cplusplus) || defined (c_plusplus) 15 | extern "C" { 16 | #endif 17 | 18 | #ifdef NU_WITH_UDB 19 | 20 | /* those need to be the same values as used in MPH generation */ 
21 | #define PRIME 0x01000193 /* 32-bit FNV prime; must match the constant used when the MPH tables were generated (see comment above) */ 22 | 23 | /** Calculate G offset from codepoint 24 | */ 25 | static inline 26 | uint32_t _nu_hash(uint32_t hash, uint32_t codepoint) { 27 | if (hash == 0) { 28 | hash = PRIME; /* first round: seed with PRIME */ 29 | } 30 | 31 | return hash ^ codepoint; 32 | } 33 | 34 | /** Get hash value of Unicode codepoint 35 | */ 36 | static inline 37 | uint32_t nu_mph_hash(const int16_t *G, size_t G_SIZE, 38 | uint32_t codepoint) { 39 | 40 | uint32_t h = _nu_hash(0, codepoint); 41 | int16_t offset = G[h % G_SIZE]; 42 | if (offset < 0) { 43 | return (uint32_t)(-offset - 1); /* negative G entry stores the final slot directly, encoded as -(slot + 1) */ 44 | } 45 | return (_nu_hash(offset, codepoint) % G_SIZE); /* non-negative entry is the seed for a second-level hash */ 46 | } 47 | 48 | /** Lookup value in MPH 49 | */ 50 | static inline 51 | uint32_t nu_mph_lookup(const uint32_t *V_C, const uint16_t *V_I, 52 | uint32_t codepoint, uint32_t hash) { 53 | 54 | const uint32_t *c = (V_C + hash); 55 | const uint16_t *i = (V_I + hash); 56 | 57 | /* due to nature of minimal perfect hash, it will always 58 | * produce collision for codepoints outside of MPH original set. 59 | * thus VALUES_C contain original codepoint to check if 60 | * collision occurred */ 61 | 62 | return (*c != codepoint ? 0 : *i); 63 | } 64 | 65 | #endif /* NU_WITH_UDB */ 66 | 67 | #if defined (__cplusplus) || defined (c_plusplus) 68 | } 69 | #endif 70 | 71 | #endif /* NU_MPH_H */ 72 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/strcoll.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_STRCOLL_H 2 | #define NU_STRCOLL_H 3 | 4 | /** @defgroup collation Collation functions 5 | * 6 | * All functions in this group are following full Unicode collation rules, 7 | * i.e. nu_strstr(haystack, "Æ") will find "AE" in haystack and 8 | * nu_strstr(haystack, "ß") will find "ss". 9 | * 10 | * Same applies for *every* function, nu_strchr(str, 0x00DF), as you would 11 | * guess, will also find "ss" in str. 12 | * 13 | * Please expect this.
14 | * 15 | * Note on "n" functions variant: please see comment on this topic 16 | * in strings.h 17 | */ 18 | 19 | #include 20 | 21 | #include 22 | #include 23 | #include 24 | #include 25 | 26 | #if defined (__cplusplus) || defined (c_plusplus) 27 | extern "C" { 28 | #endif 29 | 30 | #ifdef NU_WITH_TOFOLD 31 | # define NU_FOLDING_FUNCTION nu_tofold 32 | #else 33 | # define NU_FOLDING_FUNCTION nu_toupper 34 | #endif /* NU_WITH_TOFOLD */ 35 | 36 | #ifdef NU_WITH_Z_COLLATION 37 | 38 | /** Locate codepoint in string 39 | * 40 | * @ingroup collation 41 | * @param encoded encoded string 42 | * @param c character to locate 43 | * @param read read (decode) function for encoded string 44 | * @return pointer to codepoint in string or 0 45 | */ 46 | NU_EXPORT 47 | const char* nu_strchr(const char *encoded, uint32_t c, nu_read_iterator_t read); 48 | 49 | /** Locate codepoint in string ignoring case 50 | * 51 | * @ingroup collation 52 | * @see nu_strchr 53 | */ 54 | NU_EXPORT 55 | const char* nu_strcasechr(const char *encoded, uint32_t c, nu_read_iterator_t read); 56 | 57 | /** Locate codepoint in string in reverse direction 58 | * 59 | * @ingroup collation 60 | * @param encoded encoded string 61 | * @param c character to locate 62 | * @param read read (decode) function for encoded string 63 | * @return pointer to codepoint in string or 0 64 | */ 65 | NU_EXPORT 66 | const char* nu_strrchr(const char *encoded, uint32_t c, nu_read_iterator_t read); 67 | 68 | /** Locate codepoint in string in reverse direction, case-insensitive 69 | * 70 | * @ingroup collation 71 | * @see nu_strrchr 72 | */ 73 | NU_EXPORT 74 | const char* nu_strrcasechr(const char *encoded, uint32_t c, nu_read_iterator_t read); 75 | 76 | /** Compare strings in case-sensitive manner.
77 | * 78 | * @ingroup collation 79 | * @param s1 first encoded strings 80 | * @param s2 second encoded strings 81 | * @param s1_read read (decode) function for first string 82 | * @param s2_read read (decode) function for second string 83 | * @return -1, 0, 1 84 | */ 85 | NU_EXPORT 86 | int nu_strcoll(const char *s1, const char *s2, 87 | nu_read_iterator_t s1_read, nu_read_iterator_t s2_read); 88 | 89 | /** Compare strings in case-insensitive manner. 90 | * 91 | * @ingroup collation 92 | * @see nu_strcoll 93 | */ 94 | NU_EXPORT 95 | int nu_strcasecoll(const char *s1, const char *s2, 96 | nu_read_iterator_t s1_read, nu_read_iterator_t s2_read); 97 | 98 | /** Find needle in haystack 99 | * 100 | * @ingroup collation 101 | * @param haystack encoded haystack 102 | * @param needle encoded needle 103 | * @param haystack_read haystack read (decode) function 104 | * @param needle_read needle read (decode) function 105 | * @return pointer to found string or 0, will return 106 | * haystack if needle is empty string 107 | */ 108 | NU_EXPORT 109 | const char* nu_strstr(const char *haystack, const char *needle, 110 | nu_read_iterator_t haystack_read, nu_read_iterator_t needle_read); 111 | 112 | /** Find needle in haystack (case-insensitive) 113 | * 114 | * @ingroup collation 115 | * @see nu_strstr 116 | */ 117 | NU_EXPORT 118 | const char* nu_strcasestr(const char *haystack, const char *needle, 119 | nu_read_iterator_t haystack_read, nu_read_iterator_t needle_read); 120 | 121 | #endif /* NU_WITH_Z_COLLATION */ 122 | 123 | #ifdef NU_WITH_N_COLLATION 124 | 125 | /** 126 | * @ingroup collation 127 | * @see nu_strchr 128 | */ 129 | NU_EXPORT 130 | const char* nu_strnchr(const char *encoded, size_t max_len, uint32_t c, 131 | nu_read_iterator_t read); 132 | 133 | /** 134 | * @ingroup collation 135 | * @see nu_strcasechr 136 | */ 137 | NU_EXPORT 138 | const char* nu_strcasenchr(const char *encoded, size_t max_len, uint32_t c, 139 | nu_read_iterator_t read); 140 | 141 | /** 142 | * 
@ingroup collation 143 | * @see nu_strrchr 144 | */ 145 | NU_EXPORT 146 | const char* nu_strrnchr(const char *encoded, size_t max_len, uint32_t c, 147 | nu_read_iterator_t read); 148 | 149 | /** 150 | * @ingroup collation 151 | * @see nu_strrcasechr 152 | */ 153 | NU_EXPORT 154 | const char* nu_strrcasenchr(const char *encoded, size_t max_len, uint32_t c, 155 | nu_read_iterator_t read); 156 | 157 | /** 158 | * @ingroup collation 159 | * @see nu_strcoll 160 | */ 161 | NU_EXPORT 162 | int nu_strncoll(const char *s1, size_t s1_max_len, 163 | const char *s2, size_t s2_max_len, 164 | nu_read_iterator_t s1_read, nu_read_iterator_t s2_read); 165 | 166 | /** 167 | * @ingroup collation 168 | * @see nu_strncoll 169 | */ 170 | NU_EXPORT 171 | int nu_strcasencoll(const char *s1, size_t s1_max_len, 172 | const char *s2, size_t s2_max_len, 173 | nu_read_iterator_t s1_read, nu_read_iterator_t s2_read); 174 | 175 | /** 176 | * @ingroup collation 177 | * @see nu_strstr 178 | */ 179 | NU_EXPORT 180 | const char* nu_strnstr(const char *haystack, size_t haystack_max_len, 181 | const char *needle, size_t needle_max_len, 182 | nu_read_iterator_t haystack_read, nu_read_iterator_t needle_read); 183 | 184 | /** 185 | * @ingroup collation 186 | * @see nu_strcasestr 187 | */ 188 | NU_EXPORT 189 | const char* nu_strcasenstr(const char *haystack, size_t haystack_max_len, 190 | const char *needle, size_t needle_max_len, 191 | nu_read_iterator_t haystack_read, nu_read_iterator_t needle_read); 192 | 193 | #endif /* NU_WITH_N_COLLATION */ 194 | 195 | #if defined (__cplusplus) || defined (c_plusplus) 196 | } 197 | #endif 198 | 199 | #endif /* NU_STRCOLL_H */ 200 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/strcoll_internal.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_STRCOLL_INTERNAL_H 2 | #define NU_STRCOLL_INTERNAL_H 3 | 4 | /** @defgroup collation_internal Internal collation 
functions 5 | * 6 | * Functions in this group are mostly for internal use. Please use them 7 | * with care. 8 | */ 9 | 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #if defined (__cplusplus) || defined (c_plusplus) 16 | extern "C" { 17 | #endif 18 | 19 | /** Read (decode) iterator with transformation applied inside of it 20 | * 21 | * @ingroup collation_internal 22 | * @see nu_default_compound_read 23 | * @see nu_nocase_compound_read 24 | */ 25 | typedef const char* (*nu_compound_read_t)( 26 | const char *encoded, const char *encoded_limit, nu_read_iterator_t encoded_read, 27 | uint32_t *unicode, const char **tail); 28 | 29 | /** Weight unicode codepoint (or several codepoints) 30 | * 31 | * 0 should always be weighted to 0. If your weight function needs more 32 | * than one codepoint - return negative value, which will be passed back to 33 | * this function along with next codepoint. 34 | * 35 | * When function decided on weight and returned positive result, it has to 36 | * fill weight with how many (Unicode) codepoints nunicode should rollback. 37 | * E.g. function consumed "ZZS" and decided weight (in Hungarian collation), 38 | * it fills 0 to \*weight because no rollback is needed. Then function 39 | * consumed "ZZZ" and no weight available for such contraction - it 40 | * returns weight for "Z" and fills \*weight with 2, to rollback 41 | * redundant "ZZ". 42 | * 43 | * If string suddenly ends before weight function can decide (string limit 44 | * reached), 0 will be passed additionally to the previous string to signal 45 | * end of the string.
46 | 47 | * @ingroup collation_internal 48 | * @param u unicode codepoint to weight 49 | * @param weight 0 at first call or (on sequential calls) pointer to negative 50 | * weight previously returned by this function 51 | * @param context pointer passed to _nu_strcoll() or _nu_strstr() 52 | * @return positive codepoint weight or negative value if function need more 53 | * codepoints 54 | */ 55 | typedef int32_t (*nu_codepoint_weight_t)(uint32_t u, int32_t *weight, void *context); 56 | 57 | #if (defined NU_WITH_Z_COLLATION) || (defined NU_WITH_N_COLLATION) 58 | 59 | /** Default compound read, equal to simply calling encoded_read(encoded, &unicode) 60 | * 61 | * @ingroup collation_internal 62 | * @param encoded encoded string 63 | * @param encoded_limit upper limit for encoded. NU_UNLIMITED for 0-terminated 64 | * strings 65 | * @param encoded_read read (decode) function 66 | * @param unicode output unicode codepoint 67 | * @param tail output pointer to compound tail, should never be 0 68 | * @return pointer to next encoded codepoint 69 | */ 70 | static inline 71 | const char* nu_default_compound_read(const char *encoded, const char *encoded_limit, 72 | nu_read_iterator_t encoded_read, uint32_t *unicode, 73 | const char **tail) { 74 | (void)(encoded_limit); 75 | (void)(tail); /* intentionally unused: a plain read never produces a compound tail */ 76 | 77 | return encoded_read(encoded, unicode); 78 | } 79 | 80 | /** Case-ignoring compound read, equal to calling 81 | * encoded_read(encoded, &unicode) with nu_toupper() applied internally 82 | * 83 | * @ingroup collation_internal 84 | * @param encoded encoded string 85 | * @param encoded_limit upper limit for encoded. NU_UNLIMITED for 0-terminated 86 | * strings 87 | * @param encoded_read read (decode) function 88 | * @param unicode output unicode codepoint 89 | * @param tail output pointer to compound tail, should never be 0 90 | * @return pointer to next encoded codepoint 91 | */ 92 | static inline 93 | const char* nu_nocase_compound_read(const char *encoded, const char *encoded_limit, 94 | nu_read_iterator_t encoded_read, uint32_t *unicode, 95 | const char **tail) { 96 | 97 | /* re-entry with tail != 0 */ 98 | if (*tail != 0) { /* a previous call produced a multi-codepoint case mapping: keep draining it before decoding new input */ 99 | *tail = nu_casemap_read(*tail, unicode); 100 | 101 | if (*unicode != 0) { 102 | return encoded; 103 | } 104 | 105 | *tail = 0; // fall thru 106 | } 107 | 108 | if (encoded >= encoded_limit) { /* input exhausted: report a terminating 0 codepoint */ 109 | *unicode = 0; 110 | return encoded; 111 | } 112 | 113 | const char *p = encoded_read(encoded, unicode); 114 | 115 | if (*unicode == 0) { 116 | return p; 117 | } 118 | 119 | const char *map = NU_FOLDING_FUNCTION(*unicode); /* nu_tofold or nu_toupper (see #define above); 0 when no mapping exists */ 120 | if (map != 0) { 121 | *tail = nu_casemap_read(map, unicode); /* emit the first mapped codepoint now; the remainder stays in *tail for re-entry */ 122 | } 123 | 124 | return p; 125 | } 126 | 127 | /** Internal interface for nu_strcoll 128 | * 129 | * @ingroup collation_internal 130 | * @param lhs left-hand side encoded string 131 | * @param lhs_limit upper limit for lhs, use NU_UNLIMITED for 0-terminated 132 | * strings 133 | * @param rhs right-hand side encoded string 134 | * @param rhs_limit upper limit for rhs, use NU_UNLIMITED for 0-terminated 135 | * strings 136 | * @param it1 lhs read (decoding) function 137 | * @param it2 rhs read (decoding) function 138 | * @param com1 lhs compound read function 139 | * @param com2 rhs compound read function 140 | * @param weight codepoint weighting function 141 | * @param context pointer which will be passed to weight 142 | * @param collated_left (optional) number of codepoints collated in lhs 143 | * @param collated_right (optional) number of codepoints collated in rhs 144 | * 145 | * @see nu_strcoll 146 | * @see nu_default_compound_read 147 | * @see
nu_nocase_compound_read 148 | * @see nu_ducet_weight 149 | */ 150 | NU_EXPORT 151 | int _nu_strcoll(const char *lhs, const char *lhs_limit, 152 | const char *rhs, const char *rhs_limit, 153 | nu_read_iterator_t it1, nu_read_iterator_t it2, 154 | nu_compound_read_t com1, nu_compound_read_t com2, 155 | nu_codepoint_weight_t weight, void *context, 156 | ssize_t *collated_left, ssize_t *collated_right); 157 | 158 | /** Internal interface for nu_strchr 159 | * 160 | * @ingroup collation_internal 161 | * @param lhs left-hand side encoded string 162 | * @param lhs_limit upper limit for lhs, use NU_UNLIMITED for 0-terminated 163 | * strings 164 | * @param c unicode codepoint to look for 165 | * @param read lhs read (decoding) function 166 | * @param com lhs compound read function 167 | * @param casemap casemapping function 168 | * @param casemap_read casemapping result decoding function 169 | * 170 | * @see nu_strchr 171 | * @see nu_default_compound_read 172 | * @see nu_nocase_compound_read 173 | * @see nu_toupper 174 | * @see nu_tolower 175 | */ 176 | NU_EXPORT 177 | const char* _nu_strchr(const char *lhs, const char *lhs_limit, 178 | uint32_t c, nu_read_iterator_t read, 179 | nu_compound_read_t com, 180 | nu_casemapping_t casemap, nu_read_iterator_t casemap_read); 181 | 182 | /** Internal interface for nu_strchr 183 | * 184 | * @ingroup collation_internal 185 | * @see _nu_strchr 186 | */ 187 | NU_EXPORT 188 | const char* _nu_strrchr(const char *encoded, const char *limit, 189 | uint32_t c, nu_read_iterator_t read, 190 | nu_compound_read_t com, 191 | nu_casemapping_t casemap, nu_read_iterator_t casemap_read); 192 | 193 | /** Internal interface for nu_strcoll 194 | * 195 | * @ingroup collation_internal 196 | * @param haystack encoded haystack 197 | * @param haystack_limit upper limit for haystack, use NU_UNLIMITED for 198 | * 0-terminated strings 199 | * @param needle encoded needle string 200 | * @param needle_limit upper limit for needle, use NU_UNLIMITED for 201 | * 
0-terminated strings 202 | * @param it1 haystack read (decoding) function 203 | * @param it2 needle read (decoding) function 204 | * @param com1 haystack compound read function 205 | * @param com2 needle compound read function 206 | * @param casemap casemapping function 207 | * @param casemap_read casemapping result decoding function 208 | * @param weight codepoint weighting function 209 | * @param context pointer which will be passed to weight 210 | * 211 | * @see nu_strstr 212 | * @see nu_default_compound_read 213 | * @see nu_nocase_compound_read 214 | * @see nu_toupper 215 | * @see nu_tolower 216 | * @see nu_ducet_weight 217 | */ 218 | NU_EXPORT 219 | const char* _nu_strstr(const char *haystack, const char *haystack_limit, 220 | const char *needle, const char *needle_limit, 221 | nu_read_iterator_t it1, nu_read_iterator_t it2, 222 | nu_compound_read_t com1, nu_compound_read_t com2, 223 | nu_casemapping_t casemap, nu_read_iterator_t casemap_read, 224 | nu_codepoint_weight_t weight, void *context); 225 | 226 | #endif /* (defined NU_WITH_Z_COLLATION) || (defined NU_WITH_N_COLLATION) */ 227 | 228 | #if defined (__cplusplus) || defined (c_plusplus) 229 | } 230 | #endif 231 | 232 | #endif /* NU_STRCOLL_INTERNAL_H */ 233 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/strings.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_STRINGS_H 2 | #define NU_STRINGS_H 3 | 4 | /** @defgroup strings String functions 5 | * 6 | * Note on "n" functions variant: "n" is in bytes in all functions, 7 | * note though that those are not for memory overrun control. 8 | * They are just for strings not having terminating 0 byte and those 9 | * functions won't go further than m-th *codepoint* in string, but might go 10 | * further than n-th byte in case of multibyte sequence. 11 | * 12 | * E.g.: ``nu_strnlen("абв", 3, nu_utf8_read);``. 
13 | * Since codepoints are 2-byte sequences, nu_strnlen() won't go further than 2nd 14 | * codepoint, but will go further than 3rd byte while reading "б". 15 | */ 16 | 17 | #include 18 | #include 19 | 20 | #include 21 | #include 22 | 23 | #if defined (__cplusplus) || defined (c_plusplus) 24 | extern "C" { 25 | #endif 26 | 27 | /** 28 | * @defgroup iterators Iterators 29 | * @defgroup transformations Codepoint transformations 30 | * @defgroup transformations_internal Codepoint transformations (internal) 31 | */ 32 | 33 | /** Read (decode) iterator 34 | * 35 | * @ingroup iterators 36 | * @see nu_utf8_read 37 | */ 38 | typedef const char* (*nu_read_iterator_t)(const char *encoded, uint32_t *unicode); 39 | 40 | /** Read (decode) backwards iterator 41 | * 42 | * Arguments intentionally reversed to not mix this with nu_read_iterator_t. 43 | * Reverse read is not compatible with any of string functions. 44 | * 45 | * @ingroup iterators 46 | * @see nu_utf8_revread 47 | */ 48 | typedef const char* (*nu_revread_iterator_t)(uint32_t *unicode, const char *encoded); 49 | 50 | /** Write (encode) iterator 51 | * 52 | * @ingroup iterators 53 | * @see nu_utf8_write 54 | */ 55 | typedef char* (*nu_write_iterator_t)(uint32_t unicode, char *encoded); 56 | 57 | /** Transform codepoint 58 | * 59 | * @ingroup transformations 60 | * @see nu_toupper 61 | * @see nu_tolower 62 | */ 63 | typedef const char* (*nu_transformation_t)(uint32_t codepoint); 64 | 65 | /** Transform codepoint (used internally). This kind of transformation 66 | * delegates iteration on string to transformation implementation. 
67 | * 68 | * @ingroup transformations_internal 69 | * @see _nu_toupper 70 | * @see _nu_tolower 71 | */ 72 | typedef const char* (*nu_transform_read_t)( 73 | const char *encoded, const char *limit, nu_read_iterator_t read, 74 | uint32_t *u, const char **transformed, 75 | void *context); 76 | 77 | #if (defined NU_WITH_Z_STRINGS) || (defined NU_WITH_N_STRINGS) 78 | 79 | #endif /* NU_WITH_Z_STRINGS NU_WITH_N_STRINGS */ 80 | 81 | #ifdef NU_WITH_Z_STRINGS 82 | 83 | /** Get decoded string codepoints length 84 | * 85 | * @ingroup strings 86 | * @param encoded encoded string 87 | * @param it decoding function 88 | * @return string length or negative error 89 | * 90 | * @see nu_strnlen 91 | */ 92 | NU_EXPORT 93 | ssize_t nu_strlen(const char *encoded, nu_read_iterator_t it); 94 | 95 | /** Get encoded string bytes length (encoding variant) 96 | * 97 | * @ingroup strings 98 | * @param unicode unicode codepoints 99 | * @param it encoding function 100 | * @return byte length or negative error 101 | * 102 | * @see nu_bytenlen 103 | */ 104 | NU_EXPORT 105 | ssize_t nu_bytelen(const uint32_t *unicode, nu_write_iterator_t it); 106 | 107 | /** Get encoded string bytes length 108 | * 109 | * @ingroup strings 110 | * @param encoded encoded string 111 | * @param it decoding function 112 | * @return string length or negative error 113 | */ 114 | NU_EXPORT 115 | ssize_t nu_strbytelen(const char *encoded, nu_read_iterator_t it); 116 | 117 | #endif /* NU_WITH_Z_STRINGS */ 118 | 119 | #ifdef NU_WITH_N_STRINGS 120 | 121 | /** 122 | * @ingroup strings 123 | * @see nu_strlen 124 | */ 125 | NU_EXPORT 126 | ssize_t nu_strnlen(const char *encoded, size_t max_len, nu_read_iterator_t it); 127 | 128 | /** 129 | * @ingroup strings 130 | * @see nu_bytelen 131 | */ 132 | NU_EXPORT 133 | ssize_t nu_bytenlen(const uint32_t *unicode, size_t max_len, 134 | nu_write_iterator_t it); 135 | 136 | #endif /* NU_WITH_N_STRINGS */ 137 | 138 | #if defined (__cplusplus) || defined (c_plusplus) 139 | } 140 | #endif 
141 | 142 | #endif /* NU_STRINGS_H */ 143 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/udb.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_UDB_H 2 | #define NU_UDB_H 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | /** @defgroup udb Unicode database 14 | * 15 | * Note: never use it directly, it is subject to change in next releases 16 | */ 17 | 18 | #if defined (__cplusplus) || defined (c_plusplus) 19 | extern "C" { 20 | #endif 21 | 22 | #ifdef NU_WITH_UDB 23 | 24 | #define NU_UDB_DECODING_FUNCTION (nu_utf8_read) 25 | #define nu_udb_read (nu_utf8_read) 26 | 27 | /** Lookup value in UDB 28 | * 29 | * Similar to nu_udb_lookup(), but doesn't look into COMBINED 30 | * 31 | * @ingroup udb 32 | * @see nu_udb_lookup 33 | * @return raw value from VALUES_I or 0 if value wasn't found 34 | */ 35 | static inline 36 | uint32_t nu_udb_lookup_value(uint32_t codepoint, 37 | const int16_t *G, size_t G_SIZE, 38 | const uint32_t *VALUES_C, const uint16_t *VALUES_I) { 39 | 40 | uint32_t hash = nu_mph_hash(G, G_SIZE, codepoint); 41 | uint32_t value = nu_mph_lookup(VALUES_C, VALUES_I, codepoint, hash); 42 | 43 | return value; 44 | } 45 | 46 | /** Lookup data in UDB 47 | * 48 | * Returned data is encoded, therefore you need to use p = it(p, &u) to 49 | * fetch it. Returned string might contain more than 1 codepoint. 
50 | * 51 | * @ingroup udb 52 | * @param codepoint unicode codepoint 53 | * @param G first MPH table 54 | * @param G_SIZE first table number of elements (original MPH set size) 55 | * @param VALUES_C codepoints array 56 | * @param VALUES_I offsets array 57 | * @param COMBINED joined values addressed by index stored in VALUES 58 | * @return looked up data or 0 59 | */ 60 | static inline 61 | const char* nu_udb_lookup(uint32_t codepoint, 62 | const int16_t *G, size_t G_SIZE, 63 | const uint32_t *VALUES_C, const uint16_t *VALUES_I, const uint8_t *COMBINED) { 64 | 65 | uint32_t combined_offset = nu_udb_lookup_value(codepoint, 66 | G, G_SIZE, VALUES_C, VALUES_I); 67 | 68 | if (combined_offset == 0) { 69 | return 0; 70 | } 71 | 72 | return (const char *)(COMBINED + combined_offset); 73 | } 74 | 75 | #endif /* NU_WITH_UDB */ 76 | 77 | #if defined (__cplusplus) || defined (c_plusplus) 78 | } 79 | #endif 80 | 81 | #endif /* NU_UDB_H */ 82 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/unaccent.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_UNACCENT_H 2 | #define NU_UNACCENT_H 3 | 4 | #include 5 | #include 6 | 7 | #if defined (__cplusplus) || defined (c_plusplus) 8 | extern "C" { 9 | #endif 10 | 11 | /** 12 | * @example unaccent.c 13 | */ 14 | 15 | #ifdef NU_WITH_UNACCENT 16 | 17 | /** Return unaccented value of codepoint. If codepoint is 18 | * accent (disacritic) itself, returns empty string. 19 | * 20 | * @note This is nunicode extenstion. 21 | * 22 | * @ingroup transformations 23 | * @param codepoint unicode codepoint 24 | * @return unaccented codepoint, 0 if mapping doesn't exist 25 | * and empty string if codepoint is accent 26 | */ 27 | NU_EXPORT 28 | const char* nu_tounaccent(uint32_t codepoint); 29 | 30 | /** Return unaccented value of codepoint. If codepoint is 31 | * accent (disacritic) itself, returns empty string. 
32 | * 33 | * @note This is nunicode extenstion. 34 | * 35 | * @ingroup transformations_internal 36 | * @param encoded pointer to encoded string 37 | * @param limit memory limit of encoded string or NU_UNLIMITED 38 | * @param read read (decoding) function 39 | * @param u (optional) codepoint which was (or wasn't) transformed 40 | * @param transform output value of codepoint unaccented or 0 if 41 | * mapping doesn't exist, or empty string if codepoint is accent. 42 | * Can't be NULL, supposed to be decoded with nu_casemap_read 43 | * @param context not used 44 | * @return pointer to the next codepoint in string 45 | */ 46 | NU_EXPORT 47 | const char* _nu_tounaccent(const char *encoded, const char *limit, nu_read_iterator_t read, 48 | uint32_t *u, const char **transform, 49 | void *context); 50 | 51 | #endif /* NU_WITH_UNACCENT */ 52 | 53 | #if defined (__cplusplus) || defined (c_plusplus) 54 | } 55 | #endif 56 | 57 | #endif /* NU_UNACCENT_H */ 58 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/utf8.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_UTF8_H 2 | #define NU_UTF8_H 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | /** @defgroup utf8 UTF-8 support 12 | * 13 | * Note: There is no utf8_string[i] equivalent - it will be slow, 14 | * use nu_utf8_read() and nu_utf8_revread() instead 15 | * 16 | * @example utf8.c 17 | * @example revread.c 18 | */ 19 | 20 | #if defined (__cplusplus) || defined (c_plusplus) 21 | extern "C" { 22 | #endif 23 | 24 | #ifdef NU_WITH_UTF8_READER 25 | 26 | /** Read codepoint from UTF-8 string 27 | * 28 | * @ingroup utf8 29 | * @param utf8 pointer to UTF-8 encoded string 30 | * @param unicode output unicode codepoint or 0 31 | * @return pointer to next codepoint in UTF-8 string 32 | */ 33 | static inline 34 | const char* nu_utf8_read(const char *utf8, uint32_t *unicode) { 35 | uint32_t c = 
*(unsigned char *)(utf8); 36 | 37 | if (c >= 0x80) { 38 | if (c < 0xE0) { 39 | if (unicode != 0) { 40 | utf8_2b(utf8, unicode); 41 | } 42 | return utf8 + 2; 43 | } 44 | else if (c < 0xF0) { 45 | if (unicode != 0) { 46 | utf8_3b(utf8, unicode); 47 | } 48 | return utf8 + 3; 49 | } 50 | else { 51 | if (unicode != 0) { 52 | utf8_4b(utf8, unicode); 53 | } 54 | return utf8 + 4; 55 | } 56 | } 57 | else if (unicode != 0) { 58 | *unicode = c; 59 | } 60 | 61 | return utf8 + 1; 62 | } 63 | 64 | #ifdef NU_WITH_REVERSE_READ 65 | 66 | /** Read codepoint from UTF-8 string in backward direction 67 | * 68 | * Note that it is your responsibility to check that this call 69 | * is not going under beginning of encoded string. Normally you 70 | * shouldn't call it like this: nu_utf8_revread(&u, "hello"); which 71 | * will result in undefined behavior 72 | * 73 | * @ingroup utf8 74 | * @param unicode output unicode codepoint or 0 75 | * @param utf8 pointer to UTF-8 encoded string 76 | * @return pointer to previous codepoint in UTF-8 string 77 | */ 78 | static inline 79 | const char* nu_utf8_revread(uint32_t *unicode, const char *utf8) { 80 | /* valid UTF-8 has either 10xxxxxx (continuation byte) 81 | * or beginning of byte sequence */ 82 | const char *p = utf8 - 1; 83 | while (((unsigned char)(*p) & 0xC0) == 0x80) { /* skip every 0b10000000 */ 84 | --p; 85 | } 86 | 87 | if (unicode != 0) { 88 | nu_utf8_read(p, unicode); 89 | } 90 | 91 | return p; 92 | } 93 | 94 | #endif /* NU_WITH_REVERSE_READ */ 95 | 96 | #ifdef NU_WITH_VALIDATION 97 | 98 | /** Validate codepoint in string 99 | * 100 | * @ingroup utf8 101 | * @param encoded buffer with encoded string 102 | * @param max_len buffer length 103 | * @return codepoint length or 0 on error 104 | */ 105 | NU_EXPORT 106 | int nu_utf8_validread(const char *encoded, size_t max_len); 107 | 108 | #endif /* NU_WITH_VALIDATION */ 109 | #endif /* NU_WITH_UTF8_READER */ 110 | 111 | #ifdef NU_WITH_UTF8_WRITER 112 | 113 | /** Write unicode codepoints into 
UTF-8 encoded string 114 | * 115 | * @ingroup utf8 116 | * @param unicode unicode codepoint 117 | * @param utf8 pointer to buffer to write UTF-8 encoded text to, 118 | * should be large enough to hold encoded value 119 | * @return pointer to byte after last written 120 | */ 121 | NU_EXPORT 122 | char* nu_utf8_write(uint32_t unicode, char *utf8); 123 | 124 | #endif /* NU_WITH_UTF8_WRITER */ 125 | 126 | #if defined (__cplusplus) || defined (c_plusplus) 127 | } 128 | #endif 129 | 130 | #endif /* NU_UTF8_H */ 131 | -------------------------------------------------------------------------------- /vendor/nunicode/include/libnu/utf8_internal.h: -------------------------------------------------------------------------------- 1 | #ifndef NU_UTF8_INTERNAL_H 2 | #define NU_UTF8_INTERNAL_H 3 | 4 | #include 5 | 6 | static inline 7 | unsigned utf8_char_length(const char c) { 8 | const unsigned char uc = c; 9 | 10 | if ((uc & 0x80) == 0) return 1; 11 | if ((uc & 0xE0) == 0xC0) return 2; 12 | if ((uc & 0xF0) == 0xE0) return 3; 13 | if ((uc & 0xF8) == 0xF0) return 4; 14 | 15 | return 0; /* undefined */ 16 | } 17 | 18 | static inline 19 | void utf8_2b(const char *p, uint32_t *codepoint) { 20 | const unsigned char *up = (const unsigned char *)(p); 21 | 22 | /* UTF-8: 110xxxxx 10xxxxxx 23 | * |__ 1st unicode octet 24 | * 110xxx00 << 6 -> 00000xxx 00000000 | 25 | * -------- 26 | * 110000xx << 6 -> 00000xxx xx000000 |__ 2nd unicode octet 27 | * 10xxxxxx -> 00000xxx xxxxxxxx | 28 | * -------- */ 29 | *codepoint = (*(up) & 0x1C) << 6 30 | | ((*(up) & 0x03) << 6 | (*(up + 1) & 0x3F)); 31 | } 32 | 33 | static inline 34 | void utf8_3b(const char *p, uint32_t *codepoint) { 35 | const unsigned char *up = (const unsigned char *)(p); 36 | 37 | /* UTF-8: 1110xxxx 10xxxxxx 10xxxxxx 38 | * 39 | * 1110xxxx << 12 -> xxxx0000 0000000 |__ 1st unicode octet 40 | * 10xxxx00 << 6 -> xxxxxxxx 0000000 | 41 | * -------- 42 | * 100000xx << 6 -> xxxxxxxx xx00000 |__ 2nd unicode octet 43 | * 10xxxxxx -> 
xxxxxxxx xxxxxxx | 44 | * ------- */ 45 | *codepoint = 46 | ((*(up) & 0x0F) << 12 | (*(up + 1) & 0x3C) << 6) 47 | | ((*(up + 1) & 0x03) << 6 | (*(up + 2) & 0x3F)); 48 | } 49 | 50 | static inline 51 | void utf8_4b(const char *p, uint32_t *codepoint) { 52 | const unsigned char *up = (const unsigned char *)(p); 53 | 54 | /* UTF-8: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx 55 | * 56 | * 11110xxx << 18 -> 00xxx00 00000000 00000000 |__ 1st unicode octet 57 | * 10xx0000 << 12 -> 00xxxxx 00000000 00000000 | 58 | * ------- 59 | * 1000xxxx << 12 -> 00xxxxx xxxx0000 00000000 |__ 2nd unicode octet 60 | * 10xxxx00 << 6 -> 00xxxxx xxxxxxxx 00000000 | 61 | * -------- 62 | * 100000xx << 6 -> 00xxxxx xxxxxxxx xx000000 |__ 3rd unicode octet 63 | * 10xxxxxx -> 00xxxxx xxxxxxxx xxxxxxxx | 64 | * --------- */ 65 | *codepoint = 66 | ((*(up) & 0x07) << 18 | (*(up + 1) & 0x30) << 12) 67 | | ((*(up + 1) & 0x0F) << 12 | (*(up + 2) & 0x3C) << 6) 68 | | ((*(up + 2) & 0x03) << 6 | (*(up + 3) & 0x3F)); 69 | } 70 | 71 | static inline 72 | unsigned utf8_codepoint_length(uint32_t codepoint) { 73 | if (codepoint < 128) return 1; 74 | if (codepoint < 0x0800) return 2; 75 | if (codepoint < 0x10000) return 3; 76 | 77 | return 4; /* de facto max length in UTF-8 */ 78 | } 79 | 80 | static inline 81 | void b2_utf8(uint32_t codepoint, char *p) { 82 | unsigned char *up = (unsigned char *)(p); 83 | 84 | /* UNICODE: 00000xxx xxxxxxxx 85 | * 86 | * 00000xxx >> 6 -> 110xxx00 10000000 |__ 1st UTF-8 octet 87 | * xxxxxxxx >> 6 -> 110xxxxx 10000000 | 88 | * -------- 89 | * |__ 2nd UTF-8 octet 90 | * xxxxxxxx -> 110xxxxx 10xxxxxx | 91 | * -------- */ 92 | *(up) = (0xC0 | (codepoint & 0xFF00) >> 6 | (codepoint & 0xFF) >> 6); 93 | *(up + 1) = (0x80 | (codepoint & 0x3F)); 94 | } 95 | 96 | static inline 97 | void b3_utf8(uint32_t codepoint, char *p) { 98 | unsigned char *up = (unsigned char *)(p); 99 | 100 | /* UNICODE: xxxxxxxx xxxxxxxx 101 | * |__ 1st UTF-8 octet 102 | * xxxxxxxx >> 12 -> 1110xxxx 10000000 10000000 | 103 | 
* -------- 104 | * xxxxxxxx >> 6 -> 1110xxxx 10xxxx00 10000000 |__ 2nd UTF-8 octet 105 | * xxxxxxxx >> 6 -> 1110xxxx 10xxxxxx 10000000 | 106 | * -------- 107 | * |__ 3rd UTF-8 octet 108 | * xxxxxxxx -> 1110xxxx 10xxxxxx 10xxxxxx | 109 | * -------- */ 110 | *(up) = (0xE0 | (codepoint & 0xF000) >> 12); 111 | *(up + 1) = (0x80 | (codepoint & 0x0F00) >> 6 | (codepoint & 0xC0) >> 6); 112 | *(up + 2) = (0x80 | (codepoint & 0x3F)); 113 | } 114 | 115 | static inline 116 | void b4_utf8(uint32_t codepoint, char *p) { 117 | unsigned char *up = (unsigned char *)(p); 118 | 119 | /* UNICODE: 000xxxxx xxxxxxxx xxxxxxxx 120 | * |__ 1st UTF-8 octet 121 | * 000xxxxx >> 18 -> 11110xxx 1000000 10000000 10000000 | 122 | * -------- 123 | * 000xxxxx >> 12 -> 11110xxx 10xx000 10000000 10000000 |__ 2nd UTF-8 octet 124 | * xxxxxxxx >> 12 -> 11110xxx 10xxxxx 10000000 10000000 | 125 | * ------- 126 | * xxxxxxxx >> 6 -> 11110xxx 10xxxxx 10xxxxx0 10000000 |__ 3rd UTF-8 octet 127 | * xxxxxxxx >> 6 -> 11110xxx 10xxxxx 10xxxxxx 10000000 | 128 | * -------- 129 | * |__ 4th UTF-8 octet 130 | * xxxxxxxx -> 11110xxx 10xxxxx 10xxxxxx 10000000 | */ 131 | *(up) = (0xF0 | ((codepoint & 0x1C0000) >> 18)); 132 | *(up + 1) = (0x80 | (codepoint & 0x030000) >> 12 | (codepoint & 0x00E000) >> 12); 133 | *(up + 2) = (0x80 | (codepoint & 0x001F00) >> 6 | (codepoint & 0x0000E0) >> 6); 134 | *(up + 3) = (0x80 | (codepoint & 0x3F)); 135 | } 136 | 137 | static inline 138 | int utf8_validread_basic(const char *p, size_t max_len) { 139 | const unsigned char *up = (const unsigned char *)(p); 140 | 141 | /* it should be 0xxxxxxx or 110xxxxx or 1110xxxx or 11110xxx 142 | * latter should be followed by number of 10xxxxxx */ 143 | 144 | unsigned len = utf8_char_length(*p); 145 | 146 | /* codepoints longer than 6 bytes does not currently exist 147 | * and not currently supported 148 | * TODO: longer UTF-8 sequences support 149 | */ 150 | if (max_len < len) { 151 | return 0; 152 | } 153 | 154 | switch (len) { 155 | case 1: 
return 1; /* one byte codepoint */ 156 | case 2: return ((*(up + 1) & 0xC0) == 0x80 ? 2 : 0); 157 | case 3: return ((*(up + 1) & 0xC0) == 0x80 158 | && (*(up + 2) & 0xC0) == 0x80 ? 3 : 0); 159 | 160 | case 4: return ((*(up + 1) & 0xC0) == 0x80 161 | && (*(up + 2) & 0xC0) == 0x80 162 | && (*(up + 3) & 0xC0) == 0x80 ? 4 : 0); 163 | } 164 | 165 | return 0; 166 | } 167 | 168 | #endif /* NU_UTF8_INTERNAL_H */ 169 | -------------------------------------------------------------------------------- /vendor/nunicode/src/libnu/ducet.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | 6 | #ifdef NU_WITH_DUCET 7 | 8 | #include "gen/_ducet.c" 9 | 10 | #ifndef NU_DISABLE_CONTRACTIONS 11 | # include "gen/_ducet_switch.c" 12 | #else 13 | const size_t _NU_DUCET_CONTRACTIONS = 0; 14 | #endif 15 | 16 | static size_t _nu_ducet_weights_count() { 17 | return NU_DUCET_G_SIZE + _NU_DUCET_CONTRACTIONS; 18 | } 19 | 20 | int32_t nu_ducet_weight(uint32_t codepoint, int32_t *weight, void *context) { 21 | (void)(weight); 22 | (void)(context); 23 | 24 | assert(_nu_ducet_weights_count() < 0x7FFFFFFF - 0x10FFFF); 25 | 26 | #ifndef NU_DISABLE_CONTRACTIONS 27 | int32_t switch_value = _nu_ducet_weight_switch(codepoint, weight, context); 28 | /* weight switch should return weight (if any) and fill value of *weight 29 | * with fallback (if needed). 
returned value of 0 is impossible result - this 30 | * special case is already handled above, this return value indicates that switch 31 | * couldn't find weight for a codepoint */ 32 | if (switch_value != 0) { 33 | return switch_value; 34 | } 35 | #endif 36 | 37 | /* special case switch after contractions switch 38 | * to let state-machine figure out its state on abort */ 39 | if (codepoint == 0) { 40 | return 0; 41 | } 42 | 43 | uint32_t mph_value = nu_udb_lookup_value(codepoint, NU_DUCET_G, NU_DUCET_G_SIZE, 44 | NU_DUCET_VALUES_C, NU_DUCET_VALUES_I); 45 | 46 | return (mph_value != 0 47 | ? (int32_t)(mph_value) 48 | : (int32_t)(codepoint + _nu_ducet_weights_count())); 49 | 50 | /* ISO/IEC 14651 requests that codepoints with undefined weight should be 51 | * sorted before max weight in collation table. This way all codepoints 52 | * defined in ducet would have weight under a value of _nu_ducet_weights_count(), 53 | * all undefined codepoints would have weight under 54 | * 0x10FFFF + _nu_ducet_weights_count() - 1, max weight will be 55 | * 0x10FFFF + _nu_ducet_weights_count() */ 56 | 57 | /* Regarding integer overflow: 58 | * 59 | * int32_t can hold 0xFFFFFFFF / 2 = 0x7FFFFFFF positive numbers, this 60 | * function can safely offset codepoint value up to +2146369536 without 61 | * risk of overflow. 
Thus max collation table size supported is 62 | * 2146369536 (0x7FFFFFFF - 0x10FFFF) */ 63 | } 64 | 65 | #endif /* NU_WITH_DUCET */ 66 | -------------------------------------------------------------------------------- /vendor/nunicode/src/libnu/strcoll.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | 8 | #if (defined NU_WITH_Z_COLLATION) || (defined NU_WITH_N_COLLATION) 9 | 10 | int32_t _compound_weight(int32_t w, 11 | const char **encoded, const char *limit, 12 | nu_read_iterator_t read, nu_compound_read_t com, 13 | const char **tail, 14 | nu_codepoint_weight_t weight, void *context) { 15 | 16 | const char *tailp = *tail; 17 | 18 | const char *p = *encoded; 19 | int32_t new_w = w; 20 | int32_t consumed = 1; /* one codepoint was consumed at the top of the stack (_nu_strcoll) */ 21 | 22 | while (p < limit) { 23 | uint32_t u = 0; 24 | 25 | const char *np = com(p, limit, read, &u, &tailp); 26 | new_w = weight(u, &w, context); 27 | 28 | /* after this point, w might hold rollback value 29 | * and new_w holds actual weight */ 30 | 31 | ++consumed; 32 | 33 | if (new_w >= 0) { 34 | /* if w == 0 or w == 1, then *p or *np is already pointing 35 | * to needed place, otherwise re-read encoded in the forward 36 | * direction preserving correctness of tail pointer */ 37 | if (w != 0 && w != 1) { 38 | assert(consumed + w > 1); 39 | 40 | np = *encoded; 41 | tailp = *tail; 42 | 43 | for (int32_t i = 0; i < consumed - w; ++i) { 44 | np = com(np, limit, read, 0, &tailp); 45 | } 46 | 47 | w = 0; 48 | } 49 | 50 | *encoded = (w == 0 ? 
np : p); 51 | *tail = tailp; 52 | 53 | break; 54 | } 55 | 56 | p = np; 57 | w = new_w; 58 | } 59 | 60 | if (new_w < 0) { 61 | new_w = weight(0, &w, context); 62 | } 63 | 64 | assert(new_w >= 0); 65 | 66 | return new_w; 67 | } 68 | 69 | inline 70 | int _nu_strcoll(const char *lhs, const char *lhs_limit, 71 | const char *rhs, const char *rhs_limit, 72 | nu_read_iterator_t it1, nu_read_iterator_t it2, 73 | nu_compound_read_t com1, nu_compound_read_t com2, 74 | nu_codepoint_weight_t weight, void *context, 75 | ssize_t *collated_left, ssize_t *collated_right) { 76 | 77 | int cmp = 0; 78 | 79 | const char *lp = lhs, *rp = rhs; 80 | const char *ltailp = 0, *rtailp = 0; 81 | 82 | uint32_t u1 = 0, u2 = 0; 83 | 84 | while ((lp < lhs_limit && rp < rhs_limit) 85 | || (ltailp != 0 && rp < rhs_limit) 86 | || (rtailp != 0 && lp < lhs_limit)) { 87 | 88 | lp = com1(lp, lhs_limit, it1, &u1, <ailp); 89 | rp = com2(rp, rhs_limit, it2, &u2, &rtailp); 90 | 91 | #ifdef NU_DISABLE_CONTRACTIONS 92 | /* if contractions are disabled, then same codepoints 93 | * will produce same weights and there is no need 94 | * to weight each, i.e. 
weight(u1) == weight(u2) and 95 | * collation may proceed to next codepoints */ 96 | if (u1 != u2) { 97 | #endif 98 | int32_t w1 = weight(u1, 0, context); 99 | int32_t w2 = weight(u2, 0, context); 100 | 101 | if (w1 < 0) { 102 | w1 = _compound_weight(w1, &lp, lhs_limit, 103 | it1, com1, <ailp, 104 | weight, context); 105 | } 106 | 107 | if (w2 < 0) { 108 | w2 = _compound_weight(w2, &rp, rhs_limit, 109 | it2, com2, &rtailp, 110 | weight, context); 111 | } 112 | 113 | assert(w1 >= 0); 114 | assert(w2 >= 0); 115 | 116 | if (w1 < w2) { 117 | cmp = -1; 118 | break; 119 | } 120 | else if (w1 > w2) { 121 | cmp = 1; 122 | break; 123 | } 124 | 125 | #ifdef NU_DISABLE_CONTRACTIONS 126 | } 127 | #endif 128 | 129 | if (u1 == 0 || u2 == 0) { 130 | break; 131 | } 132 | } 133 | 134 | /* collated_left and collated_right should count 135 | * number of successfully collated bytes, not taking 136 | * into account limits. therefore if cmp != 0, 137 | * number of collated bytes is decreased by (at least) 1 138 | * and cmp is limits-fixed afterwards */ 139 | 140 | if (collated_left != 0) { 141 | *collated_left = (lp - lhs) - (cmp == 0 ? 0 : 1); 142 | } 143 | 144 | if (collated_right != 0) { 145 | *collated_right = (rp - rhs) - (cmp == 0 ? 
0 : 1); 146 | } 147 | 148 | if (cmp == 0) { 149 | if (rp < rhs_limit && lp >= lhs_limit) { 150 | cmp = -1; 151 | } 152 | else if (lp < lhs_limit && rp >= rhs_limit) { 153 | cmp = 1; 154 | } 155 | } 156 | 157 | return cmp; 158 | } 159 | 160 | inline 161 | const char* _nu_strchr(const char *lhs, const char *lhs_limit, 162 | uint32_t c, nu_read_iterator_t read, 163 | nu_compound_read_t com, 164 | nu_casemapping_t casemap, nu_read_iterator_t casemap_read) { 165 | 166 | const char *p = lhs; 167 | const char *tail = 0; 168 | uint32_t u = 0; 169 | 170 | const char *rhs = 0; 171 | 172 | if (casemap != 0) { 173 | rhs = casemap(c); 174 | if (rhs != 0) { 175 | rhs = casemap_read(rhs, &c); /* read new lead codepoint */ 176 | } 177 | } 178 | 179 | while (p < lhs_limit) { 180 | const char *np = com(p, lhs_limit, read, &u, &tail); 181 | 182 | if (u == 0) { 183 | break; 184 | } 185 | 186 | if (u == c) { 187 | if (rhs == 0) { 188 | return p; 189 | } 190 | 191 | /* rhs != 0 */ 192 | 193 | const char *rp = rhs; 194 | uint32_t u2 = 0; 195 | 196 | do { 197 | rp = casemap_read(rp, &u2); 198 | 199 | if (u2 == 0) { 200 | return p; /* succ exit point */ 201 | } 202 | 203 | if (np >= lhs_limit) { 204 | return 0; 205 | } 206 | 207 | np = com(np, lhs_limit, read, &u, &tail); 208 | 209 | if (u == 0) { 210 | return 0; 211 | } 212 | 213 | if (u != u2) { 214 | break; 215 | } 216 | } 217 | while (u2 != 0); 218 | } 219 | 220 | p = np; 221 | } 222 | 223 | return 0; 224 | } 225 | 226 | inline 227 | const char* _nu_strrchr(const char *encoded, const char *limit, 228 | uint32_t c, nu_read_iterator_t read, 229 | nu_compound_read_t com, 230 | nu_casemapping_t casemap, nu_read_iterator_t casemap_read) { 231 | 232 | /* there is probably not much sense in finding string end by decoding it 233 | * and then reverse read string again to find last codepoint, therefore 234 | * this is a sequence of _nu_strchr() in forward direction 235 | * 236 | * please let me know if i'm wrong */ 237 | 238 | const char *p = 
encoded; 239 | const char *last = 0; 240 | 241 | while (p < limit) { 242 | p = _nu_strchr(p, limit, c, read, com, casemap, casemap_read); 243 | 244 | if (p == 0) { 245 | return last; 246 | } 247 | 248 | last = p; 249 | p = read(p, 0); /* skip one codepoint and continue */ 250 | } 251 | 252 | return last; 253 | } 254 | 255 | inline 256 | const char* _nu_strstr(const char *haystack, const char *haystack_limit, 257 | const char *needle, const char *needle_limit, 258 | nu_read_iterator_t it1, nu_read_iterator_t it2, 259 | nu_compound_read_t com1, nu_compound_read_t com2, 260 | nu_casemapping_t casemap, nu_read_iterator_t casemap_read, 261 | nu_codepoint_weight_t weight, void *context) { 262 | 263 | uint32_t n0 = 0; 264 | if (needle_limit != needle) { 265 | it2(needle, &n0); 266 | } 267 | 268 | if (needle_limit == needle || n0 == 0) { 269 | return haystack; 270 | } 271 | 272 | ssize_t needle_len = (needle_limit != NU_UNLIMITED 273 | ? (needle_limit - needle) 274 | : nu_strbytelen(needle, it2)); 275 | 276 | const char *h0 = haystack; 277 | do { 278 | h0 = _nu_strchr(h0, haystack_limit, 279 | n0, it1, 280 | com1, 281 | casemap, casemap_read); 282 | 283 | if (h0 == 0) { 284 | break; 285 | } 286 | 287 | ssize_t collated_left = 0, collated_right = 0; 288 | _nu_strcoll(h0, haystack_limit, needle, needle_limit, 289 | it1, it2, 290 | com1, com2, 291 | weight, context, 292 | &collated_left, &collated_right); 293 | 294 | /* it doesn't matter what collate result is 295 | * if whole needle was successfully collated */ 296 | if (collated_right >= needle_len) { 297 | return h0; 298 | } 299 | 300 | /* skip one codepoint in haystack */ 301 | if (h0 < haystack_limit) { 302 | h0 = it1(h0, 0); 303 | } 304 | } 305 | while (h0 != 0 && h0 < haystack_limit); 306 | 307 | return 0; 308 | } 309 | 310 | #ifdef NU_WITH_Z_COLLATION 311 | 312 | const char* nu_strchr(const char *encoded, uint32_t c, nu_read_iterator_t read) { 313 | return _nu_strchr(encoded, NU_UNLIMITED, 314 | c, read, 315 | 
nu_default_compound_read, 316 | 0, 0); 317 | } 318 | 319 | const char* nu_strcasechr(const char *encoded, uint32_t c, nu_read_iterator_t read) { 320 | return _nu_strchr(encoded, NU_UNLIMITED, 321 | c, read, 322 | nu_nocase_compound_read, 323 | NU_FOLDING_FUNCTION, nu_casemap_read); 324 | } 325 | 326 | const char* nu_strrchr(const char *encoded, uint32_t c, nu_read_iterator_t read) { 327 | return _nu_strrchr(encoded, NU_UNLIMITED, 328 | c, read, 329 | nu_default_compound_read, 330 | 0, 0); 331 | } 332 | 333 | const char* nu_strrcasechr(const char *encoded, uint32_t c, nu_read_iterator_t read) { 334 | return _nu_strrchr(encoded, NU_UNLIMITED, c, read, 335 | nu_nocase_compound_read, 336 | NU_FOLDING_FUNCTION, nu_casemap_read); 337 | } 338 | 339 | int nu_strcoll(const char *s1, const char *s2, 340 | nu_read_iterator_t s1_read, nu_read_iterator_t s2_read) { 341 | return _nu_strcoll(s1, NU_UNLIMITED, s2, NU_UNLIMITED, 342 | s1_read, s2_read, 343 | nu_default_compound_read, nu_default_compound_read, 344 | nu_ducet_weight, 0, 345 | 0, 0); 346 | } 347 | 348 | int nu_strcasecoll(const char *s1, const char *s2, 349 | nu_read_iterator_t s1_read, nu_read_iterator_t s2_read) { 350 | return _nu_strcoll(s1, NU_UNLIMITED, s2, NU_UNLIMITED, 351 | s1_read, s2_read, 352 | nu_nocase_compound_read, nu_nocase_compound_read, 353 | nu_ducet_weight, 0, 354 | 0, 0); 355 | } 356 | 357 | const char* nu_strstr(const char *haystack, const char *needle, 358 | nu_read_iterator_t haystack_read, nu_read_iterator_t needle_read) { 359 | return _nu_strstr(haystack, NU_UNLIMITED, needle, NU_UNLIMITED, 360 | haystack_read, needle_read, 361 | nu_default_compound_read, nu_default_compound_read, 362 | 0, 0, 363 | nu_ducet_weight, 0); 364 | } 365 | 366 | const char* nu_strcasestr(const char *haystack, const char *needle, 367 | nu_read_iterator_t haystack_read, nu_read_iterator_t needle_read) { 368 | return _nu_strstr(haystack, NU_UNLIMITED, needle, NU_UNLIMITED, 369 | haystack_read, needle_read, 370 | 
nu_nocase_compound_read, nu_nocase_compound_read, 371 | NU_FOLDING_FUNCTION, nu_casemap_read, 372 | nu_ducet_weight, 0); 373 | } 374 | 375 | #endif /* NU_WITH_Z_COLLATION */ 376 | 377 | #ifdef NU_WITH_N_COLLATION 378 | 379 | const char* nu_strnchr(const char *encoded, size_t max_len, uint32_t c, nu_read_iterator_t read) { 380 | return _nu_strchr(encoded, encoded + max_len, 381 | c, read, 382 | nu_default_compound_read, 383 | 0, 0); 384 | } 385 | 386 | const char* nu_strcasenchr(const char *encoded, size_t max_len, uint32_t c, nu_read_iterator_t read) { 387 | return _nu_strchr(encoded, encoded + max_len, 388 | c, read, 389 | nu_nocase_compound_read, 390 | NU_FOLDING_FUNCTION, nu_casemap_read); 391 | } 392 | 393 | const char* nu_strrnchr(const char *encoded, size_t max_len, uint32_t c, nu_read_iterator_t read) { 394 | return _nu_strrchr(encoded, encoded + max_len, 395 | c, read, 396 | nu_default_compound_read, 397 | 0, 0); 398 | } 399 | 400 | const char* nu_strrcasenchr(const char *encoded, size_t max_len, uint32_t c, 401 | nu_read_iterator_t read) { 402 | return _nu_strrchr(encoded, encoded + max_len, 403 | c, read, 404 | nu_nocase_compound_read, 405 | NU_FOLDING_FUNCTION, nu_casemap_read); 406 | } 407 | 408 | int nu_strncoll(const char *s1, size_t s1_max_len, 409 | const char *s2, size_t s2_max_len, 410 | nu_read_iterator_t s1_read, nu_read_iterator_t s2_read) { 411 | return _nu_strcoll(s1, s1 + s1_max_len, s2, s2 + s2_max_len, 412 | s1_read, s2_read, 413 | nu_default_compound_read, nu_default_compound_read, 414 | nu_ducet_weight, 0, 415 | 0, 0); 416 | } 417 | 418 | int nu_strcasencoll(const char *s1, size_t s1_max_len, 419 | const char *s2, size_t s2_max_len, 420 | nu_read_iterator_t s1_read, nu_read_iterator_t s2_read) { 421 | return _nu_strcoll(s1, s1 + s1_max_len, s2, s2 + s2_max_len, 422 | s1_read, s2_read, 423 | nu_nocase_compound_read, nu_nocase_compound_read, 424 | nu_ducet_weight, 0, 425 | 0, 0); 426 | } 427 | 428 | const char* nu_strnstr(const char 
*haystack, size_t haystack_max_len, 429 | const char *needle, size_t needle_max_len, 430 | nu_read_iterator_t haystack_read, nu_read_iterator_t needle_read) { 431 | return _nu_strstr(haystack, haystack + haystack_max_len, 432 | needle, needle + needle_max_len, 433 | haystack_read, needle_read, 434 | nu_default_compound_read, nu_default_compound_read, 435 | 0, 0, 436 | nu_ducet_weight, 0); 437 | } 438 | 439 | const char* nu_strcasenstr(const char *haystack, size_t haystack_max_len, 440 | const char *needle, size_t needle_max_len, 441 | nu_read_iterator_t haystack_read, nu_read_iterator_t needle_read) { 442 | return _nu_strstr(haystack, haystack + haystack_max_len, 443 | needle, needle + needle_max_len, 444 | haystack_read, needle_read, 445 | nu_nocase_compound_read, nu_nocase_compound_read, 446 | NU_FOLDING_FUNCTION, nu_casemap_read, 447 | nu_ducet_weight, 0); 448 | } 449 | 450 | #endif /* NU_WITH_N_COLLATION */ 451 | 452 | #endif /* NU_WITH_Z_COLLATION || NU_WITH_N_COLLATION */ 453 | -------------------------------------------------------------------------------- /vendor/nunicode/src/libnu/strings.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #if defined (NU_WITH_Z_STRINGS) || defined(NU_WITH_N_STRINGS) 5 | 6 | static ssize_t _nu_strlen(const char *encoded, const char *limit, nu_read_iterator_t it) { 7 | ssize_t len = 0; 8 | 9 | const char *p = encoded; 10 | while (p < limit) { 11 | uint32_t u = 0; 12 | p = it(p, &u); 13 | 14 | if (u == 0) { 15 | break; 16 | } 17 | 18 | ++len; 19 | } 20 | 21 | return len; 22 | } 23 | 24 | static ssize_t _nu_bytelen(const uint32_t *unicode, const uint32_t *limit, nu_write_iterator_t it) { 25 | ssize_t len = 0; 26 | 27 | const uint32_t *p = unicode; 28 | while (p < limit) { 29 | if (*p == 0) { 30 | break; 31 | } 32 | 33 | /* nu_write_iterator_t will return offset relative to 0 34 | * which is effectively bytes length of codepoint */ 35 | size_t byte_len = 
(size_t)it(*p, 0); 36 | len += byte_len; 37 | 38 | ++p; 39 | } 40 | 41 | return len; 42 | } 43 | 44 | static ssize_t _nu_strbytelen(const char *encoded, const char *limit, nu_read_iterator_t it) { 45 | uint32_t u = 0; 46 | const char *p = encoded; 47 | 48 | while (p < limit) { 49 | const char *np = it(p, &u); 50 | 51 | if (u == 0) { 52 | return (p - encoded); 53 | } 54 | 55 | p = np; 56 | } 57 | 58 | return 0; 59 | } 60 | 61 | #endif /* NU_WITH_N_STRINGS || NU_WITH_Z_STRINGS */ 62 | 63 | #ifdef NU_WITH_Z_STRINGS 64 | 65 | ssize_t nu_strlen(const char *encoded, nu_read_iterator_t it) { 66 | return _nu_strlen(encoded, NU_UNLIMITED, it); 67 | } 68 | 69 | ssize_t nu_bytelen(const uint32_t *unicode, nu_write_iterator_t it) { 70 | return _nu_bytelen(unicode, NU_UNLIMITED, it); 71 | } 72 | 73 | ssize_t nu_strbytelen(const char *encoded, nu_read_iterator_t it) { 74 | return _nu_strbytelen(encoded, NU_UNLIMITED, it); 75 | } 76 | 77 | #endif /* NU_WITH_Z_STRINGS */ 78 | 79 | #ifdef NU_WITH_N_STRINGS 80 | 81 | ssize_t nu_strnlen(const char *encoded, size_t max_len, nu_read_iterator_t it) { 82 | return _nu_strlen(encoded, encoded + max_len, it); 83 | } 84 | 85 | ssize_t nu_bytenlen(const uint32_t *unicode, size_t max_len, nu_write_iterator_t it) { 86 | return _nu_bytelen(unicode, unicode + max_len, it); 87 | } 88 | 89 | #endif /* NU_WITH_N_STRINGS */ 90 | -------------------------------------------------------------------------------- /vendor/nunicode/src/libnu/tolower.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | #ifdef NU_WITH_TOLOWER 6 | 7 | #include 8 | #include "gen/_tolower.c" 9 | 10 | /* in nu_casemap_read (UTF-8), zero-terminated */ 11 | static const char *__nu_final_sigma = "ς"; 12 | 13 | const char* nu_tolower(uint32_t codepoint) { 14 | return _nu_to_something(codepoint, NU_TOLOWER_G, NU_TOLOWER_G_SIZE, 15 | NU_TOLOWER_VALUES_C, NU_TOLOWER_VALUES_I, NU_TOLOWER_COMBINED); 16 | } 17 | 18 | 
const char* _nu_tolower(const char *encoded, const char *limit, nu_read_iterator_t read, 19 | uint32_t *u, const char **transform, 20 | void *context) { 21 | 22 | (void)(context); 23 | 24 | uint32_t _u = 0; 25 | const char *np = read(encoded, &_u); 26 | 27 | if (u != 0) { 28 | *u = _u; 29 | } 30 | 31 | /* handling of 0x03A3 ('Σ') 32 | * 33 | * this is the only language-independent exception described in 34 | * SpecialCasing.txt (Unicode 7.0) */ 35 | 36 | assert(nu_casemap_read == nu_utf8_read); 37 | 38 | if (_u == 0x03A3) { 39 | if (np >= limit) { 40 | *transform = __nu_final_sigma; 41 | return np; 42 | } 43 | 44 | uint32_t nu = 0; 45 | read(np, &nu); 46 | 47 | if (nu == 0) { 48 | *transform = __nu_final_sigma; 49 | return np; 50 | } 51 | } 52 | 53 | *transform = nu_tolower(_u); 54 | 55 | return np; 56 | } 57 | 58 | #endif /* NU_WITH_TOLOWER */ 59 | -------------------------------------------------------------------------------- /vendor/nunicode/src/libnu/tounaccent.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include 4 | 5 | #ifdef NU_WITH_UNACCENT 6 | 7 | #include 8 | #include "gen/_tounaccent.c" 9 | 10 | const char* nu_tounaccent(uint32_t codepoint) { 11 | typedef struct { 12 | uint32_t block_start; 13 | uint32_t block_end; 14 | } block_t; 15 | 16 | static const block_t blocks[] = { 17 | { 0x0300, 0x036F }, /* Combining Diacritical Marks */ 18 | { 0x1AB0, 0x1AFF }, /* Combining Diacritical Marks Extended */ 19 | { 0x20D0, 0x20FF }, /* Combining Diacritical Marks for Symbols */ 20 | { 0x1DC0, 0x1DFF }, /* Combining Diacritical Marks Supplement */ 21 | }; 22 | static const size_t blocks_count = sizeof(blocks) / sizeof(*blocks); 23 | 24 | /* check if codepoint itself is a diacritic, 25 | * return empty string in that case 26 | * (transform into empty string */ 27 | assert(nu_casemap_read == nu_utf8_read); 28 | for (size_t i = 0; i < blocks_count; ++i) { 29 | if (codepoint >= blocks[i].block_start && 
codepoint <= blocks[i].block_end) { 30 | return ""; /* return zero-terminated empty string in nu_casemap_read (utf-8) */ 31 | } 32 | } 33 | 34 | return _nu_to_something(codepoint, NU_TOUNACCENT_G, NU_TOUNACCENT_G_SIZE, 35 | NU_TOUNACCENT_VALUES_C, NU_TOUNACCENT_VALUES_I, NU_TOUNACCENT_COMBINED); 36 | } 37 | 38 | const char* _nu_tounaccent(const char *encoded, const char *limit, nu_read_iterator_t read, 39 | uint32_t *u, const char **transform, 40 | void *context) { 41 | 42 | (void)(limit); 43 | (void)(context); 44 | 45 | uint32_t _u = 0; 46 | const char *np = read(encoded, &_u); 47 | 48 | *transform = nu_tounaccent(_u); 49 | 50 | if (u != 0) { 51 | *u = _u; 52 | } 53 | 54 | return np; 55 | } 56 | 57 | #endif /* NU_WITH_UNACCENT */ 58 | -------------------------------------------------------------------------------- /vendor/nunicode/src/libnu/toupper.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #ifdef NU_WITH_TOUPPER 4 | 5 | #include 6 | #include "gen/_toupper.c" 7 | 8 | const char* nu_toupper(uint32_t codepoint) { 9 | return _nu_to_something(codepoint, NU_TOUPPER_G, NU_TOUPPER_G_SIZE, 10 | NU_TOUPPER_VALUES_C, NU_TOUPPER_VALUES_I, NU_TOUPPER_COMBINED); 11 | } 12 | 13 | const char* _nu_toupper(const char *encoded, const char *limit, nu_read_iterator_t read, 14 | uint32_t *u, const char **transform, 15 | void *context) { 16 | 17 | (void)(limit); 18 | (void)(context); 19 | 20 | uint32_t _u = 0; 21 | const char *np = read(encoded, &_u); 22 | 23 | *transform = nu_toupper(_u); 24 | 25 | if (u != 0) { 26 | *u = _u; 27 | } 28 | 29 | return np; 30 | } 31 | 32 | #endif /* NU_WITH_TOUPPER */ 33 | -------------------------------------------------------------------------------- /vendor/nunicode/src/libnu/utf8.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #ifdef NU_WITH_UTF8_READER 4 | #ifdef NU_WITH_VALIDATION 5 | 6 | int nu_utf8_validread(const char *encoded, 
size_t max_len) { 7 | int len = utf8_validread_basic(encoded, max_len); 8 | 9 | if (len <= 0) { 10 | return 0; 11 | } 12 | 13 | /* Unicode core spec, D92, Table 3-7 14 | */ 15 | 16 | switch (len) { 17 | /* case 1: single byte sequence can't be > 0x7F and produce len == 1 18 | */ 19 | 20 | case 2: { 21 | uint8_t p1 = *(const unsigned char *)(encoded); 22 | 23 | if (p1 < 0xC2) { /* 2-byte sequences with p1 > 0xDF are 3-byte sequences */ 24 | return 0; 25 | } 26 | 27 | /* the rest will be handled by utf8_validread_basic() */ 28 | 29 | break; 30 | } 31 | 32 | case 3: { 33 | uint8_t p1 = *(const unsigned char *)(encoded); 34 | 35 | /* 3-byte sequences with p1 < 0xE0 are 2-byte sequences, 36 | * 3-byte sequences with p1 > 0xEF are 4-byte sequences */ 37 | 38 | uint8_t p2 = *(const unsigned char *)(encoded + 1); 39 | 40 | if (p1 == 0xE0 && p2 < 0xA0) { 41 | return 0; 42 | } 43 | else if (p1 == 0xED && p2 > 0x9F) { 44 | return 0; 45 | } 46 | 47 | /* (p2 < 0x80 || p2 > 0xBF) and p3 will be covered 48 | * by utf8_validread_basic() */ 49 | 50 | break; 51 | } 52 | 53 | case 4: { 54 | uint8_t p1 = *(const unsigned char *)(encoded); 55 | 56 | if (p1 > 0xF4) { /* 4-byte sequence with p1 < 0xF0 are 3-byte sequences */ 57 | return 0; 58 | } 59 | 60 | uint8_t p2 = *(const unsigned char *)(encoded + 1); 61 | 62 | if (p1 == 0xF0 && p2 < 0x90) { 63 | return 0; 64 | } 65 | 66 | /* (p2 < 0x80 || p2 > 0xBF) and the rest (p3, p4) 67 | * will be covered by utf8_validread_basic() */ 68 | 69 | break; 70 | } 71 | 72 | } /* switch */ 73 | 74 | return len; 75 | } 76 | 77 | #endif /* NU_WITH_VALIDATION */ 78 | #endif /* NU_WITH_UTF8_READER */ 79 | 80 | #ifdef NU_WITH_UTF8_WRITER 81 | 82 | char* nu_utf8_write(uint32_t unicode, char *utf8) { 83 | unsigned codepoint_len = utf8_codepoint_length(unicode); 84 | 85 | if (utf8 != 0) { 86 | switch (codepoint_len) { 87 | case 1: *utf8 = (char)(unicode); break; 88 | case 2: b2_utf8(unicode, utf8); break; 89 | case 3: b3_utf8(unicode, utf8); break; 90 | 
default: b4_utf8(unicode, utf8); break; /* len == 4 */ 91 | } 92 | } 93 | 94 | return utf8 + codepoint_len; 95 | } 96 | 97 | #endif /* NU_WITH_UTF8_WRITER */ 98 | -------------------------------------------------------------------------------- /vendor/nunicode/version.txt: -------------------------------------------------------------------------------- 1 | 1.8 2 | --------------------------------------------------------------------------------