├── .babelrc ├── .eslintrc ├── .gitignore ├── .npmignore ├── .spelling ├── .travis.yml ├── CHANGELOG.md ├── appveyor.yml ├── bin └── mdspell ├── data ├── README_en_AU.txt ├── README_en_GB.txt ├── README_en_US-large.txt ├── README_es_ANY.txt ├── en-GB.aff ├── en-GB.dic ├── en_AU.aff ├── en_AU.dic ├── en_US-large.aff ├── en_US-large.dic ├── es_ANY.aff └── es_ANY.dic ├── es5 ├── cli-interactive.js ├── cli.js ├── context.js ├── filters.js ├── index.js ├── markdown-parser.js ├── multi-file-processor.js ├── relative-file-processor.js ├── report-generator.js ├── spell-config.js ├── spellcheck.js ├── tracking-replacement.js ├── word-parser.js ├── word-replacer.js └── write-corrections.js ├── es6 ├── cli-interactive.js ├── cli.js ├── context.js ├── filters.js ├── index.js ├── markdown-parser.js ├── multi-file-processor.js ├── relative-file-processor.js ├── report-generator.js ├── spell-config.js ├── spellcheck.js ├── tracking-replacement.js ├── word-parser.js ├── word-replacer.js └── write-corrections.js ├── gulpfile.js ├── package.json ├── readme.md └── test ├── cli-interactive.js ├── filters.js ├── fixture ├── .gitattributes ├── test.md └── test2.md ├── index.js ├── markdown-parser.js ├── multi-file-processor.js ├── relative-file-processor.js ├── spell-config.js ├── spellcheck.js ├── tracking-replacement.js └── word-parser.js /.babelrc: -------------------------------------------------------------------------------- 1 | { 2 | "presets": ["es2015-loose"] 3 | } -------------------------------------------------------------------------------- /.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "es6": true, 4 | "node": true 5 | }, 6 | "parserOptions": { 7 | "sourceType": "module" 8 | }, 9 | "rules": { 10 | "no-alert": 2, 11 | "no-array-constructor": 0, 12 | "no-bitwise": 0, 13 | "no-caller": 2, 14 | "no-catch-shadow": 2, 15 | "no-class-assign": 2, 16 | "no-cond-assign": 2, 17 | "no-console": 0, 18 | 
"no-const-assign": 2, 19 | "no-constant-condition": 2, 20 | "no-continue": 0, 21 | "no-control-regex": 2, 22 | "no-debugger": 2, 23 | "no-delete-var": 2, 24 | "no-div-regex": 0, 25 | "no-dupe-keys": 2, 26 | "no-dupe-args": 2, 27 | "no-duplicate-case": 2, 28 | "no-else-return": 1, 29 | "no-empty": 2, 30 | "no-empty-character-class": 2, 31 | "no-eq-null": 0, 32 | "no-eval": 2, 33 | "no-ex-assign": 2, 34 | "no-extend-native": 2, 35 | "no-extra-bind": 1, 36 | "no-extra-boolean-cast": 2, 37 | "no-extra-parens": 0, 38 | "no-extra-semi": 2, 39 | "no-fallthrough": 2, 40 | "no-floating-decimal": 1, 41 | "no-func-assign": 2, 42 | "no-implicit-coercion": 1, 43 | "no-implied-eval": 1, 44 | "no-inline-comments": 0, 45 | "no-inner-declarations": [2, "functions"], 46 | "no-invalid-regexp": 2, 47 | "no-invalid-this": 1, 48 | "no-irregular-whitespace": 2, 49 | "no-iterator": 1, 50 | "no-label-var": 1, 51 | "no-labels": 1, 52 | "no-lone-blocks": 1, 53 | "no-lonely-if": 1, 54 | "no-loop-func": 1, 55 | "no-mixed-requires": [1, false], 56 | "no-mixed-spaces-and-tabs": [2, false], 57 | "linebreak-style": [0, "unix"], 58 | "no-multi-spaces": 1, 59 | "no-multi-str": 0, 60 | "no-multiple-empty-lines": [1, {"max": 2}], 61 | "no-native-reassign": 2, 62 | "no-negated-in-lhs": 2, 63 | "no-nested-ternary": 1, 64 | "no-new": 1, 65 | "no-new-func": 1, 66 | "no-new-object": 1, 67 | "no-new-require": 1, 68 | "no-new-wrappers": 1, 69 | "no-obj-calls": 2, 70 | "no-octal": 2, 71 | "no-octal-escape": 0, 72 | "no-param-reassign": 0, 73 | "no-path-concat": 0, 74 | "no-plusplus": 0, 75 | "no-process-env": 0, 76 | "no-process-exit": 0, 77 | "no-proto": 0, 78 | "no-redeclare": 2, 79 | "no-regex-spaces": 2, 80 | "no-restricted-modules": 0, 81 | "no-return-assign": 1, 82 | "no-script-url": 0, 83 | "no-self-compare": 1, 84 | "no-sequences": 1, 85 | "no-shadow": 1, 86 | "no-shadow-restricted-names": 1, 87 | "no-spaced-func": 1, 88 | "no-sparse-arrays": 2, 89 | "no-sync": 0, 90 | "no-ternary": 0, 91 | 
"no-trailing-spaces": 1, 92 | "no-this-before-super": 1, 93 | "no-throw-literal": 1, 94 | "no-undef": 2, 95 | "no-undef-init": 1, 96 | "no-undefined": 0, 97 | "no-unexpected-multiline": 1, 98 | "no-underscore-dangle": 0, 99 | "no-unneeded-ternary": 0, 100 | "no-unreachable": 2, 101 | "no-unused-expressions": 1, 102 | "no-unused-vars": [2, {"vars": "all", "args": "after-used"}], 103 | "no-use-before-define": [2, "nofunc"], 104 | "no-useless-call": 2, 105 | "no-void": 0, 106 | "no-var": 1, 107 | "no-warning-comments": [1, { "terms": ["todo", "fixme", "xxx"], "location": "start" }], 108 | "no-with": 1, 109 | 110 | "array-bracket-spacing": [1, "never"], 111 | "arrow-parens": 1, 112 | "arrow-spacing": 1, 113 | "accessor-pairs": 1, 114 | "block-scoped-var": 0, 115 | "brace-style": [1, "stroustrup"], 116 | "callback-return": [0, []], 117 | "camelcase": [2, {"properties": "always"}], 118 | "comma-dangle": [2, "never"], 119 | "comma-spacing": 2, 120 | "comma-style": 1, 121 | "complexity": [1, 15], 122 | "computed-property-spacing": [0, "never"], 123 | "consistent-return": 0, 124 | "consistent-this": [0, "that"], 125 | "constructor-super": 1, 126 | "curly": [1, "all"], 127 | "default-case": 1, 128 | "dot-location": [1, "property"], 129 | "dot-notation": [1, { "allowKeywords": true }], 130 | "eol-last": 0, 131 | "eqeqeq": 1, 132 | "func-names": 0, 133 | "func-style": [0, "declaration"], 134 | "generator-star-spacing": 0, 135 | "guard-for-in": 1, 136 | "handle-callback-err": 2, 137 | "id-length": [0, {"min": 3, "max": 30, "exceptions":["i", "j", "a", "b", "e"]}], 138 | "indent": [1, 2, {"SwitchCase": 1}], 139 | "init-declarations": 0, 140 | "key-spacing": [1, { "beforeColon": false, "afterColon": true }], 141 | "lines-around-comment": 0, 142 | "max-depth": [1, 4], 143 | "max-len": [1, 160, 2], 144 | "max-nested-callbacks": [1, 3], 145 | "max-params": [1, 5], 146 | "max-statements": [1, 30], 147 | "new-cap": 1, 148 | "new-parens": 1, 149 | "newline-after-var": 0, 150 | 
"object-curly-spacing": [1, "always"], 151 | "object-shorthand": 0, 152 | "one-var": [1, { 153 | "initialized": "never" 154 | }], 155 | "operator-assignment": [0, "always"], 156 | "operator-linebreak": [1, "after"], 157 | "padded-blocks": 0, 158 | "prefer-const": 0, 159 | "prefer-spread": 0, 160 | "prefer-reflect": 0, 161 | "quote-props": 0, 162 | //"quotes": [1, "single"], 163 | "radix": 0, 164 | "id-match": 0, 165 | "require-yield": 0, 166 | "semi": [1, "always"], 167 | "semi-spacing": [1, {"before": false, "after": true}], 168 | "sort-vars": 0, 169 | "keyword-spacing": [1, { "before": true, "after": true }], 170 | "space-before-blocks": [1, "always"], 171 | "space-before-function-paren": [1, "never"], 172 | "space-in-parens": [1, "never"], 173 | "space-infix-ops": 1, 174 | "space-unary-ops": [1, { "words": true, "nonwords": false }], 175 | "spaced-comment": 0, 176 | "strict": [2, "global"], 177 | "use-isnan": 2, 178 | "valid-jsdoc": 0, 179 | "valid-typeof": 2, 180 | "vars-on-top": 0, 181 | "wrap-iife": 0, 182 | "wrap-regex": 0, 183 | "yoda": [0, "never"] 184 | } 185 | } 186 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | node_modules 3 | .DS_Store 4 | __docs 5 | package-lock.json 6 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | test 2 | es6 3 | .travis.yml 4 | appveyor.yml 5 | .spelling 6 | gulpfile.js 7 | .gitignore 8 | .eslintrc 9 | -------------------------------------------------------------------------------- /.spelling: -------------------------------------------------------------------------------- 1 | # markdown-spellcheck spelling configuration file 2 | # Format - lines beginning # are comments 3 | # global dictionary is at the start, file overrides afterwards 4 | # one word per 
line, to define a file override use ' - filename' 5 | # where filename is relative to this configuration file 6 | grunt-mdspell 7 | - CHANGELOG.md 8 | v0.8.0 9 | v0.9.0 10 | v0.7.0 11 | v0.9.1 12 | v1.0.0 13 | v1.1.0 14 | v1.2.0 15 | v1.3.0 16 | v1.3.1 17 | Kramdown 18 | v0.10.0 19 | v0.11.0 20 | cryllic 21 | index.js 22 | rereleased 23 | unix 24 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "4" 4 | - "6" 5 | - "8" 6 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | v1.3.1 2 | 3 | 21-11-2017 4 | 5 | * Rereleased the package with unix line endings (LF) 6 | 7 | v1.3.0 8 | 9 | 15-11-2017 10 | 11 | * Pass options from index.js to the spellChecker 12 | * Fix Jekyll front matter being spellchecked if it contained a regular expression (#107) 13 | * Fix content between Jekyll front matter and in-content headings not being spellchecked 14 | 15 | v1.2.0 16 | 17 | 12-11-2017 18 | 19 | * Only ignore text between two horizontal rules if it looks like Jekyll front matter 20 | 21 | v1.1.0 22 | 23 | 12-11-2017 24 | 25 | * Allow markdown relative spelling files 26 | 27 | v1.0.0 28 | 29 | 05-08-2017 30 | 31 | * Added support for Australian English 32 | * Upgraded packages 33 | * Dropped support for node < 4 34 | 35 | v0.11.0 36 | 37 | 03-06-2016 38 | 39 | * Support for Cyrillic characters 40 | 41 | v0.10.0 42 | 43 | 17-03-2016 44 | 45 | * Upgrade English GB dictionary 46 | * Support for Kramdown style code blocks 47 | 48 | v0.9.1 49 | 50 | 12-03-2016 51 | 52 | * Support for additional UTF characters 53 | 54 | v0.9.0 55 | 56 | 09-01-2016 57 | 58 | * Fix issue correcting words when filtering 59 | * Add support for negated patterns 60 | 61 | v0.8.0 62 | 63 | 04-01-2016 64 | 65 | * 
Upgrade Babel and fix bad ES6 import 66 | * Fix showing bad spelling report at end of processing 67 | * Fix allowing double backtick in markdown 68 | * Fix saving words with `\u2019` apostrophe 69 | * Upgrade English GB dictionary 70 | 71 | v0.7.0 72 | 73 | 27-12-2015 74 | 75 | * Upgrade dependencies 76 | * Upgrade English GB dictionary 77 | * Add Spanish dictionary 78 | 79 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | version: "{build}" 2 | 3 | clone_depth: 10 4 | 5 | environment: 6 | matrix: 7 | - nodejs_version: '4' 8 | - nodejs_version: '' 9 | 10 | install: 11 | - ps: Install-Product node $env:nodejs_version 12 | - npm i npm@3 -g 13 | - npm install 14 | 15 | build: off 16 | 17 | test_script: 18 | - node --version && npm --version 19 | - npm test 20 | 21 | cache: 22 | - node_modules # local npm modules 23 | 24 | matrix: 25 | fast_finish: true 26 | -------------------------------------------------------------------------------- /bin/mdspell: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | require('../es5/cli'); -------------------------------------------------------------------------------- /data/README_en_AU.txt: -------------------------------------------------------------------------------- 1 | en_AU Hunspell Dictionary 2 | Version 2017.01.22 3 | Sun Jan 22 17:22:40 2017 -0500 [0c64f5c] 4 | http://wordlist.sourceforge.net 5 | 6 | README file for English Hunspell dictionaries derived from SCOWL. 7 | 8 | These dictionaries are created using the speller/make-hunspell-dict 9 | script in SCOWL. 
10 | 11 | The following dictionaries are available: 12 | 13 | en_US (American) 14 | en_CA (Canadian) 15 | en_GB-ise (British with "ise" spelling) 16 | en_GB-ize (British with "ize" spelling) 17 | en_AU (Australian) 18 | 19 | en_US-large 20 | en_CA-large 21 | en_GB-large (with both "ise" and "ize" spelling) 22 | en_AU-large 23 | 24 | The normal (non-large) dictionaries correspond to SCOWL size 60 and, 25 | to encourage consistent spelling, generally only include one spelling 26 | variant for a word. The large dictionaries correspond to SCOWL size 27 | 70 and may include multiple spelling for a word when both variants are 28 | considered almost equal. The larger dictionaries however (1) have not 29 | been as carefully checked for errors as the normal dictionaries and 30 | thus may contain misspelled or invalid words; and (2) contain 31 | uncommon, yet valid, words that might cause problems as they are 32 | likely to be misspellings of more common words (for example, "ort" and 33 | "calender"). 34 | 35 | To get an idea of the difference in size, here are 25 random words 36 | only found in the large dictionary for American English: 37 | 38 | Bermejo Freyr's Guenevere Hatshepsut Nottinghamshire arrestment 39 | crassitudes crural dogwatches errorless fetial flaxseeds godroon 40 | incretion jalapeño's kelpie kishkes neuroglias pietisms pullulation 41 | stemwinder stenoses syce thalassic zees 42 | 43 | The en_US, en_CA and en_AU are the official dictionaries for Hunspell. 44 | The en_GB and large dictionaries are made available on an experimental 45 | basis. If you find them useful please send me a quick email at 46 | kevina@gnu.org. 47 | 48 | If none of these dictionaries suite you (for example, maybe you want 49 | the normal dictionary that also includes common variants) additional 50 | dictionaries can be generated at http://app.aspell.net/create or by 51 | modifying speller/make-hunspell-dict in SCOWL. 
Please do let me know 52 | if you end up publishing a customized dictionary. 53 | 54 | If a word is not found in the dictionary or a word is there you think 55 | shouldn't be, you can lookup the word up at http://app.aspell.net/lookup 56 | to help determine why that is. 57 | 58 | General comments on these list can be sent directly to me at 59 | kevina@gnu.org or to the wordlist-devel mailing lists 60 | (https://lists.sourceforge.net/lists/listinfo/wordlist-devel). If you 61 | have specific issues with any of these dictionaries please file a bug 62 | report at https://github.com/kevina/wordlist/issues. 63 | 64 | IMPORTANT CHANGES INTRODUCED In 2016.11.20: 65 | 66 | New Australian dictionaries thanks to the work of Benjamin Titze 67 | (btitze@protonmail.ch). 68 | 69 | IMPORTANT CHANGES INTRODUCED IN 2016.04.24: 70 | 71 | The dictionaries are now in UTF-8 format instead of ISO-8859-1. This 72 | was required to handle smart quotes correctly. 73 | 74 | IMPORTANT CHANGES INTRODUCED IN 2016.01.19: 75 | 76 | "SET UTF8" was changes to "SET UTF-8" in the affix file as some 77 | versions of Hunspell do not recognize "UTF8". 78 | 79 | ADDITIONAL NOTES: 80 | 81 | The NOSUGGEST flag was added to certain taboo words. While I made an 82 | honest attempt to flag the strongest taboo words with the NOSUGGEST 83 | flag, I MAKE NO GUARANTEE THAT I FLAGGED EVERY POSSIBLE TABOO WORD. 84 | The list was originally derived from Németh László, however I removed 85 | some words which, while being considered taboo by some dictionaries, 86 | are not really considered swear words in today's society. 87 | 88 | COPYRIGHT, SOURCES, and CREDITS: 89 | 90 | The English dictionaries come directly from SCOWL 91 | and is thus under the same copyright of SCOWL. The affix file is 92 | a heavily modified version of the original english.aff file which was 93 | released as part of Geoff Kuenning's Ispell and as such is covered by 94 | his BSD license. 
Part of SCOWL is also based on Ispell thus the 95 | Ispell copyright is included with the SCOWL copyright. 96 | 97 | The collective work is Copyright 2000-2016 by Kevin Atkinson as well 98 | as any of the copyrights mentioned below: 99 | 100 | Copyright 2000-2016 by Kevin Atkinson 101 | 102 | Permission to use, copy, modify, distribute and sell these word 103 | lists, the associated scripts, the output created from the scripts, 104 | and its documentation for any purpose is hereby granted without fee, 105 | provided that the above copyright notice appears in all copies and 106 | that both that copyright notice and this permission notice appear in 107 | supporting documentation. Kevin Atkinson makes no representations 108 | about the suitability of this array for any purpose. It is provided 109 | "as is" without express or implied warranty. 110 | 111 | Alan Beale also deserves special credit as he has, 112 | in addition to providing the 12Dicts package and being a major 113 | contributor to the ENABLE word list, given me an incredible amount of 114 | feedback and created a number of special lists (those found in the 115 | Supplement) in order to help improve the overall quality of SCOWL. 116 | 117 | The 10 level includes the 1000 most common English words (according to 118 | the Moby (TM) Words II [MWords] package), a subset of the 1000 most 119 | common words on the Internet (again, according to Moby Words II), and 120 | frequently class 16 from Brian Kelk's "UK English Wordlist 121 | with Frequency Classification". 122 | 123 | The MWords package was explicitly placed in the public domain: 124 | 125 | The Moby lexicon project is complete and has 126 | been place into the public domain. Use, sell, 127 | rework, excerpt and use in any way on any platform. 128 | 129 | Placing this material on internal or public servers is 130 | also encouraged. The compiler is not aware of any 131 | export restrictions so freely distribute world-wide. 
132 | 133 | You can verify the public domain status by contacting 134 | 135 | Grady Ward 136 | 3449 Martha Ct. 137 | Arcata, CA 95521-4884 138 | 139 | grady@netcom.com 140 | grady@northcoast.com 141 | 142 | The "UK English Wordlist With Frequency Classification" is also in the 143 | Public Domain: 144 | 145 | Date: Sat, 08 Jul 2000 20:27:21 +0100 146 | From: Brian Kelk 147 | 148 | > I was wondering what the copyright status of your "UK English 149 | > Wordlist With Frequency Classification" word list as it seems to 150 | > be lacking any copyright notice. 151 | 152 | There were many many sources in total, but any text marked 153 | "copyright" was avoided. Locally-written documentation was one 154 | source. An earlier version of the list resided in a filespace called 155 | PUBLIC on the University mainframe, because it was considered public 156 | domain. 157 | 158 | Date: Tue, 11 Jul 2000 19:31:34 +0100 159 | 160 | > So are you saying your word list is also in the public domain? 161 | 162 | That is the intention. 163 | 164 | The 20 level includes frequency classes 7-15 from Brian's word list. 165 | 166 | The 35 level includes frequency classes 2-6 and words appearing in at 167 | least 11 of 12 dictionaries as indicated in the 12Dicts package. All 168 | words from the 12Dicts package have had likely inflections added via 169 | my inflection database. 170 | 171 | The 12Dicts package and Supplement is in the Public Domain. 172 | 173 | The WordNet database, which was used in the creation of the 174 | Inflections database, is under the following copyright: 175 | 176 | This software and database is being provided to you, the LICENSEE, 177 | by Princeton University under the following license. 
By obtaining, 178 | using and/or copying this software and database, you agree that you 179 | have read, understood, and will comply with these terms and 180 | conditions.: 181 | 182 | Permission to use, copy, modify and distribute this software and 183 | database and its documentation for any purpose and without fee or 184 | royalty is hereby granted, provided that you agree to comply with 185 | the following copyright notice and statements, including the 186 | disclaimer, and that the same appear on ALL copies of the software, 187 | database and documentation, including modifications that you make 188 | for internal use or for distribution. 189 | 190 | WordNet 1.6 Copyright 1997 by Princeton University. All rights 191 | reserved. 192 | 193 | THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS" AND PRINCETON 194 | UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR 195 | IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PRINCETON 196 | UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES OF MERCHANT- 197 | ABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE 198 | LICENSED SOFTWARE, DATABASE OR DOCUMENTATION WILL NOT INFRINGE ANY 199 | THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. 200 | 201 | The name of Princeton University or Princeton may not be used in 202 | advertising or publicity pertaining to distribution of the software 203 | and/or database. Title to copyright in this software, database and 204 | any associated documentation shall at all times remain with 205 | Princeton University and LICENSEE agrees to preserve same. 206 | 207 | The 40 level includes words from Alan's 3esl list found in version 4.0 208 | of his 12dicts package. Like his other stuff the 3esl list is also in the 209 | public domain. 
210 | 211 | The 50 level includes Brian's frequency class 1, words appearing 212 | in at least 5 of 12 of the dictionaries as indicated in the 12Dicts 213 | package, and uppercase words in at least 4 of the previous 12 214 | dictionaries. A decent number of proper names is also included: The 215 | top 1000 male, female, and Last names from the 1990 Census report; a 216 | list of names sent to me by Alan Beale; and a few names that I added 217 | myself. Finally a small list of abbreviations not commonly found in 218 | other word lists is included. 219 | 220 | The name files form the Census report is a government document which I 221 | don't think can be copyrighted. 222 | 223 | The file special-jargon.50 uses common.lst and word.lst from the 224 | "Unofficial Jargon File Word Lists" which is derived from "The Jargon 225 | File". All of which is in the Public Domain. This file also contain 226 | a few extra UNIX terms which are found in the file "unix-terms" in the 227 | special/ directory. 228 | 229 | The 55 level includes words from Alan's 2of4brif list found in version 230 | 4.0 of his 12dicts package. Like his other stuff the 2of4brif is also 231 | in the public domain. 232 | 233 | The 60 level includes all words appearing in at least 2 of the 12 234 | dictionaries as indicated by the 12Dicts package. 235 | 236 | The 70 level includes Brian's frequency class 0 and the 74,550 common 237 | dictionary words from the MWords package. The common dictionary words, 238 | like those from the 12Dicts package, have had all likely inflections 239 | added. The 70 level also included the 5desk list from version 4.0 of 240 | the 12Dics package which is in the public domain. 241 | 242 | The 80 level includes the ENABLE word list, all the lists in the 243 | ENABLE supplement package (except for ABLE), the "UK Advanced Cryptics 244 | Dictionary" (UKACD), the list of signature words from the YAWL package, 245 | and the 10,196 places list from the MWords package. 
246 | 247 | The ENABLE package, mainted by M\Cooper , 248 | is in the Public Domain: 249 | 250 | The ENABLE master word list, WORD.LST, is herewith formally released 251 | into the Public Domain. Anyone is free to use it or distribute it in 252 | any manner they see fit. No fee or registration is required for its 253 | use nor are "contributions" solicited (if you feel you absolutely 254 | must contribute something for your own peace of mind, the authors of 255 | the ENABLE list ask that you make a donation on their behalf to your 256 | favorite charity). This word list is our gift to the Scrabble 257 | community, as an alternate to "official" word lists. Game designers 258 | may feel free to incorporate the WORD.LST into their games. Please 259 | mention the source and credit us as originators of the list. Note 260 | that if you, as a game designer, use the WORD.LST in your product, 261 | you may still copyright and protect your product, but you may *not* 262 | legally copyright or in any way restrict redistribution of the 263 | WORD.LST portion of your product. This *may* under law restrict your 264 | rights to restrict your users' rights, but that is only fair. 265 | 266 | UKACD, by J Ross Beresford , is under the 267 | following copyright: 268 | 269 | Copyright (c) J Ross Beresford 1993-1999. All Rights Reserved. 270 | 271 | The following restriction is placed on the use of this publication: 272 | if The UK Advanced Cryptics Dictionary is used in a software package 273 | or redistributed in any form, the copyright notice must be 274 | prominently displayed and the text of this document must be included 275 | verbatim. 276 | 277 | There are no other restrictions: I would like to see the list 278 | distributed as widely as possible. 
279 | 280 | The 95 level includes the 354,984 single words, 256,772 compound 281 | words, 4,946 female names and the 3,897 male names, and 21,986 names 282 | from the MWords package, ABLE.LST from the ENABLE Supplement, and some 283 | additional words found in my part-of-speech database that were not 284 | found anywhere else. 285 | 286 | Accent information was taken from UKACD. 287 | 288 | The VarCon package was used to create the American, British, Canadian, 289 | and Australian word list. It is under the following copyright: 290 | 291 | Copyright 2000-2016 by Kevin Atkinson 292 | 293 | Permission to use, copy, modify, distribute and sell this array, the 294 | associated software, and its documentation for any purpose is hereby 295 | granted without fee, provided that the above copyright notice appears 296 | in all copies and that both that copyright notice and this permission 297 | notice appear in supporting documentation. Kevin Atkinson makes no 298 | representations about the suitability of this array for any 299 | purpose. It is provided "as is" without express or implied warranty. 300 | 301 | Copyright 2016 by Benjamin Titze 302 | 303 | Permission to use, copy, modify, distribute and sell this array, the 304 | associated software, and its documentation for any purpose is hereby 305 | granted without fee, provided that the above copyright notice appears 306 | in all copies and that both that copyright notice and this permission 307 | notice appear in supporting documentation. Benjamin Titze makes no 308 | representations about the suitability of this array for any 309 | purpose. It is provided "as is" without express or implied warranty. 310 | 311 | Since the original words lists come from the Ispell distribution: 312 | 313 | Copyright 1993, Geoff Kuenning, Granada Hills, CA 314 | All rights reserved. 
315 | 316 | Redistribution and use in source and binary forms, with or without 317 | modification, are permitted provided that the following conditions 318 | are met: 319 | 320 | 1. Redistributions of source code must retain the above copyright 321 | notice, this list of conditions and the following disclaimer. 322 | 2. Redistributions in binary form must reproduce the above copyright 323 | notice, this list of conditions and the following disclaimer in the 324 | documentation and/or other materials provided with the distribution. 325 | 3. All modifications to the source code must be clearly marked as 326 | such. Binary redistributions based on modified source code 327 | must be clearly marked as modified versions in the documentation 328 | and/or other materials provided with the distribution. 329 | (clause 4 removed with permission from Geoff Kuenning) 330 | 5. The name of Geoff Kuenning may not be used to endorse or promote 331 | products derived from this software without specific prior 332 | written permission. 333 | 334 | THIS SOFTWARE IS PROVIDED BY GEOFF KUENNING AND CONTRIBUTORS ``AS IS'' AND 335 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 336 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 337 | ARE DISCLAIMED. IN NO EVENT SHALL GEOFF KUENNING OR CONTRIBUTORS BE LIABLE 338 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 339 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 340 | OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 341 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 342 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 343 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 344 | SUCH DAMAGE. 
345 | 346 | Build Date: Sun Jan 22 17:42:52 EST 2017 347 | Wordlist Command: mk-list --accents=strip en_AU 60 348 | -------------------------------------------------------------------------------- /data/README_en_US-large.txt: -------------------------------------------------------------------------------- 1 | en_US-large Hunspell Dictionary 2 | Version 2015.08.24 3 | Mon Aug 24 16:39:36 2015 -0400 [79c892e] 4 | http://wordlist.sourceforge.net 5 | 6 | README file for English Hunspell dictionaries derived from SCOWL. 7 | 8 | These dictionaries are created using the speller/make-hunspell-dict 9 | script in SCOWL. 10 | 11 | The following dictionaries are available: 12 | 13 | en_US (American) 14 | en_CA (Canadian) 15 | en_GB-ise (British with "ise" spelling) 16 | en_GB-ize (British with "ize" spelling) 17 | 18 | en_US-large 19 | en_CA-large 20 | en_GB-large (with both "ise" and "ize" spelling) 21 | 22 | The normal (non-large) dictionaries correspond to SCOWL size 60 and, 23 | to encourage consistent spelling, generally only include one spelling 24 | variant for a word. The large dictionaries correspond to SCOWL size 25 | 70 and may include multiple spelling for a word when both variants are 26 | considered almost equal. Also, the general quality of the larger 27 | dictionaries may also be less as they are not as carefully checked for 28 | errors as the normal dictionaries. 29 | 30 | To get an idea of the difference in size, here are 25 random words 31 | only found in the large dictionary for American English: 32 | 33 | Bermejo Freyr's Guenevere Hatshepsut Nottinghamshire arrestment 34 | crassitudes crural dogwatches errorless fetial flaxseeds godroon 35 | incretion jalapeño's kelpie kishkes neuroglias pietisms pullulation 36 | stemwinder stenoses syce thalassic zees 37 | 38 | The en_US and en_CA are the official dictionaries for Hunspell. The 39 | en_GB and large dictionaries are made available on an experimental 40 | basis. 
If you find them useful please send me a quick email at 41 | kevina@gnu.org. 42 | 43 | If none of these dictionaries suite you (for example, maybe you want 44 | the larger dictionary but only use spelling of a word) additional 45 | dictionaries can be generated at http://app.aspell.net/create or by 46 | modifying speller/make-hunspell-dict in SCOWL. Please do let me know 47 | if you end up publishing a customized dictionary. 48 | 49 | If a word is not found in the dictionary or a word is there you think 50 | shouldn't be, you can lookup the word up at http://app.aspell.net/lookup 51 | to help determine why that is. 52 | 53 | General comments on these list can be sent directly to me at 54 | kevina@gnu.org or to the wordlist-devel mailing lists 55 | (https://lists.sourceforge.net/lists/listinfo/wordlist-devel). If you 56 | have specific issues with any of these dictionaries please file a bug 57 | report at https://github.com/kevina/wordlist/issues. 58 | 59 | IMPORTANT CHANGES FROM 2015.02.15: 60 | 61 | The dictionaries are now in UTF-8 format instead of ISO-8859-1. This 62 | was required to handle smart quotes correctly. 63 | 64 | ADDITIONAL NOTES: 65 | 66 | The NOSUGGEST flag was added to certain taboo words. While I made an 67 | honest attempt to flag the strongest taboo words with the NOSUGGEST 68 | flag, I MAKE NO GUARANTEE THAT I FLAGGED EVERY POSSIBLE TABOO WORD. 69 | The list was originally derived from Németh László, however I removed 70 | some words which, while being considered taboo by some dictionaries, 71 | are not really considered swear words in today's society. 72 | 73 | COPYRIGHT, SOURCES, and CREDITS: 74 | 75 | The English dictionaries come directly from SCOWL 76 | and is thus under the same copyright of SCOWL. The affix file is 77 | a heavily modified version of the original english.aff file which was 78 | released as part of Geoff Kuenning's Ispell and as such is covered by 79 | his BSD license. 
Part of SCOWL is also based on Ispell thus the 80 | Ispell copyright is included with the SCOWL copyright. 81 | 82 | The collective work is Copyright 2000-2015 by Kevin Atkinson as well 83 | as any of the copyrights mentioned below: 84 | 85 | Copyright 2000-2015 by Kevin Atkinson 86 | 87 | Permission to use, copy, modify, distribute and sell these word 88 | lists, the associated scripts, the output created from the scripts, 89 | and its documentation for any purpose is hereby granted without fee, 90 | provided that the above copyright notice appears in all copies and 91 | that both that copyright notice and this permission notice appear in 92 | supporting documentation. Kevin Atkinson makes no representations 93 | about the suitability of this array for any purpose. It is provided 94 | "as is" without express or implied warranty. 95 | 96 | Alan Beale also deserves special credit as he has, 97 | in addition to providing the 12Dicts package and being a major 98 | contributor to the ENABLE word list, given me an incredible amount of 99 | feedback and created a number of special lists (those found in the 100 | Supplement) in order to help improve the overall quality of SCOWL. 101 | 102 | The 10 level includes the 1000 most common English words (according to 103 | the Moby (TM) Words II [MWords] package), a subset of the 1000 most 104 | common words on the Internet (again, according to Moby Words II), and 105 | frequently class 16 from Brian Kelk's "UK English Wordlist 106 | with Frequency Classification". 107 | 108 | The MWords package was explicitly placed in the public domain: 109 | 110 | The Moby lexicon project is complete and has 111 | been place into the public domain. Use, sell, 112 | rework, excerpt and use in any way on any platform. 113 | 114 | Placing this material on internal or public servers is 115 | also encouraged. The compiler is not aware of any 116 | export restrictions so freely distribute world-wide. 
117 | 118 | You can verify the public domain status by contacting 119 | 120 | Grady Ward 121 | 3449 Martha Ct. 122 | Arcata, CA 95521-4884 123 | 124 | grady@netcom.com 125 | grady@northcoast.com 126 | 127 | The "UK English Wordlist With Frequency Classification" is also in the 128 | Public Domain: 129 | 130 | Date: Sat, 08 Jul 2000 20:27:21 +0100 131 | From: Brian Kelk 132 | 133 | > I was wondering what the copyright status of your "UK English 134 | > Wordlist With Frequency Classification" word list as it seems to 135 | > be lacking any copyright notice. 136 | 137 | There were many many sources in total, but any text marked 138 | "copyright" was avoided. Locally-written documentation was one 139 | source. An earlier version of the list resided in a filespace called 140 | PUBLIC on the University mainframe, because it was considered public 141 | domain. 142 | 143 | Date: Tue, 11 Jul 2000 19:31:34 +0100 144 | 145 | > So are you saying your word list is also in the public domain? 146 | 147 | That is the intention. 148 | 149 | The 20 level includes frequency classes 7-15 from Brian's word list. 150 | 151 | The 35 level includes frequency classes 2-6 and words appearing in at 152 | least 11 of 12 dictionaries as indicated in the 12Dicts package. All 153 | words from the 12Dicts package have had likely inflections added via 154 | my inflection database. 155 | 156 | The 12Dicts package and Supplement is in the Public Domain. 157 | 158 | The WordNet database, which was used in the creation of the 159 | Inflections database, is under the following copyright: 160 | 161 | This software and database is being provided to you, the LICENSEE, 162 | by Princeton University under the following license. 
By obtaining, 163 | using and/or copying this software and database, you agree that you 164 | have read, understood, and will comply with these terms and 165 | conditions.: 166 | 167 | Permission to use, copy, modify and distribute this software and 168 | database and its documentation for any purpose and without fee or 169 | royalty is hereby granted, provided that you agree to comply with 170 | the following copyright notice and statements, including the 171 | disclaimer, and that the same appear on ALL copies of the software, 172 | database and documentation, including modifications that you make 173 | for internal use or for distribution. 174 | 175 | WordNet 1.6 Copyright 1997 by Princeton University. All rights 176 | reserved. 177 | 178 | THIS SOFTWARE AND DATABASE IS PROVIDED "AS IS" AND PRINCETON 179 | UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR 180 | IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PRINCETON 181 | UNIVERSITY MAKES NO REPRESENTATIONS OR WARRANTIES OF MERCHANT- 182 | ABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE 183 | LICENSED SOFTWARE, DATABASE OR DOCUMENTATION WILL NOT INFRINGE ANY 184 | THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. 185 | 186 | The name of Princeton University or Princeton may not be used in 187 | advertising or publicity pertaining to distribution of the software 188 | and/or database. Title to copyright in this software, database and 189 | any associated documentation shall at all times remain with 190 | Princeton University and LICENSEE agrees to preserve same. 191 | 192 | The 40 level includes words from Alan's 3esl list found in version 4.0 193 | of his 12dicts package. Like his other stuff the 3esl list is also in the 194 | public domain. 
195 | 196 | The 50 level includes Brian's frequency class 1, words appearing 197 | in at least 5 of 12 of the dictionaries as indicated in the 12Dicts 198 | package, and uppercase words in at least 4 of the previous 12 199 | dictionaries. A decent number of proper names is also included: The 200 | top 1000 male, female, and Last names from the 1990 Census report; a 201 | list of names sent to me by Alan Beale; and a few names that I added 202 | myself. Finally a small list of abbreviations not commonly found in 203 | other word lists is included. 204 | 205 | The name files form the Census report is a government document which I 206 | don't think can be copyrighted. 207 | 208 | The file special-jargon.50 uses common.lst and word.lst from the 209 | "Unofficial Jargon File Word Lists" which is derived from "The Jargon 210 | File". All of which is in the Public Domain. This file also contain 211 | a few extra UNIX terms which are found in the file "unix-terms" in the 212 | special/ directory. 213 | 214 | The 55 level includes words from Alan's 2of4brif list found in version 215 | 4.0 of his 12dicts package. Like his other stuff the 2of4brif is also 216 | in the public domain. 217 | 218 | The 60 level includes all words appearing in at least 2 of the 12 219 | dictionaries as indicated by the 12Dicts package. 220 | 221 | The 70 level includes Brian's frequency class 0 and the 74,550 common 222 | dictionary words from the MWords package. The common dictionary words, 223 | like those from the 12Dicts package, have had all likely inflections 224 | added. The 70 level also included the 5desk list from version 4.0 of 225 | the 12Dics package which is in the public domain. 226 | 227 | The 80 level includes the ENABLE word list, all the lists in the 228 | ENABLE supplement package (except for ABLE), the "UK Advanced Cryptics 229 | Dictionary" (UKACD), the list of signature words from the YAWL package, 230 | and the 10,196 places list from the MWords package. 
The ENABLE package, maintained by M\Cooper
264 | 265 | The 95 level includes the 354,984 single words, 256,772 compound 266 | words, 4,946 female names and the 3,897 male names, and 21,986 names 267 | from the MWords package, ABLE.LST from the ENABLE Supplement, and some 268 | additional words found in my part-of-speech database that were not 269 | found anywhere else. 270 | 271 | Accent information was taken from UKACD. 272 | 273 | My VARCON package was used to create the American, British, and 274 | Canadian word list. 275 | 276 | Since the original word lists used in the VARCON package came 277 | from the Ispell distribution they are under the Ispell copyright: 278 | 279 | Copyright 1993, Geoff Kuenning, Granada Hills, CA 280 | All rights reserved. 281 | 282 | Redistribution and use in source and binary forms, with or without 283 | modification, are permitted provided that the following conditions 284 | are met: 285 | 286 | 1. Redistributions of source code must retain the above copyright 287 | notice, this list of conditions and the following disclaimer. 288 | 2. Redistributions in binary form must reproduce the above copyright 289 | notice, this list of conditions and the following disclaimer in the 290 | documentation and/or other materials provided with the distribution. 291 | 3. All modifications to the source code must be clearly marked as 292 | such. Binary redistributions based on modified source code 293 | must be clearly marked as modified versions in the documentation 294 | and/or other materials provided with the distribution. 295 | (clause 4 removed with permission from Geoff Kuenning) 296 | 5. The name of Geoff Kuenning may not be used to endorse or promote 297 | products derived from this software without specific prior 298 | written permission. 
299 | 300 | THIS SOFTWARE IS PROVIDED BY GEOFF KUENNING AND CONTRIBUTORS ``AS 301 | IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 302 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 303 | FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GEOFF 304 | KUENNING OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 305 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 306 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 307 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 308 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 309 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 310 | ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 311 | POSSIBILITY OF SUCH DAMAGE. 312 | 313 | Build Date: Mon Aug 24 16:42:04 EDT 2015 314 | Wordlist Command: mk-list -v1 --accents=both en_US 70 315 | -------------------------------------------------------------------------------- /data/README_es_ANY.txt: -------------------------------------------------------------------------------- 1 | **************************************************************************** 2 | ** ** 3 | ** Diccionario para corrección ortográfica en español de OpenOffice.org ** 4 | ** ** 5 | **************************************************************************** 6 | ** VERSIÓN GENÉRICA PARA TODAS LAS LOCALIZACIONES ** 7 | **************************************************************************** 8 | 9 | Versión 0.8 10 | 11 | SUMARIO 12 | 13 | 1. AUTOR 14 | 2. LICENCIA 15 | 3. INSTALACIÓN 16 | 4. COLABORACIÓN 17 | 5. AGRADECIMIENTOS 18 | 19 | 20 | 1. AUTOR 21 | 22 | Este diccionario ha sido desarrollado inicialmente por Santiago Bosio; 23 | quien actualmente coordina el desarrollo de todos los diccionarios localizados. 
24 | 25 | Si desea contactar al autor, por favor envíe sus mensajes mediante correo 26 | electrónico a: 27 | 28 | santiago.bosio gmail com 29 | (reemplace por @ y por . al enviar su mensaje) 30 | 31 | El diccionario es un desarrollo completamente nuevo, y NO ESTÁ BASADO en el 32 | trabajo de Jesús Carretero y Santiago Rodríguez, ni en la versión adaptada al 33 | formato de MySpell por Richard Holt. 34 | 35 | 36 | 2. LICENCIA 37 | 38 | Este diccionario para corrección ortográfica, integrado por el fichero 39 | de afijos y la lista de palabras (es_ANY[.aff|.dic]) se distribuye 40 | bajo un triple esquema de licencias disjuntas: GNU GPL versión 3 o posterior, 41 | GNU LGPL versión 3 o posterior, ó MPL versión 1.1 o posterior. Puede 42 | seleccionar libremente bajo cuál de estas licencias utilizará este diccionario. 43 | Encontrará copias de las licencias adjuntas en este mismo paquete. 44 | 45 | 3. INSTALACIÓN 46 | 47 | NOTA: en el resto del documento, cada aparición de OpenOffice.org puede 48 | entenderse referida también a LibreOffice. No obstante, tenga en cuenta que 49 | la primera versión de LibreOffice es compatible con OpenOffice.org 3.x, por 50 | lo que las menciones a OpenOffice.org 1.x y 2.x no son aplicables a LibreOffice. 51 | 52 | En OpenOffice.org versión 3.x y superior, utilice el administrador de 53 | extensiones, seleccionando para instalar directamente el fichero con 54 | extensión ".oxt". 55 | 56 | Para instalar en OpenOffice.org versión 1.x ó 2.x, deberá realizar una 57 | instalación manual siguiendo estas instrucciones: 58 | 59 | a) Copie el fichero de afijos y la lista de palabras en la carpeta de 60 | instalación de diccionarios. 61 | 62 | Si tiene permisos de administrador, puede instalar el diccionario de 63 | manera que esté disponible para todos los usuarios, copiando los ficheros al 64 | directorio de diccionarios de la suite. Este directorio depende de la 65 | plataforma de instalación. 
Podrá ubicarlo si ingresa en el ítem Opciones 66 | del menú Herramientas. Despliegue la primera lista, etiquetada "OpenOffice.org" 67 | y seleccione el ítem Rutas. La carpeta donde debe copiar los ficheros se 68 | denomina "ooo", y la encontrará bajo el directorio que figura en la lista de 69 | rutas con el tipo "Lingüística". 70 | 71 | En caso de no contar con permisos de administrador, igualmente puede 72 | realizar una instalación para su usuario particular, copiando los ficheros al 73 | directorio que figura en la lista de rutas con el tipo "Diccionarios definidos 74 | por el usuario". 75 | 76 | Estos directorios de configuración usualmente están ocultos. Deberá ajustar 77 | las opciones del administrador de ficheros que utiliza para que se muestren 78 | este tipo de ficheros o directorios. Consulte la ayuda para su plataforma en 79 | caso que no sepa cómo hacerlo. 80 | 81 | b) Edite la lista de diccionarios disponibles para añadir el nuevo diccionario. 82 | 83 | En el directorio donde copió los diccionarios encontrará un fichero de 84 | texto denominado "dictionary.lst". Modifíquelo como se indica a continuación, 85 | utilizando el editor de textos de su preferencia. 86 | 87 | El formato de la lista permite definir tres tipos de diccionarios 88 | diferentes: de corrección ortográfica (DICT), de sinónimos (THES) o de 89 | separación silábica (HYPH). 90 | 91 | En este caso creará un nuevo ítem de tipo DICT. Para cada entrada de este 92 | tipo, debe definir el lenguaje y la región (utilizando códigos ISO estándares), 93 | y especificar el nombre base de los ficheros que definen el diccionario. Para 94 | el español, el código ISO de lenguaje se escribe "es" (en minúsculas, sin las 95 | comillas). 
El código de región depende de cómo tenga configurado su sistema 96 | (por lo general será el del país donde reside), elegible entre uno de los 97 | siguientes: 98 | 99 | Argentina: "AR" Honduras: "HN" 100 | Bolivia: "BO" México: "MX" 101 | Chile: "CL" Nicaragua: "NI" 102 | Colombia: "CO" Panamá: "PA" 103 | Costa Rica: "CR" Perú: "PE" 104 | Cuba: "CU" Puerto Rico: "PR" 105 | Rep. Dominicana: "DO" Paraguay: "PY" 106 | Ecuador: "EC" El Salvador: "SV" 107 | España: "ES" Uruguay: "UY" 108 | Guatemala: "GT" Venezuela: "VE" 109 | 110 | (El código de región se escribe en mayúsculas sin las comillas). 111 | 112 | El nombre base del fichero es igual al del fichero de diccionario o al de 113 | afijos, sin la extensión (.dic o .aff). 114 | 115 | Por ejemplo, si ha descargado el paquete localizado para Argentina 116 | (es_AR.zip), al descomprimirlo obtendrá los ficheros 'es_AR.dic' y 117 | 'es_AR.aff'. Después de copiarlos en el directorio correspondiente, la nueva 118 | línea que deberá crear en el fichero 'dictionary.lst' es: 119 | 120 | DICT es AR es_AR 121 | 122 | c) Reinicie OpenOffice.org. 123 | 124 | Guarde y cierre todos los documentos que tenga abiertos. Si utiliza la 125 | plataforma de Microsoft Windows y tiene el inicio rápido de OpenOffice.org 126 | activado, ciérrelo también. 127 | 128 | Inicie nuevamente alguna de las aplicaciones de OpenOffice.org (cualquiera 129 | de ellas servirá). 130 | 131 | d) Configure las opciones de lingüística del programa. 132 | 133 | Ingrese nuevamente al ítem Opciones del menú Herramientas y despliegue el 134 | árbol "Configuración de idioma". 135 | 136 | Entre las opciones del ítem "Idiomas" hay una lista donde se configura el 137 | idioma occidental utilizado como idioma predeterminado para los documentos 138 | nuevos. 139 | 140 | Elija de esa lista el idioma y región que configuró en el fichero 141 | 'dictionary.lst'. Para el ejemplo utilizado sería "Español (Argentina)". 
142 | 143 | Verifique que esta entrada de la lista aparezca con un pequeño tilde azul 144 | y las letras ABC a su izquierda; esto indica que existe un diccionario de 145 | corrección ortográfica instalado para esa localización. Si la marca no 146 | aparece, debe haber cometido algún error en los pasos previos (el más común es 147 | que haya dejado alguna ventana o el inicio rápido de OpenOffice.org 148 | abiertos). 149 | 150 | Si necesitara ayuda para realizar cualquiera de estos pasos, envíe un 151 | mensaje al encargado de mantenimiento del diccionario, o a las listas de 152 | correo del proyecto OpenOffice.org en español (http://es.openoffice.org/). 153 | 154 | 155 | 4. COLABORACIÓN 156 | 157 | Este diccionario es resultado del trabajo colaborativo de muchas personas. 158 | La buena noticia es que ¡usted también puede participar! 159 | 160 | ¿Tiene dudas o sugerencias? ¿Desearía ver palabras agregadas, o que se 161 | realizaran correcciones? Sólo debe contactar al encargado de mantenimiento de 162 | este diccionario, a través de su correo electrónico, quien se encargará de 163 | evacuar sus dudas, o de realizar las modificaciones necesarias para la próxima 164 | versión del diccionario. 165 | 166 | 167 | 5. AGRADECIMIENTOS 168 | 169 | Hay varias personas que han colaborado con aportes o sugerencias a la 170 | creación de este diccionario. Se agradece especialmente a: 171 | 172 | - Richard Holt. 173 | - Marcelo Garrone. 174 | - Kevin Hendricks. 175 | - Juan Rey Saura. 176 | - Carlos Dávila. 177 | - Román Gelbort. 178 | - J. Eduardo Moreno. 179 | - Gonzalo Higuera Díaz. 180 | - Ricardo Palomares Martínez. 181 | - Sergio Medina. 182 | - Ismael Olea. 183 | - Alejandro Moreno. 184 | - Alexandro Colorado. 185 | - Andrés Sánchez. 186 | - Juan Rafael Fernández García. 187 | - KNTRO 188 | - Ricardo Berlasso. 189 | - y a todos los integrantes de la comunidad en español que proponen mejoras 190 | a este diccionario. 
191 | -------------------------------------------------------------------------------- /data/en-GB.aff: -------------------------------------------------------------------------------- 1 | # Affix file for British English MySpell dictionary. 2 | # Also suitable as basis for Commonwealth and European English. 3 | # Built from scratch for MySpell. Released under LGPL. 4 | # 5 | # Sources used to verify the spelling of the words 6 | # Marco Pinto included in the dictionary: 7 | # 1) Oxford Dictionaries; 8 | # 2) Collins Dictionary; 9 | # 3) Macmillan Dictionary; 10 | # 4) Wiktionary (used with caution); 11 | # 5) Wikipedia (used with caution); 12 | # 6) Physical dictionaries. 13 | # 14 | # David Bartlett, Andrew Brown, Kevin Atkinson, Marco A.G.Pinto. 15 | # R 2.34, 2016-03-01 16 | 17 | SET UTF-8 18 | 19 | TRY esiaénrtolcdugmfphbyvkw-'.zqjxSNRTLCGDMFPHBEAUYOIVKWóöâôZQJXÅçèîêàïüäñ 20 | 21 | REP 27 22 | REP f ph 23 | REP ph f 24 | REP f gh 25 | REP f ugh 26 | REP gh f 27 | REP ff ugh 28 | REP uf ough 29 | REP uff ough 30 | REP k ch 31 | REP ch k 32 | REP dg j 33 | REP j dg 34 | REP w ugh 35 | REP ness ity 36 | REP leness ility 37 | REP ness ivity 38 | REP eness ity 39 | REP og ogue 40 | REP ck qu 41 | REP ck que 42 | REP eg e.g. 43 | REP ie i.e. 44 | REP t ght 45 | REP ght t 46 | REP ok OK 47 | REP ts ce 48 | REP ce ts 49 | 50 | PFX A Y 2 51 | PFX A 0 re [^e] 52 | PFX A 0 re- e 53 | PFX a Y 1 54 | PFX a 0 mis . 55 | PFX I Y 4 56 | PFX I 0 il l 57 | PFX I 0 ir r 58 | PFX I 0 im [bmp] 59 | PFX I 0 in [^blmpr] 60 | PFX c Y 1 61 | PFX c 0 over . 62 | PFX U Y 1 63 | PFX U 0 un . 64 | PFX C Y 2 65 | PFX C 0 de [^e] 66 | PFX C 0 de- e 67 | PFX E Y 1 68 | PFX E 0 dis . 69 | PFX F Y 5 70 | PFX F 0 com [bmp] 71 | PFX F 0 co [aeiouh] 72 | PFX F 0 cor r 73 | PFX F 0 col l 74 | PFX F 0 con [^abehilmopru]. 75 | PFX K Y 1 76 | PFX K 0 pre . 77 | PFX e Y 1 78 | PFX e 0 out . 79 | PFX f Y 2 80 | PFX f 0 under [^r] 81 | PFX f 0 under- r 82 | PFX O Y 1 83 | PFX O 0 non- . 
84 | PFX 4 Y 1 85 | PFX 4 0 trans . 86 | 87 | SFX V Y 15 88 | SFX V 0 tive [aio] 89 | SFX V b ptive b 90 | SFX V d sive d 91 | SFX V be ptive be 92 | SFX V e tive ce 93 | SFX V de sive de 94 | SFX V ke cative ke 95 | SFX V e ptive me 96 | SFX V e ive [st]e 97 | SFX V e ative [^bcdkmst]e 98 | SFX V 0 lative [aeiou]l 99 | SFX V 0 ative [^aeiou]l 100 | SFX V 0 ive [st] 101 | SFX V y icative y 102 | SFX V 0 ative [^abdeilosty] 103 | SFX v Y 15 104 | SFX v 0 tively [aio] 105 | SFX v b ptively b 106 | SFX v d sively d 107 | SFX v be ptively be 108 | SFX v e tively ce 109 | SFX v de sively de 110 | SFX v ke catively ke 111 | SFX v e ptively me 112 | SFX v e ively [st]e 113 | SFX v e atively [^bcdkmst]e 114 | SFX v 0 latively [aeiou]l 115 | SFX v 0 atively [^aeiou]l 116 | SFX v 0 ively [st] 117 | SFX v y icatively y 118 | SFX v 0 atively [^abdeilosty] 119 | SFX u Y 15 120 | SFX u 0 tiveness [aio] 121 | SFX u b ptiveness b 122 | SFX u d siveness d 123 | SFX u be ptiveness be 124 | SFX u e tiveness ce 125 | SFX u de siveness de 126 | SFX u ke cativeness ke 127 | SFX u e ptiveness me 128 | SFX u e iveness [st]e 129 | SFX u e ativeness [^bcdkmst]e 130 | SFX u 0 lativeness [aeiou]l 131 | SFX u 0 ativeness [^aeiou]l 132 | SFX u 0 iveness [st] 133 | SFX u y icativeness y 134 | SFX u 0 ativeness [^abdeilosty] 135 | SFX N Y 26 136 | SFX N b ption b 137 | SFX N d sion d 138 | SFX N be ption be 139 | SFX N e tion ce 140 | SFX N de sion de 141 | SFX N ke cation ke 142 | SFX N e ption ume 143 | SFX N e mation [^u]me 144 | SFX N e ion [^o]se 145 | SFX N e ition ose 146 | SFX N e ation [iou]te 147 | SFX N e ion [^iou]te 148 | SFX N e ation [^bcdkmst]e 149 | SFX N el ulsion el 150 | SFX N 0 lation [aiou]l 151 | SFX N 0 ation [^aeiou]l 152 | SFX N 0 mation [aeiou]m 153 | SFX N 0 ation [^aeiou]m 154 | SFX N er ration er 155 | SFX N 0 ation [^e]r 156 | SFX N 0 ion [sx] 157 | SFX N t ssion mit 158 | SFX N 0 ion [^m]it 159 | SFX N 0 ation [^i]t 160 | SFX N y ication y 161 | SFX N 0 ation 
[^bdelmrstxy] 162 | SFX n Y 28 163 | SFX n 0 tion a 164 | SFX n e tion ce 165 | SFX n ke cation ke 166 | SFX n e ation [iou]te 167 | SFX n e ion [^iou]te 168 | SFX n e ation [^ckt]e 169 | SFX n el ulsion el 170 | SFX n 0 lation [aiou]l 171 | SFX n 0 ation [^aeiou]l 172 | SFX n er ration er 173 | SFX n 0 ation [^e]r 174 | SFX n y ation py 175 | SFX n y ication [^p]y 176 | SFX n 0 ation [^aelry] 177 | SFX n 0 tions a 178 | SFX n e tions ce 179 | SFX n ke cations ke 180 | SFX n e ations [iou]te 181 | SFX n e ions [^iou]te 182 | SFX n e ations [^ckt]e 183 | SFX n el ulsions el 184 | SFX n 0 lations [aiou]l 185 | SFX n 0 ations [^aeiou]l 186 | SFX n er rations er 187 | SFX n 0 ations [^e]r 188 | SFX n y ations py 189 | SFX n y ications [^p]y 190 | SFX n 0 ations [^aelry] 191 | SFX X Y 26 192 | SFX X b ptions b 193 | SFX X d sions d 194 | SFX X be ptions be 195 | SFX X e tions ce 196 | SFX X ke cations ke 197 | SFX X de sions de 198 | SFX X e ptions ume 199 | SFX X e mations [^u]me 200 | SFX X e ions [^o]se 201 | SFX X e itions ose 202 | SFX X e ations [iou]te 203 | SFX X e ions [^iou]te 204 | SFX X e ations [^bcdkmst]e 205 | SFX X el ulsions el 206 | SFX X 0 lations [aiou]l 207 | SFX X 0 ations [^aeiou]l 208 | SFX X 0 mations [aeiou]m 209 | SFX X 0 ations [^aeiou]m 210 | SFX X er rations er 211 | SFX X 0 ations [^e]r 212 | SFX X 0 ions [sx] 213 | SFX X t ssions mit 214 | SFX X 0 ions [^m]it 215 | SFX X 0 ations [^i]t 216 | SFX X y ications y 217 | SFX X 0 ations [^bdelmrstxy] 218 | SFX x Y 40 219 | SFX x b ptional b 220 | SFX x d sional d 221 | SFX x be ptional be 222 | SFX x e tional ce 223 | SFX x ke cational ke 224 | SFX x de sional de 225 | SFX x e ional [^o]se 226 | SFX x e itional ose 227 | SFX x e ional te 228 | SFX x e ational [^bcdkst]e 229 | SFX x el ulsional el 230 | SFX x 0 lational [aiou]l 231 | SFX x 0 ational [^aeiou]l 232 | SFX x er rational er 233 | SFX x 0 ational [^e]r 234 | SFX x 0 ional [sx] 235 | SFX x 0 ional [^n]t 236 | SFX x 0 ational nt 237 | 
SFX x y icational y 238 | SFX x 0 ational [^bdelrstxy] 239 | SFX x b ptionally b 240 | SFX x d sionally d 241 | SFX x be ptionally be 242 | SFX x e tionally ce 243 | SFX x ke cationally ke 244 | SFX x de sionally de 245 | SFX x e ionally [^o]se 246 | SFX x e itionally ose 247 | SFX x e ionally te 248 | SFX x e ationally [^bcdkst]e 249 | SFX x el ulsionally el 250 | SFX x 0 lationally [aiou]l 251 | SFX x 0 ationally [^aeiou]l 252 | SFX x er rationally er 253 | SFX x 0 ationally [^e]r 254 | SFX x 0 ionally [sx] 255 | SFX x 0 ionally [^n]t 256 | SFX x 0 ationally nt 257 | SFX x y icationally y 258 | SFX x 0 ationally [^bdelrstxy] 259 | SFX H N 13 260 | SFX H y ieth y 261 | SFX H ree ird ree 262 | SFX H ve fth ve 263 | SFX H e th [^ev]e 264 | SFX H 0 h t 265 | SFX H 0 th [^ety] 266 | SFX H y ieths y 267 | SFX H ree irds ree 268 | SFX H ve fths ve 269 | SFX H e ths [^ev]e 270 | SFX H 0 hs t 271 | SFX H 0 ths [^ety] 272 | SFX H 0 fold . 273 | SFX Y Y 9 274 | SFX Y 0 ally ic 275 | SFX Y 0 ly [^i]c 276 | SFX Y e y [^aeiou]le 277 | SFX Y 0 ly [aeiou]le 278 | SFX Y 0 ly [^l]e 279 | SFX Y 0 y [^aeiou]l 280 | SFX Y y ily [^aeiou]y 281 | SFX Y 0 ly [aeiou][ly] 282 | SFX Y 0 ly [^cely] 283 | SFX G Y 24 284 | SFX G e ing [^eioy]e 285 | SFX G 0 ing [eoy]e 286 | SFX G ie ying ie 287 | SFX G 0 bing [^aeio][aeiou]b 288 | SFX G 0 king [^aeio][aeiou]c 289 | SFX G 0 ding [^aeio][aeiou]d 290 | SFX G 0 fing [^aeio][aeiou]f 291 | SFX G 0 ging [^aeio][aeiou]g 292 | SFX G 0 king [^aeio][aeiou]k 293 | SFX G 0 ling [^aeio][eiou]l 294 | SFX G 0 ing [aeio][eiou]l 295 | SFX G 0 ling [^aeo]al 296 | SFX G 0 ing [aeo]al 297 | SFX G 0 ming [^aeio][aeiou]m 298 | SFX G 0 ning [^aeio][aeiou]n 299 | SFX G 0 ping [^aeio][aeiou]p 300 | SFX G 0 ring [^aeio][aeiou]r 301 | SFX G 0 sing [^aeio][aeiou]s 302 | SFX G 0 ting [^aeio][aeiou]t 303 | SFX G 0 ving [^aeio][aeiou]v 304 | SFX G 0 zing [^aeio][aeiou]z 305 | SFX G 0 ing [aeio][aeiou][bcdfgkmnprstvz] 306 | SFX G 0 ing [^aeiou][bcdfgklmnprstvz] 307 | SFX G 0 
ing [^ebcdfgklmnprstvz] 308 | SFX J Y 25 309 | SFX J e ings [^eioy]e 310 | SFX J 0 ings [eoy]e 311 | SFX J ie yings ie 312 | SFX J 0 bings [^aeio][aeiou]b 313 | SFX J 0 king [^aeio][aeiou]c 314 | SFX J 0 dings [^aeio][aeiou]d 315 | SFX J 0 fings [^aeio][aeiou]f 316 | SFX J 0 gings [^aeio][aeiou]g 317 | SFX J 0 kings [^aeio][aeiou]k 318 | SFX J 0 lings [^aeio][eiou]l 319 | SFX J 0 ings [aeio][eiou]l 320 | SFX J 0 lings [^aeo]al 321 | SFX J 0 ings [aeo]al 322 | SFX J 0 mings [^aeio][aeiou]m 323 | SFX J 0 nings [^aeio][aiou]n 324 | SFX J 0 pings [^aeio][aeiou]p 325 | SFX J 0 rings [^aeio][aiou]r 326 | SFX J 0 sings [^aeio][aeiou]s 327 | SFX J 0 tings [^aeio][aiou]t 328 | SFX J 0 vings [^aeio][aeiou]v 329 | SFX J 0 zings [^aeio][aeiou]z 330 | SFX J 0 ings [^aeio]e[nrt] 331 | SFX J 0 ings [aeio][aeiou][bcdfgkmnprstvz] 332 | SFX J 0 ings [^aeiou][bcdfgklmnprstvz] 333 | SFX J 0 ings [^ebcdfgklmnprstvz] 334 | SFX k Y 8 335 | SFX k e ingly [^eioy]e 336 | SFX k 0 ingly [eoy]e 337 | SFX k ie yingly ie 338 | SFX k 0 kingly [^aeio][aeiou]c 339 | SFX k 0 lingly [^aeio][aeiou]l 340 | SFX k 0 ingly [aeio][aeiou][cl] 341 | SFX k 0 ingly [^aeiou][cl] 342 | SFX k 0 ingly [^ecl] 343 | SFX D Y 25 344 | SFX D 0 d [^e]e 345 | SFX D e d ee 346 | SFX D 0 bed [^aeio][aeiou]b 347 | SFX D 0 ked [^aeio][aeiou]c 348 | SFX D 0 ded [^aeio][aeiou]d 349 | SFX D 0 fed [^aeio][aeiou]f 350 | SFX D 0 ged [^aeio][aeiou]g 351 | SFX D 0 ked [^aeio][aeiou]k 352 | SFX D 0 led [^aeio][eiou]l 353 | SFX D 0 ed [aeio][eiou]l 354 | SFX D 0 led [^aeo]al 355 | SFX D 0 ed [aeo]al 356 | SFX D 0 med [^aeio][aeiou]m 357 | SFX D 0 ned [^aeio][aeiou]n 358 | SFX D 0 ped [^aeio][aeiou]p 359 | SFX D 0 red [^aeio][aeiou]r 360 | SFX D 0 sed [^aeio][aeiou]s 361 | SFX D 0 ted [^aeio][aeiou]t 362 | SFX D 0 ved [^aeio][aeiou]v 363 | SFX D 0 zed [^aeio][aeiou]z 364 | SFX D y ied [^aeiou]y 365 | SFX D 0 ed [aeiou]y 366 | SFX D 0 ed [aeio][aeiou][bcdfgkmnprstvz] 367 | SFX D 0 ed [^aeiou][bcdfgklmnprstvz] 368 | SFX D 0 ed 
[^ebcdfgklmnprstvyz] 369 | SFX d Y 16 370 | SFX d 0 d e 371 | SFX d 0 ked [^aeio][aeiou]c 372 | SFX d 0 led [^aeio][aeiou]l 373 | SFX d y ied [^aeiou]y 374 | SFX d 0 ed [aeiou]y 375 | SFX d 0 ed [aeio][aeiou][cl] 376 | SFX d 0 ed [^aeiou][cl] 377 | SFX d 0 ed [^ecly] 378 | SFX d e ing [^eioy]e 379 | SFX d 0 ing [eoy]e 380 | SFX d ie ying ie 381 | SFX d 0 king [^aeio][aeiou]c 382 | SFX d 0 ling [^aeio][aeiou]l 383 | SFX d 0 ing [aeio][aeiou][cl] 384 | SFX d 0 ing [^aeiou][cl] 385 | SFX d 0 ing [^ecl] 386 | SFX h Y 22 387 | SFX h 0 dly e 388 | SFX h 0 bedly [^aeio][aeiou]b 389 | SFX h 0 kedly [^aeio][aeiou]c 390 | SFX h 0 dedly [^aeio][aeiou]d 391 | SFX h 0 fedly [^aeio][aeiou]f 392 | SFX h 0 gedly [^aeio][aeiou]g 393 | SFX h 0 kedly [^aeio][aeiou]k 394 | SFX h 0 ledly [^aeio][aeiou]l 395 | SFX h 0 medly [^aeio][aeiou]m 396 | SFX h 0 nedly [^aeio][aiou]n 397 | SFX h 0 pedly [^aeio][aeiou]p 398 | SFX h 0 redly [^aeio][aiou]r 399 | SFX h 0 sedly [^aeio][aeiou]s 400 | SFX h 0 tedly [^aeio][aiou]t 401 | SFX h 0 vedly [^aeio][aeiou]v 402 | SFX h 0 zedly [^aeio][aeiou]z 403 | SFX h 0 edly [^aeio]e[nrt] 404 | SFX h y iedly [^aeiou]y 405 | SFX h 0 edly [aeiou]y 406 | SFX h 0 edly [aeio][aeiou][bcdfgklmnprstvz] 407 | SFX h 0 edly [^aeiou][bcdfgklmnprstvz] 408 | SFX h 0 edly [^ebcdfgklmnprstvyz] 409 | SFX i Y 22 410 | SFX i 0 dness e 411 | SFX i 0 bedness [^aeio][aeiou]b 412 | SFX i 0 kedness [^aeio][aeiou]c 413 | SFX i 0 dedness [^aeio][aeiou]d 414 | SFX i 0 fedness [^aeio][aeiou]f 415 | SFX i 0 gedness [^aeio][aeiou]g 416 | SFX i 0 kedness [^aeio][aeiou]k 417 | SFX i 0 ledness [^aeio][aeiou]l 418 | SFX i 0 medness [^aeio][aeiou]m 419 | SFX i 0 nedness [^aeio][aiou]n 420 | SFX i 0 pedness [^aeio][aeiou]p 421 | SFX i 0 redness [^aeio][aiou]r 422 | SFX i 0 sedness [^aeio][aeiou]s 423 | SFX i 0 tedness [^aeio][aiou]t 424 | SFX i 0 vedness [^aeio][aeiou]v 425 | SFX i 0 zedness [^aeio][aeiou]z 426 | SFX i 0 edness [^aeio]e[nrt] 427 | SFX i y iedness [^aeiou]y 428 | SFX i 0 edness 
[aeiou]y 429 | SFX i 0 edness [aeio][aeiou][bcdfgklmnprstvz] 430 | SFX i 0 edness [^aeiou][bcdfgklmnprstvz] 431 | SFX i 0 edness [^ebcdfgklmnprstvyz] 432 | SFX T Y 42 433 | SFX T 0 r e 434 | SFX T 0 st e 435 | SFX T 0 ber [^aeio][aeiou]b 436 | SFX T 0 best [^aeio][aeiou]b 437 | SFX T 0 ker [^aeio][aeiou]c 438 | SFX T 0 kest [^aeio][aeiou]c 439 | SFX T 0 der [^aeio][aeiou]d 440 | SFX T 0 dest [^aeio][aeiou]d 441 | SFX T 0 fer [^aeio][aeiou]f 442 | SFX T 0 fest [^aeio][aeiou]f 443 | SFX T 0 ger [^aeio][aeiou]g 444 | SFX T 0 gest [^aeio][aeiou]g 445 | SFX T 0 ker [^aeio][aeiou]k 446 | SFX T 0 kest [^aeio][aeiou]k 447 | SFX T 0 ler [^aeio][aeiou]l 448 | SFX T 0 lest [^aeio][aeiou]l 449 | SFX T 0 mer [^aeio][aeiou]m 450 | SFX T 0 mest [^aeio][aeiou]m 451 | SFX T 0 ner [^aeio][aeiou]n 452 | SFX T 0 nest [^aeio][aeiou]n 453 | SFX T 0 per [^aeio][aeiou]p 454 | SFX T 0 pest [^aeio][aeiou]p 455 | SFX T 0 rer [^aeio][aeiou]r 456 | SFX T 0 rest [^aeio][aeiou]r 457 | SFX T 0 ser [^aeio][aeiou]s 458 | SFX T 0 sest [^aeio][aeiou]s 459 | SFX T 0 ter [^aeio][aeiou]t 460 | SFX T 0 test [^aeio][aeiou]t 461 | SFX T 0 ver [^aeio][aeiou]v 462 | SFX T 0 vest [^aeio][aeiou]v 463 | SFX T 0 zer [^aeio][aeiou]z 464 | SFX T 0 zest [^aeio][aeiou]z 465 | SFX T y ier [^aeiou]y 466 | SFX T y iest [^aeiou]y 467 | SFX T 0 er [aeiou]y 468 | SFX T 0 est [aeiou]y 469 | SFX T 0 er [aeio][aeiou][bcdfgklmnprstvz] 470 | SFX T 0 er [^aeiou][bcdfgklmnprstvz] 471 | SFX T 0 er [^ebcdfgklmnprstvyz] 472 | SFX T 0 est [aeio][aeiou][bcdfgklmnprstvz] 473 | SFX T 0 est [^aeiou][bcdfgklmnprstvz] 474 | SFX T 0 est [^ebcdfgklmnprstvyz] 475 | SFX R Y 72 476 | SFX R 0 r e 477 | SFX R 0 rs e 478 | SFX R 0 ber [^aeio][aeiou]b 479 | SFX R 0 bers [^aeio][aeiou]b 480 | SFX R 0 ker [^aeio][aeiou]c 481 | SFX R 0 kers [^aeio][aeiou]c 482 | SFX R 0 der [^aeio][aeiou]d 483 | SFX R 0 ders [^aeio][aeiou]d 484 | SFX R 0 fer [^aeio][aeiou]f 485 | SFX R 0 fers [^aeio][aeiou]f 486 | SFX R 0 ger [^aeio][aeiou]g 487 | SFX R 0 gers 
[^aeio][aeiou]g 488 | SFX R 0 ker [^aeio][aeiou]k 489 | SFX R 0 kers [^aeio][aeiou]k 490 | SFX R 0 ler [^aeio][eiou]l 491 | SFX R 0 er [aeio][eiou]l 492 | SFX R 0 ler [^aeo]al 493 | SFX R 0 er [aeo]al 494 | SFX R 0 lers [^aeio][eiou]l 495 | SFX R 0 ers [aeio][eiou]l 496 | SFX R 0 lers [^aeo]al 497 | SFX R 0 ers [aeo]al 498 | SFX R 0 mer [^aeio][aeiou]m 499 | SFX R 0 mers [^aeio][aeiou]m 500 | SFX R 0 ner [^aeio][aeiou]n 501 | SFX R 0 ners [^aeio][aeiou]n 502 | SFX R 0 per [^aeio][aeiou]p 503 | SFX R 0 pers [^aeio][aeiou]p 504 | SFX R 0 rer [^aeio][aeiou]r 505 | SFX R 0 rers [^aeio][aeiou]r 506 | SFX R 0 ser [^aeio][aeiou]s 507 | SFX R 0 sers [^aeio][aeiou]s 508 | SFX R 0 ter [^aeio][aeiou]t 509 | SFX R 0 ters [^aeio][aeiou]t 510 | SFX R 0 ver [^aeio][aeiou]v 511 | SFX R 0 vers [^aeio][aeiou]v 512 | SFX R 0 zer [^aeio][aeiou]z 513 | SFX R 0 zers [^aeio][aeiou]z 514 | SFX R y ier [^aeiou]y 515 | SFX R y iers [^aeiou]y 516 | SFX R 0 er [aeiou]y 517 | SFX R 0 ers [aeiou]y 518 | SFX R 0 er [aeio][aeiou][bcdfgkmnprstvz] 519 | SFX R 0 ers [aeio][aeiou][bcdfgkmnprstvz] 520 | SFX R 0 er [^aeiou][bcdfgklmnprstvz] 521 | SFX R 0 ers [^aeiou][bcdfgklmnprstvz] 522 | SFX R 0 er [^ebcdfgklmnprstvyz] 523 | SFX R 0 ers [^ebcdfgklmnprstvyz] 524 | SFX R 0 r's e 525 | SFX R 0 ber's [^aeio][aeiou]b 526 | SFX R 0 ker's [^aeio][aeiou]c 527 | SFX R 0 der's [^aeio][aeiou]d 528 | SFX R 0 fer's [^aeio][aeiou]f 529 | SFX R 0 ger's [^aeio][aeiou]g 530 | SFX R 0 ker's [^aeio][aeiou]k 531 | SFX R 0 ler's [^aeio][eiou]l 532 | SFX R 0 er's [aeio][eiou]l 533 | SFX R 0 ler's [^aeo]al 534 | SFX R 0 er's [aeo]al 535 | SFX R 0 mer's [^aeio][aeiou]m 536 | SFX R 0 ner's [^aeio][aeiou]n 537 | SFX R 0 per's [^aeio][aeiou]p 538 | SFX R 0 rer's [^aeio][aeiou]r 539 | SFX R 0 ser's [^aeio][aeiou]s 540 | SFX R 0 ter's [^aeio][aeiou]t 541 | SFX R 0 ver's [^aeio][aeiou]v 542 | SFX R 0 zer's [^aeio][aeiou]z 543 | SFX R y ier's [^aeiou]y 544 | SFX R 0 er's [aeiou]y 545 | SFX R 0 er's [aeio][aeiou][bcdfgkmnprstvz] 
546 | SFX R 0 er's [^aeiou][bcdfgklmnprstvz] 547 | SFX R 0 er's [^ebcdfgklmnprstvyz] 548 | SFX r Y 24 549 | SFX r 0 r e 550 | SFX r 0 ler [^aeio][aeiou]l 551 | SFX r 0 ker [^aeio][aeiou]c 552 | SFX r y ier [^aeiou]y 553 | SFX r 0 er [aeiou]y 554 | SFX r 0 er [aeio][aeiou][cl] 555 | SFX r 0 er [^aeiou][cl] 556 | SFX r 0 er [^ecly] 557 | SFX r 0 rs e 558 | SFX r 0 lers [^aeio][aeiou]l 559 | SFX r 0 kers [^aeio][aeiou]c 560 | SFX r y iers [^aeiou]y 561 | SFX r 0 ers [aeiou]y 562 | SFX r 0 ers [aeio][aeiou][cl] 563 | SFX r 0 ers [^aeiou][cl] 564 | SFX r 0 ers [^ecly] 565 | SFX r 0 r's e 566 | SFX r 0 ler's [^aeio][aeiou]l 567 | SFX r 0 ker's [^aeio][aeiou]c 568 | SFX r y ier's [^aeiou]y 569 | SFX r 0 er's [aeiou]y 570 | SFX r 0 er's [aeio][aeiou][cl] 571 | SFX r 0 er's [^aeiou][cl] 572 | SFX r 0 er's [^ecly] 573 | SFX S Y 9 574 | SFX S y ies [^aeiou]y 575 | SFX S 0 s [aeiou]y 576 | SFX S 0 es [sxz] 577 | SFX S 0 es [cs]h 578 | SFX S 0 s [^cs]h 579 | SFX S 0 s [ae]u 580 | SFX S 0 x [ae]u 581 | SFX S 0 s [^ae]u 582 | SFX S 0 s [^hsuxyz] 583 | SFX P Y 6 584 | SFX P y iness [^aeiou]y 585 | SFX P 0 ness [aeiou]y 586 | SFX P 0 ness [^y] 587 | SFX P y iness's [^aeiou]y 588 | SFX P 0 ness's [aeiou]y 589 | SFX P 0 ness's [^y] 590 | SFX m Y 20 591 | SFX m 0 sman [bdknmt] 592 | SFX m 0 sman [aeiou][bdklmnt]e 593 | SFX m 0 man [^aeiou][bdklmnt]e 594 | SFX m 0 man [^bdklmnt]e 595 | SFX m 0 man [^bdeknmt] 596 | SFX m 0 smen [bdknmt] 597 | SFX m 0 smen [aeiou][bdklmnt]e 598 | SFX m 0 men [^aeiou][bdklmnt]e 599 | SFX m 0 men [^bdklmnt]e 600 | SFX m 0 men [^bdeknmt] 601 | SFX m 0 sman's [bdknmt] 602 | SFX m 0 sman's [aeiou][bdklmnt]e 603 | SFX m 0 man's [^aeiou][bdklmnt]e 604 | SFX m 0 man's [^bdklmnt]e 605 | SFX m 0 man's [^bdeknmt] 606 | SFX m 0 smen's [bdknmt] 607 | SFX m 0 smen's [aeiou][bdklmnt]e 608 | SFX m 0 men's [^aeiou][bdklmnt]e 609 | SFX m 0 men's [^bdklmnt]e 610 | SFX m 0 men's [^bdeknmt] 611 | SFX 5 Y 15 612 | SFX 5 0 swoman [bdknmt] 613 | SFX 5 0 swoman [aeiou][bdklmnt]e 
614 | SFX 5 0 woman [^aeiou][bdklmnt]e 615 | SFX 5 0 woman [^bdklmnt]e 616 | SFX 5 0 woman [^bdeknmt] 617 | SFX 5 0 swomen [bdknmt] 618 | SFX 5 0 swomen [aeiou][bdklmnt]e 619 | SFX 5 0 women [^aeiou][bdklmnt]e 620 | SFX 5 0 women [^bdklmnt]e 621 | SFX 5 0 women [^bdeknmt] 622 | SFX 5 0 swoman's [bdknmt] 623 | SFX 5 0 swoman's [aeiou][bdklmnt]e 624 | SFX 5 0 woman's [^aeiou][bdklmnt]e 625 | SFX 5 0 woman's [^bdklmnt]e 626 | SFX 5 0 woman's [^bdeknmt] 627 | SFX 6 Y 3 628 | SFX 6 y iful [^aeiou]y 629 | SFX 6 0 ful [aeiou]y 630 | SFX 6 0 ful [^y] 631 | SFX j Y 3 632 | SFX j y ifully [^aeiou]y 633 | SFX j 0 fully [aeiou]y 634 | SFX j 0 fully [^y] 635 | SFX p Y 5 636 | SFX p y iless [^aeiou]y 637 | SFX p 0 less [aeiou]y 638 | SFX p 0 ess ll 639 | SFX p 0 less [^l]l 640 | SFX p 0 less [^ly] 641 | SFX Q Y 44 642 | SFX Q 0 tise a 643 | SFX Q e ise [^l]e 644 | SFX Q le ilise [^aeiou]le 645 | SFX Q e ise [aeiou]le 646 | SFX Q um ise um 647 | SFX Q 0 ise [^u]m 648 | SFX Q s se is 649 | SFX Q 0 ise [^i]s 650 | SFX Q y ise [^aeiou]y 651 | SFX Q 0 ise [aeiou]y 652 | SFX Q 0 ise [^aemsy] 653 | SFX Q 0 tises a 654 | SFX Q e ises [^l]e 655 | SFX Q le ilises [^aeiou]le 656 | SFX Q e ises [aeiou]le 657 | SFX Q um ises um 658 | SFX Q 0 ises [^u]m 659 | SFX Q s ses is 660 | SFX Q 0 ises [^i]s 661 | SFX Q y ises [^aeiou]y 662 | SFX Q 0 ises [aeiou]y 663 | SFX Q 0 ises [^aemsy] 664 | SFX Q 0 tised a 665 | SFX Q e ised [^l]e 666 | SFX Q le ilised [^aeiou]le 667 | SFX Q e ised [aeiou]le 668 | SFX Q um ised um 669 | SFX Q 0 ised [^u]m 670 | SFX Q s sed is 671 | SFX Q 0 ised [^i]s 672 | SFX Q y ised [^aeiou]y 673 | SFX Q 0 ised [aeiou]y 674 | SFX Q 0 ised [^aemsy] 675 | SFX Q 0 tising a 676 | SFX Q e ising [^l]e 677 | SFX Q le ilising [^aeiou]le 678 | SFX Q e ising [aeiou]le 679 | SFX Q um ising um 680 | SFX Q 0 ising [^u]m 681 | SFX Q s sing is 682 | SFX Q 0 ising [^i]s 683 | SFX Q y ising [^aeiou]y 684 | SFX Q 0 ising [aeiou]y 685 | SFX Q 0 ising [^aemsy] 686 | SFX 8 Y 44 687 | SFX 8 0 tize 
a 688 | SFX 8 e ize [^l]e 689 | SFX 8 le ilize [^aeiou]le 690 | SFX 8 e ize [aeiou]le 691 | SFX 8 um ize um 692 | SFX 8 0 ize [^u]m 693 | SFX 8 s ze is 694 | SFX 8 0 ize [^i]s 695 | SFX 8 y ize [^aeiou]y 696 | SFX 8 0 ize [aeiou]y 697 | SFX 8 0 ize [^aemsy] 698 | SFX 8 0 tizes a 699 | SFX 8 e izes [^l]e 700 | SFX 8 le ilizes [^aeiou]le 701 | SFX 8 e izes [aeiou]le 702 | SFX 8 um izes um 703 | SFX 8 0 izes [^u]m 704 | SFX 8 s zes is 705 | SFX 8 0 izes [^i]s 706 | SFX 8 y izes [^aeiou]y 707 | SFX 8 0 izes [aeiou]y 708 | SFX 8 0 izes [^aemsy] 709 | SFX 8 0 tized a 710 | SFX 8 e ized [^l]e 711 | SFX 8 le ilized [^aeiou]le 712 | SFX 8 e ized [aeiou]le 713 | SFX 8 um ized um 714 | SFX 8 0 ized [^u]m 715 | SFX 8 s zed is 716 | SFX 8 0 ized [^i]s 717 | SFX 8 y ized [^aeiou]y 718 | SFX 8 0 ized [aeiou]y 719 | SFX 8 0 ized [^aemsy] 720 | SFX 8 0 tizing a 721 | SFX 8 e izing [^l]e 722 | SFX 8 le ilizing [^aeiou]le 723 | SFX 8 e izing [aeiou]le 724 | SFX 8 um izing um 725 | SFX 8 0 izing [^u]m 726 | SFX 8 s zing is 727 | SFX 8 0 izing [^i]s 728 | SFX 8 y izing [^aeiou]y 729 | SFX 8 0 izing [aeiou]y 730 | SFX 8 0 izing [^aemsy] 731 | SFX q Y 22 732 | SFX q 0 tisation a 733 | SFX q e isation [^l]e 734 | SFX q le ilisation [^aeiou]le 735 | SFX q e isation [aeiou]le 736 | SFX q um isation um 737 | SFX q 0 isation [^u]m 738 | SFX q s sation is 739 | SFX q 0 isation [^i]s 740 | SFX q y isation [^aeiou]y 741 | SFX q 0 isation [aeiou]y 742 | SFX q 0 isation [^aemsy] 743 | SFX q 0 tisations a 744 | SFX q e isations [^l]e 745 | SFX q le ilisations [^aeiou]le 746 | SFX q e isations [aeiou]le 747 | SFX q um isations um 748 | SFX q 0 isations [^u]m 749 | SFX q s sations is 750 | SFX q 0 isations [^i]s 751 | SFX q y isations [^aeiou]y 752 | SFX q 0 isations [aeiou]y 753 | SFX q 0 isations [^aemsy] 754 | SFX - Y 22 755 | SFX - 0 tization a 756 | SFX - e ization [^l]e 757 | SFX - le ilization [^aeiou]le 758 | SFX - e ization [aeiou]le 759 | SFX - um ization um 760 | SFX - 0 ization [^u]m 761 
| SFX - s zation is 762 | SFX - 0 ization [^i]s 763 | SFX - y ization [^aeiou]y 764 | SFX - 0 ization [aeiou]y 765 | SFX - 0 ization [^aemsy] 766 | SFX - 0 tizations a 767 | SFX - e izations [^l]e 768 | SFX - le ilizations [^aeiou]le 769 | SFX - e izations [aeiou]le 770 | SFX - um izations um 771 | SFX - 0 izations [^u]m 772 | SFX - s zations is 773 | SFX - 0 izations [^i]s 774 | SFX - y izations [^aeiou]y 775 | SFX - 0 izations [aeiou]y 776 | SFX - 0 izations [^aemsy] 777 | SFX s Y 33 778 | SFX s 0 tiser a 779 | SFX s e iser [^l]e 780 | SFX s le iliser [^aeiou]le 781 | SFX s e iser [aeiou]le 782 | SFX s um iser um 783 | SFX s 0 iser [^u]m 784 | SFX s s ser is 785 | SFX s 0 iser [^i]s 786 | SFX s y iser [^aeiou]y 787 | SFX s 0 iser [aeiou]y 788 | SFX s 0 iser [^aemsy] 789 | SFX s 0 tisers a 790 | SFX s e isers [^l]e 791 | SFX s le ilisers [^aeiou]le 792 | SFX s e isers [aeiou]le 793 | SFX s um isers um 794 | SFX s 0 isers [^u]m 795 | SFX s s sers is 796 | SFX s 0 isers [^i]s 797 | SFX s y isers [^aeiou]y 798 | SFX s 0 isers [aeiou]y 799 | SFX s 0 isers [^aemsy] 800 | SFX s 0 tiser's a 801 | SFX s e iser's [^l]e 802 | SFX s le iliser's [^aeiou]le 803 | SFX s e iser's [aeiou]le 804 | SFX s um iser's um 805 | SFX s 0 iser's [^u]m 806 | SFX s s ser's is 807 | SFX s 0 iser's [^i]s 808 | SFX s y iser's [^aeiou]y 809 | SFX s 0 iser's [aeiou]y 810 | SFX s 0 iser's [^aemsy] 811 | SFX 9 Y 33 812 | SFX 9 0 tizer a 813 | SFX 9 e izer [^l]e 814 | SFX 9 le ilizer [^aeiou]le 815 | SFX 9 e izer [aeiou]le 816 | SFX 9 um izer um 817 | SFX 9 0 izer [^u]m 818 | SFX 9 s zer is 819 | SFX 9 0 izer [^i]s 820 | SFX 9 y izer [^aeiou]y 821 | SFX 9 0 izer [aeiou]y 822 | SFX 9 0 izer [^aemsy] 823 | SFX 9 0 tizers a 824 | SFX 9 e izers [^l]e 825 | SFX 9 le ilizers [^aeiou]le 826 | SFX 9 e izers [aeiou]le 827 | SFX 9 um izers um 828 | SFX 9 0 izers [^u]m 829 | SFX 9 s zers is 830 | SFX 9 0 izers [^i]s 831 | SFX 9 y izers [^aeiou]y 832 | SFX 9 0 izers [aeiou]y 833 | SFX 9 0 izers [^aemsy] 834 | 
SFX 9 0 tizer's a 835 | SFX 9 e izer's [^l]e 836 | SFX 9 le ilizer's [^aeiou]le 837 | SFX 9 e izer's [aeiou]le 838 | SFX 9 um izer's um 839 | SFX 9 0 izer's [^u]m 840 | SFX 9 s zer's is 841 | SFX 9 0 izer's [^i]s 842 | SFX 9 y izer's [^aeiou]y 843 | SFX 9 0 izer's [aeiou]y 844 | SFX 9 0 izer's [^aemsy] 845 | SFX t Y 22 846 | SFX t 0 tisable a 847 | SFX t e isable [^l]e 848 | SFX t le ilisable [^aeiou]le 849 | SFX t e isable [aeiou]le 850 | SFX t um isable um 851 | SFX t 0 isable [^u]m 852 | SFX t s sable is 853 | SFX t 0 isable [^i]s 854 | SFX t y isable [^aeiou]y 855 | SFX t 0 isable [aeiou]y 856 | SFX t 0 isable [^aemsy] 857 | SFX t 0 tisability a 858 | SFX t e isability [^l]e 859 | SFX t le ilisability [^aeiou]le 860 | SFX t e isability [aeiou]le 861 | SFX t um isability um 862 | SFX t 0 isability [^u]m 863 | SFX t s sability is 864 | SFX t 0 isability [^i]s 865 | SFX t y isability [^aeiou]y 866 | SFX t 0 isability [aeiou]y 867 | SFX t 0 isability [^aemsy] 868 | SFX + Y 22 869 | SFX + 0 tizable a 870 | SFX + e izable [^l]e 871 | SFX + le ilizable [^aeiou]le 872 | SFX + e izable [aeiou]le 873 | SFX + um izable um 874 | SFX + 0 izable [^u]m 875 | SFX + s zable is 876 | SFX + 0 izable [^i]s 877 | SFX + y izable [^aeiou]y 878 | SFX + 0 izable [aeiou]y 879 | SFX + 0 izable [^aemsy] 880 | SFX + 0 tizability a 881 | SFX + e izability [^l]e 882 | SFX + le ilizability [^aeiou]le 883 | SFX + e izability [aeiou]le 884 | SFX + um izability um 885 | SFX + 0 izability [^u]m 886 | SFX + s zability is 887 | SFX + 0 izability [^i]s 888 | SFX + y izability [^aeiou]y 889 | SFX + 0 izability [aeiou]y 890 | SFX + 0 izability [^aemsy] 891 | SFX M Y 1 892 | SFX M 0 's . 
893 | SFX B Y 48 894 | SFX B e able [^acegilotu]e 895 | SFX B 0 able [acegilou]e 896 | SFX B te ble ate 897 | SFX B e able [^a]te 898 | SFX B 0 bable [^aeio][aeiou]b 899 | SFX B 0 kable [^aeio][aeiou]c 900 | SFX B 0 dable [^aeio][aeiou]d 901 | SFX B 0 fable [^aeio][aeiou]f 902 | SFX B 0 gable [^aeio][aeiou]g 903 | SFX B 0 kable [^aeio][aeiou]k 904 | SFX B 0 lable [^aeio][aeiou]l 905 | SFX B 0 mable [^aeio][aeiou]m 906 | SFX B 0 nable [^aeio][aeiou]n 907 | SFX B 0 pable [^aeio][aeiou]p 908 | SFX B 0 rable [^aeio][aeiou]r 909 | SFX B 0 sable [^aeio][aeiou]s 910 | SFX B 0 table [^aeio][aeiou]t 911 | SFX B 0 vable [^aeio][aeiou]v 912 | SFX B 0 zable [^aeio][aeiou]z 913 | SFX B 0 able [aeio][aeiou][bcdfgklmnprstvz] 914 | SFX B 0 able [^aeiou][bcdfgklmnprstvz] 915 | SFX B y iable [^aeiou]y 916 | SFX B 0 able [aeiou]y 917 | SFX B 0 able [^ebcdfgklmnprstvzy] 918 | SFX B e ability [^acegilotu]e 919 | SFX B 0 ability [acegilou]e 920 | SFX B te bility ate 921 | SFX B e ability [^a]te 922 | SFX B 0 bability [^aeio][aeiou]b 923 | SFX B 0 kability [^aeio][aeiou]c 924 | SFX B 0 dability [^aeio][aeiou]d 925 | SFX B 0 fability [^aeio][aeiou]f 926 | SFX B 0 gability [^aeio][aeiou]g 927 | SFX B 0 kability [^aeio][aeiou]k 928 | SFX B 0 lability [^aeio][aeiou]l 929 | SFX B 0 mability [^aeio][aeiou]m 930 | SFX B 0 nability [^aeio][aeiou]n 931 | SFX B 0 pability [^aeio][aeiou]p 932 | SFX B 0 rability [^aeio][aeiou]r 933 | SFX B 0 sability [^aeio][aeiou]s 934 | SFX B 0 tability [^aeio][aeiou]t 935 | SFX B 0 vability [^aeio][aeiou]v 936 | SFX B 0 zability [^aeio][aeiou]z 937 | SFX B 0 ability [aeio][aeiou][bcdfgklmnprstvz] 938 | SFX B 0 ability [^aeiou][bcdfgklmnprstvz] 939 | SFX B y iability [^aeiou]y 940 | SFX B 0 ability [aeiou]y 941 | SFX B 0 ability [^ebcdfgklmnprstvzy] 942 | SFX 7 Y 9 943 | SFX 7 e able [acegilou]e 944 | SFX 7 0 able [^acegilou]e 945 | SFX 7 0 kable [^aeio][aeiou]c 946 | SFX 7 0 lable [^aeio][aeiou]l 947 | SFX 7 0 able [aeio][aeiou][cl] 948 | SFX 7 0 able 
[^aeiou][cl] 949 | SFX 7 y iable [^aeiou]y 950 | SFX 7 0 able [aeiou]y 951 | SFX 7 0 able [^cely] 952 | SFX g Y 9 953 | SFX g e ability [^acegilou]e 954 | SFX g 0 ability [acegilou]e 955 | SFX g 0 kability [^aeio][aeiou]c 956 | SFX g 0 lability [^aeio][aeiou]l 957 | SFX g 0 ability [aeio][aeiou][cl] 958 | SFX g 0 ability [^aeiou][cl] 959 | SFX g y iability [^aeiou]y 960 | SFX g 0 ability [aeiou]y 961 | SFX g 0 ability [^cely] 962 | SFX l Y 9 963 | SFX l e ably [^acegilou]e 964 | SFX l 0 ably [acegilou]e 965 | SFX l 0 kably [^aeio][aeiou]c 966 | SFX l 0 lably [^aeio][aeiou]l 967 | SFX l 0 ably [aeio][aeiou][cl] 968 | SFX l 0 ably [^aeiou][cl] 969 | SFX l y iably [^aeiou]y 970 | SFX l 0 ably [aeiou]y 971 | SFX l 0 ably [^cely] 972 | SFX b Y 3 973 | SFX b e ible [^aeiou]e 974 | SFX b 0 ible [aeiou]e 975 | SFX b 0 ible [^e] 976 | SFX L Y 12 977 | SFX L 0 ament m 978 | SFX L y iment [^aeiou]y 979 | SFX L 0 ment [aeiou]y 980 | SFX L 0 ment [^my] 981 | SFX L 0 aments m 982 | SFX L y iments [^aeiou]y 983 | SFX L 0 ments [aeiou]y 984 | SFX L 0 ments [^my] 985 | SFX L 0 ament's m 986 | SFX L y iment's [^aeiou]y 987 | SFX L 0 ment's [aeiou]y 988 | SFX L 0 ment's [^my] 989 | SFX Z Y 22 990 | SFX Z e y [^aeiouy]e 991 | SFX Z 0 y [aeiouy]e 992 | SFX Z 0 ey [aiouy] 993 | SFX Z 0 by [^aeio][aeiou]b 994 | SFX Z 0 ky [^aeio][aeiou]c 995 | SFX Z 0 dy [^aeio][aeiou]d 996 | SFX Z 0 fy [^aeio][aeiou]f 997 | SFX Z 0 gy [^aeio][aeiou]g 998 | SFX Z 0 ky [^aeio][aeiou]k 999 | SFX Z 0 ly [^aeio][aeiou]l 1000 | SFX Z 0 my [^aeio][aeiou]m 1001 | SFX Z 0 ny [^aeio][aiou]n 1002 | SFX Z 0 py [^aeio][aeiou]p 1003 | SFX Z 0 ry [^aeio][aiou]r 1004 | SFX Z 0 sy [^aeio][aeiou]s 1005 | SFX Z 0 ty [^aeio][aiou]t 1006 | SFX Z 0 vy [^aeio][aeiou]v 1007 | SFX Z 0 zy [^aeio][aeiou]z 1008 | SFX Z 0 y [^aeio]e[nrt] 1009 | SFX Z 0 y [aeio][aeiou][bcdfgklmnprstvz] 1010 | SFX Z 0 y [^aeiou][bcdfgklmnprstvz] 1011 | SFX Z 0 y [^aebcdfgiklmnoprstuvyz] 1012 | SFX 2 Y 21 1013 | SFX 2 e iness [^aeiouy]e 1014 | SFX 2 0 
iness [aeiouy]e 1015 | SFX 2 0 biness [^aeio][aeiou]b 1016 | SFX 2 0 kiness [^aeio][aeiou]c 1017 | SFX 2 0 diness [^aeio][aeiou]d 1018 | SFX 2 0 finess [^aeio][aeiou]f 1019 | SFX 2 0 giness [^aeio][aeiou]g 1020 | SFX 2 0 kiness [^aeio][aeiou]k 1021 | SFX 2 0 liness [^aeio][aeiou]l 1022 | SFX 2 0 miness [^aeio][aeiou]m 1023 | SFX 2 0 niness [^aeio][aiou]n 1024 | SFX 2 0 piness [^aeio][aeiou]p 1025 | SFX 2 0 riness [^aeio][aiou]r 1026 | SFX 2 0 siness [^aeio][aeiou]s 1027 | SFX 2 0 tiness [^aeio][aiou]t 1028 | SFX 2 0 viness [^aeio][aeiou]v 1029 | SFX 2 0 ziness [^aeio][aeiou]z 1030 | SFX 2 0 iness [^aeio]e[nrt] 1031 | SFX 2 0 iness [aeio][aeiou][bcdfgklmnprstvz] 1032 | SFX 2 0 iness [^aeiou][bcdfgklmnprstvz] 1033 | SFX 2 0 iness [^ebcdfgklmnprstvz] 1034 | SFX z Y 24 1035 | SFX z e ily [^aeiouy]e 1036 | SFX z 0 ily [aeiouy]e 1037 | SFX z 0 ily [aiou]y 1038 | SFX z ey ily ey 1039 | SFX z y ily [^aeiou]y 1040 | SFX z 0 bily [^aeio][aeiou]b 1041 | SFX z 0 kily [^aeio][aeiou]c 1042 | SFX z 0 dily [^aeio][aeiou]d 1043 | SFX z 0 fily [^aeio][aeiou]f 1044 | SFX z 0 gily [^aeio][aeiou]g 1045 | SFX z 0 kily [^aeio][aeiou]k 1046 | SFX z 0 lily [^aeio][aeiou]l 1047 | SFX z 0 mily [^aeio][aeiou]m 1048 | SFX z 0 nily [^aeio][aiou]n 1049 | SFX z 0 pily [^aeio][aeiou]p 1050 | SFX z 0 rily [^aeio][aiou]r 1051 | SFX z 0 sily [^aeio][aeiou]s 1052 | SFX z 0 tily [^aeio][aiou]t 1053 | SFX z 0 vily [^aeio][aeiou]v 1054 | SFX z 0 zily [^aeio][aeiou]z 1055 | SFX z 0 ily [^aeio]e[nrt] 1056 | SFX z 0 ily [aeio][aeiou][bcdfgklmnprstvyz] 1057 | SFX z 0 ily [^aeiou][bcdfgklmnprstvyz] 1058 | SFX z 0 ily [^ebcdfgklmnprstvyz] 1059 | SFX y Y 15 1060 | SFX y e ory te 1061 | SFX y e atory [mr]e 1062 | SFX y e ary se 1063 | SFX y 0 ry [^mrst]e 1064 | SFX y 0 ory [^aeous]t 1065 | SFX y 0 ry [aeous]t 1066 | SFX y 0 ery h 1067 | SFX y 0 atory [^i]m 1068 | SFX y im matory im 1069 | SFX y 0 ory s 1070 | SFX y 0 ary ion 1071 | SFX y 0 ry [^i]on 1072 | SFX y 0 nery [aiu]n 1073 | SFX y 0 ry [^aiou]n 1074 | 
SFX y 0 ry [^ehmstn] 1075 | SFX O Y 12 1076 | SFX O 0 l a 1077 | SFX O e al [^bcgv]e 1078 | SFX O e ial [bcgv]e 1079 | SFX O 0 ial [bcrx] 1080 | SFX O um al um 1081 | SFX O 0 al [^u]m 1082 | SFX O y al ty 1083 | SFX O y ial [^t]y 1084 | SFX O 0 ual [px]t 1085 | SFX O 0 tal [iu]t 1086 | SFX O 0 al [^ipux]t 1087 | SFX O 0 al [^aebcrtxmy] 1088 | SFX o Y 12 1089 | SFX o 0 lly a 1090 | SFX o e ally [^bcgv]e 1091 | SFX o e ially [bcgv]e 1092 | SFX o 0 ially [bcrx] 1093 | SFX o um ally um 1094 | SFX o 0 ally [^u]m 1095 | SFX o y ally ty 1096 | SFX o y ially [^t]y 1097 | SFX o 0 ually [px]t 1098 | SFX o 0 tally [iu]t 1099 | SFX o 0 ally [^ipux]t 1100 | SFX o 0 ally [^aebcrtxmy] 1101 | SFX W Y 21 1102 | SFX W ce tific ce 1103 | SFX W e atic me 1104 | SFX W se tic se 1105 | SFX W le ic ble 1106 | SFX W e ic [^b]le 1107 | SFX W e ic [^clms]e 1108 | SFX W 0 lic [ay]l 1109 | SFX W 0 ic [^ay]l 1110 | SFX W us ic us 1111 | SFX W 0 tic [^u]s 1112 | SFX W er ric er 1113 | SFX W 0 ic [^e]r 1114 | SFX W 0 atic [aeiou]m 1115 | SFX W 0 ic [^aeiou]m 1116 | SFX W 0 tic ma 1117 | SFX W a ic [^m]a 1118 | SFX W y etic thy 1119 | SFX W y ic [^t]hy 1120 | SFX W y tic sy 1121 | SFX W y ic [^hs]y 1122 | SFX W 0 ic [^aelmrsy] 1123 | SFX w Y 9 1124 | SFX w e ical e 1125 | SFX w er rical er 1126 | SFX w 0 ical [^e]r 1127 | SFX w 0 atical [aeiou]m 1128 | SFX w 0 ical [^aeiou]m 1129 | SFX w 0 tical ma 1130 | SFX w a ical [^m]a 1131 | SFX w y ical y 1132 | SFX w 0 ical [^aemry] 1133 | SFX 1 Y 9 1134 | SFX 1 e ically e 1135 | SFX 1 er rically er 1136 | SFX 1 0 ically [^e]r 1137 | SFX 1 0 atically [aeiou]m 1138 | SFX 1 0 ically [^aeiou]m 1139 | SFX 1 0 tically ma 1140 | SFX 1 a ically [^m]a 1141 | SFX 1 y ically y 1142 | SFX 1 0 ically [^aemry] 1143 | SFX 3 Y 21 1144 | SFX 3 e ist [^aceiou]e 1145 | SFX 3 ce tist ce 1146 | SFX 3 0 ist [aeiou]e 1147 | SFX 3 y ist [^aeioubp]y 1148 | SFX 3 0 ist [aeioubp]y 1149 | SFX 3 o ist o 1150 | SFX 3 0 ists [^eoy] 1151 | SFX 3 e ists [^aceiou]e 1152 | SFX 3 ce tists 
ce 1153 | SFX 3 0 ists [aeiou]e 1154 | SFX 3 y ists [^aeioubp]y 1155 | SFX 3 0 ists [aeioubp]y 1156 | SFX 3 o ists o 1157 | SFX 3 0 ists [^eoy] 1158 | SFX 3 e ist's [^aceiou]e 1159 | SFX 3 ce tist's ce 1160 | SFX 3 0 ist's [aeiou]e 1161 | SFX 3 y ist's [^aeioubp]y 1162 | SFX 3 0 ist's [aeioubp]y 1163 | SFX 3 o ist's o 1164 | SFX 3 0 ist's [^eoy] 1165 | -------------------------------------------------------------------------------- /data/en_AU.aff: -------------------------------------------------------------------------------- 1 | SET UTF-8 2 | TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ' 3 | ICONV 1 4 | ICONV ’ ' 5 | NOSUGGEST ! 6 | 7 | # ordinal numbers 8 | COMPOUNDMIN 1 9 | # only in compounds: 1th, 2th, 3th 10 | ONLYINCOMPOUND c 11 | # compound rules: 12 | # 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.) 13 | # 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.) 14 | COMPOUNDRULE 2 15 | COMPOUNDRULE n*1t 16 | COMPOUNDRULE n*mp 17 | WORDCHARS 0123456789 18 | 19 | PFX A Y 1 20 | PFX A 0 re . 21 | 22 | PFX I Y 1 23 | PFX I 0 in . 24 | 25 | PFX U Y 1 26 | PFX U 0 un . 27 | 28 | PFX C Y 1 29 | PFX C 0 de . 30 | 31 | PFX E Y 1 32 | PFX E 0 dis . 33 | 34 | PFX F Y 1 35 | PFX F 0 con . 36 | 37 | PFX K Y 1 38 | PFX K 0 pro . 39 | 40 | SFX V N 2 41 | SFX V e ive e 42 | SFX V 0 ive [^e] 43 | 44 | SFX N Y 3 45 | SFX N e ion e 46 | SFX N y ication y 47 | SFX N 0 en [^ey] 48 | 49 | SFX X Y 3 50 | SFX X e ions e 51 | SFX X y ications y 52 | SFX X 0 ens [^ey] 53 | 54 | SFX H N 2 55 | SFX H y ieth y 56 | SFX H 0 th [^y] 57 | 58 | SFX Y Y 1 59 | SFX Y 0 ly . 
60 | 61 | SFX G Y 2 62 | SFX G e ing e 63 | SFX G 0 ing [^e] 64 | 65 | SFX J Y 2 66 | SFX J e ings e 67 | SFX J 0 ings [^e] 68 | 69 | SFX D Y 4 70 | SFX D 0 d e 71 | SFX D y ied [^aeiou]y 72 | SFX D 0 ed [^ey] 73 | SFX D 0 ed [aeiou]y 74 | 75 | SFX T N 4 76 | SFX T 0 st e 77 | SFX T y iest [^aeiou]y 78 | SFX T 0 est [aeiou]y 79 | SFX T 0 est [^ey] 80 | 81 | SFX R Y 4 82 | SFX R 0 r e 83 | SFX R y ier [^aeiou]y 84 | SFX R 0 er [aeiou]y 85 | SFX R 0 er [^ey] 86 | 87 | SFX Z Y 4 88 | SFX Z 0 rs e 89 | SFX Z y iers [^aeiou]y 90 | SFX Z 0 ers [aeiou]y 91 | SFX Z 0 ers [^ey] 92 | 93 | SFX S Y 4 94 | SFX S y ies [^aeiou]y 95 | SFX S 0 s [aeiou]y 96 | SFX S 0 es [sxzh] 97 | SFX S 0 s [^sxzhy] 98 | 99 | SFX P Y 3 100 | SFX P y iness [^aeiou]y 101 | SFX P 0 ness [aeiou]y 102 | SFX P 0 ness [^y] 103 | 104 | SFX M Y 1 105 | SFX M 0 's . 106 | 107 | SFX B Y 3 108 | SFX B 0 able [^aeiou] 109 | SFX B 0 able ee 110 | SFX B e able [^aeiou]e 111 | 112 | SFX L Y 1 113 | SFX L 0 ment . 114 | 115 | REP 90 116 | REP a ei 117 | REP ei a 118 | REP a ey 119 | REP ey a 120 | REP ai ie 121 | REP ie ai 122 | REP alot a_lot 123 | REP are air 124 | REP are ear 125 | REP are eir 126 | REP air are 127 | REP air ere 128 | REP ere air 129 | REP ere ear 130 | REP ere eir 131 | REP ear are 132 | REP ear air 133 | REP ear ere 134 | REP eir are 135 | REP eir ere 136 | REP ch te 137 | REP te ch 138 | REP ch ti 139 | REP ti ch 140 | REP ch tu 141 | REP tu ch 142 | REP ch s 143 | REP s ch 144 | REP ch k 145 | REP k ch 146 | REP f ph 147 | REP ph f 148 | REP gh f 149 | REP f gh 150 | REP i igh 151 | REP igh i 152 | REP i uy 153 | REP uy i 154 | REP i ee 155 | REP ee i 156 | REP j di 157 | REP di j 158 | REP j gg 159 | REP gg j 160 | REP j ge 161 | REP ge j 162 | REP s ti 163 | REP ti s 164 | REP s ci 165 | REP ci s 166 | REP k cc 167 | REP cc k 168 | REP k qu 169 | REP qu k 170 | REP kw qu 171 | REP o eau 172 | REP eau o 173 | REP o ew 174 | REP ew o 175 | REP oo ew 176 | REP ew oo 177 | REP ew ui 178 | 
REP ui ew 179 | REP oo ui 180 | REP ui oo 181 | REP ew u 182 | REP u ew 183 | REP oo u 184 | REP u oo 185 | REP u oe 186 | REP oe u 187 | REP u ieu 188 | REP ieu u 189 | REP ue ew 190 | REP ew ue 191 | REP uff ough 192 | REP oo ieu 193 | REP ieu oo 194 | REP ier ear 195 | REP ear ier 196 | REP ear air 197 | REP air ear 198 | REP w qu 199 | REP qu w 200 | REP z ss 201 | REP ss z 202 | REP shun tion 203 | REP shun sion 204 | REP shun cion 205 | REP size cise 206 | -------------------------------------------------------------------------------- /data/en_US-large.aff: -------------------------------------------------------------------------------- 1 | SET UTF8 2 | TRY esianrtolcdugmphbyfvkwzESIANRTOLCDUGMPHBYFVKWZ' 3 | ICONV 1 4 | ICONV ’ ' 5 | NOSUGGEST ! 6 | 7 | # ordinal numbers 8 | COMPOUNDMIN 1 9 | # only in compounds: 1th, 2th, 3th 10 | ONLYINCOMPOUND c 11 | # compound rules: 12 | # 1. [0-9]*1[0-9]th (10th, 11th, 12th, 56714th, etc.) 13 | # 2. [0-9]*[02-9](1st|2nd|3rd|[4-9]th) (21st, 22nd, 123rd, 1234th, etc.) 14 | COMPOUNDRULE 2 15 | COMPOUNDRULE n*1t 16 | COMPOUNDRULE n*mp 17 | WORDCHARS 0123456789 18 | 19 | PFX A Y 1 20 | PFX A 0 re . 21 | 22 | PFX I Y 1 23 | PFX I 0 in . 24 | 25 | PFX U Y 1 26 | PFX U 0 un . 27 | 28 | PFX C Y 1 29 | PFX C 0 de . 30 | 31 | PFX E Y 1 32 | PFX E 0 dis . 33 | 34 | PFX F Y 1 35 | PFX F 0 con . 36 | 37 | PFX K Y 1 38 | PFX K 0 pro . 39 | 40 | SFX V N 2 41 | SFX V e ive e 42 | SFX V 0 ive [^e] 43 | 44 | SFX N Y 3 45 | SFX N e ion e 46 | SFX N y ication y 47 | SFX N 0 en [^ey] 48 | 49 | SFX X Y 3 50 | SFX X e ions e 51 | SFX X y ications y 52 | SFX X 0 ens [^ey] 53 | 54 | SFX H N 2 55 | SFX H y ieth y 56 | SFX H 0 th [^y] 57 | 58 | SFX Y Y 1 59 | SFX Y 0 ly . 
60 | 61 | SFX G Y 2 62 | SFX G e ing e 63 | SFX G 0 ing [^e] 64 | 65 | SFX J Y 2 66 | SFX J e ings e 67 | SFX J 0 ings [^e] 68 | 69 | SFX D Y 4 70 | SFX D 0 d e 71 | SFX D y ied [^aeiou]y 72 | SFX D 0 ed [^ey] 73 | SFX D 0 ed [aeiou]y 74 | 75 | SFX T N 4 76 | SFX T 0 st e 77 | SFX T y iest [^aeiou]y 78 | SFX T 0 est [aeiou]y 79 | SFX T 0 est [^ey] 80 | 81 | SFX R Y 4 82 | SFX R 0 r e 83 | SFX R y ier [^aeiou]y 84 | SFX R 0 er [aeiou]y 85 | SFX R 0 er [^ey] 86 | 87 | SFX Z Y 4 88 | SFX Z 0 rs e 89 | SFX Z y iers [^aeiou]y 90 | SFX Z 0 ers [aeiou]y 91 | SFX Z 0 ers [^ey] 92 | 93 | SFX S Y 4 94 | SFX S y ies [^aeiou]y 95 | SFX S 0 s [aeiou]y 96 | SFX S 0 es [sxzh] 97 | SFX S 0 s [^sxzhy] 98 | 99 | SFX P Y 3 100 | SFX P y iness [^aeiou]y 101 | SFX P 0 ness [aeiou]y 102 | SFX P 0 ness [^y] 103 | 104 | SFX M Y 1 105 | SFX M 0 's . 106 | 107 | SFX B Y 3 108 | SFX B 0 able [^aeiou] 109 | SFX B 0 able ee 110 | SFX B e able [^aeiou]e 111 | 112 | SFX L Y 1 113 | SFX L 0 ment . 114 | 115 | REP 90 116 | REP a ei 117 | REP ei a 118 | REP a ey 119 | REP ey a 120 | REP ai ie 121 | REP ie ai 122 | REP alot a_lot 123 | REP are air 124 | REP are ear 125 | REP are eir 126 | REP air are 127 | REP air ere 128 | REP ere air 129 | REP ere ear 130 | REP ere eir 131 | REP ear are 132 | REP ear air 133 | REP ear ere 134 | REP eir are 135 | REP eir ere 136 | REP ch te 137 | REP te ch 138 | REP ch ti 139 | REP ti ch 140 | REP ch tu 141 | REP tu ch 142 | REP ch s 143 | REP s ch 144 | REP ch k 145 | REP k ch 146 | REP f ph 147 | REP ph f 148 | REP gh f 149 | REP f gh 150 | REP i igh 151 | REP igh i 152 | REP i uy 153 | REP uy i 154 | REP i ee 155 | REP ee i 156 | REP j di 157 | REP di j 158 | REP j gg 159 | REP gg j 160 | REP j ge 161 | REP ge j 162 | REP s ti 163 | REP ti s 164 | REP s ci 165 | REP ci s 166 | REP k cc 167 | REP cc k 168 | REP k qu 169 | REP qu k 170 | REP kw qu 171 | REP o eau 172 | REP eau o 173 | REP o ew 174 | REP ew o 175 | REP oo ew 176 | REP ew oo 177 | REP ew ui 178 | 
REP ui ew 179 | REP oo ui 180 | REP ui oo 181 | REP ew u 182 | REP u ew 183 | REP oo u 184 | REP u oo 185 | REP u oe 186 | REP oe u 187 | REP u ieu 188 | REP ieu u 189 | REP ue ew 190 | REP ew ue 191 | REP uff ough 192 | REP oo ieu 193 | REP ieu oo 194 | REP ier ear 195 | REP ear ier 196 | REP ear air 197 | REP air ear 198 | REP w qu 199 | REP qu w 200 | REP z ss 201 | REP ss z 202 | REP shun tion 203 | REP shun sion 204 | REP shun cion 205 | REP size cise 206 | -------------------------------------------------------------------------------- /data/es_ANY.aff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lukeapage/node-markdown-spellcheck/78ad8e3c21fe982a1e4d1fc1fbf421fb57c9739b/data/es_ANY.aff -------------------------------------------------------------------------------- /data/es_ANY.dic: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lukeapage/node-markdown-spellcheck/78ad8e3c21fe982a1e4d1fc1fbf421fb57c9739b/data/es_ANY.dic -------------------------------------------------------------------------------- /es5/cli-interactive.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | exports.__esModule = true; 4 | 5 | exports.default = function (file, src, options, fileProcessed) { 6 | spellAndFixFile(file, src, options, function () { 7 | _spellConfig2.default.writeFile(function () { 8 | if (options.relativeSpellingFiles) { 9 | _spellConfig2.default.writeFile(fileProcessed, true); 10 | } else { 11 | fileProcessed(); 12 | } 13 | }); 14 | }); 15 | }; 16 | 17 | var _index = require("./index"); 18 | 19 | var _index2 = _interopRequireDefault(_index); 20 | 21 | var _spellcheck = require("./spellcheck"); 22 | 23 | var _spellcheck2 = _interopRequireDefault(_spellcheck); 24 | 25 | var _inquirer = require("inquirer"); 26 | 27 | var _inquirer2 = _interopRequireDefault(_inquirer); 28 | 29 
| var _filters = require("./filters"); 30 | 31 | var _filters2 = _interopRequireDefault(_filters); 32 | 33 | var _context = require("./context"); 34 | 35 | var _context2 = _interopRequireDefault(_context); 36 | 37 | var _spellConfig = require("./spell-config"); 38 | 39 | var _spellConfig2 = _interopRequireDefault(_spellConfig); 40 | 41 | var _writeCorrections = require("./write-corrections"); 42 | 43 | var _writeCorrections2 = _interopRequireDefault(_writeCorrections); 44 | 45 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } 46 | 47 | var ACTION_IGNORE = "ignore"; 48 | var ACTION_FILE_IGNORE = "fileignore"; 49 | var ACTION_FILE_IGNORE_RELATIVE = "fileignore-relative"; 50 | var ACTION_ADD = "add"; 51 | var ACTION_ADD_CASED = "add-cased"; 52 | var ACTION_ADD_RELATIVE = "add-relative"; 53 | var ACTION_ADD_CASED_RELATIVE = "add-cased-relative"; 54 | var ACTION_CORRECT = "enter"; 55 | 56 | var CHOICE_IGNORE = { name: "Ignore", value: ACTION_IGNORE }; 57 | var CHOICE_FILE_IGNORE = { name: "Add to file ignores", value: ACTION_FILE_IGNORE }; 58 | var CHOICE_FILE_IGNORE_RELATIVE = { name: "[Relative] Add to file ignores", value: ACTION_FILE_IGNORE_RELATIVE }; 59 | var CHOICE_ADD = { name: "Add to dictionary - case insensitive", value: ACTION_ADD }; 60 | var CHOICE_ADD_CASED = { name: "Add to dictionary - case sensitive", value: ACTION_ADD_CASED }; 61 | var CHOICE_ADD_RELATIVE = { name: "[Relative] Add to dictionary - case insensitive", value: ACTION_ADD_RELATIVE }; 62 | var CHOICE_ADD_CASED_RELATIVE = { name: "[Relative] Add to dictionary - case sensitive", value: ACTION_ADD_CASED_RELATIVE }; 63 | var CHOICE_CORRECT = { name: "Enter correct spelling", value: ACTION_CORRECT }; 64 | 65 | var previousChoices = Object.create(null); 66 | 67 | function incorrectWordChoices(word, message, filename, options, done) { 68 | var suggestions = options.suggestions ? 
_spellcheck2.default.suggest(word) : []; 69 | 70 | var choices = [CHOICE_IGNORE, options.relativeSpellingFiles ? CHOICE_FILE_IGNORE_RELATIVE : CHOICE_FILE_IGNORE, CHOICE_ADD, CHOICE_CORRECT]; 71 | 72 | if (options.relativeSpellingFiles) { 73 | choices.splice(4, 0, CHOICE_ADD_RELATIVE); 74 | } 75 | 76 | if (word.match(/[A-Z]/)) { 77 | choices.splice(3, 0, CHOICE_ADD_CASED); 78 | if (options.relativeSpellingFiles) { 79 | choices.splice(5, 0, CHOICE_ADD_CASED_RELATIVE); 80 | } 81 | } 82 | 83 | var defaultAction = ACTION_CORRECT; 84 | if (previousChoices[word]) { 85 | var previousAction = previousChoices[word]; 86 | if (previousAction.newWord) { 87 | var suggestionIndex = suggestions.indexOf(previousAction.newWord); 88 | if (suggestions.indexOf(previousAction.newWord) >= 0) { 89 | defaultAction = suggestionIndex.toString(); 90 | } else { 91 | suggestions.unshift(previousAction.newWord); 92 | defaultAction = "0"; 93 | } 94 | } else { 95 | defaultAction = previousAction.action; 96 | } 97 | } 98 | 99 | suggestions.forEach(function (suggestion, index) { 100 | choices.push({ 101 | key: index, 102 | name: suggestion, 103 | value: index.toString() 104 | }); 105 | }); 106 | 107 | _inquirer2.default.prompt([{ 108 | type: "list", 109 | name: "action", 110 | message: message, 111 | choices: choices, 112 | default: defaultAction 113 | }]).then(function (answer) { 114 | switch (answer.action) { 115 | case ACTION_ADD: 116 | word = word.toLowerCase(); 117 | /* fallthrough */ 118 | case ACTION_ADD_CASED: 119 | _spellcheck2.default.addWord(word); 120 | _spellConfig2.default.addToGlobalDictionary(word); 121 | done(); 122 | break; 123 | case ACTION_ADD_RELATIVE: 124 | word = word.toLowerCase(); 125 | /* fallthrough */ 126 | case ACTION_ADD_CASED_RELATIVE: 127 | _spellcheck2.default.addWord(word); 128 | _spellConfig2.default.addToGlobalDictionary(word, true); 129 | done(); 130 | break; 131 | case ACTION_CORRECT: 132 | getCorrectWord(word, filename, options, done); 133 | break; 134 | case 
ACTION_FILE_IGNORE: 135 | _spellcheck2.default.addWord(word, true); 136 | _spellConfig2.default.addToFileDictionary(filename, word); 137 | previousChoices[word] = answer; 138 | done(); 139 | break; 140 | case ACTION_FILE_IGNORE_RELATIVE: 141 | _spellcheck2.default.addWord(word, true); 142 | _spellConfig2.default.addToFileDictionary(filename, word, true); 143 | previousChoices[word] = answer; 144 | done(); 145 | break; 146 | case ACTION_IGNORE: 147 | _spellcheck2.default.addWord(word); 148 | done(); 149 | break; 150 | default: 151 | var suggestionId = Number(answer.action); 152 | if (isNaN(suggestionId) || suggestionId >= suggestions.length) { 153 | throw new Error("unrecognise prompt action"); 154 | } 155 | previousChoices[word] = { newWord: suggestions[suggestionId] }; 156 | done(suggestions[Number(answer.action)]); 157 | break; 158 | } 159 | }); 160 | } 161 | 162 | function getCorrectWord(word, filename, options, done) { 163 | _inquirer2.default.prompt([{ 164 | type: "input", 165 | name: "word", 166 | message: "correct word >", 167 | default: word 168 | }]).then(function (answer) { 169 | var newWords = answer.word.split(/\s/g); 170 | var hasMistake = false; 171 | 172 | for (var i = 0; i < newWords.length; i++) { 173 | var newWord = newWords[i]; 174 | if (_filters2.default.filter([{ word: newWord }], options).length > 0 && !_spellcheck2.default.checkWord(newWord)) { 175 | hasMistake = true; 176 | } 177 | } 178 | 179 | if (hasMistake) { 180 | if (newWords.length === 1) { 181 | incorrectWordChoices(answer.word, "Corrected word is not in dictionary..", filename, options, function (newNewWord) { 182 | var finalNewWord = newNewWord || answer.word; 183 | previousChoices[word] = { newWord: finalNewWord }; 184 | done(finalNewWord); 185 | }); 186 | return; 187 | } 188 | 189 | console.log("Detected some words in your correction that may be invalid. 
Re-run to check."); 190 | } 191 | 192 | previousChoices[word] = { newWord: answer.word }; 193 | done(answer.word); 194 | }); 195 | } 196 | 197 | function spellAndFixFile(filename, src, options, onFinishedFile) { 198 | var corrections = []; 199 | 200 | function onSpellingMistake(wordInfo, done) { 201 | var displayBlock = _context2.default.getBlock(src, wordInfo.index, wordInfo.word.length); 202 | console.log(displayBlock.info); 203 | incorrectWordChoices(wordInfo.word, " ", filename, options, function (newWord) { 204 | if (newWord) { 205 | corrections.push({ wordInfo: wordInfo, newWord: newWord }); 206 | } 207 | done(); 208 | }); 209 | } 210 | 211 | _index2.default.spellCallback(src, options, onSpellingMistake, function () { 212 | if (corrections.length) { 213 | (0, _writeCorrections2.default)(src, filename, corrections, onFinishedFile); 214 | } else { 215 | onFinishedFile(); 216 | } 217 | }); 218 | } -------------------------------------------------------------------------------- /es5/cli.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | var _commander = require('commander'); 4 | 5 | var _commander2 = _interopRequireDefault(_commander); 6 | 7 | var _fs = require('fs'); 8 | 9 | var _fs2 = _interopRequireDefault(_fs); 10 | 11 | var _path = require('path'); 12 | 13 | var _path2 = _interopRequireDefault(_path); 14 | 15 | var _cliInteractive = require('./cli-interactive'); 16 | 17 | var _cliInteractive2 = _interopRequireDefault(_cliInteractive); 18 | 19 | var _index = require('./index'); 20 | 21 | var _index2 = _interopRequireDefault(_index); 22 | 23 | var _chalk = require('chalk'); 24 | 25 | var _chalk2 = _interopRequireDefault(_chalk); 26 | 27 | var _multiFileProcessor = require('./multi-file-processor'); 28 | 29 | var _multiFileProcessor2 = _interopRequireDefault(_multiFileProcessor); 30 | 31 | var _relativeFileProcessor = require('./relative-file-processor'); 32 | 33 | var _relativeFileProcessor2 = 
_interopRequireDefault(_relativeFileProcessor); 34 | 35 | var _spellcheck = require('./spellcheck'); 36 | 37 | var _spellcheck2 = _interopRequireDefault(_spellcheck); 38 | 39 | var _reportGenerator = require('./report-generator'); 40 | 41 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } 42 | 43 | var packageConfig = _fs2.default.readFileSync(_path2.default.join(__dirname, '../package.json')); 44 | var buildVersion = JSON.parse(packageConfig).version; 45 | 46 | _commander2.default.version(buildVersion) 47 | // default cli behaviour will be an interactive walkthrough each error, with suggestions, 48 | // options to replace etc. 49 | .option('-r, --report', 'Outputs a full report which details the unique spelling errors found.').option('-n, --ignore-numbers', 'Ignores numbers.').option('--en-us', 'American English dictionary.').option('--en-gb', 'British English dictionary.').option('--en-au', 'Australian English dictionary.').option('--es-es', 'Spanish dictionary.').option('-d, --dictionary [file]', 'specify a custom dictionary file - it should not include the file extension and will load .dic and .aiff.').option('-a, --ignore-acronyms', 'Ignores acronyms.').option('-x, --no-suggestions', 'Do not suggest words (can be slow)').option('-t, --target-relative', 'Uses ".spelling" files relative to the target.').usage("[options] source-file source-file").parse(process.argv); 50 | 51 | var language = void 0; 52 | if (_commander2.default.enUs) { 53 | language = "en-us"; 54 | } else if (_commander2.default.enGb) { 55 | language = "en-gb"; 56 | } else if (_commander2.default.enAu) { 57 | language = "en-au"; 58 | } else if (_commander2.default.esEs) { 59 | language = "es-es"; 60 | } 61 | 62 | var options = { 63 | ignoreAcronyms: _commander2.default.ignoreAcronyms, 64 | ignoreNumbers: _commander2.default.ignoreNumbers, 65 | suggestions: _commander2.default.suggestions, 66 | relativeSpellingFiles: 
_commander2.default.targetRelative, 67 | dictionary: { 68 | language: language, 69 | file: _commander2.default.dictionary 70 | } 71 | }; 72 | 73 | if (!_commander2.default.args.length) { 74 | _commander2.default.outputHelp(); 75 | process.exit(); 76 | } else { 77 | 78 | _spellcheck2.default.initialise(options); 79 | 80 | var inputPatterns = _commander2.default.args; 81 | var processor = options.relativeSpellingFiles ? _relativeFileProcessor2.default : _multiFileProcessor2.default; 82 | processor(inputPatterns, options, function (filename, src, fileProcessed) { 83 | 84 | if (_commander2.default.report) { 85 | var errors = _index2.default.spell(src, options); 86 | if (errors.length > 0) { 87 | console.log((0, _reportGenerator.generateFileReport)(filename, { errors: errors, src: src })); 88 | process.exitCode = 1; 89 | } 90 | fileProcessed(null, errors); 91 | } else { 92 | console.log("Spelling - " + _chalk2.default.bold(filename)); 93 | (0, _cliInteractive2.default)(filename, src, options, fileProcessed); 94 | } 95 | }, function (e, results) { 96 | console.log((0, _reportGenerator.generateSummaryReport)(results)); 97 | }); 98 | } -------------------------------------------------------------------------------- /es5/context.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | exports.__esModule = true; 4 | 5 | var _chalk = require('chalk'); 6 | 7 | var _chalk2 = _interopRequireDefault(_chalk); 8 | 9 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } 10 | 11 | function getLines(src, index, noBefore, noAfter) { 12 | var beforeLines = []; 13 | var afterLines = []; 14 | var thisLineStart = void 0, 15 | line = void 0, 16 | column = void 0; 17 | var lastCutIndex = index; 18 | 19 | for (var i = index - 1; i >= 0; i--) { 20 | if (src[i] === '\n') { 21 | if (thisLineStart === undefined) { 22 | thisLineStart = i + 1; 23 | column = index - (i + 1); 24 | } else { 25 | beforeLines.push(src.substr(i, lastCutIndex - i)); 26 | } 27 | lastCutIndex = i; 28 | if (beforeLines.length >= noBefore) { 29 | break; 30 | } 31 | } 32 | } 33 | if (thisLineStart === undefined) { 34 | thisLineStart = 0; 35 | column = index; 36 | } 37 | for (var _i = index; _i < src.length; _i++) { 38 | if (src[_i] === '\n') { 39 | if (line === undefined) { 40 | line = src.substr(thisLineStart, _i - thisLineStart); 41 | } else { 42 | afterLines.push(src.substr(lastCutIndex, _i - lastCutIndex)); 43 | } 44 | lastCutIndex = _i; 45 | if (afterLines.length >= noAfter) { 46 | break; 47 | } 48 | } 49 | } 50 | if (line === undefined) { 51 | line = src.slice(thisLineStart); 52 | } 53 | var lineNumber = 1; 54 | for (var _i2 = index - 1; _i2 >= 0; _i2--) { 55 | if (src[_i2] === '\n') { 56 | lineNumber++; 57 | } 58 | } 59 | return { 60 | line: line, 61 | beforeLines: beforeLines, 62 | afterLines: afterLines, 63 | column: column, 64 | lineNumber: lineNumber 65 | }; 66 | } 67 | 68 | exports.default = { 69 | getBlock: function getBlock(src, index, length) { 70 | var lineInfo = getLines(src, index, 2, 2); 71 | var lineStart = 0; 72 | var lineEnd = lineInfo.line.length; 73 | if (lineInfo.column > 30) { 74 | lineStart = lineInfo.column - 30; 75 | } 76 | if (lineEnd - (lineInfo.column + length) > 30) { 77 | lineEnd = lineInfo.column + length + 30; 78 | } 79 | var info = lineInfo.line.substring(lineStart, lineInfo.column) + _chalk2.default.red(lineInfo.line.substr(lineInfo.column, length)) + lineInfo.line.substring(lineInfo.column + length, lineEnd); 
80 | return { 81 | info: info, 82 | lineNumber: lineInfo.lineNumber 83 | }; 84 | } 85 | }; -------------------------------------------------------------------------------- /es5/filters.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | exports.__esModule = true; 4 | function filterFactory(regexp) { 5 | return function (errors) { 6 | return errors.filter(function (e) { 7 | return !e.word.match(regexp); 8 | }); 9 | }; 10 | } 11 | 12 | var numbers = filterFactory(/^[0-9,\.\-#]+(th|st|nd|rd)?$/); 13 | var acronyms = filterFactory(/^[A-Z0-9]{2,}(['\u2018-\u2019]s)?$/); 14 | 15 | exports.default = { 16 | acronyms: acronyms, 17 | numbers: numbers, 18 | filter: function filter(words, options) { 19 | var ignoreAcronyms = options && options.ignoreAcronyms; 20 | var ignoreNumbers = options && options.ignoreNumbers; 21 | 22 | if (ignoreAcronyms) { 23 | words = acronyms(words); 24 | } 25 | if (ignoreNumbers) { 26 | words = numbers(words); 27 | } 28 | return words; 29 | } 30 | }; -------------------------------------------------------------------------------- /es5/index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | exports.__esModule = true; 4 | 5 | var _fs = require('fs'); 6 | 7 | var _fs2 = _interopRequireDefault(_fs); 8 | 9 | var _markdownParser = require('./markdown-parser'); 10 | 11 | var _markdownParser2 = _interopRequireDefault(_markdownParser); 12 | 13 | var _wordParser = require('./word-parser'); 14 | 15 | var _wordParser2 = _interopRequireDefault(_wordParser); 16 | 17 | var _spellcheck = require('./spellcheck'); 18 | 19 | var _spellcheck2 = _interopRequireDefault(_spellcheck); 20 | 21 | var _filters = require('./filters'); 22 | 23 | var _filters2 = _interopRequireDefault(_filters); 24 | 25 | var _async = require('async'); 26 | 27 | var _async2 = _interopRequireDefault(_async); 28 | 29 | var _reportGenerator = require('./report-generator'); 30 | 
31 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } 32 | 33 | function getWords(src, options) { 34 | var words = (0, _wordParser2.default)((0, _markdownParser2.default)(src)); 35 | 36 | return _filters2.default.filter(words, options); 37 | } 38 | 39 | function spell(src, options) { 40 | if (typeof src !== "string") { 41 | throw new Error("spell takes a string"); 42 | } 43 | var words = getWords(src, options); 44 | return _spellcheck2.default.checkWords(words, options); 45 | } 46 | 47 | function spellFile(filename, options) { 48 | var src = _fs2.default.readFileSync(filename, 'utf-8'); 49 | return { 50 | errors: spell(src, options), 51 | src: src 52 | }; 53 | } 54 | 55 | function spellCallback(src, options, callback, done) { 56 | var words = getWords(src, options); 57 | 58 | _async2.default.eachSeries(words, _async2.default.ensureAsync(function (wordInfo, onWordProcessed) { 59 | if (!_spellcheck2.default.checkWord(wordInfo.word, options)) { 60 | callback(wordInfo, onWordProcessed); 61 | } else { 62 | onWordProcessed(); 63 | } 64 | }), done); 65 | } 66 | 67 | exports.default = { spell: spell, spellFile: spellFile, spellCallback: spellCallback, spellcheck: _spellcheck2.default, generateSummaryReport: _reportGenerator.generateSummaryReport, generateFileReport: _reportGenerator.generateFileReport }; -------------------------------------------------------------------------------- /es5/markdown-parser.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | exports.__esModule = true; 4 | 5 | var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
"symbol" : typeof obj; }; 6 | 7 | exports.default = function (src) { 8 | var textTokens = []; 9 | var currentIndex = 0; 10 | 11 | var tracker = (0, _trackingReplacement2.default)(src); 12 | 13 | // remove things we won't process so we can use simple next matching word logic 14 | // to calculate the index 15 | 16 | var jekyllFrontMatter = getJekyllFrontMatter(src); 17 | if (jekyllFrontMatter) { 18 | tracker.replaceAll(jekyllFrontMatter, " "); 19 | } 20 | 21 | tracker.removeAll(/```[\w\W]*?```/); 22 | tracker.removeAll(/~~~[\w\W]*?~~~/); 23 | tracker.removeAll(/``[\w\W]*?``/); 24 | tracker.removeAll(/`[^`]*`/); 25 | tracker.replaceAll(//, " "); // remove contents of style 26 | tracker.replaceAll(//, " "); // remove contents of scripts 27 | tracker.replaceAll(/\{%\s*highlight[\w\W]*?\{%\s*endhighlight\s*%\}/, " "); // remove contents code blocks 28 | tracker.replaceAll(/\{%.*%\}/, " "); 29 | tracker.replaceAll(/\{\{.*\}\}/, " "); 30 | tracker.replaceAll(/&[#a-z0-9]{1,5};/, " "); 31 | src = tracker.replaceAll(/<\/?[a-z0-9]+ ?([a-z]+="[^"]*" ?)*\/?>/i, " "); 32 | 33 | var options = { 34 | gfm: true, 35 | renderer: { 36 | strong: function strong() {}, 37 | em: function em() {}, 38 | codespan: function codespan() {}, 39 | br: function br() {}, 40 | del: function del() {}, 41 | link: function link() {}, 42 | image: function image() {}, 43 | text: function text(_text) { 44 | _text = _text.replace(/'/g, "'"); 45 | var roughSplit = _text.split(/(https?|ftp):\/\/[^\s/$.?#].[^\s]*|[\s\xa0\r\n]|&[a-z#0-9]+;|[&<>]/); 46 | for (var i = 0; i < roughSplit.length; i++) { 47 | var split = roughSplit[i]; 48 | if (split) { 49 | addToken(split); 50 | } 51 | } 52 | } 53 | } 54 | }; 55 | 56 | function addToken(text) { 57 | var newIndex = src.indexOf(text, currentIndex); 58 | if (newIndex === -1) { 59 | throw new Error("Markdown Parser : Inline Lexer : Could not find index of text - \n" + text + "\n\n**In**\n\n" + src.substring(currentIndex, 30) + "\n"); 60 | } 61 | currentIndex = newIndex 
+ text.length; 62 | textTokens.push({ text: text, index: tracker.getOriginalIndex(newIndex) }); 63 | } 64 | 65 | var tokens = _marked2.default.lexer(src, options); 66 | var inlineLexer = new _marked2.default.InlineLexer(tokens.links, options); 67 | 68 | for (var i = 0; i < tokens.length; i++) { 69 | var token = tokens[i]; 70 | if (token.text && token.type !== "code") { 71 | inlineLexer.output(token.text); 72 | } 73 | } 74 | 75 | return textTokens; 76 | }; 77 | 78 | var _marked = require("marked"); 79 | 80 | var _marked2 = _interopRequireDefault(_marked); 81 | 82 | var _jsYaml = require("js-yaml"); 83 | 84 | var _jsYaml2 = _interopRequireDefault(_jsYaml); 85 | 86 | var _trackingReplacement = require("./tracking-replacement"); 87 | 88 | var _trackingReplacement2 = _interopRequireDefault(_trackingReplacement); 89 | 90 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } 91 | 92 | function getJekyllFrontMatter(src) { 93 | var matches = src.match(/^\r?\n?---\r?\n([\w\W]+?)\r?\n---\r?\n/); 94 | 95 | if (matches) { 96 | var fencedContent = matches[1]; 97 | 98 | try { 99 | var parsed = _jsYaml2.default.safeLoad(fencedContent); 100 | 101 | return (typeof parsed === "undefined" ? "undefined" : _typeof(parsed)) === "object" ? 
matches[0] : undefined; 102 | } catch (e) { 103 | // not valid yaml 104 | } 105 | } 106 | } -------------------------------------------------------------------------------- /es5/multi-file-processor.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | exports.__esModule = true; 4 | 5 | exports.default = function (inputPatterns, options, fileCallback, resultCallback) { 6 | var allFiles = []; 7 | 8 | _async2.default.parallel([_spellConfig2.default.initialise.bind(_spellConfig2.default, './.spelling'), function (processed) { 9 | (0, _globby2.default)(inputPatterns).then(function (files) { 10 | allFiles = files; 11 | processed(); 12 | }).catch(function () { 13 | console.error("Error globbing:", inputPatterns); 14 | process.exitCode = 1; 15 | processed(); 16 | }); 17 | }], function () { 18 | 19 | // finished callback - config loaded and glob has returned all files 20 | 21 | _spellConfig2.default.getGlobalWords().forEach(function (word) { 22 | return _spellcheck2.default.addWord(word); 23 | }); 24 | 25 | _async2.default.mapSeries(allFiles, function (file, fileProcessed) { 26 | 27 | _fs2.default.readFile(file, 'utf-8', function (err, src) { 28 | 29 | if (err) { 30 | console.error("Failed to open file:" + file); 31 | console.error(err); 32 | process.exitCode = 1; 33 | return fileProcessed(); 34 | } 35 | 36 | _spellConfig2.default.getFileWords(file).forEach(function (word) { 37 | return _spellcheck2.default.addWord(word, true); 38 | }); 39 | 40 | fileCallback(file, src, function (err, result) { 41 | _spellcheck2.default.resetTemporaryCustomDictionary(); 42 | fileProcessed(err, result); 43 | }); 44 | }); 45 | }, resultCallback); 46 | }); 47 | }; 48 | 49 | var _globby = require('globby'); 50 | 51 | var _globby2 = _interopRequireDefault(_globby); 52 | 53 | var _async = require('async'); 54 | 55 | var _async2 = _interopRequireDefault(_async); 56 | 57 | var _spellConfig = require('./spell-config'); 58 | 59 | var 
_spellConfig2 = _interopRequireDefault(_spellConfig); 60 | 61 | var _spellcheck = require('./spellcheck'); 62 | 63 | var _spellcheck2 = _interopRequireDefault(_spellcheck); 64 | 65 | var _fs = require('fs'); 66 | 67 | var _fs2 = _interopRequireDefault(_fs); 68 | 69 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } -------------------------------------------------------------------------------- /es5/relative-file-processor.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | exports.__esModule = true; 4 | 5 | exports.default = function (inputPatterns, options, fileCallback, resultCallback) { 6 | var allFiles = []; 7 | 8 | (0, _globby2.default)(inputPatterns).then(function (files) { 9 | allFiles = files; 10 | spellCheckFiles(); 11 | }).catch(function () { 12 | console.error("Error globbing:", inputPatterns); 13 | process.exitCode = 1; 14 | }); 15 | 16 | function spellCheckFiles() { 17 | _async2.default.mapSeries(allFiles, function (file, fileProcessed) { 18 | var relativeSpellingFile = _path2.default.join(_path2.default.dirname(file), ".spelling"); 19 | _spellConfig2.default.initialise(relativeSpellingFile, function () { 20 | processFile(file, fileProcessed); 21 | }); 22 | }, resultCallback); 23 | } 24 | 25 | function processFile(file, fileProcessed) { 26 | _spellConfig2.default.getGlobalWords().forEach(function (word) { 27 | return _spellcheck2.default.addWord(word); 28 | }); 29 | 30 | _fs2.default.readFile(file, 'utf-8', function (err, src) { 31 | if (err) { 32 | console.error("Failed to open file:" + file); 33 | console.error(err); 34 | process.exitCode = 1; 35 | return fileProcessed(); 36 | } 37 | 38 | _spellConfig2.default.getFileWords(file).forEach(function (word) { 39 | return _spellcheck2.default.addWord(word, true); 40 | }); 41 | 42 | fileCallback(file, src, function (err, result) { 43 | _spellcheck2.default.resetTemporaryCustomDictionary(); 44 | 
fileProcessed(err, result); 45 | }); 46 | }); 47 | } 48 | }; 49 | 50 | var _globby = require('globby'); 51 | 52 | var _globby2 = _interopRequireDefault(_globby); 53 | 54 | var _async = require('async'); 55 | 56 | var _async2 = _interopRequireDefault(_async); 57 | 58 | var _path = require('path'); 59 | 60 | var _path2 = _interopRequireDefault(_path); 61 | 62 | var _spellConfig = require('./spell-config'); 63 | 64 | var _spellConfig2 = _interopRequireDefault(_spellConfig); 65 | 66 | var _spellcheck = require('./spellcheck'); 67 | 68 | var _spellcheck2 = _interopRequireDefault(_spellcheck); 69 | 70 | var _fs = require('fs'); 71 | 72 | var _fs2 = _interopRequireDefault(_fs); 73 | 74 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } -------------------------------------------------------------------------------- /es5/report-generator.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | exports.__esModule = true; 4 | exports.generateSummaryReport = generateSummaryReport; 5 | exports.generateFileReport = generateFileReport; 6 | 7 | var _chalk = require('chalk'); 8 | 9 | var _chalk2 = _interopRequireDefault(_chalk); 10 | 11 | var _context = require('./context'); 12 | 13 | var _context2 = _interopRequireDefault(_context); 14 | 15 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } 16 | 17 | // Generates a report that summarises the spelling errors found across multiple 18 | // markdown files. 19 | // results is an array containing the errors (as a nested array) for each file. 20 | function generateSummaryReport(results) { 21 | var errorCount = results.map(function (e) { 22 | return e && e.length ? e.length : 0; 23 | }).reduce(function (p, c) { 24 | return p + c; 25 | }, 0); 26 | 27 | var filePlural = 'file' + (results.length > 1 ? 's' : ''); 28 | var errorPlural = 'error' + (errorCount > 1 ? 
's' : ''); 29 | var areOrIs = results.length > 1 ? 'are' : 'is'; 30 | 31 | if (errorCount > 0) { 32 | return _chalk2.default.red('>>') + ' ' + errorCount + ' spelling ' + errorPlural + ' found in ' + results.length + ' ' + filePlural; 33 | } 34 | return _chalk2.default.green('>>') + ' ' + results.length + ' ' + filePlural + ' ' + areOrIs + ' free from spelling errors'; 35 | } 36 | 37 | // Generates a report for the errors found in a single markdown file. 38 | function generateFileReport(file, spellingInfo) { 39 | var report = ' ' + _chalk2.default.bold(file) + '\n'; 40 | 41 | for (var k = 0; k < spellingInfo.errors.length; k++) { 42 | var error = spellingInfo.errors[k]; 43 | var displayBlock = _context2.default.getBlock(spellingInfo.src, error.index, error.word.length); 44 | 45 | var lineNumber = String(displayBlock.lineNumber); 46 | var lineNumberPadding = Array(10 - lineNumber.length).join(' '); 47 | var linePrefix = '' + lineNumberPadding + lineNumber + ' |'; 48 | report += linePrefix + ' ' + displayBlock.info + ' \n'; 49 | } 50 | return report; 51 | } -------------------------------------------------------------------------------- /es5/spell-config.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | exports.__esModule = true; 4 | 5 | var _fs = require('fs'); 6 | 7 | var _fs2 = _interopRequireDefault(_fs); 8 | 9 | var _async = require('async'); 10 | 11 | var _async2 = _interopRequireDefault(_async); 12 | 13 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } 14 | 15 | var globalDictionary = []; 16 | var fileDictionary = {}; 17 | var sharedSpelling = {}; 18 | var relativeSpelling = {}; 19 | 20 | function spellingFile(fileName) { 21 | return { 22 | fileName: fileName, 23 | fileLines: [], 24 | lastLineOfGlobalSpellings: -1, 25 | isCrLf: false, 26 | isDirty: false 27 | }; 28 | } 29 | 30 | function parse(spelling) { 31 | var lastNonCommentIndex = -1; 32 | var inGlobal = true; 33 | var currentFile = void 0; 34 | spelling.fileLines.forEach(function (line, index) { 35 | if (!line || line.indexOf('#') === 0) { 36 | return; 37 | } 38 | var fileMatch = line.match(/^\s*-\s+(.*)/); 39 | if (fileMatch) { 40 | if (inGlobal) { 41 | spelling.lastLineOfGlobalSpellings = lastNonCommentIndex === -1 ? index : lastNonCommentIndex + 1; 42 | inGlobal = false; 43 | } else { 44 | fileDictionary[currentFile].index = lastNonCommentIndex + 1; 45 | } 46 | currentFile = fileMatch[1]; 47 | fileDictionary[currentFile] = { words: [] }; 48 | } else { 49 | var word = line.trim(); 50 | if (inGlobal) { 51 | globalDictionary.push(word); 52 | } else { 53 | fileDictionary[currentFile].words.push(word); 54 | } 55 | } 56 | lastNonCommentIndex = index; 57 | }); 58 | // make sure we end on a new-line 59 | if (spelling.fileLines[spelling.fileLines.length - 1]) { 60 | spelling.fileLines[spelling.fileLines.length] = ""; 61 | } 62 | if (inGlobal) { 63 | spelling.lastLineOfGlobalSpellings = lastNonCommentIndex === -1 ? 
spelling.fileLines.length - 1 : lastNonCommentIndex + 1; 64 | } else { 65 | fileDictionary[currentFile].index = lastNonCommentIndex; 66 | } 67 | } 68 | 69 | function emptyFile(spelling) { 70 | spelling.fileLines = ["# markdown-spellcheck spelling configuration file", "# Format - lines beginning # are comments", "# global dictionary is at the start, file overrides afterwards", "# one word per line, to define a file override use ' - filename'", "# where filename is relative to this configuration file", ""]; 71 | spelling.lastLineOfGlobalSpellings = spelling.fileLines.length - 1; 72 | } 73 | 74 | function initConfig() { 75 | globalDictionary = []; 76 | fileDictionary = {}; 77 | sharedSpelling = spellingFile("./.spelling"); 78 | relativeSpelling = spellingFile(""); 79 | } 80 | 81 | function loadAndParseSpelling(spelling, next) { 82 | _fs2.default.readFile(spelling.fileName, { encoding: 'utf-8' }, function (err, data) { 83 | if (err) { 84 | emptyFile(spelling); 85 | return next(); 86 | } 87 | if (data.indexOf('\r') >= 0) { 88 | spelling.isCrLf = true; 89 | data = data.replace(/\r/g, ""); 90 | } 91 | 92 | spelling.fileLines = data.split('\n'); 93 | parse(spelling); 94 | return next(); 95 | }); 96 | } 97 | 98 | function initialise(filename, done) { 99 | initConfig(); 100 | relativeSpelling.fileName = filename; 101 | var sharedSpellingOnly = filename === "./.spelling"; 102 | _async2.default.parallel([function (next) { 103 | loadAndParseSpelling(sharedSpelling, next); 104 | }, function (next) { 105 | sharedSpellingOnly && next() || !sharedSpellingOnly && loadAndParseSpelling(relativeSpelling, next); 106 | }], function () { 107 | return done(); 108 | }); 109 | } 110 | 111 | function writeFile(done, relative) { 112 | var spelling = relative ? relativeSpelling : sharedSpelling; 113 | if (spelling.isDirty) { 114 | var data = spelling.fileLines.join(spelling.isCrLf ? 
"\r\n" : "\n"); 115 | _fs2.default.writeFile(spelling.fileName, data, function (err) { 116 | if (err) { 117 | console.error("Failed to save spelling file"); 118 | console.error(err); 119 | process.exitCode = 1; 120 | } else { 121 | spelling.isDirty = false; 122 | } 123 | done(); 124 | }); 125 | } else { 126 | done(); 127 | } 128 | } 129 | 130 | function addToGlobalDictionary(word, relative) { 131 | var spelling = relative ? relativeSpelling : sharedSpelling; 132 | globalDictionary.push(word); 133 | spelling.fileLines.splice(spelling.lastLineOfGlobalSpellings, 0, word); 134 | spelling.isDirty = true; 135 | spelling.lastLineOfGlobalSpellings++; 136 | for (var filename in fileDictionary) { 137 | if (fileDictionary.hasOwnProperty(filename)) { 138 | fileDictionary[filename].index++; 139 | } 140 | } 141 | } 142 | 143 | function addToFileDictionary(filename, word, relative) { 144 | var spelling = relative ? relativeSpelling : sharedSpelling; 145 | if (fileDictionary.hasOwnProperty(filename)) { 146 | var fileDict = fileDictionary[filename]; 147 | spelling.fileLines.splice(fileDict.index, 0, word); 148 | spelling.isDirty = true; 149 | for (var dictionaryFilename in fileDictionary) { 150 | if (fileDictionary.hasOwnProperty(dictionaryFilename) && fileDictionary[dictionaryFilename].index >= fileDict.index) { 151 | fileDictionary[dictionaryFilename].index++; 152 | } 153 | } 154 | fileDict.words.push(word); 155 | } else { 156 | spelling.fileLines.splice(spelling.fileLines.length - 1, 0, " - " + filename, word); 157 | spelling.isDirty = true; 158 | fileDictionary[filename] = { 159 | index: spelling.fileLines.length - 1, 160 | words: [word] 161 | }; 162 | } 163 | } 164 | 165 | function getGlobalWords() { 166 | return globalDictionary; 167 | } 168 | 169 | function getFileWords(filename) { 170 | if (fileDictionary.hasOwnProperty(filename)) { 171 | return fileDictionary[filename].words; 172 | } 173 | return []; 174 | } 175 | 176 | exports.default = { 177 | initialise: initialise, 178 
| writeFile: writeFile, 179 | addToGlobalDictionary: addToGlobalDictionary, 180 | addToFileDictionary: addToFileDictionary, 181 | getGlobalWords: getGlobalWords, 182 | getFileWords: getFileWords 183 | }; -------------------------------------------------------------------------------- /es5/spellcheck.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | exports.__esModule = true; 4 | 5 | var _hunspellSpellchecker = require('hunspell-spellchecker'); 6 | 7 | var _hunspellSpellchecker2 = _interopRequireDefault(_hunspellSpellchecker); 8 | 9 | var _fs = require('fs'); 10 | 11 | var _fs2 = _interopRequireDefault(_fs); 12 | 13 | var _path = require('path'); 14 | 15 | var _path2 = _interopRequireDefault(_path); 16 | 17 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } 18 | 19 | var spellchecker = void 0, 20 | dict = void 0; 21 | 22 | function initialise(options) { 23 | 24 | var dictionaryOptions = options && options.dictionary; 25 | 26 | var baseFile = _path2.default.join(__dirname, '../data/en-GB'); 27 | if (dictionaryOptions && dictionaryOptions.file) { 28 | baseFile = dictionaryOptions.file; 29 | } else if (dictionaryOptions && dictionaryOptions.language) { 30 | switch (dictionaryOptions.language) { 31 | case 'en-us': 32 | baseFile = _path2.default.join(__dirname, '../data/en_US-large'); 33 | break; 34 | case 'en-gb': 35 | // default - do nothing 36 | break; 37 | case 'en-au': 38 | baseFile = _path2.default.join(__dirname, '../data/en_AU'); 39 | break; 40 | case 'es-es': 41 | baseFile = _path2.default.join(__dirname, '../data/es_ANY'); 42 | break; 43 | default: 44 | throw new Error("unsupported language:" + dictionaryOptions.language); 45 | } 46 | } 47 | 48 | spellchecker = new _hunspellSpellchecker2.default(); 49 | dict = spellchecker.parse({ 50 | aff: _fs2.default.readFileSync(baseFile + '.aff'), 51 | dic: _fs2.default.readFileSync(baseFile + '.dic') 52 | }); 53 | 
spellchecker.use(dict); 54 | } 55 | 56 | function normaliseApos(word) { 57 | return word.replace(/\u2019/, "'"); 58 | } 59 | 60 | function checkWord(word, options) { 61 | if (!spellchecker) { 62 | initialise(options); 63 | } 64 | word = normaliseApos(word); 65 | if (spellchecker.check(word)) { 66 | return true; 67 | } 68 | 69 | if (word.match(/'s$/)) { 70 | var wordWithoutPlural = word.substr(0, word.length - 2); 71 | if (spellchecker.check(wordWithoutPlural)) { 72 | return true; 73 | } 74 | } 75 | 76 | // for etc. as we cannot tell if it ends in "." as that is stripped 77 | var wordWithDot = word + "."; 78 | if (spellchecker.check(wordWithDot)) { 79 | return true; 80 | } 81 | 82 | if (word.indexOf('-')) { 83 | var subWords = word.split('-'); 84 | 85 | if (subWords.every(function (subWord) { 86 | return spellchecker.check(subWord); 87 | })) { 88 | return true; 89 | } 90 | } 91 | 92 | return false; 93 | } 94 | 95 | function checkWords(words, options) { 96 | var mistakes = []; 97 | for (var i = 0; i < words.length; i++) { 98 | var wordInfo = words[i]; 99 | if (!checkWord(wordInfo.word, options)) { 100 | mistakes.push(wordInfo); 101 | } 102 | } 103 | return mistakes; 104 | } 105 | 106 | function _addWord(word) { 107 | dict.dictionaryTable[word] = [[]]; 108 | } 109 | 110 | var customDictionary = []; 111 | var needsReset = false; 112 | function addWord(word, temporary) { 113 | if (!spellchecker) { 114 | initialise(); 115 | } 116 | 117 | word = normaliseApos(word); 118 | 119 | if (!temporary) { 120 | customDictionary.push(word); 121 | } else { 122 | needsReset = true; 123 | } 124 | _addWord(word); 125 | } 126 | 127 | function resetTemporaryCustomDictionary() { 128 | if (needsReset) { 129 | if (!spellchecker) { 130 | initialise(); 131 | } 132 | customDictionary.forEach(function (word) { 133 | return _addWord(word); 134 | }); 135 | } 136 | } 137 | 138 | function suggest(word) { 139 | return spellchecker.suggest(word); 140 | } 141 | 142 | exports.default = { 143 | 
initialise: initialise, 144 | checkWords: checkWords, 145 | checkWord: checkWord, 146 | addWord: addWord, 147 | resetTemporaryCustomDictionary: resetTemporaryCustomDictionary, 148 | suggest: suggest 149 | }; -------------------------------------------------------------------------------- /es5/tracking-replacement.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | exports.__esModule = true; 4 | 5 | exports.default = function (src) { 6 | var maps = []; 7 | function getOriginalIndex(newIndex) { 8 | var firstMapBefore = void 0; 9 | for (var i = 0; i < maps.length; i++) { 10 | var map = maps[i]; 11 | if (map.newIndex <= newIndex) { 12 | if (!firstMapBefore || firstMapBefore.newIndex < map.newIndex) { 13 | firstMapBefore = map; 14 | } 15 | } 16 | } 17 | if (firstMapBefore) { 18 | return firstMapBefore.index + (newIndex - firstMapBefore.newIndex); 19 | } 20 | return newIndex; 21 | } 22 | function replaceAll(target, replacement) { 23 | while (true) { 24 | // eslint-disable-line no-constant-condition 25 | var match = void 0; 26 | 27 | if (target instanceof RegExp) { 28 | match = src.match(target); 29 | } else { 30 | match = { 31 | index: src.indexOf(target), 32 | 0: target 33 | }; 34 | } 35 | 36 | if (!match || match.index === -1) { 37 | break; 38 | } 39 | 40 | var cutTo = match.index + match[0].length; 41 | var originalIndex = getOriginalIndex(cutTo); 42 | var changeInLength = match[0].length - replacement.length; 43 | 44 | for (var i = maps.length - 1; i >= 0; i--) { 45 | var map = maps[i]; 46 | if (map.newIndex >= match.index) { 47 | if (map.newIndex < cutTo) { 48 | maps.splice(i, 1); 49 | } else { 50 | map.newIndex -= changeInLength; 51 | } 52 | } 53 | } 54 | 55 | maps.push({ newIndex: match.index + replacement.length, index: originalIndex }); 56 | if (replacement.length) { 57 | maps.push({ newIndex: match.index, index: NaN }); 58 | } 59 | 60 | src = src.substring(0, match.index) + replacement + 
src.slice(match.index + match[0].length); 61 | } 62 | return src; 63 | } 64 | 65 | return { 66 | removeAll: function removeAll(target) { 67 | return replaceAll(target, ""); 68 | }, 69 | 70 | replaceAll: replaceAll, 71 | getOriginalIndex: getOriginalIndex 72 | }; 73 | }; -------------------------------------------------------------------------------- /es5/word-parser.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | exports.__esModule = true; 4 | 5 | exports.default = function (tokens) { 6 | var wordList = []; 7 | for (var i = 0; i < tokens.length; i++) { 8 | var token = tokens[i]; 9 | var text = token.text; 10 | var index = token.index; 11 | while (true) { 12 | // eslint-disable-line no-constant-condition 13 | var nextWord = text.match(/(\w+(\.\w+)+\.?)|[\u00c0-\u01bf\u01d0-\u029f\w'\u2018-\u2019][\-#\u00c0-\u01bf\u01d0-\u029f\w'\u2018-\u2019]*|[\u0400-\u04FF\w'\u2018-\u2019][\-#\u0400-\u04FF\w'\u2018-\u2019]*/); 14 | if (!nextWord) { 15 | break; 16 | } 17 | var word = nextWord[0]; 18 | var thisWordIndex = index + nextWord.index; 19 | 20 | var badStart = word.match(/^[#'\u2018]+/); 21 | if (badStart) { 22 | var badStartLength = badStart[0].length; 23 | thisWordIndex += badStartLength; 24 | word = word.substr(badStartLength, word.length - badStartLength); 25 | } 26 | var badEndings = word.match(/['\u2019\-#]+$/); 27 | if (badEndings) { 28 | word = word.substr(0, word.length - badEndings[0].length); 29 | } 30 | wordList.push({ word: word, index: thisWordIndex }); 31 | 32 | index += nextWord.index + nextWord[0].length; 33 | text = text.slice(nextWord.index + nextWord[0].length); 34 | } 35 | } 36 | return wordList; 37 | }; -------------------------------------------------------------------------------- /es5/word-replacer.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | exports.__esModule = true; 4 | exports.replace = replace; 5 | 6 | function 
compare(a, b) { 7 | if (a > b) { 8 | return 1; 9 | } else if (a < b) { 10 | return -1; 11 | } 12 | return 0; 13 | } 14 | 15 | function replaceWord(src, index, oldWord, newWord) { 16 | return src.slice(0, index) + newWord + src.slice(index + oldWord.length); 17 | } 18 | 19 | function replace(src, corrections) { 20 | 21 | corrections = corrections.sort(function (a, b) { 22 | return (/* reverse arguments - reverse list */compare(b.wordInfo.index, a.wordInfo.index) 23 | ); 24 | }); 25 | 26 | for (var i = 0; i < corrections.length; i++) { 27 | var correction = corrections[i]; 28 | src = replaceWord(src, correction.wordInfo.index, correction.wordInfo.word, correction.newWord); 29 | } 30 | 31 | return src; 32 | } -------------------------------------------------------------------------------- /es5/write-corrections.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | exports.__esModule = true; 4 | exports.default = writeCorrections; 5 | 6 | var _fs = require('fs'); 7 | 8 | var _fs2 = _interopRequireDefault(_fs); 9 | 10 | var _wordReplacer = require('./word-replacer'); 11 | 12 | function _interopRequireDefault(obj) { return obj && obj.__esModule ? 
obj : { default: obj }; } 13 | 14 | function writeCorrections(src, file, corrections, onCorrected) { 15 | var correctedSrc = (0, _wordReplacer.replace)(src, corrections); 16 | _fs2.default.writeFile(file, correctedSrc, function (err) { 17 | if (err) { 18 | console.error("Failed to write corrections to :", file); 19 | process.exitCode = 1; 20 | } 21 | onCorrected(); 22 | }); 23 | } -------------------------------------------------------------------------------- /es6/cli-interactive.js: -------------------------------------------------------------------------------- 1 | import markdownSpellcheck from "./index"; 2 | import spellcheck from "./spellcheck"; 3 | import inquirer from 'inquirer'; 4 | import filters from './filters'; 5 | import context from './context'; 6 | import spellConfig from './spell-config'; 7 | import writeCorrections from './write-corrections'; 8 | 9 | const ACTION_IGNORE = "ignore"; 10 | const ACTION_FILE_IGNORE = "fileignore"; 11 | const ACTION_FILE_IGNORE_RELATIVE = "fileignore-relative"; 12 | const ACTION_ADD = "add"; 13 | const ACTION_ADD_CASED = "add-cased"; 14 | const ACTION_ADD_RELATIVE = "add-relative"; 15 | const ACTION_ADD_CASED_RELATIVE = "add-cased-relative"; 16 | const ACTION_CORRECT = "enter"; 17 | 18 | const CHOICE_IGNORE = { name: "Ignore", value: ACTION_IGNORE }; 19 | const CHOICE_FILE_IGNORE = { name: "Add to file ignores", value: ACTION_FILE_IGNORE }; 20 | const CHOICE_FILE_IGNORE_RELATIVE = { name: "[Relative] Add to file ignores", value: ACTION_FILE_IGNORE_RELATIVE }; 21 | const CHOICE_ADD = { name: "Add to dictionary - case insensitive", value: ACTION_ADD }; 22 | const CHOICE_ADD_CASED = { name: "Add to dictionary - case sensitive", value: ACTION_ADD_CASED }; 23 | const CHOICE_ADD_RELATIVE = { name: "[Relative] Add to dictionary - case insensitive", value: ACTION_ADD_RELATIVE }; 24 | const CHOICE_ADD_CASED_RELATIVE = { name: "[Relative] Add to dictionary - case sensitive", value: ACTION_ADD_CASED_RELATIVE }; 25 | const 
CHOICE_CORRECT = { name: "Enter correct spelling", value: ACTION_CORRECT }; 26 | 27 | const previousChoices = Object.create(null); 28 | 29 | function incorrectWordChoices(word, message, filename, options, done) { 30 | const suggestions = 31 | options.suggestions ? spellcheck.suggest(word) : []; 32 | 33 | const choices = [ 34 | CHOICE_IGNORE, 35 | options.relativeSpellingFiles ? CHOICE_FILE_IGNORE_RELATIVE : CHOICE_FILE_IGNORE, 36 | CHOICE_ADD, 37 | CHOICE_CORRECT 38 | ]; 39 | 40 | if (options.relativeSpellingFiles) { 41 | choices.splice(4, 0, CHOICE_ADD_RELATIVE); 42 | } 43 | 44 | if (word.match(/[A-Z]/)) { 45 | choices.splice(3, 0, CHOICE_ADD_CASED); 46 | if (options.relativeSpellingFiles) { 47 | choices.splice(5, 0, CHOICE_ADD_CASED_RELATIVE); 48 | } 49 | } 50 | 51 | let defaultAction = ACTION_CORRECT; 52 | if (previousChoices[word]) { 53 | const previousAction = previousChoices[word]; 54 | if (previousAction.newWord) { 55 | const suggestionIndex = suggestions.indexOf(previousAction.newWord); 56 | if (suggestions.indexOf(previousAction.newWord) >= 0) { 57 | defaultAction = suggestionIndex.toString(); 58 | } 59 | else { 60 | suggestions.unshift(previousAction.newWord); 61 | defaultAction = "0"; 62 | } 63 | } 64 | else { 65 | defaultAction = previousAction.action; 66 | } 67 | } 68 | 69 | suggestions.forEach((suggestion, index) => { 70 | choices.push({ 71 | key: index, 72 | name: suggestion, 73 | value: index.toString() 74 | }); 75 | }); 76 | 77 | inquirer.prompt([{ 78 | type: "list", 79 | name: "action", 80 | message: message, 81 | choices, 82 | default: defaultAction 83 | }]).then(function(answer) { 84 | switch (answer.action) { 85 | case ACTION_ADD: 86 | word = word.toLowerCase(); 87 | /* fallthrough */ 88 | case ACTION_ADD_CASED: 89 | spellcheck.addWord(word); 90 | spellConfig.addToGlobalDictionary(word); 91 | done(); 92 | break; 93 | case ACTION_ADD_RELATIVE: 94 | word = word.toLowerCase(); 95 | /* fallthrough */ 96 | case ACTION_ADD_CASED_RELATIVE: 97 | 
spellcheck.addWord(word); 98 | spellConfig.addToGlobalDictionary(word, true); 99 | done(); 100 | break; 101 | case ACTION_CORRECT: 102 | getCorrectWord(word, filename, options, done); 103 | break; 104 | case ACTION_FILE_IGNORE: 105 | spellcheck.addWord(word, true); 106 | spellConfig.addToFileDictionary(filename, word); 107 | previousChoices[word] = answer; 108 | done(); 109 | break; 110 | case ACTION_FILE_IGNORE_RELATIVE: 111 | spellcheck.addWord(word, true); 112 | spellConfig.addToFileDictionary(filename, word, true); 113 | previousChoices[word] = answer; 114 | done(); 115 | break; 116 | case ACTION_IGNORE: 117 | spellcheck.addWord(word); 118 | done(); 119 | break; 120 | default: 121 | const suggestionId = Number(answer.action); 122 | if (isNaN(suggestionId) || suggestionId >= suggestions.length) { 123 | throw new Error("unrecognise prompt action"); 124 | } 125 | previousChoices[word] = { newWord: suggestions[suggestionId] }; 126 | done(suggestions[Number(answer.action)]); 127 | break; 128 | } 129 | }); 130 | } 131 | 132 | function getCorrectWord(word, filename, options, done) { 133 | inquirer.prompt([{ 134 | type: "input", 135 | name: "word", 136 | message: "correct word >", 137 | default: word 138 | }]).then(function(answer) { 139 | const newWords = answer.word.split(/\s/g); 140 | let hasMistake = false; 141 | 142 | for (let i = 0; i < newWords.length; i++) { 143 | const newWord = newWords[i]; 144 | if (filters.filter([{ word: newWord }], options).length > 0 && !spellcheck.checkWord(newWord)) { 145 | hasMistake = true; 146 | } 147 | } 148 | 149 | if (hasMistake) { 150 | if (newWords.length === 1) { 151 | incorrectWordChoices(answer.word, "Corrected word is not in dictionary..", filename, options, (newNewWord) => { 152 | const finalNewWord = newNewWord || answer.word; 153 | previousChoices[word] = { newWord: finalNewWord }; 154 | done(finalNewWord); 155 | }); 156 | return; 157 | } 158 | 159 | console.log("Detected some words in your correction that may be 
invalid. Re-run to check."); 160 | } 161 | 162 | previousChoices[word] = { newWord: answer.word }; 163 | done(answer.word); 164 | }); 165 | } 166 | 167 | function spellAndFixFile(filename, src, options, onFinishedFile) { 168 | const corrections = []; 169 | 170 | function onSpellingMistake(wordInfo, done) { 171 | const displayBlock = context.getBlock(src, wordInfo.index, wordInfo.word.length); 172 | console.log(displayBlock.info); 173 | incorrectWordChoices(wordInfo.word, " ", filename, options, (newWord) => { 174 | if (newWord) { 175 | corrections.push({ wordInfo, newWord }); 176 | } 177 | done(); 178 | }); 179 | } 180 | 181 | markdownSpellcheck.spellCallback(src, options, onSpellingMistake, () => { 182 | if (corrections.length) { 183 | writeCorrections(src, filename, corrections, onFinishedFile); 184 | } 185 | else { 186 | onFinishedFile(); 187 | } 188 | }); 189 | } 190 | 191 | export default function(file, src, options, fileProcessed) { 192 | spellAndFixFile(file, src, options, () => { 193 | spellConfig.writeFile(() => { 194 | if (options.relativeSpellingFiles) { 195 | spellConfig.writeFile(fileProcessed, true); 196 | } else { 197 | fileProcessed(); 198 | } 199 | }); 200 | }); 201 | } 202 | -------------------------------------------------------------------------------- /es6/cli.js: -------------------------------------------------------------------------------- 1 | import program from 'commander'; 2 | import fs from 'fs'; 3 | import path from 'path'; 4 | import cliInteractive from './cli-interactive'; 5 | import markdownSpellcheck from "./index"; 6 | import chalk from 'chalk'; 7 | import multiFileProcessor from './multi-file-processor'; 8 | import relativeFileProcessor from './relative-file-processor'; 9 | import spellcheck from './spellcheck'; 10 | import { generateSummaryReport, generateFileReport } from './report-generator'; 11 | 12 | const packageConfig = fs.readFileSync(path.join(__dirname, '../package.json')); 13 | const buildVersion = 
JSON.parse(packageConfig).version; 14 | 15 | program 16 | .version(buildVersion) 17 | // default cli behaviour will be an interactive walkthrough each error, with suggestions, 18 | // options to replace etc. 19 | .option('-r, --report', 'Outputs a full report which details the unique spelling errors found.') 20 | .option('-n, --ignore-numbers', 'Ignores numbers.') 21 | .option('--en-us', 'American English dictionary.') 22 | .option('--en-gb', 'British English dictionary.') 23 | .option('--en-au', 'Australian English dictionary.') 24 | .option('--es-es', 'Spanish dictionary.') 25 | .option('-d, --dictionary [file]', 'specify a custom dictionary file - it should not include the file extension and will load .dic and .aiff.') 26 | .option('-a, --ignore-acronyms', 'Ignores acronyms.') 27 | .option('-x, --no-suggestions', 'Do not suggest words (can be slow)') 28 | .option('-t, --target-relative', 'Uses ".spelling" files relative to the target.') 29 | .usage("[options] source-file source-file") 30 | .parse(process.argv); 31 | 32 | let language; 33 | if (program.enUs) { 34 | language = "en-us"; 35 | } 36 | else if (program.enGb) { 37 | language = "en-gb"; 38 | } 39 | else if (program.enAu) { 40 | language = "en-au"; 41 | } 42 | else if (program.esEs) { 43 | language = "es-es"; 44 | } 45 | 46 | const options = { 47 | ignoreAcronyms: program.ignoreAcronyms, 48 | ignoreNumbers: program.ignoreNumbers, 49 | suggestions: program.suggestions, 50 | relativeSpellingFiles: program.targetRelative, 51 | dictionary: { 52 | language: language, 53 | file: program.dictionary 54 | } 55 | }; 56 | 57 | if (!program.args.length) { 58 | program.outputHelp(); 59 | process.exit(); 60 | } 61 | else { 62 | 63 | spellcheck.initialise(options); 64 | 65 | const inputPatterns = program.args; 66 | const processor = options.relativeSpellingFiles ? 
relativeFileProcessor : multiFileProcessor; 67 | processor(inputPatterns, options, (filename, src, fileProcessed) => { 68 | 69 | if (program.report) { 70 | const errors = markdownSpellcheck.spell(src, options); 71 | if (errors.length > 0) { 72 | console.log(generateFileReport(filename, { errors: errors, src: src })); 73 | process.exitCode = 1; 74 | } 75 | fileProcessed(null, errors); 76 | } 77 | else { 78 | console.log("Spelling - " + chalk.bold(filename)); 79 | cliInteractive(filename, src, options, fileProcessed); 80 | } 81 | }, (e, results) => { 82 | console.log(generateSummaryReport(results)); 83 | }); 84 | } 85 | -------------------------------------------------------------------------------- /es6/context.js: -------------------------------------------------------------------------------- 1 | import chalk from 'chalk'; 2 | 3 | function getLines(src, index, noBefore, noAfter) { 4 | const beforeLines = []; 5 | const afterLines = []; 6 | let thisLineStart, 7 | line, 8 | column; 9 | let lastCutIndex = index; 10 | 11 | for (let i = index - 1; i >= 0; i--) { 12 | if (src[i] === '\n') { 13 | if (thisLineStart === undefined) { 14 | thisLineStart = i + 1; 15 | column = index - (i + 1); 16 | } 17 | else { 18 | beforeLines.push(src.substr(i, lastCutIndex - i)); 19 | } 20 | lastCutIndex = i; 21 | if (beforeLines.length >= noBefore) { 22 | break; 23 | } 24 | } 25 | } 26 | if (thisLineStart === undefined) { 27 | thisLineStart = 0; 28 | column = index; 29 | } 30 | for (let i = index; i < src.length; i++) { 31 | if (src[i] === '\n') { 32 | if (line === undefined) { 33 | line = src.substr(thisLineStart, i - thisLineStart); 34 | } 35 | else { 36 | afterLines.push(src.substr(lastCutIndex, i - lastCutIndex)); 37 | } 38 | lastCutIndex = i; 39 | if (afterLines.length >= noAfter) { 40 | break; 41 | } 42 | } 43 | } 44 | if (line === undefined) { 45 | line = src.slice(thisLineStart); 46 | } 47 | let lineNumber = 1; 48 | for (let i = index - 1; i >= 0; i--) { 49 | if (src[i] === '\n') 
{ 50 | lineNumber++; 51 | } 52 | } 53 | return { 54 | line, 55 | beforeLines, 56 | afterLines, 57 | column, 58 | lineNumber 59 | }; 60 | } 61 | 62 | export default { 63 | getBlock(src, index, length) { 64 | const lineInfo = getLines(src, index, 2, 2); 65 | let lineStart = 0; 66 | let lineEnd = lineInfo.line.length; 67 | if (lineInfo.column > 30) { 68 | lineStart = lineInfo.column - 30; 69 | } 70 | if ((lineEnd - (lineInfo.column + length)) > 30) { 71 | lineEnd = lineInfo.column + length + 30; 72 | } 73 | let info = lineInfo.line.substring(lineStart, lineInfo.column) + 74 | chalk.red(lineInfo.line.substr(lineInfo.column, length)) + 75 | lineInfo.line.substring(lineInfo.column + length, lineEnd); 76 | return { 77 | info, 78 | lineNumber: lineInfo.lineNumber 79 | }; 80 | } 81 | }; -------------------------------------------------------------------------------- /es6/filters.js: -------------------------------------------------------------------------------- 1 | function filterFactory(regexp) { 2 | return (errors) => 3 | errors.filter((e) => !e.word.match(regexp)); 4 | } 5 | 6 | const numbers = filterFactory(/^[0-9,\.\-#]+(th|st|nd|rd)?$/); 7 | const acronyms = filterFactory(/^[A-Z0-9]{2,}(['\u2018-\u2019]s)?$/); 8 | 9 | export default { 10 | acronyms, 11 | numbers, 12 | filter(words, options) { 13 | const ignoreAcronyms = options && options.ignoreAcronyms; 14 | const ignoreNumbers = options && options.ignoreNumbers; 15 | 16 | if (ignoreAcronyms) { 17 | words = acronyms(words); 18 | } 19 | if (ignoreNumbers) { 20 | words = numbers(words); 21 | } 22 | return words; 23 | } 24 | }; -------------------------------------------------------------------------------- /es6/index.js: -------------------------------------------------------------------------------- 1 | import fs from 'fs'; 2 | import markdownParser from './markdown-parser'; 3 | import wordParser from './word-parser'; 4 | import spellcheck from './spellcheck'; 5 | import filters from './filters'; 6 | import async 
from 'async'; 7 | import { generateSummaryReport, generateFileReport } from './report-generator'; 8 | 9 | function getWords(src, options) { 10 | let words = wordParser(markdownParser(src)); 11 | 12 | return filters.filter(words, options); 13 | } 14 | 15 | function spell(src, options) { 16 | if (typeof src !== "string") { 17 | throw new Error("spell takes a string"); 18 | } 19 | const words = getWords(src, options); 20 | return spellcheck.checkWords(words, options); 21 | } 22 | 23 | function spellFile(filename, options) { 24 | const src = fs.readFileSync(filename, 'utf-8'); 25 | return { 26 | errors: spell(src, options), 27 | src 28 | }; 29 | } 30 | 31 | function spellCallback(src, options, callback, done) { 32 | const words = getWords(src, options); 33 | 34 | async.eachSeries(words, async.ensureAsync(function(wordInfo, onWordProcessed) { 35 | if (!spellcheck.checkWord(wordInfo.word, options)) { 36 | callback(wordInfo, onWordProcessed); 37 | } 38 | else { 39 | onWordProcessed(); 40 | } 41 | }), done); 42 | } 43 | 44 | export default { spell, spellFile, spellCallback, spellcheck, generateSummaryReport, generateFileReport }; -------------------------------------------------------------------------------- /es6/markdown-parser.js: -------------------------------------------------------------------------------- 1 | import marked from "marked"; 2 | import yaml from "js-yaml"; 3 | import trackingReplacer from "./tracking-replacement"; 4 | 5 | export default function(src) { 6 | const textTokens = []; 7 | let currentIndex = 0; 8 | 9 | const tracker = trackingReplacer(src); 10 | 11 | // remove things we won't process so we can use simple next matching word logic 12 | // to calculate the index 13 | 14 | const jekyllFrontMatter = getJekyllFrontMatter(src); 15 | if (jekyllFrontMatter) { 16 | tracker.replaceAll(jekyllFrontMatter, " "); 17 | } 18 | 19 | tracker.removeAll(/```[\w\W]*?```/); 20 | tracker.removeAll(/~~~[\w\W]*?~~~/); 21 | tracker.removeAll(/``[\w\W]*?``/); 22 | 
tracker.removeAll(/`[^`]*`/); 23 | tracker.replaceAll(//, " "); // remove contents of style 24 | tracker.replaceAll(//, " "); // remove contents of scripts 25 | tracker.replaceAll(/\{%\s*highlight[\w\W]*?\{%\s*endhighlight\s*%\}/, " "); // remove contents code blocks 26 | tracker.replaceAll(/\{%.*%\}/, " "); 27 | tracker.replaceAll(/\{\{.*\}\}/, " "); 28 | tracker.replaceAll(/&[#a-z0-9]{1,5};/, " "); 29 | src = tracker.replaceAll(/<\/?[a-z0-9]+ ?([a-z]+="[^"]*" ?)*\/?>/i, " "); 30 | 31 | const options = { 32 | gfm: true, 33 | renderer: { 34 | strong: function() { 35 | }, 36 | em: function() { 37 | }, 38 | codespan: function() { 39 | }, 40 | br: function() { 41 | }, 42 | del: function() { 43 | }, 44 | link: function() { 45 | }, 46 | image: function() { 47 | }, 48 | text: function(text) { 49 | text = text.replace(/'/g, "'"); 50 | const roughSplit = text.split(/(https?|ftp):\/\/[^\s/$.?#].[^\s]*|[\s\xa0\r\n]|&[a-z#0-9]+;|[&<>]/); 51 | for (let i = 0; i < roughSplit.length; i++) { 52 | const split = roughSplit[i]; 53 | if (split) { 54 | addToken(split); 55 | } 56 | } 57 | } 58 | } 59 | }; 60 | 61 | function addToken(text) { 62 | const newIndex = src.indexOf(text, currentIndex); 63 | if (newIndex === -1) { 64 | throw new Error("Markdown Parser : Inline Lexer : Could not find index of text - \n" + text + "\n\n**In**\n\n" + src.substring(currentIndex, 30) + "\n"); 65 | } 66 | currentIndex = newIndex + text.length; 67 | textTokens.push({ text: text, index: tracker.getOriginalIndex(newIndex) }); 68 | } 69 | 70 | const tokens = marked.lexer(src, options); 71 | const inlineLexer = new marked.InlineLexer(tokens.links, options); 72 | 73 | for (let i = 0; i < tokens.length; i++) { 74 | const token = tokens[i]; 75 | if (token.text && token.type !== "code") { 76 | inlineLexer.output(token.text); 77 | } 78 | } 79 | 80 | return textTokens; 81 | } 82 | 83 | function getJekyllFrontMatter(src) { 84 | const matches = src.match(/^\r?\n?---\r?\n([\w\W]+?)\r?\n---\r?\n/); 85 | 86 | if 
(matches) { 87 | const fencedContent = matches[1]; 88 | 89 | try { 90 | const parsed = yaml.safeLoad(fencedContent); 91 | 92 | return typeof parsed === "object" ? matches[0] : undefined; 93 | } 94 | catch (e) { 95 | // not valid yaml 96 | } 97 | } 98 | } -------------------------------------------------------------------------------- /es6/multi-file-processor.js: -------------------------------------------------------------------------------- 1 | import globby from 'globby'; 2 | import async from 'async'; 3 | import spellConfig from './spell-config'; 4 | import spellcheck from "./spellcheck"; 5 | import fs from 'fs'; 6 | 7 | export default function(inputPatterns, options, fileCallback, resultCallback) { 8 | let allFiles = []; 9 | 10 | async.parallel([spellConfig.initialise.bind(spellConfig, './.spelling'), 11 | (processed) => { 12 | globby(inputPatterns) 13 | .then((files) => { 14 | allFiles = files; 15 | processed(); 16 | }) 17 | .catch(() => { 18 | console.error("Error globbing:", inputPatterns); 19 | process.exitCode = 1; 20 | processed(); 21 | }); 22 | }], () => { 23 | 24 | // finished callback - config loaded and glob has returned all files 25 | 26 | spellConfig.getGlobalWords() 27 | .forEach((word) => spellcheck.addWord(word)); 28 | 29 | async.mapSeries(allFiles, (file, fileProcessed) => { 30 | 31 | fs.readFile(file, 'utf-8', (err, src) => { 32 | 33 | if (err) { 34 | console.error("Failed to open file:" + file); 35 | console.error(err); 36 | process.exitCode = 1; 37 | return fileProcessed(); 38 | } 39 | 40 | spellConfig.getFileWords(file) 41 | .forEach((word) => spellcheck.addWord(word, true)); 42 | 43 | fileCallback(file, src, (err, result) => { 44 | spellcheck.resetTemporaryCustomDictionary(); 45 | fileProcessed(err, result); 46 | }); 47 | }); 48 | }, resultCallback); 49 | }); 50 | } 51 | -------------------------------------------------------------------------------- /es6/relative-file-processor.js: 
-------------------------------------------------------------------------------- 1 | import globby from 'globby'; 2 | import async from 'async'; 3 | import path from 'path'; 4 | import spellConfig from './spell-config'; 5 | import spellcheck from "./spellcheck"; 6 | import fs from 'fs'; 7 | 8 | export default function(inputPatterns, options, fileCallback, resultCallback) { 9 | let allFiles = []; 10 | 11 | globby(inputPatterns) 12 | .then((files) => { 13 | allFiles = files; 14 | spellCheckFiles(); 15 | }) 16 | .catch(() => { 17 | console.error("Error globbing:", inputPatterns); 18 | process.exitCode = 1; 19 | }); 20 | 21 | function spellCheckFiles() { 22 | async.mapSeries(allFiles, (file, fileProcessed) => { 23 | const relativeSpellingFile = path.join(path.dirname(file), ".spelling"); 24 | spellConfig.initialise(relativeSpellingFile, () => { 25 | processFile(file, fileProcessed); 26 | }); 27 | }, resultCallback); 28 | } 29 | 30 | function processFile(file, fileProcessed) { 31 | spellConfig.getGlobalWords().forEach((word) => spellcheck.addWord(word)); 32 | 33 | fs.readFile(file, 'utf-8', (err, src) => { 34 | if (err) { 35 | console.error("Failed to open file:" + file); 36 | console.error(err); 37 | process.exitCode = 1; 38 | return fileProcessed(); 39 | } 40 | 41 | spellConfig.getFileWords(file).forEach((word) => spellcheck.addWord(word, true)); 42 | 43 | fileCallback(file, src, (err, result) => { 44 | spellcheck.resetTemporaryCustomDictionary(); 45 | fileProcessed(err, result); 46 | }); 47 | }); 48 | } 49 | 50 | } 51 | -------------------------------------------------------------------------------- /es6/report-generator.js: -------------------------------------------------------------------------------- 1 | import chalk from 'chalk'; 2 | import context from './context'; 3 | 4 | // Generates a report that summarises the spelling errors found across multiple 5 | // markdown files. 6 | // results is an array containing the errors (as a nested array) for each file. 
7 | export function generateSummaryReport(results) { 8 | const errorCount = results.map((e) => e && e.length ? e.length : 0) 9 | .reduce((p, c) => p + c, 0); 10 | 11 | const filePlural = 'file' + (results.length > 1 ? 's' : ''); 12 | const errorPlural = 'error' + (errorCount > 1 ? 's' : ''); 13 | const areOrIs = results.length > 1 ? 'are' : 'is'; 14 | 15 | if (errorCount > 0) { 16 | return `${chalk.red('>>')} ${errorCount} spelling ${errorPlural} found in ${results.length} ${filePlural}`; 17 | } 18 | return `${chalk.green('>>')} ${results.length} ${filePlural} ${areOrIs} free from spelling errors`; 19 | } 20 | 21 | // Generates a report for the errors found in a single markdown file. 22 | export function generateFileReport(file, spellingInfo) { 23 | let report = ` ${chalk.bold(file)}\n`; 24 | 25 | for (let k = 0; k < spellingInfo.errors.length; k++) { 26 | const error = spellingInfo.errors[k]; 27 | const displayBlock = context.getBlock(spellingInfo.src, error.index, error.word.length); 28 | 29 | const lineNumber = String(displayBlock.lineNumber); 30 | const lineNumberPadding = Array(10 - lineNumber.length).join(' '); 31 | const linePrefix = `${lineNumberPadding}${lineNumber} |`; 32 | report += `${linePrefix} ${displayBlock.info} \n`; 33 | } 34 | return report; 35 | } -------------------------------------------------------------------------------- /es6/spell-config.js: -------------------------------------------------------------------------------- 1 | import fs from 'fs'; 2 | import async from 'async'; 3 | 4 | let globalDictionary = []; 5 | let fileDictionary = {}; 6 | let sharedSpelling = {}; 7 | let relativeSpelling = {}; 8 | 9 | function spellingFile(fileName) { 10 | return { 11 | fileName, 12 | fileLines: [], 13 | lastLineOfGlobalSpellings: -1, 14 | isCrLf: false, 15 | isDirty: false 16 | }; 17 | } 18 | 19 | function parse(spelling) { 20 | let lastNonCommentIndex = -1; 21 | let inGlobal = true; 22 | let currentFile; 23 | spelling.fileLines.forEach((line, index) 
=> { 24 | if (!line || line.indexOf('#') === 0) { 25 | return; 26 | } 27 | let fileMatch = line.match(/^\s*-\s+(.*)/); 28 | if (fileMatch) { 29 | if (inGlobal) { 30 | spelling.lastLineOfGlobalSpellings = lastNonCommentIndex === -1 ? index : lastNonCommentIndex + 1; 31 | inGlobal = false; 32 | } 33 | else { 34 | fileDictionary[currentFile].index = lastNonCommentIndex + 1; 35 | } 36 | currentFile = fileMatch[1]; 37 | fileDictionary[currentFile] = { words: [] }; 38 | } 39 | else { 40 | let word = line.trim(); 41 | if (inGlobal) { 42 | globalDictionary.push(word); 43 | } 44 | else { 45 | fileDictionary[currentFile].words.push(word); 46 | } 47 | } 48 | lastNonCommentIndex = index; 49 | }); 50 | // make sure we end on a new-line 51 | if (spelling.fileLines[spelling.fileLines.length - 1]) { 52 | spelling.fileLines[spelling.fileLines.length] = ""; 53 | } 54 | if (inGlobal) { 55 | spelling.lastLineOfGlobalSpellings = lastNonCommentIndex === -1 ? spelling.fileLines.length - 1 : lastNonCommentIndex + 1; 56 | } 57 | else { 58 | fileDictionary[currentFile].index = lastNonCommentIndex; 59 | } 60 | } 61 | 62 | function emptyFile(spelling) { 63 | spelling.fileLines = [ 64 | "# markdown-spellcheck spelling configuration file", 65 | "# Format - lines beginning # are comments", 66 | "# global dictionary is at the start, file overrides afterwards", 67 | "# one word per line, to define a file override use ' - filename'", 68 | "# where filename is relative to this configuration file", 69 | "" 70 | ]; 71 | spelling.lastLineOfGlobalSpellings = spelling.fileLines.length - 1; 72 | } 73 | 74 | function initConfig() { 75 | globalDictionary = []; 76 | fileDictionary = {}; 77 | sharedSpelling = spellingFile("./.spelling"); 78 | relativeSpelling = spellingFile(""); 79 | } 80 | 81 | function loadAndParseSpelling(spelling, next) { 82 | fs.readFile(spelling.fileName, { encoding: 'utf-8' }, (err, data) => { 83 | if (err) { 84 | emptyFile(spelling); 85 | return next(); 86 | } 87 | if 
// Resets module state, then loads the shared `.spelling` file and — unless the
// target IS the shared file — the target-relative `.spelling` file, in
// parallel. Calls `done` once both loads complete.
function initialise(filename, done) {
  initConfig();
  relativeSpelling.fileName = filename;
  const sharedSpellingOnly = filename === "./.spelling";
  async.parallel([
    (next) => {
      loadAndParseSpelling(sharedSpelling, next);
    },
    (next) => {
      // Avoid parsing the same file twice when the target is the shared file.
      if (sharedSpellingOnly) {
        next();
      }
      else {
        loadAndParseSpelling(relativeSpelling, next);
      }
    }
  ], () => {
    return done();
  });
}
relativeSpelling : sharedSpelling; 144 | if (fileDictionary.hasOwnProperty(filename)) { 145 | let fileDict = fileDictionary[filename]; 146 | spelling.fileLines.splice(fileDict.index, 0, word); 147 | spelling.isDirty = true; 148 | for (let dictionaryFilename in fileDictionary) { 149 | if (fileDictionary.hasOwnProperty(dictionaryFilename) && 150 | fileDictionary[dictionaryFilename].index >= fileDict.index) { 151 | fileDictionary[dictionaryFilename].index++; 152 | } 153 | } 154 | fileDict.words.push(word); 155 | } 156 | else { 157 | spelling.fileLines.splice(spelling.fileLines.length - 1, 0, " - " + filename, word); 158 | spelling.isDirty = true; 159 | fileDictionary[filename] = { 160 | index: spelling.fileLines.length - 1, 161 | words: [word] 162 | }; 163 | } 164 | } 165 | 166 | function getGlobalWords() { 167 | return globalDictionary; 168 | } 169 | 170 | function getFileWords(filename) { 171 | if (fileDictionary.hasOwnProperty(filename)) { 172 | return fileDictionary[filename].words; 173 | } 174 | return []; 175 | } 176 | 177 | export default { 178 | initialise, 179 | writeFile, 180 | addToGlobalDictionary, 181 | addToFileDictionary, 182 | getGlobalWords, 183 | getFileWords 184 | }; 185 | -------------------------------------------------------------------------------- /es6/spellcheck.js: -------------------------------------------------------------------------------- 1 | import SpellChecker from "hunspell-spellchecker"; 2 | import fs from 'fs'; 3 | import path from 'path'; 4 | 5 | let spellchecker, dict; 6 | 7 | function initialise(options) { 8 | 9 | const dictionaryOptions = options && options.dictionary; 10 | 11 | let baseFile = path.join(__dirname, '../data/en-GB'); 12 | if (dictionaryOptions && dictionaryOptions.file) { 13 | baseFile = dictionaryOptions.file; 14 | } 15 | else if (dictionaryOptions && dictionaryOptions.language) { 16 | switch (dictionaryOptions.language) { 17 | case 'en-us': 18 | baseFile = path.join(__dirname, '../data/en_US-large'); 19 | 
// Normalises typographic apostrophes (right single quotation mark, U+2019) to
// ASCII apostrophes so words match the Hunspell dictionary entries.
// The /g flag is required: without it only the FIRST curly apostrophe in the
// word was replaced, so words with several (e.g. possessive contractions)
// failed the lookup.
function normaliseApos(word) {
  return word.replace(/\u2019/g, "'");
}
// Returns the subset of `words` ({ word, index } info objects) that fail the
// spellcheck, preserving their original order.
function checkWords(words, options) {
  return words.filter((wordInfo) => !checkWord(wordInfo.word, options));
}
firstMapBefore = map; 10 | } 11 | } 12 | } 13 | if (firstMapBefore) { 14 | return firstMapBefore.index + (newIndex - firstMapBefore.newIndex); 15 | } 16 | return newIndex; 17 | } 18 | function replaceAll(target, replacement) { 19 | while (true) { // eslint-disable-line no-constant-condition 20 | let match; 21 | 22 | if (target instanceof RegExp) { 23 | match = src.match(target); 24 | } else { 25 | match = { 26 | index: src.indexOf(target), 27 | 0: target 28 | }; 29 | } 30 | 31 | if (!match || match.index === -1) { 32 | break; 33 | } 34 | 35 | const cutTo = match.index + match[0].length; 36 | const originalIndex = getOriginalIndex(cutTo); 37 | const changeInLength = match[0].length - replacement.length; 38 | 39 | for (let i = maps.length - 1; i >= 0; i--) { 40 | const map = maps[i]; 41 | if (map.newIndex >= match.index) { 42 | if (map.newIndex < cutTo) { 43 | maps.splice(i, 1); 44 | } 45 | else { 46 | map.newIndex -= changeInLength; 47 | } 48 | } 49 | } 50 | 51 | maps.push({ newIndex: match.index + replacement.length, index: originalIndex }); 52 | if (replacement.length) { 53 | maps.push({ newIndex: match.index, index: NaN }); 54 | } 55 | 56 | src = src.substring(0, match.index) + replacement + src.slice(match.index + match[0].length); 57 | } 58 | return src; 59 | } 60 | 61 | return { 62 | removeAll(target) { 63 | return replaceAll(target, ""); 64 | }, 65 | replaceAll, 66 | getOriginalIndex 67 | }; 68 | } -------------------------------------------------------------------------------- /es6/word-parser.js: -------------------------------------------------------------------------------- 1 | export default function(tokens) { 2 | const wordList = []; 3 | for (let i = 0; i < tokens.length; i++) { 4 | const token = tokens[i]; 5 | let text = token.text; 6 | let index = token.index; 7 | while (true) { // eslint-disable-line no-constant-condition 8 | const nextWord = 
// Generic comparator: 1, -1 or 0 as `a` is greater than, less than or equal
// to `b`.
function compare(a, b) {
  if (a > b) {
    return 1;
  }
  else if (a < b) {
    return -1;
  }
  return 0;
}

// Splices `newWord` into `src` in place of the `oldWord` starting at `index`.
function replaceWord(src, index, oldWord, newWord) {
  return src.slice(0, index) + newWord + src.slice(index + oldWord.length);
}

// Applies a list of corrections ({ wordInfo: { index, word }, newWord }) to
// `src` and returns the corrected text. Corrections are applied from the end
// of the string backwards so earlier indices stay valid as lengths change.
export function replace(src, corrections) {

  // Sort a shallow copy: Array#sort mutates in place, and reordering the
  // caller's array would be a surprising side effect.
  const ordered = corrections.slice()
    .sort((a, b) => /* reverse arguments - reverse list */ compare(b.wordInfo.index, a.wordInfo.index));

  for (let i = 0; i < ordered.length; i++) {
    const correction = ordered[i];
    src = replaceWord(src, correction.wordInfo.index, correction.wordInfo.word, correction.newWord);
  }

  return src;
}
import fs from 'fs';
import { replace } from './word-replacer';

// Applies the accepted `corrections` to `src` and writes the result back to
// `file`, then invokes `onCorrected` (whether or not the write succeeded —
// processing of remaining files continues either way).
export default function writeCorrections(src, file, corrections, onCorrected) {
  const correctedSrc = replace(src, corrections);
  fs.writeFile(file, correctedSrc, (err) => {
    if (err) {
      console.error("Failed to write corrections to :", file);
      // Log the underlying error too, consistent with spell-config's
      // writeFile error handling — otherwise the cause is lost.
      console.error(err);
      process.exitCode = 1;
    }
    onCorrected();
  });
}
gulp.task('lint-fix', function() { 56 | return gulp.src(paths.es6) 57 | .pipe(eslint({fix:true})) 58 | .pipe(eslint.format()) 59 | .pipe(eslintIfFixed('es6')); 60 | }); 61 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "markdown-spellcheck", 3 | "version": "2.0.0-beta.0", 4 | "description": "Spell-checks markdown files with an interactive CLI allowing automated spell checking.", 5 | "keywords": [ 6 | "markdown", 7 | "spell", 8 | "spell-check", 9 | "spellcheck", 10 | "spelling" 11 | ], 12 | "engines": { 13 | "node": ">= 4" 14 | }, 15 | "main": "es5/index.js", 16 | "module": "es6/index.js", 17 | "bin": { 18 | "mdspell": "./bin/mdspell" 19 | }, 20 | "scripts": { 21 | "test": "node bin/mdspell -ran *.md && gulp lint && gulp test" 22 | }, 23 | "author": "Luke Page", 24 | "license": "ISC", 25 | "repository": { 26 | "type": "git", 27 | "url": "https://github.com/lukeapage/node-markdown-spellcheck.git" 28 | }, 29 | "bugs": { 30 | "url": "https://github.com/lukeapage/node-markdown-spellcheck/issues" 31 | }, 32 | "dependencies": { 33 | "async": "^2.1.4", 34 | "chalk": "^2.0.1", 35 | "commander": "^2.8.1", 36 | "globby": "^6.1.0", 37 | "hunspell-spellchecker": "^1.0.2", 38 | "inquirer": "^1.0.0", 39 | "js-yaml": "^3.10.0", 40 | "marked": "^0.7.0", 41 | "sinon-as-promised": "^4.0.0" 42 | }, 43 | "devDependencies": { 44 | "babel-core": "^6.18.2", 45 | "babel-preset-es2015": "^6.3.13", 46 | "babel-preset-es2015-loose": "^8.0.0", 47 | "babel-runtime": "^6.25.0", 48 | "chai": "^4.1.1", 49 | "del": "^3.0.0", 50 | "gulp": "^3.9.0", 51 | "gulp-babel": "^6.1.1", 52 | "gulp-eslint": "^4.0.0", 53 | "gulp-eslint-if-fixed": "^1.0.0", 54 | "gulp-mocha": "^3.0.0", 55 | "proxyquire": "^1.7.0", 56 | "run-sequence": "^1.1.2", 57 | "sinon": "^1.17.5" 58 | } 59 | } 60 | 
-------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/lukeapage/node-markdown-spellcheck.svg?branch=master)](https://travis-ci.org/lukeapage/node-markdown-spellcheck) [![Build status](https://ci.appveyor.com/api/projects/status/o0ypaoe7tbm31nkd/branch/master?svg=true)](https://ci.appveyor.com/project/lukeapage/node-markdown-spellcheck/branch/master) [![npm version](https://badge.fury.io/js/markdown-spellcheck.svg)](http://badge.fury.io/js/markdown-spellcheck) [![Dependencies](https://david-dm.org/lukeapage/node-markdown-spellcheck.svg)](https://david-dm.org/lukeapage/node-markdown-spellcheck) [![devDependency Status](https://david-dm.org/lukeapage/node-markdown-spellcheck/dev-status.svg)](https://david-dm.org/lukeapage/node-markdown-spellcheck#info=devDependencies) 2 | 3 | Reads markdown files and spellchecks them, using [open source Hunspell dictionary files](#dictionaries-being-used). 4 | 5 | ## CLI Usage 6 | 7 | There are two modes, interactive fixing, which will allow you to fix mistakes and add exceptions to a custom dictionary and a report mode which will just report the list of errors. 8 | 9 | ``` 10 | # install 11 | npm i markdown-spellcheck -g 12 | 13 | # run in interactive mode 14 | mdspell "**/*.md" 15 | 16 | # see help & options 17 | mdspell 18 | ``` 19 | 20 | ### CLI Options 21 | 22 | ### Excluding patterns 23 | 24 | Multiple patterns can be used on the command line and can use `!` for negation. E.g. 25 | 26 | ``` 27 | mdspell '**/*.md' '!**/node_modules/**/*.md' 28 | ``` 29 | 30 | #### Ignore numbers (`-n`, `--ignore-numbers`) 31 | 32 | Ignores numbers like `1.2` and `1,2.4`. 33 | 34 | #### Ignore acronyms (`-a`, `--ignore-acronyms`) 35 | 36 | Ignores acronyms like `NPM`. Also ignores numbers. Does not ignore single letters e.g. `U`. 
37 | 38 | #### No suggestions (`-x`, `--no-suggestions`) 39 | 40 | Suggestions are slow at present, so use this to remove them. 41 | 42 | #### American English (`--en-us`) 43 | 44 | Use the American English dictionary. We default to British English but will change in the next major to American. 45 | 46 | #### British English (`--en-gb`) 47 | 48 | Use the British English dictionary. We default to British English but will change in the next major to American. 49 | 50 | #### Australian English (`--en-au`) 51 | 52 | Use the Australian English dictionary. 53 | 54 | #### Spanish (`--es-es`) 55 | 56 | Use the Spanish dictionary. 57 | 58 | #### Dictionary (`-d`, `--dictionary`) 59 | 60 | Specify a custom Hunspell dictionary to load. The passed filename should not include a file extension and `markdown-spellcheck` will attempt to load the file with `.aff` and `.dic` extensions. 61 | 62 | ### Interactive Mode 63 | 64 | The default interactive mode shows you the context of the spelling mistake and gives you options with what to do about it. E.g. 65 | 66 | ``` 67 | Spelling - readme.md 68 | shows you the context of the speling mistake and gives you options 69 | ? (Use arrow keys) 70 | Ignore 71 | Add to file ignores 72 | Add to dictionary - case insensitive 73 | > Enter correct spelling 74 | spelling 75 | spieling 76 | spewing 77 | selling 78 | peeling 79 | ``` 80 | 81 | Where `speling` will be highlighted in red. 82 | 83 | * "Ignore" will ignore that word and not ask about it again in the current run. If you re-run the command again though, it will appear. 84 | * "Add to file ignores" will ignore the word in this file only. 85 | * "Add to dictionary - case insensitive" will add to the dictionary for all files and match any case. E.g. with the word `Microsoft` both `Microsoft` and `microsoft` would match. 86 | * "Add to dictionary - case sensitive" will add to the dictionary for all files and match the case that has been used. E.g. 
with the word `Microsoft`, the word `microsoft` will not match. 87 | 88 | All exclusions will be stored in a `.spelling` file in the directory from which you run the command. 89 | 90 | ### Target Relative Mode 91 | 92 | Using the `--target-relative` (`-t`) option will augment the shared `.spelling` file with a relative `.spelling` file (sibling of the `.md` file) and give you the additional options with the interactive mode: 93 | 94 | * "Add to file ignores" will be replaced with "[Relative] Add to file ignores". There is no need to add file ignores into the shared `.spelling` file. 95 | * "[Relative] Add to dictionary - case insensitive" will add to the dictionary for all files within the current `.md` file and match any case. 96 | * "[Relative] Add to dictionary - case sensitive" will add to the dictionary for all files within the folder of the current `.md` file. 97 | 98 | ### Report Mode 99 | 100 | Using the `--report` (`-r`) option will show a report of all the spelling mistakes that have been found. This mode is useful for CI build reports. 101 | 102 | ## `.spelling` files 103 | 104 | The `.spelling` file is self documenting as it includes... 105 | 106 | ``` 107 | # markdown-spellcheck spelling configuration file 108 | # Format - lines begining # are comments 109 | # global dictionary is at the start, file overrides afterwards 110 | # one word per line, to define a file override use ' - filename' 111 | # where filename is relative to this configuration file 112 | ``` 113 | 114 | ## Use To Stop Spelling Regressions 115 | 116 | ### Usage with `npm` 117 | 118 | Add to your `package.json` and then run in report mode. If new spelling errors occur that are not ignored in the `.spelling` file, a error exit code will be set. 119 | 120 | For instance, if your `package.json` has: 121 | 122 | ``` 123 | "scripts": { 124 | "test": "gulp test" 125 | }, 126 | ``` 127 | 128 | Change it to... 
129 | 130 | ``` 131 | "scripts": { 132 | "test": "mdspell -r **/*.md && gulp test" 133 | }, 134 | ``` 135 | 136 | ### Usage in `grunt` 137 | 138 | See [grunt-mdspell](https://github.com/ColinEberhardt/grunt-mdspell). 139 | 140 | ### Usage in `gulp` 141 | 142 | ### Dictionaries being used 143 | 144 | #### English-GB 145 | 146 | See [https://github.com/marcoagpinto/aoo-mozilla-en-dict](https://github.com/marcoagpinto/aoo-mozilla-en-dict). 147 | 148 | Missing word? Raise it at [https://github.com/marcoagpinto/aoo-mozilla-en-dict/issues](https://github.com/marcoagpinto/aoo-mozilla-en-dict/issues). 149 | 150 | #### English-US and English-AU 151 | 152 | See [http://wordlist.aspell.net/dicts/](http://wordlist.aspell.net/dicts/). 153 | 154 | Missing word? Raise it at [https://github.com/kevina/wordlist/issues](https://github.com/kevina/wordlist/issues). 155 | -------------------------------------------------------------------------------- /test/cli-interactive.js: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import proxyquire from "proxyquire"; 3 | import sinon from "sinon"; 4 | import async from 'async'; 5 | require('sinon-as-promised'); 6 | 7 | function getCliInteractive(spellConfig, spellcheck, inquirer, writeCorrections, index) { 8 | return proxyquire('../es5/cli-interactive', 9 | { 10 | 'inquirer': inquirer, 11 | './write-corrections': { default: writeCorrections }, 12 | './spell-config': { default: spellConfig }, 13 | './spellcheck': { default: spellcheck }, 14 | './index': { default: index } 15 | }).default; 16 | } 17 | 18 | function mockSpellConfig(globalWords, fileWords) { 19 | return { 20 | addToGlobalDictionary: sinon.stub(), 21 | addToFileDictionary: sinon.stub(), 22 | writeFile: sinon.stub().callsArg(0) 23 | }; 24 | } 25 | 26 | function mockSpellcheck() { 27 | return { 28 | addWord: sinon.stub(), 29 | checkWord: sinon.stub() 30 | }; 31 | } 32 | 33 | function mockInquirer() { 34 | var 
// Builds a stub of the index module for the CLI tests. Its `spellCallback`
// feeds each supplied mistake (consumed from the END of the array) to
// `perMistake` one at a time, then signals `endOfFile`; with no mistakes it
// signals `endOfFile` immediately.
function mockIndex(mistakes) {
  return {
    spellCallback(ignore, ignore2, perMistake, endOfFile) {
      if (!mistakes) {
        endOfFile();
        return;
      }
      const next = () => {
        if (mistakes.length === 0) {
          endOfFile();
          return;
        }
        perMistake({ word: mistakes.pop(), index: 0 }, next);
      };
      next();
    }
  };
}
"enter" } = {}) => { 97 | spellcheck.checkWord.onCall(0).returns(true); 98 | spellcheck.checkWord.onCall(1).returns(true); 99 | 100 | inquirer.prompt().then(({ word = "two words" } = {}) => { 101 | expect(spellcheck.checkWord.calledTwice).to.equal(true); 102 | 103 | expect(writeCorrections.calledOnce).to.equal(true); 104 | expect(writeCorrections.firstCall.args[2]).to.deep.equal([{ 105 | "newWord": "two words", 106 | "wordInfo": { 107 | "index": 0, 108 | "word": "twowords" 109 | } 110 | }]); 111 | expect(fileProcessed.calledOnce).to.equal(true); 112 | }); 113 | }); 114 | }); 115 | 116 | it("correct word with incorrect word", () => { 117 | const inquirer = mockInquirer(); 118 | const spellcheck = mockSpellcheck(); 119 | const writeCorrections = mockWriteCorrections(); 120 | const cliInteractive = getCliInteractive(mockSpellConfig(), spellcheck, inquirer, writeCorrections, mockIndex(["incorect"])); 121 | const fileProcessed = sinon.spy(); 122 | cliInteractive("myfile", "", {}, fileProcessed); 123 | 124 | inquirer.prompt().then(({ action = "enter" } = {}) => { 125 | spellcheck.checkWord.onCall(0).returns(false); 126 | inquirer.prompt().then(({ word = "incorret" } = {}) => { 127 | inquirer.prompt().then(({ action = "enter" } = {}) => { 128 | spellcheck.checkWord.onCall(1).returns(true); 129 | inquirer.prompt().then(({ word = "incorret" } = {}) => { 130 | expect(writeCorrections.calledOnce).to.equal(true); 131 | expect(writeCorrections.firstCall.args[2]).to.deep.equal([{ 132 | "newWord": "incorrect", 133 | "wordInfo": { 134 | "index": 0, 135 | "word": "incorect" 136 | } 137 | }]); 138 | expect(fileProcessed.calledOnce).to.equal(true); 139 | }) 140 | }) 141 | }) 142 | }); 143 | }); 144 | 145 | it("correct word with filtered word", () => { 146 | const inquirer = mockInquirer(); 147 | const spellcheck = mockSpellcheck(); 148 | const writeCorrections = mockWriteCorrections(); 149 | const cliInteractive = getCliInteractive(mockSpellConfig(), spellcheck, inquirer, 
writeCorrections, mockIndex(["incorect"])); 150 | const fileProcessed = sinon.spy(); 151 | cliInteractive("myfile", "", {ignoreAcronyms: true}, fileProcessed); 152 | 153 | inquirer.prompt().then(({ action = "enter" } = {}) => { 154 | spellcheck.checkWord.onCall(0).returns(false); 155 | inquirer.prompt().then(({word = "ABS"} = {}) => { 156 | expect(writeCorrections.calledOnce).to.equal(true); 157 | expect(writeCorrections.firstCall.args[2]).to.deep.equal([{ 158 | "newWord": "ABS", 159 | "wordInfo": { 160 | "index": 0, 161 | "word": "incorect" 162 | } 163 | }]); 164 | expect(fileProcessed.calledOnce).to.equal(true); 165 | }) 166 | }); 167 | }); 168 | // todo more tests 169 | }); 170 | -------------------------------------------------------------------------------- /test/filters.js: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import filters from "../es5/filters"; 3 | 4 | describe("filters", () => { 5 | it("should remove acronyms", () => { 6 | const filteredList = filters.acronyms([ 7 | { word: "AI", index: 0 }, 8 | { word: "AI's", index:0 }, 9 | { word: "AIs", index:0 }, // controversial, not detected 10 | { word: "COntains", index:0 }, 11 | { word: "contaiNS", index:0 }, 12 | { word: "A1", index:0 }, 13 | ]); 14 | 15 | expect(filteredList).to.deep.equal([ { word: "AIs", index:0 }, { word: "COntains", index:0 }, 16 | { word: "contaiNS", index:0 }]); 17 | }); 18 | 19 | it("should remove numbers", () => { 20 | const filteredList = filters.numbers([ 21 | { word: "1", index: 0 }, 22 | { word: "123", index:0 }, 23 | { word: "12,34", index:0 }, 24 | { word: "12,34.00", index:0 }, 25 | { word: "12.3", index:0 }, 26 | { word: "A1", index:0 }, 27 | { word: "1A", index:0 } 28 | ]); 29 | 30 | expect(filteredList).to.deep.equal([ { word: "A1", index:0 }, { word: "1A", index:0 }]); 31 | }); 32 | }); -------------------------------------------------------------------------------- 
/test/fixture/.gitattributes: -------------------------------------------------------------------------------- 1 | *.md eol=lf 2 | -------------------------------------------------------------------------------- /test/fixture/test.md: -------------------------------------------------------------------------------- 1 | > File 2 | 3 | This paragraph contains no spelling mistakes. 4 | 5 | Infact, there will just be one. 6 | 7 | ``` 8 | var x = jkdhfdgs; 9 | ``` -------------------------------------------------------------------------------- /test/fixture/test2.md: -------------------------------------------------------------------------------- 1 | > File 2 | 3 | This paragraph contains no spelling mistakes. 4 | 5 | Infact, there will just be one. 6 | 7 | ~~~ 8 | var x = jkdhfdgs; 9 | ~~~ 10 | -------------------------------------------------------------------------------- /test/index.js: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import markdownSpellcheck from "../es5/index"; 3 | import path from 'path'; 4 | 5 | describe("package", () => { 6 | it("test1", () => { 7 | const spellingInfo = markdownSpellcheck.spellFile(path.join(__dirname, 'fixture/test.md')); 8 | 9 | expect(spellingInfo.errors).to.deep.equal([ { word: 'Infact', index: 55 }]); 10 | }); 11 | 12 | it("test2", () => { 13 | const spellingInfo = markdownSpellcheck.spellFile(path.join(__dirname, 'fixture/test2.md')); 14 | 15 | expect(spellingInfo.errors).to.deep.equal([ { word: 'Infact', index: 55 }]); 16 | }); 17 | }); 18 | -------------------------------------------------------------------------------- /test/markdown-parser.js: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import markdownParser from "../es5/markdown-parser"; 3 | 4 | describe("basic markdown parsing", () => { 5 | it("should be able to parse text", () => { 6 | const tokens = markdownParser(` 7 | 
hey 8 | Paragraph`); 9 | 10 | expect(tokens).to.deep.equal([ 11 | {text: 'hey', index: 1}, 12 | {text: 'Paragraph', index: 5}]); 13 | }); 14 | 15 | it("should be able to parse headings", () => { 16 | const tokens = markdownParser(` 17 | hey 18 | === 19 | Heading 20 | ------- 21 | `); 22 | 23 | expect(tokens).to.deep.equal([ 24 | {text: 'hey', index: 1}, 25 | {text: 'Heading', index: 9}]); 26 | }); 27 | 28 | it("should be able to parse lists", () => { 29 | const tokens = markdownParser(` 30 | * List item 31 | text 32 | - list1 33 | - list2 34 | `); 35 | 36 | expect(tokens).to.deep.equal([ 37 | {text: 'List', index: 4}, 38 | {text: 'item', index: 9}, 39 | {text: 'text', index: 14}, 40 | {text: 'list1', index: 22}, 41 | {text: 'list2', index: 33}]); 42 | }); 43 | 44 | it("should be able to parse underlined text", () => { 45 | const tokens = markdownParser(` 46 | _underlined text_ 47 | `); 48 | 49 | expect(tokens).to.deep.equal([ 50 | {text: 'underlined', index: 2}, 51 | {text: 'text', index: 13}]); 52 | }); 53 | 54 | it("should be able to parse links", () => { 55 | const tokens = markdownParser(` 56 | [De Link!](http://link.com/ha) 57 | `); 58 | 59 | expect(tokens).to.deep.equal([ 60 | {text: 'De', index: 2}, 61 | {text: 'Link', index: 5}, 62 | {text: '!', index: 9}]); 63 | }); 64 | 65 | it("should be able to ignore code blocks", () => { 66 | const tokens = markdownParser(` 67 | \`\`\` 68 | var code = 3; 69 | \`\`\` 70 | `); 71 | 72 | expect(tokens).to.deep.equal([]); 73 | }); 74 | 75 | it("should be able to ignore inline code blocks", () => { 76 | const tokens = markdownParser(` 77 | This is a \`var\` inline. 
78 | `); 79 | 80 | expect(tokens).to.deep.equal([ 81 | {text: 'This', index: 1}, 82 | {text: 'is', index: 6}, 83 | {text: 'a', index: 9}, 84 | {text: 'inline.', index: 17}]); 85 | }); 86 | 87 | it("should be able to ignore jekyll front matter", () => { 88 | const tokens = markdownParser(` 89 | --- 90 | title: Post title 91 | --- 92 | Hello 93 | `); 94 | 95 | expect(tokens).to.deep.equal([ 96 | {text: 'Hello', index: 27} 97 | ]); 98 | }); 99 | 100 | it("doesn't ignore text between two horizontal rules at the beginning of the content", () => { 101 | const tokens = markdownParser(` 102 | --- 103 | Apple 104 | --- 105 | Banana 106 | `); 107 | 108 | expect(tokens).to.deep.equal([ 109 | {text: 'Apple', index: 5}, 110 | {text: 'Banana', index: 15} 111 | ]); 112 | }); 113 | 114 | it("doesn't ignore text between two horizontal rules in the middle of the content", () => { 115 | const tokens = markdownParser(` 116 | Apple 117 | --- 118 | Banana 119 | --- 120 | Orange 121 | `); 122 | 123 | expect(tokens).to.deep.equal([ 124 | {text: 'Apple', index: 1}, 125 | {text: 'Banana', index: 11}, 126 | {text: 'Orange', index: 22} 127 | ]); 128 | }); 129 | 130 | it("doesn't ignore text between jekyll front matter and a horizontal rule in the content", () => { 131 | const tokens = markdownParser(` 132 | --- 133 | author: test 134 | --- 135 | This should be spell checked 136 | --- 137 | `); 138 | expect(tokens).to.deep.equal([ 139 | { text: 'This', index: 22 }, 140 | { text: 'should', index: 27 }, 141 | { text: 'be', index: 34 }, 142 | { text: 'spell', index: 37 }, 143 | { text: 'checked', index: 43 }]); 144 | }); 145 | 146 | it("should be able to cope with double back-tick", () => { 147 | const tokens = markdownParser(` 148 | This is a \`\`var\` with backtick\`\` inline. 
149 | `); 150 | 151 | expect(tokens).to.deep.equal([ 152 | {text: 'This', index: 1}, 153 | {text: 'is', index: 6}, 154 | {text: 'a', index: 9}, 155 | {text: 'inline.', index: 34}]); 156 | }); 157 | 158 | it("should be able to ignore html tags", () => { 159 | const tokens = markdownParser(` 160 |

H1.

161 |

pinner

162 | `); 163 | 164 | expect(tokens).to.deep.equal([ 165 | {text: 'H1.', index: 5}, 166 | {text: 'p', index: 17}, 167 | {text: 'inner', index: 22}]); 168 | }); 169 | 170 | it("doesn't confuse repeating words", () => { 171 | const tokens = markdownParser("code code"); 172 | 173 | expect(tokens).to.deep.equal([ 174 | {text: 'code', index: 0}, 175 | {text: 'code', index: 5}]); 176 | }); 177 | 178 | it("copes with html entities", () => { 179 | const tokens = markdownParser(""code""); 180 | 181 | expect(tokens).to.deep.equal([ 182 | {text: 'code', index: 6}]); 183 | }); 184 | 185 | it("copes with html entities with the same code as later text", () => { 186 | const tokens = markdownParser(""quot"); 187 | 188 | expect(tokens).to.deep.equal([ 189 | {text: 'quot', index: 6}]); 190 | }); 191 | 192 | it("copes with quotes followed by text matching the entity name", () => { 193 | const tokens = markdownParser("\"quot"); 194 | 195 | expect(tokens).to.deep.equal([ 196 | {text: 'quot', index: 1}]); 197 | }); 198 | 199 | it("copes with quote marks", () => { 200 | const tokens = markdownParser('"code"'); 201 | 202 | expect(tokens).to.deep.equal([ 203 | {text: 'code', index: 1}]); 204 | }); 205 | 206 | it("doesn't confuse tags", () => { 207 | const tokens = markdownParser("codecode"); 208 | 209 | expect(tokens).to.deep.equal([ 210 | {text: 'code', index: 6}, 211 | {text: 'code', index: 17}]); 212 | }); 213 | 214 | it("doesn't confuse codeblocks", () => { 215 | const tokens = markdownParser(` 216 | \`\`\` 217 | code 218 | \`\`\` 219 | code 220 | `); 221 | 222 | expect(tokens).to.deep.equal([ 223 | {text: 'code', index: 14}]); 224 | }); 225 | 226 | it("doesn't confuse inline code", () => { 227 | const tokens = markdownParser("`code` code"); 228 | 229 | expect(tokens).to.deep.equal([ 230 | {text: 'code', index: 7}]); 231 | }); 232 | 233 | it("doesn't confuse tags", () => { 234 | const tokens = markdownParser("codecode"); 235 | 236 | expect(tokens).to.deep.equal([ 237 | {text: 'code', 
index: 6}, 238 | {text: 'code', index: 17}]); 239 | }); 240 | 241 | it("handles code blocks that are spaced", () => { 242 | const tokens = markdownParser(` 243 | $('#upload-form').transloadit({ 244 | `); 245 | 246 | expect(tokens).to.deep.equal([]); 247 | }); 248 | }); 249 | -------------------------------------------------------------------------------- /test/multi-file-processor.js: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import proxyquire from "proxyquire"; 3 | import sinon from "sinon"; 4 | import async from 'async'; 5 | 6 | function getMultiFileProcessor(globby, spellConfig, spellcheck) { 7 | return proxyquire('../es5/multi-file-processor', 8 | { 9 | 'globby': globby, 10 | './spell-config': { default: spellConfig }, 11 | './spellcheck': { default: spellcheck }, 12 | 'fs': { 13 | readFile: sinon.stub().callsArg(2) 14 | } 15 | }).default; 16 | } 17 | 18 | function mockGlobby(files) { 19 | return function(patterns) { 20 | return { 21 | then: function(cb) { 22 | cb(files); 23 | return this; 24 | }, 25 | catch: function() { 26 | return this; 27 | } 28 | }; 29 | }; 30 | } 31 | 32 | function mockSpellConfig(globalWords, fileWords) { 33 | var mockedSpellConfig = { 34 | initialise: sinon.stub(), 35 | getGlobalWords: sinon.stub().returns(globalWords || []), 36 | getFileWords: sinon.stub() 37 | }; 38 | 39 | if (fileWords) { 40 | fileWords.forEach((fileWord, index) => { 41 | mockedSpellConfig.getFileWords 42 | .onCall(index) 43 | .returns(fileWord); 44 | }); 45 | } else { 46 | mockedSpellConfig.getFileWords.returns([]); 47 | } 48 | 49 | mockedSpellConfig.initialise.callsArg(1); 50 | 51 | return mockedSpellConfig; 52 | } 53 | 54 | function mockSpellcheck() { 55 | return { 56 | addWord: sinon.stub(), 57 | resetTemporaryCustomDictionary: sinon.stub() 58 | }; 59 | } 60 | 61 | 62 | describe("multi-file-processor", () => { 63 | 64 | beforeEach(() => { 65 | sinon.stub(async, 
"setImmediate").callsArg(0); 66 | sinon.stub(async, "nextTick").callsArg(0); 67 | }); 68 | afterEach(() => { 69 | async.setImmediate.restore(); 70 | async.nextTick.restore(); 71 | }); 72 | 73 | it("should work with empty patterns", () => { 74 | const spellConfig = mockSpellConfig(); 75 | const multiFileProcessor = getMultiFileProcessor(mockGlobby([]), spellConfig, mockSpellcheck()); 76 | const fileCallSpy = sinon.stub(); 77 | fileCallSpy.callsArg(1); 78 | const finishedSpy = sinon.spy(); 79 | 80 | multiFileProcessor([], {}, fileCallSpy, finishedSpy); 81 | 82 | expect(fileCallSpy.notCalled).to.equal(true); 83 | expect(finishedSpy.calledOnce).to.equal(true); 84 | expect(spellConfig.initialise.calledOnce).to.equal(true); 85 | }); 86 | 87 | it("should work with multiple patterns", () => { 88 | 89 | const spellConfig = mockSpellConfig(["global-word"], [["word-1"],["word-2-a", "word-2-b"],[],["word-4"]]); 90 | const spellcheck = mockSpellcheck(); 91 | const multiFileProcessor = getMultiFileProcessor(mockGlobby(["1", "2", "3", "4"]), spellConfig, spellcheck); 92 | const fileCallSpy = sinon.stub(); 93 | fileCallSpy.callsArg(2); 94 | const finishedSpy = sinon.spy(); 95 | 96 | multiFileProcessor(["a", "b"], {}, fileCallSpy, finishedSpy); 97 | 98 | expect(fileCallSpy.callCount).to.equal(4); 99 | expect(fileCallSpy.getCall(0).args[0]).to.equal("1"); 100 | expect(fileCallSpy.getCall(1).args[0]).to.equal("2"); 101 | expect(fileCallSpy.getCall(2).args[0]).to.equal("3"); 102 | expect(fileCallSpy.getCall(3).args[0]).to.equal("4"); 103 | expect(finishedSpy.calledOnce).to.equal(true); 104 | expect(spellConfig.initialise.calledOnce).to.equal(true); 105 | 106 | expect(spellcheck.addWord.callCount).to.equal(5); 107 | expect(spellcheck.resetTemporaryCustomDictionary.callCount).to.equal(4); 108 | }); 109 | }); -------------------------------------------------------------------------------- /test/relative-file-processor.js: 
-------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import proxyquire from "proxyquire"; 3 | import sinon from "sinon"; 4 | import async from 'async'; 5 | 6 | function getRelativeFileProcessor(globby, spellConfig, spellcheck) { 7 | return proxyquire('../es5/relative-file-processor', 8 | { 9 | 'globby': globby, 10 | './spell-config': { default: spellConfig }, 11 | './spellcheck': { default: spellcheck }, 12 | 'fs': { 13 | readFile: sinon.stub().callsArg(2) 14 | } 15 | }).default; 16 | } 17 | 18 | function mockGlobby(files) { 19 | return function(patterns) { 20 | return { 21 | then: function(cb) { 22 | cb(files); 23 | return this; 24 | }, 25 | catch: function() { 26 | return this; 27 | } 28 | }; 29 | }; 30 | } 31 | 32 | function mockSpellConfig(globalWords, fileWords) { 33 | var mockedSpellConfig = { 34 | initialise: sinon.stub(), 35 | getGlobalWords: sinon.stub().returns(globalWords || []), 36 | getFileWords: sinon.stub() 37 | }; 38 | 39 | if (fileWords) { 40 | fileWords.forEach((fileWord, index) => { 41 | mockedSpellConfig.getFileWords 42 | .onCall(index) 43 | .returns(fileWord); 44 | }); 45 | } else { 46 | mockedSpellConfig.getFileWords.returns([]); 47 | } 48 | 49 | mockedSpellConfig.initialise.callsArg(1); 50 | 51 | return mockedSpellConfig; 52 | } 53 | 54 | function mockSpellcheck() { 55 | return { 56 | addWord: sinon.stub(), 57 | resetTemporaryCustomDictionary: sinon.stub(), 58 | resetDictionary: sinon.stub() 59 | }; 60 | } 61 | 62 | 63 | describe("relative-file-processor", () => { 64 | 65 | beforeEach(() => { 66 | sinon.stub(async, "setImmediate").callsArg(0); 67 | sinon.stub(async, "nextTick").callsArg(0); 68 | }); 69 | afterEach(() => { 70 | async.setImmediate.restore(); 71 | async.nextTick.restore(); 72 | }); 73 | 74 | it("should work with empty patterns", () => { 75 | const spellConfig = mockSpellConfig(); 76 | const relativeFileProcessor = getRelativeFileProcessor(mockGlobby([]), 
spellConfig, mockSpellcheck()); 77 | const fileCallSpy = sinon.stub(); 78 | fileCallSpy.callsArg(1); 79 | const finishedSpy = sinon.spy(); 80 | 81 | relativeFileProcessor([], {}, fileCallSpy, finishedSpy); 82 | 83 | expect(fileCallSpy.notCalled).to.equal(true); 84 | expect(finishedSpy.calledOnce).to.equal(true); 85 | expect(spellConfig.initialise.calledOnce).to.equal(false); 86 | }); 87 | 88 | it("should work with single pattern", () => { 89 | const spellConfig = mockSpellConfig(); 90 | const relativeFileProcessor = getRelativeFileProcessor(mockGlobby(["1"]), spellConfig, mockSpellcheck()); 91 | const fileCallSpy = sinon.stub(); 92 | fileCallSpy.callsArg(2); 93 | const finishedSpy = sinon.spy(); 94 | 95 | relativeFileProcessor(["1"], {}, fileCallSpy, finishedSpy); 96 | 97 | expect(fileCallSpy.notCalled).to.equal(false); 98 | expect(finishedSpy.calledOnce).to.equal(true); 99 | expect(spellConfig.initialise.calledOnce).to.equal(true); 100 | }); 101 | 102 | 103 | it("should work with multiple patterns", () => { 104 | 105 | const spellConfig = mockSpellConfig(["global-word"], [["word-1"], ["word-2-a", "word-2-b"], [], ["word-4"]]); 106 | const spellcheck = mockSpellcheck(); 107 | const relativeFileProcessor = getRelativeFileProcessor(mockGlobby(["1", "2", "3", "4"]), spellConfig, spellcheck); 108 | const fileCallSpy = sinon.stub(); 109 | fileCallSpy.callsArg(2); 110 | const finishedSpy = sinon.spy(); 111 | 112 | relativeFileProcessor(["1", "2"], {}, fileCallSpy, finishedSpy); 113 | 114 | expect(fileCallSpy.callCount).to.equal(4); 115 | expect(fileCallSpy.getCall(0).args[0]).to.equal("1"); 116 | expect(fileCallSpy.getCall(1).args[0]).to.equal("2"); 117 | expect(fileCallSpy.getCall(2).args[0]).to.equal("3"); 118 | expect(fileCallSpy.getCall(3).args[0]).to.equal("4"); 119 | expect(finishedSpy.calledOnce).to.equal(true); 120 | expect(spellConfig.initialise.called).to.equal(true); 121 | 122 | //1 global word for each file, then 1 word for file 1, 2 words for file 2 and 1 
word for file 4. 123 | expect(spellcheck.addWord.callCount).to.equal(8); 124 | expect(spellcheck.resetTemporaryCustomDictionary.callCount).to.equal(4); 125 | }); 126 | }); -------------------------------------------------------------------------------- /test/spell-config.js: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import sinon from "sinon"; 3 | import proxyquire from "proxyquire"; 4 | 5 | function getSpellConfig() { 6 | return proxyquire('../es5/spell-config', { 7 | 'fs': { 8 | readFile: sinon.stub().callsArgWith(2, null, ""), 9 | writeFile: sinon.stub().callsArgWith(2, null) 10 | } 11 | }).default; 12 | } 13 | 14 | describe("Spell-Config", () => { 15 | 16 | it("should initialise correctly and call done", () => { 17 | const spellConfig = getSpellConfig(); 18 | const initDone = sinon.stub(); 19 | spellConfig.initialise("./.spelling", initDone); 20 | expect(initDone.calledOnce).to.equal(true); 21 | }); 22 | 23 | it("should add global words into array", () => { 24 | const spellConfig = getSpellConfig(); 25 | const initDone = sinon.stub(); 26 | spellConfig.initialise("./.spelling", initDone); 27 | spellConfig.addToGlobalDictionary("aaaaa"); 28 | expect(spellConfig.getGlobalWords().length).to.equal(1); 29 | expect(spellConfig.getGlobalWords()[0]).to.equal("aaaaa"); 30 | expect(initDone.calledOnce).to.equal(true); 31 | }); 32 | 33 | it("should add global words from relative or shared into array", () => { 34 | const spellConfig = getSpellConfig(); 35 | const initDone = sinon.stub(); 36 | spellConfig.initialise("/relative/.spelling", initDone); 37 | spellConfig.addToGlobalDictionary("aaaaa", false); 38 | spellConfig.addToGlobalDictionary("bbbbb", true); 39 | expect(spellConfig.getGlobalWords().length).to.equal(2); 40 | expect(spellConfig.getGlobalWords()[1]).to.equal("bbbbb"); 41 | expect(initDone.calledOnce).to.equal(true); 42 | }); 43 | 44 | it("should add file words into array", () => { 45 | 
const FILE = "/relative/blog.md"; 46 | const initDone = sinon.stub(); 47 | const spellConfig = getSpellConfig(); 48 | spellConfig.initialise("./.spelling", initDone); 49 | spellConfig.addToFileDictionary(FILE, "aaaaa", false); 50 | expect(spellConfig.getFileWords(FILE).length).to.equal(1); 51 | expect(initDone.calledOnce).to.equal(true); 52 | }); 53 | 54 | it("should add file words from relative or shared into array", () => { 55 | const FILE = "/relative/blog.md"; 56 | const initDone = sinon.stub(); 57 | const spellConfig = getSpellConfig(); 58 | spellConfig.initialise("/relative/.spelling", initDone); 59 | spellConfig.addToFileDictionary(FILE, "aaaaa", false); 60 | spellConfig.addToFileDictionary(FILE, "bbbbb", true); 61 | expect(spellConfig.getFileWords(FILE).length).to.equal(2); 62 | expect(initDone.calledOnce).to.equal(true); 63 | }); 64 | 65 | it("should call done after writeFile when spelling file is dirty or clean", () => { 66 | const spellConfig = getSpellConfig(); 67 | const initDone = sinon.stub(); 68 | spellConfig.initialise("./.spelling", initDone); 69 | expect(initDone.calledOnce).to.equal(true); 70 | 71 | const writeCleanFileDone = sinon.stub(); 72 | spellConfig.writeFile(writeCleanFileDone); 73 | expect(writeCleanFileDone.calledOnce).to.equal(true); 74 | 75 | const writeDirtyFileDone = sinon.stub(); 76 | spellConfig.addToGlobalDictionary("aaaaa", false); 77 | spellConfig.writeFile(writeDirtyFileDone); 78 | expect(writeDirtyFileDone.calledOnce).to.equal(true); 79 | }); 80 | 81 | }); -------------------------------------------------------------------------------- /test/spellcheck.js: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import spellcheck from "../es5/spellcheck"; 3 | 4 | describe("spell checker", () => { 5 | it("should detect bad spelling", () => { 6 | const badWords = spellcheck.checkWords([{ word: "notreal", index: 0 }]); 7 | 8 | expect(badWords).to.deep.equal([ { 
word: 'notreal', index: 0 }]); 9 | }); 10 | 11 | it("should detect good spelling", () => { 12 | const badWords = spellcheck.checkWords([{ word: "This", index: 0 }, { word: "sentence", index: 5 }]); 13 | 14 | expect(badWords).to.deep.equal([]); 15 | }); 16 | 17 | it("should allow words needing '.'", () => { 18 | const badWords = spellcheck.checkWords([{ word: "etc", index: 0 }]); 19 | 20 | expect(badWords).to.deep.equal([]); 21 | }); 22 | 23 | it("should allow words dashed", () => { 24 | const badWords = spellcheck.checkWords([{ word: "real-world", index: 0 }]); 25 | 26 | expect(badWords).to.deep.equal([]); 27 | }); 28 | 29 | it("should allow plural on anything", () => { 30 | const badWords = spellcheck.checkWords([{ word: "safety's", index: 0 }]); 31 | 32 | expect(badWords).to.deep.equal([]); 33 | }); 34 | 35 | it("should allow plural with utf apos on anything", () => { 36 | const badWords = spellcheck.checkWords([{ word: "safety’s", index: 0 }]); 37 | 38 | expect(badWords).to.deep.equal([]); 39 | }); 40 | 41 | it("should utf apos when adding words", () => { 42 | spellcheck.addWord("badwordspelling’s"); 43 | const badWords = spellcheck.checkWords([{ word: "badwordspelling’s", index: 0 }]); 44 | 45 | expect(badWords).to.deep.equal([]); 46 | }); 47 | 48 | }); 49 | -------------------------------------------------------------------------------- /test/tracking-replacement.js: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import trackingReplacement from "../es5/tracking-replacement"; 3 | 4 | describe("tracking replacement", () => { 5 | it("tracks a single replace all", () => { 6 | const replacer = trackingReplacement("s abc e"); 7 | var replaced = replacer.removeAll(/\sabc\s/); 8 | expect(replaced).to.equal("se"); 9 | expect(replacer.getOriginalIndex(0)).to.equal(0); // s 10 | expect(replacer.getOriginalIndex(1)).to.equal(6); // e 11 | }); 12 | 13 | it("tracks a single replace all - 2 chars", () 
=> { 14 | const replacer = trackingReplacement("sa abc ea"); 15 | var replaced = replacer.removeAll(/\sabc\s/); 16 | expect(replaced).to.equal("saea"); 17 | expect(replacer.getOriginalIndex(0)).to.equal(0); 18 | expect(replacer.getOriginalIndex(1)).to.equal(1); 19 | expect(replacer.getOriginalIndex(2)).to.equal(7); 20 | expect(replacer.getOriginalIndex(3)).to.equal(8); 21 | }); 22 | 23 | it("tracks a single replace all with multiple replacements", () => { 24 | const replacer = trackingReplacement("s abc e abc d"); 25 | var replaced = replacer.removeAll(/\sabc\s/); 26 | expect(replaced).to.equal("sed"); 27 | expect(replacer.getOriginalIndex(0)).to.equal(0); // s 28 | expect(replacer.getOriginalIndex(1)).to.equal(6); // e 29 | expect(replacer.getOriginalIndex(2)).to.equal(12); // d 30 | }); 31 | 32 | it("tracks a multi- replace all with multiple replacements", () => { 33 | const replacer = trackingReplacement("_s_ abc _e_ abc _d_"); 34 | replacer.removeAll(/\sabc\s/); 35 | var replaced = replacer.removeAll(/_/); 36 | expect(replaced).to.equal("sed"); 37 | expect(replacer.getOriginalIndex(0)).to.equal(1); // s 38 | expect(replacer.getOriginalIndex(1)).to.equal(9); // e 39 | expect(replacer.getOriginalIndex(2)).to.equal(17); // d 40 | }); 41 | 42 | it("tracks a multi- replace all which removes already removed", () => { 43 | const replacer = trackingReplacement("_b_a_c_"); 44 | replacer.removeAll(/a/); 45 | var replaced = replacer.removeAll(/_/); 46 | expect(replaced).to.equal("bc"); 47 | expect(replacer.getOriginalIndex(0)).to.equal(1); 48 | expect(replacer.getOriginalIndex(1)).to.equal(5); 49 | }); 50 | 51 | it("tracks a multi- replace all which removes already removed entirely", () => { 52 | const replacer = trackingReplacement("_b_a_c_"); 53 | replacer.removeAll(/a/); 54 | var replaced = replacer.removeAll(/__/); 55 | expect(replaced).to.equal("_bc_"); 56 | expect(replacer.getOriginalIndex(0)).to.equal(0); 57 | expect(replacer.getOriginalIndex(1)).to.equal(1); 58 | 
expect(replacer.getOriginalIndex(2)).to.equal(5); 59 | expect(replacer.getOriginalIndex(3)).to.equal(6); 60 | }); 61 | 62 | it("tracks a single replaceAll with string not regex", () => { 63 | const frontMatter = "---author:tester---"; 64 | const replacer = trackingReplacement(`${frontMatter} content`); 65 | const replaced = replacer.removeAll(frontMatter); 66 | expect(replaced).to.equal(" content"); 67 | expect(replacer.getOriginalIndex(2)).to.equal(21); 68 | expect(replacer.getOriginalIndex(3)).to.equal(22); 69 | }); 70 | 71 | it("tracks a single replaceAll when target string contains regex", () => { 72 | const frontMatter = ` 73 | --- 74 | author: tester 75 | summary: "In my last article (on line annotation components for D3 charts)" 76 | ---`; 77 | const replacer = trackingReplacement(`${frontMatter} content`); 78 | const replaced = replacer.removeAll(frontMatter); 79 | expect(replaced).to.equal(" content"); 80 | expect(replacer.getOriginalIndex(2)).to.equal(117); 81 | }); 82 | 83 | }); -------------------------------------------------------------------------------- /test/word-parser.js: -------------------------------------------------------------------------------- 1 | import { expect } from 'chai'; 2 | import wordParser from "../es5/word-parser"; 3 | 4 | describe("word parser", () => { 5 | it("should be able to find a word", () => { 6 | const words = wordParser([{ text: "word", index: 0 }]); 7 | 8 | expect(words).to.deep.equal([ { word: 'word', index: 0 }]); 9 | }); 10 | 11 | it("should be able to find a cyrillic word", () => { 12 | const words = wordParser([{ text: "монгол", index: 0 }]); 13 | 14 | expect(words).to.deep.equal([ { word: 'монгол', index: 0 }]); 15 | }); 16 | 17 | it("should be able to find multiple words", () => { 18 | const words = wordParser([{ text: "a word", index: 0 }]); 19 | 20 | expect(words).to.deep.equal([ { word: 'a', index: 0 }, { word: 'word', index: 2 }]); 21 | }); 22 | 23 | it("should be able to find multiple words from mixed 
string with latin & cyrillic", () => { 24 | const words = wordParser([{ text: "Mongolia монгол", index: 0 }]); 25 | 26 | expect(words).to.deep.equal([ { word: 'Mongolia', index: 0 }, { word: 'монгол', index: 9 }]); 27 | }); 28 | 29 | it("should ignore punctuation", () => { 30 | const words = wordParser([{ text: "! yeah. but,far", index: 0 }]); 31 | 32 | expect(words).to.deep.equal([ { word: 'yeah', index: 2 }, { word: 'but', index: 8 }, { word: 'far', index: 12 }]); 33 | }); 34 | 35 | it("should include 's", () => { 36 | const words = wordParser([{ text: "Luke's James'", index: 0 }]); 37 | 38 | expect(words).to.deep.equal([ { word: "Luke's", index: 0 }, { word: 'James', index: 7 }]); 39 | }); 40 | 41 | it("should include #", () => { 42 | const words = wordParser([{ text: "##3 C#5s", index: 0 }]); 43 | 44 | expect(words).to.deep.equal([ { word: "3", index: 2 }, { word: "C#5s", index: 4 }]); 45 | }); 46 | 47 | it("should not include # at start", () => { 48 | const words = wordParser([{ text: "$('#word", index: 0 }]); 49 | 50 | expect(words).to.deep.equal([ { word: "word", index: 4 }]); 51 | }); 52 | 53 | it("should include accented characters", () => { 54 | const words = wordParser([{ text: "\u00c2lph\u00c2 gr\u00ffb", index: 0 }]); 55 | 56 | expect(words).to.deep.equal([ { word: "\u00c2lph\u00c2", index: 0 }, { word: "gr\u00ffb", index: 6 }]); 57 | }); 58 | 59 | it("should include utf characters", () => { 60 | const words = wordParser([{ text: "Ocakbaşı Balıkçısı", index: 0 }]); 61 | 62 | expect(words).to.deep.equal([ { word: "Ocakbaşı", index: 0 }, { word: "Balıkçısı", index: 9 }]); 63 | }); 64 | 65 | it("should include full stops sometimes", () => { 66 | const words = wordParser([{ text: "e.t.c. end. 
Node.JS", index: 0 }]); 67 | 68 | expect(words).to.deep.equal([ { word: "e.t.c.", index: 0 }, { word: "end", index: 7 }, { word: "Node.JS", index: 12 }]); 69 | }); 70 | 71 | it("should include dashed in the middle", () => { 72 | const words = wordParser([{ text: "full-stop -end", index: 0 }]); 73 | 74 | expect(words).to.deep.equal([ { word: "full-stop", index: 0 }, { word: "end", index: 11 }]); 75 | }); 76 | 77 | }); 78 | --------------------------------------------------------------------------------