├── .npmrc ├── __tests__ ├── fixture │ ├── simple-comment.scss │ ├── unquoted-import.scss │ ├── double-quoted-import.scss │ ├── interpolant │ │ ├── number.scss │ │ ├── quoted-string.scss │ │ ├── unquoted-string.scss │ │ └── decimal.scss │ ├── single-quoted-import.scss │ ├── unquoted-import-path.scss │ ├── comment-with-trailing-space.scss │ ├── double-quoted-import-path.scss │ ├── multiline-comment.scss │ ├── single-quoted-import-path.scss │ └── docblock-comment.scss ├── interpolant.js ├── comment.js ├── sass-spec.js ├── import.js └── __snapshots__ │ ├── interpolant.js.snap │ ├── comment.js.snap │ └── import.js.snap ├── index.js ├── CHANGELOG.md ├── .babelrc ├── .travis.yml ├── .gitignore ├── src ├── entry.js ├── input.js ├── previous-map.js ├── tokenize-string.js ├── tokenize-comment.js ├── tokenize-interpolant.js └── tokenize.js ├── LICENSE ├── package.json └── README.md /.npmrc: -------------------------------------------------------------------------------- 1 | package-lock = true 2 | -------------------------------------------------------------------------------- /__tests__/fixture/simple-comment.scss: -------------------------------------------------------------------------------- 1 | /* my comment */ 2 | -------------------------------------------------------------------------------- /__tests__/fixture/unquoted-import.scss: -------------------------------------------------------------------------------- 1 | @import foo; 2 | -------------------------------------------------------------------------------- /__tests__/fixture/double-quoted-import.scss: -------------------------------------------------------------------------------- 1 | @import "foo"; 2 | -------------------------------------------------------------------------------- /__tests__/fixture/interpolant/number.scss: -------------------------------------------------------------------------------- 1 | #{1} 2 | #{123} 3 | -------------------------------------------------------------------------------- 
/__tests__/fixture/single-quoted-import.scss: -------------------------------------------------------------------------------- 1 | @import 'foo'; 2 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | module.exports = require('./lib/entry').default; 2 | -------------------------------------------------------------------------------- /__tests__/fixture/unquoted-import-path.scss: -------------------------------------------------------------------------------- 1 | @import foo/bar; 2 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | See https://github.com/sasstools/scss-tokenizer/releases 2 | -------------------------------------------------------------------------------- /__tests__/fixture/comment-with-trailing-space.scss: -------------------------------------------------------------------------------- 1 | /*my comment*/ 2 | -------------------------------------------------------------------------------- /__tests__/fixture/double-quoted-import-path.scss: -------------------------------------------------------------------------------- 1 | @import "foo/bar"; 2 | -------------------------------------------------------------------------------- /__tests__/fixture/interpolant/quoted-string.scss: -------------------------------------------------------------------------------- 1 | #{"hello world!"} 2 | -------------------------------------------------------------------------------- /__tests__/fixture/interpolant/unquoted-string.scss: -------------------------------------------------------------------------------- 1 | #{hello world} 2 | -------------------------------------------------------------------------------- /__tests__/fixture/multiline-comment.scss: 
-------------------------------------------------------------------------------- 1 | /* 2 | my comment 3 | */ 4 | -------------------------------------------------------------------------------- /__tests__/fixture/single-quoted-import-path.scss: -------------------------------------------------------------------------------- 1 | @import 'foo/bar'; 2 | -------------------------------------------------------------------------------- /__tests__/fixture/interpolant/decimal.scss: -------------------------------------------------------------------------------- 1 | #{1.0} 2 | #{10.00} 3 | #{.100} 4 | -------------------------------------------------------------------------------- /.babelrc: -------------------------------------------------------------------------------- 1 | { 2 | "presets": [ 3 | ["env", {"node": true}] 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /__tests__/fixture/docblock-comment.scss: -------------------------------------------------------------------------------- 1 | /** 2 | * line 1 3 | * 4 | * line 2 5 | */ 6 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | branches: [master] 3 | node_js: 4 | - 6 5 | - 7 6 | - 8 7 | - 9 8 | - 10 9 | - 11 10 | script: 11 | - npm run build 12 | - npm run test 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Dependency directory 2 | # https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git 3 | node_modules 4 | 5 | # Built files 6 | lib 7 | 8 | # Metadata 9 | .DS_Store 10 | -------------------------------------------------------------------------------- /src/entry.js: -------------------------------------------------------------------------------- 
1 | import Input from './input'; 2 | import tokenize from './tokenize'; 3 | 4 | let scss = {}; 5 | scss.tokenize = function(css) { 6 | let input = new Input(css); 7 | return tokenize(input); 8 | }; 9 | 10 | export default scss; 11 | -------------------------------------------------------------------------------- /__tests__/interpolant.js: -------------------------------------------------------------------------------- 1 | var scss = require('..'); 2 | var fs = require('fs'); 3 | var path = require('path'); 4 | 5 | var fixture = function(name) { 6 | return fs.readFileSync( 7 | path.join(__dirname, 'fixture', name) 8 | ); 9 | } 10 | 11 | describe('interpolant', function() { 12 | it('should tokenize interpolant decimal', function() { 13 | const tree = scss.tokenize(fixture('interpolant/decimal.scss')); 14 | expect(tree).toMatchSnapshot(); 15 | }); 16 | 17 | it('should tokenize interpolant number', function() { 18 | const tree = scss.tokenize(fixture('interpolant/number.scss')); 19 | expect(tree).toMatchSnapshot(); 20 | }); 21 | 22 | it('should tokenize interpolant quoted-string', function() { 23 | const tree = scss.tokenize(fixture('interpolant/quoted-string.scss')); 24 | expect(tree).toMatchSnapshot(); 25 | }); 26 | 27 | it('should tokenize interpolant unquoted-string', function() { 28 | const tree = scss.tokenize(fixture('interpolant/unquoted-string.scss')); 29 | expect(tree).toMatchSnapshot(); 30 | }); 31 | }); 32 | -------------------------------------------------------------------------------- /__tests__/comment.js: -------------------------------------------------------------------------------- 1 | var scss = require('..'); 2 | var fs = require('fs'); 3 | var path = require('path'); 4 | 5 | var fixture = function(name) { 6 | return fs.readFileSync( 7 | path.join(__dirname, 'fixture', name) 8 | ); 9 | } 10 | 11 | describe('Comment', function() { 12 | it('should tokenize a simple comment', function() { 13 | const tree = 
scss.tokenize(fixture('simple-comment.scss')); 14 | expect(tree).toMatchSnapshot(); 15 | }); 16 | 17 | it('should tokenize a multiline comment', function() { 18 | const tree = scss.tokenize(fixture('multiline-comment.scss')); 19 | expect(tree).toMatchSnapshot(); 20 | }); 21 | 22 | it('should tokenize a docblock comment', function() { 23 | const tree = scss.tokenize(fixture('docblock-comment.scss')); 24 | expect(tree).toMatchSnapshot(); 25 | }); 26 | 27 | it('should tokenize a comment that does not have a space before the end', function() { 28 | const tree = scss.tokenize(fixture('comment-with-trailing-space.scss')); 29 | expect(tree).toMatchSnapshot(); 30 | }); 31 | }); 32 | -------------------------------------------------------------------------------- /src/input.js: -------------------------------------------------------------------------------- 1 | import PreviousMap from './previous-map'; 2 | 3 | import path from 'path'; 4 | 5 | let sequence = 0; 6 | 7 | export default class Input { 8 | constructor(css, opts = { }) { 9 | this.css = css.toString(); 10 | 11 | if ( this.css[0] === '\uFEFF' || this.css[0] === '\uFFFE' ) { 12 | this.css = this.css.slice(1); 13 | } 14 | 15 | if ( opts.from ) this.file = path.resolve(opts.from); 16 | 17 | let map = new PreviousMap(this.css, opts, this.id); 18 | if ( map.text ) { 19 | this.map = map; 20 | let file = map.consumer().file; 21 | if ( !this.file && file ) this.file = this.mapResolve(file); 22 | } 23 | 24 | if ( this.file ) { 25 | this.from = this.file; 26 | } else { 27 | sequence += 1; 28 | this.id = '<input css ' + sequence + '>'; 29 | this.from = this.id; 30 | } 31 | if ( this.map ) this.map.file = this.from; 32 | } 33 | 34 | mapResolve(file) { 35 | return path.resolve(this.map.consumer().sourceRoot || '.', file); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c)
2015 sasstools 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "scss-tokenizer", 3 | "version": "0.4.3", 4 | "description": "A tokenizer for Sass' SCSS syntax", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "jest", 8 | "clean": "rm lib/*", 9 | "build": "npm run clean; babel src/ --out-dir lib", 10 | "prepublishOnly": "npm run build" 11 | }, 12 | "files": [ 13 | "index.js", 14 | "lib" 15 | ], 16 | "repository": { 17 | "type": "git", 18 | "url": "https://github.com/sasstools/scss-tokenizer.git" 19 | }, 20 | "keywords": [ 21 | "parser", 22 | "tokenizer", 23 | "sass", 24 | "scss", 25 | "libsass" 26 | ], 27 | "author": "xzyfer", 28 | "license": "MIT", 29 | "bugs": { 30 | "url": "https://github.com/sasstools/scss-tokenizer/issues" 31 | }, 32 | "homepage": "https://github.com/sasstools/scss-tokenizer", 33 | "dependencies": { 34 | "js-base64": "^2.4.9", 35 | "source-map": "^0.7.3" 36 | }, 37 | "devDependencies": { 38 | "babel-cli": "^6.26.0", 39 | "babel-core": "^6.26.3", 40 | "babel-jest": "^23.6.0", 41 | "babel-preset-env": "^1.7.0", 42 | "glob": "^7.1.3", 43 | "jest": "^23.6.0", 44 | "sass-spec": "3.5.1" 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # scss-tokenizer 2 | A tokenizer for Sass' SCSS syntax 3 | 4 | ![https://travis-ci.org/sasstools/scss-tokenizer.svg?branch=master](https://img.shields.io/travis/sasstools/scss-tokenizer.svg) 5 | ![https://www.npmjs.com/package/scss-tokenizer](https://img.shields.io/npm/v/scss-tokenizer.svg) 6 | ![https://github.com/sasstools/scss-tokenizer/issues](https://img.shields.io/github/issues/sasstools/scss-tokenizer.svg) 7 | ![](https://img.shields.io/github/license/sasstools/scss-tokenizer.svg) 8 | 9 | # Install 10 | 11 | ``` 12 | npm
install scss-tokenizer 13 | ``` 14 | 15 | # Usage 16 | 17 | ```js 18 | var scss = require('scss-tokenizer'); 19 | scss.tokenize(css); 20 | ``` 21 | 22 | # API 23 | 24 | ### `tokenize` 25 | 26 | Tokenizes source `css` and returns an ordered array of tokens with positional 27 | data. 28 | 29 | ```js 30 | var tokenizer = require('scss-tokenizer'); 31 | var tokens = tokenizer.tokenize(css); 32 | ``` 33 | 34 | Arguments: 35 | 36 | * `css (string|#toString)`: String with input CSS or any object 37 | with `toString()` method, like file stream. 38 | * `opts (object) optional`: options: 39 | * `from`: the path to the source CSS file. You should always set `from`, 40 | because it is used in map generation and in syntax error messages. 41 | 42 | # Test 43 | 44 | ``` 45 | npm test 46 | ``` 47 | 48 | ## Attribution 49 | 50 | This project started as a fork of the [PostCSS](https://github.com/postcss/postcss) tokenizer. 51 | -------------------------------------------------------------------------------- /__tests__/sass-spec.js: -------------------------------------------------------------------------------- 1 | // ----------------------------------------------------------------------------- 2 | // Assert we're able to tokenize sass-spec. 3 | // We're not asserting correctness here, just if we can tokenize sass-spec 4 | // without crashing or freezing the process. 5 | // ----------------------------------------------------------------------------- 6 | 7 | var fs = require('fs'); 8 | var path = require('path'); 9 | var glob = require('glob'); 10 | var spec = require('sass-spec'); 11 | var scss = require('../'); 12 | 13 | var contents, file, errorFile, i; 14 | var fails = []; 15 | var files = glob.sync(path.join(spec.dirname, 'spec/**/input.scss')); 16 | 17 | // ----------------------------------------------------------------------------- 18 | // Tokenize sass-spec. The tokenizer doesn't have any error conditions. 19 | // If an uncaught exception is thrown we report the failure to the user.
20 | // ----------------------------------------------------------------------------- 21 | 22 | describe('Sass spec', function() { 23 | it('should tokenize all specs', function() { 24 | for(i = 0; i < files.length; i++) { 25 | file = files[i]; 26 | errorFile = path.join(path.dirname(file), 'error'); 27 | 28 | try { 29 | if (fs.statSync(errorFile)) continue; 30 | } catch (e) { } 31 | 32 | contents = fs.readFileSync(file, { encoding: 'utf8' }); 33 | scss.tokenize(contents); 34 | } 35 | }, 30000); 36 | }); 37 | -------------------------------------------------------------------------------- /__tests__/import.js: -------------------------------------------------------------------------------- 1 | var scss = require('..'); 2 | var fs = require('fs'); 3 | var path = require('path'); 4 | 5 | var fixture = function(name) { 6 | return fs.readFileSync( 7 | path.join(__dirname, 'fixture', name) 8 | ); 9 | } 10 | 11 | describe('@import', function() { 12 | it('should tokenize a @import with double quotes', function() { 13 | const tree = scss.tokenize(fixture('double-quoted-import.scss')); 14 | expect(tree).toMatchSnapshot(); 15 | }); 16 | 17 | it('should tokenize a @import with single quotes', function() { 18 | const tree = scss.tokenize(fixture('single-quoted-import.scss')); 19 | expect(tree).toMatchSnapshot(); 20 | }); 21 | 22 | it('should tokenize a @import without quotes', function() { 23 | const tree = scss.tokenize(fixture('unquoted-import.scss')); 24 | expect(tree).toMatchSnapshot(); 25 | }); 26 | 27 | it('should tokenize a @import with double quotes and slash', function() { 28 | const tree = scss.tokenize(fixture('double-quoted-import-path.scss')); 29 | expect(tree).toMatchSnapshot(); 30 | }); 31 | 32 | it('should tokenize a @import with single quotes and slash', function() { 33 | const tree = scss.tokenize(fixture('single-quoted-import-path.scss')); 34 | expect(tree).toMatchSnapshot(); 35 | }); 36 | 37 | it('should tokenize a @import without quotes and slash', 
function() { 38 | const tree = scss.tokenize(fixture('unquoted-import-path.scss')); 39 | expect(tree).toMatchSnapshot(); 40 | }); 41 | }); 42 | -------------------------------------------------------------------------------- /src/previous-map.js: -------------------------------------------------------------------------------- 1 | import { Base64 } from 'js-base64'; 2 | import mozilla from 'source-map'; 3 | import path from 'path'; 4 | import fs from 'fs'; 5 | 6 | export default class PreviousMap { 7 | constructor(css, opts) { 8 | this.loadAnnotation(css); 9 | this.inline = this.startWith(this.annotation, 'data:'); 10 | 11 | let prev = opts.map ? opts.map.prev : undefined; 12 | let text = this.loadMap(opts.from, prev); 13 | if ( text ) this.text = text; 14 | } 15 | 16 | consumer() { 17 | if ( !this.consumerCache ) { 18 | this.consumerCache = new mozilla.SourceMapConsumer(this.text); 19 | } 20 | return this.consumerCache; 21 | } 22 | 23 | withContent() { 24 | return !!(this.consumer().sourcesContent && 25 | this.consumer().sourcesContent.length > 0); 26 | } 27 | 28 | startWith(string, start) { 29 | if ( !string ) return false; 30 | return string.substr(0, start.length) === start; 31 | } 32 | 33 | loadAnnotation(css) { 34 | let match = css.match(/\/\*\s*# sourceMappingURL=((?:(?!sourceMappingURL=).)*)\s*\*\//); 35 | if ( match ) this.annotation = match[1].trim(); 36 | } 37 | 38 | decodeInline(text) { 39 | let utfd64 = 'data:application/json;charset=utf-8;base64,'; 40 | let utf64 = 'data:application/json;charset=utf8;base64,'; 41 | let b64 = 'data:application/json;base64,'; 42 | let uri = 'data:application/json,'; 43 | 44 | if ( this.startWith(text, uri) ) { 45 | return decodeURIComponent( text.substr(uri.length) ); 46 | 47 | } else if ( this.startWith(text, base64) ) { 48 | return Base64.decode( text.substr(base64.length) ); 49 | 50 | } else if ( this.startWith(text, utf64) ) { 51 | return Base64.decode( text.substr(utf64.length) ); 52 | 53 | } else if ( 
this.startWith(text, utfd64) ) { 54 | return Base64.decode( text.substr(utfd64.length) ); 55 | 56 | } else { 57 | let encoding = text.match(/data:application\/json;([^,]+),/)[1]; 58 | throw new Error('Unsupported source map encoding ' + encoding); 59 | } 60 | } 61 | 62 | loadMap(file, prev) { 63 | if ( prev === false ) return false; 64 | 65 | if ( prev ) { 66 | if ( typeof prev === 'string' ) { 67 | return prev; 68 | } else if ( prev instanceof mozilla.SourceMapConsumer ) { 69 | return mozilla.SourceMapGenerator 70 | .fromSourceMap(prev).toString(); 71 | } else if ( prev instanceof mozilla.SourceMapGenerator ) { 72 | return prev.toString(); 73 | } else if ( typeof prev === 'object' && prev.mappings ) { 74 | return JSON.stringify(prev); 75 | } else { 76 | throw new Error('Unsupported previous source map format: ' + 77 | prev.toString()); 78 | } 79 | 80 | } else if ( this.inline ) { 81 | return this.decodeInline(this.annotation); 82 | 83 | } else if ( this.annotation ) { 84 | let map = this.annotation; 85 | if ( file ) map = path.join(path.dirname(file), map); 86 | 87 | this.root = path.dirname(map); 88 | if ( fs.existsSync && fs.existsSync(map) ) { 89 | return fs.readFileSync(map, 'utf-8').toString().trim(); 90 | } else { 91 | return false; 92 | } 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /__tests__/__snapshots__/interpolant.js.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`interpolant should tokenize interpolant decimal 1`] = ` 4 | Array [ 5 | Array [ 6 | "startInterpolant", 7 | "#{", 8 | 1, 9 | 2, 10 | ], 11 | Array [ 12 | "number", 13 | "1.0", 14 | 1, 15 | 3, 16 | 1, 17 | 5, 18 | ], 19 | Array [ 20 | "endInterpolant", 21 | "}", 22 | 1, 23 | 6, 24 | ], 25 | Array [ 26 | "newline", 27 | " 28 | ", 29 | 2, 30 | 0, 31 | ], 32 | Array [ 33 | "startInterpolant", 34 | "#{", 35 | 2, 36 | 2, 37 | ], 38 | 
Array [ 39 | "number", 40 | "10.00", 41 | 2, 42 | 3, 43 | 2, 44 | 7, 45 | ], 46 | Array [ 47 | "endInterpolant", 48 | "}", 49 | 2, 50 | 8, 51 | ], 52 | Array [ 53 | "newline", 54 | " 55 | ", 56 | 3, 57 | 0, 58 | ], 59 | Array [ 60 | "startInterpolant", 61 | "#{", 62 | 3, 63 | 2, 64 | ], 65 | Array [ 66 | "number", 67 | ".100", 68 | 3, 69 | 3, 70 | 3, 71 | 6, 72 | ], 73 | Array [ 74 | "endInterpolant", 75 | "}", 76 | 3, 77 | 7, 78 | ], 79 | Array [ 80 | "newline", 81 | " 82 | ", 83 | 4, 84 | 0, 85 | ], 86 | ] 87 | `; 88 | 89 | exports[`interpolant should tokenize interpolant number 1`] = ` 90 | Array [ 91 | Array [ 92 | "startInterpolant", 93 | "#{", 94 | 1, 95 | 2, 96 | ], 97 | Array [ 98 | "number", 99 | "1", 100 | 1, 101 | 3, 102 | 1, 103 | 3, 104 | ], 105 | Array [ 106 | "endInterpolant", 107 | "}", 108 | 1, 109 | 4, 110 | ], 111 | Array [ 112 | "newline", 113 | " 114 | ", 115 | 2, 116 | 0, 117 | ], 118 | Array [ 119 | "startInterpolant", 120 | "#{", 121 | 2, 122 | 2, 123 | ], 124 | Array [ 125 | "number", 126 | "123", 127 | 2, 128 | 3, 129 | 2, 130 | 5, 131 | ], 132 | Array [ 133 | "endInterpolant", 134 | "}", 135 | 2, 136 | 6, 137 | ], 138 | Array [ 139 | "newline", 140 | " 141 | ", 142 | 3, 143 | 0, 144 | ], 145 | ] 146 | `; 147 | 148 | exports[`interpolant should tokenize interpolant quoted-string 1`] = ` 149 | Array [ 150 | Array [ 151 | "startInterpolant", 152 | "#{", 153 | 1, 154 | 2, 155 | ], 156 | Array [ 157 | "\\"", 158 | "\\"", 159 | 1, 160 | 3, 161 | ], 162 | Array [ 163 | "string", 164 | "hello world!", 165 | 1, 166 | 4, 167 | 1, 168 | 15, 169 | ], 170 | Array [ 171 | "\\"", 172 | "\\"", 173 | 1, 174 | 16, 175 | ], 176 | Array [ 177 | "endInterpolant", 178 | "}", 179 | 1, 180 | 17, 181 | ], 182 | Array [ 183 | "newline", 184 | " 185 | ", 186 | 2, 187 | 0, 188 | ], 189 | ] 190 | `; 191 | 192 | exports[`interpolant should tokenize interpolant unquoted-string 1`] = ` 193 | Array [ 194 | Array [ 195 | "startInterpolant", 196 | "#{", 197 | 1, 198 | 2, 
199 | ], 200 | Array [ 201 | "ident", 202 | "hello", 203 | 1, 204 | 3, 205 | 1, 206 | 7, 207 | ], 208 | Array [ 209 | "space", 210 | " ", 211 | ], 212 | Array [ 213 | "ident", 214 | "world", 215 | 1, 216 | 9, 217 | 1, 218 | 13, 219 | ], 220 | Array [ 221 | "endInterpolant", 222 | "}", 223 | 1, 224 | 14, 225 | ], 226 | Array [ 227 | "newline", 228 | " 229 | ", 230 | 2, 231 | 0, 232 | ], 233 | ] 234 | `; 235 | -------------------------------------------------------------------------------- /__tests__/__snapshots__/comment.js.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`Comment should tokenize a comment that does not have a space before the end 1`] = ` 4 | Array [ 5 | Array [ 6 | "startComment", 7 | "/*", 8 | 1, 9 | 2, 10 | ], 11 | Array [ 12 | "word", 13 | "my", 14 | 1, 15 | 3, 16 | 1, 17 | 4, 18 | ], 19 | Array [ 20 | "space", 21 | " ", 22 | ], 23 | Array [ 24 | "word", 25 | "comment", 26 | 1, 27 | 6, 28 | 1, 29 | 12, 30 | ], 31 | Array [ 32 | "endComment", 33 | "*/", 34 | 1, 35 | 14, 36 | ], 37 | ] 38 | `; 39 | 40 | exports[`Comment should tokenize a docblock comment 1`] = ` 41 | Array [ 42 | Array [ 43 | "startComment", 44 | "/*", 45 | 1, 46 | 2, 47 | ], 48 | Array [ 49 | "word", 50 | "*", 51 | 1, 52 | 3, 53 | 1, 54 | 3, 55 | ], 56 | Array [ 57 | "newline", 58 | " 59 | ", 60 | 2, 61 | 0, 62 | ], 63 | Array [ 64 | "space", 65 | " ", 66 | ], 67 | Array [ 68 | "word", 69 | "*", 70 | 2, 71 | 2, 72 | 2, 73 | 2, 74 | ], 75 | Array [ 76 | "space", 77 | " ", 78 | ], 79 | Array [ 80 | "word", 81 | "line", 82 | 2, 83 | 4, 84 | 2, 85 | 7, 86 | ], 87 | Array [ 88 | "space", 89 | " ", 90 | ], 91 | Array [ 92 | "word", 93 | "1", 94 | 2, 95 | 9, 96 | 2, 97 | 9, 98 | ], 99 | Array [ 100 | "newline", 101 | " 102 | ", 103 | 3, 104 | 0, 105 | ], 106 | Array [ 107 | "space", 108 | " ", 109 | ], 110 | Array [ 111 | "word", 112 | "*", 113 | 3, 114 | 2, 115 | 3, 116 | 2, 117 | ], 
118 | Array [ 119 | "newline", 120 | " 121 | ", 122 | 4, 123 | 0, 124 | ], 125 | Array [ 126 | "space", 127 | " ", 128 | ], 129 | Array [ 130 | "word", 131 | "*", 132 | 4, 133 | 2, 134 | 4, 135 | 2, 136 | ], 137 | Array [ 138 | "space", 139 | " ", 140 | ], 141 | Array [ 142 | "word", 143 | "line", 144 | 4, 145 | 4, 146 | 4, 147 | 7, 148 | ], 149 | Array [ 150 | "space", 151 | " ", 152 | ], 153 | Array [ 154 | "word", 155 | "2", 156 | 4, 157 | 9, 158 | 4, 159 | 9, 160 | ], 161 | Array [ 162 | "newline", 163 | " 164 | ", 165 | 5, 166 | 0, 167 | ], 168 | Array [ 169 | "space", 170 | " ", 171 | ], 172 | Array [ 173 | "endComment", 174 | "*/", 175 | 5, 176 | 3, 177 | ], 178 | ] 179 | `; 180 | 181 | exports[`Comment should tokenize a multiline comment 1`] = ` 182 | Array [ 183 | Array [ 184 | "startComment", 185 | "/*", 186 | 1, 187 | 2, 188 | ], 189 | Array [ 190 | "newline", 191 | " 192 | ", 193 | 2, 194 | 0, 195 | ], 196 | Array [ 197 | "word", 198 | "my", 199 | 2, 200 | 1, 201 | 2, 202 | 2, 203 | ], 204 | Array [ 205 | "space", 206 | " ", 207 | ], 208 | Array [ 209 | "word", 210 | "comment", 211 | 2, 212 | 4, 213 | 2, 214 | 10, 215 | ], 216 | Array [ 217 | "newline", 218 | " 219 | ", 220 | 3, 221 | 0, 222 | ], 223 | Array [ 224 | "endComment", 225 | "*/", 226 | 3, 227 | 2, 228 | ], 229 | ] 230 | `; 231 | 232 | exports[`Comment should tokenize a simple comment 1`] = ` 233 | Array [ 234 | Array [ 235 | "startComment", 236 | "/*", 237 | 1, 238 | 2, 239 | ], 240 | Array [ 241 | "space", 242 | " ", 243 | ], 244 | Array [ 245 | "word", 246 | "my", 247 | 1, 248 | 4, 249 | 1, 250 | 5, 251 | ], 252 | Array [ 253 | "space", 254 | " ", 255 | ], 256 | Array [ 257 | "word", 258 | "comment", 259 | 1, 260 | 7, 261 | 1, 262 | 13, 263 | ], 264 | Array [ 265 | "space", 266 | " ", 267 | ], 268 | Array [ 269 | "endComment", 270 | "*/", 271 | 1, 272 | 16, 273 | ], 274 | ] 275 | `; 276 | -------------------------------------------------------------------------------- 
/src/tokenize-string.js: -------------------------------------------------------------------------------- 1 | import Input from './input'; 2 | import tokenizeString from './tokenize-string'; 3 | import tokenizeInterpolant from './tokenize-interpolant'; 4 | 5 | let singleQuote = "'".charCodeAt(0), 6 | doubleQuote = '"'.charCodeAt(0), 7 | newline = '\n'.charCodeAt(0), 8 | space = ' '.charCodeAt(0), 9 | feed = '\f'.charCodeAt(0), 10 | tab = '\t'.charCodeAt(0), 11 | cr = '\r'.charCodeAt(0), 12 | hash = '#'.charCodeAt(0), 13 | backslash = '\\'.charCodeAt(0), 14 | slash = '/'.charCodeAt(0), 15 | openCurly = '{'.charCodeAt(0), 16 | closeCurly = '}'.charCodeAt(0), 17 | interpolantEnd = /([.\s]*?)[^\\](?=(}))/gm, 18 | sQuoteEnd = /([.\s]*?)[^\\](?=((#{)|'))/gm, 19 | dQuoteEnd = /([.\s]*?)[^\\](?=((#{)|"))/gm; 20 | 21 | export default function tokenize(input, l, p, o, quote) { 22 | let tokens = []; 23 | let css = input.css.valueOf(); 24 | 25 | let code, next, lines, last, content, escape, 26 | nextLine, nextOffset, escaped, escapePos, 27 | inInterpolant, inComment, inString; 28 | 29 | let length = css.length; 30 | let offset = o || -1; 31 | let line = l || 1; 32 | let pos = p || 0; 33 | 34 | let quoteEnd = quote === "'" ? 
sQuoteEnd : dQuoteEnd; 35 | let quoteChar = quote.charCodeAt(0); 36 | 37 | loop: 38 | while ( pos < length ) { 39 | code = css.charCodeAt(pos); 40 | 41 | if ( code === newline ) { 42 | offset = pos; 43 | line += 1; 44 | } 45 | 46 | switch ( code ) { 47 | 48 | case closeCurly: 49 | tokens.push(['endInterpolant', '}', line, pos - offset]); 50 | break; 51 | 52 | case quoteChar: 53 | tokens.push([quote, quote, line, pos - offset]); 54 | break loop; 55 | 56 | case backslash: 57 | next = pos; 58 | escape = true; 59 | while ( css.charCodeAt(next + 1) === backslash ) { 60 | next += 1; 61 | escape = !escape; 62 | } 63 | code = css.charCodeAt(next + 1); 64 | if ( escape && (code !== slash && 65 | code !== space && 66 | code !== newline && 67 | code !== tab && 68 | code !== cr && 69 | code !== feed ) ) { 70 | next += 1; 71 | } 72 | tokens.push(['string', css.slice(pos, next + 1), 73 | line, pos - offset, 74 | line, next - offset 75 | ]); 76 | pos = next; 77 | break; 78 | 79 | default: 80 | if ( code === hash && css.charCodeAt(pos + 1) === openCurly ) { 81 | tokens.push(['startInterpolant', '#{', line, pos + 1 - offset]); 82 | next = pos + 1; 83 | 84 | let { tokens: t, line: l, pos: p, offset: o } = tokenizeInterpolant(input, line, next + 1, offset); 85 | tokens = tokens.concat(t); 86 | next = p; 87 | line = l; 88 | offset = o; 89 | 90 | pos = next; 91 | } else { 92 | quoteEnd.lastIndex = pos; 93 | quoteEnd.test(css); 94 | 95 | if ( quoteEnd.lastIndex === 0 ) { 96 | next = css.length - 1; 97 | } else { 98 | next = quoteEnd.lastIndex - 1; 99 | } 100 | 101 | tokens.push(['string', css.slice(pos, next + 1), 102 | line, pos - offset, 103 | line, next - offset 104 | ]); 105 | 106 | pos = next; 107 | } 108 | 109 | break; 110 | } 111 | 112 | pos++; 113 | } 114 | 115 | return { tokens, line, pos, offset }; 116 | } 117 | -------------------------------------------------------------------------------- /__tests__/__snapshots__/import.js.snap: 
-------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`@import should tokenize a @import with double quotes 1`] = ` 4 | Array [ 5 | Array [ 6 | "@", 7 | "@", 8 | 1, 9 | 1, 10 | ], 11 | Array [ 12 | "ident", 13 | "import", 14 | 1, 15 | 2, 16 | 1, 17 | 7, 18 | ], 19 | Array [ 20 | "space", 21 | " ", 22 | ], 23 | Array [ 24 | "\\"", 25 | "\\"", 26 | 1, 27 | 9, 28 | ], 29 | Array [ 30 | "string", 31 | "foo", 32 | 1, 33 | 10, 34 | 1, 35 | 12, 36 | ], 37 | Array [ 38 | "\\"", 39 | "\\"", 40 | 1, 41 | 13, 42 | ], 43 | Array [ 44 | ";", 45 | ";", 46 | 1, 47 | 14, 48 | ], 49 | Array [ 50 | "newline", 51 | " 52 | ", 53 | 2, 54 | 0, 55 | ], 56 | ] 57 | `; 58 | 59 | exports[`@import should tokenize a @import with double quotes and slash 1`] = ` 60 | Array [ 61 | Array [ 62 | "@", 63 | "@", 64 | 1, 65 | 1, 66 | ], 67 | Array [ 68 | "ident", 69 | "import", 70 | 1, 71 | 2, 72 | 1, 73 | 7, 74 | ], 75 | Array [ 76 | "space", 77 | " ", 78 | ], 79 | Array [ 80 | "\\"", 81 | "\\"", 82 | 1, 83 | 9, 84 | ], 85 | Array [ 86 | "string", 87 | "foo/bar", 88 | 1, 89 | 10, 90 | 1, 91 | 16, 92 | ], 93 | Array [ 94 | "\\"", 95 | "\\"", 96 | 1, 97 | 17, 98 | ], 99 | Array [ 100 | ";", 101 | ";", 102 | 1, 103 | 18, 104 | ], 105 | Array [ 106 | "newline", 107 | " 108 | ", 109 | 2, 110 | 0, 111 | ], 112 | ] 113 | `; 114 | 115 | exports[`@import should tokenize a @import with single quotes 1`] = ` 116 | Array [ 117 | Array [ 118 | "@", 119 | "@", 120 | 1, 121 | 1, 122 | ], 123 | Array [ 124 | "ident", 125 | "import", 126 | 1, 127 | 2, 128 | 1, 129 | 7, 130 | ], 131 | Array [ 132 | "space", 133 | " ", 134 | ], 135 | Array [ 136 | "'", 137 | "'", 138 | 1, 139 | 9, 140 | ], 141 | Array [ 142 | "string", 143 | "foo", 144 | 1, 145 | 10, 146 | 1, 147 | 12, 148 | ], 149 | Array [ 150 | "'", 151 | "'", 152 | 1, 153 | 13, 154 | ], 155 | Array [ 156 | ";", 157 | ";", 158 | 1, 159 | 14, 160 | ], 161 | Array [ 162 | 
"newline", 163 | " 164 | ", 165 | 2, 166 | 0, 167 | ], 168 | ] 169 | `; 170 | 171 | exports[`@import should tokenize a @import with single quotes and slash 1`] = ` 172 | Array [ 173 | Array [ 174 | "@", 175 | "@", 176 | 1, 177 | 1, 178 | ], 179 | Array [ 180 | "ident", 181 | "import", 182 | 1, 183 | 2, 184 | 1, 185 | 7, 186 | ], 187 | Array [ 188 | "space", 189 | " ", 190 | ], 191 | Array [ 192 | "'", 193 | "'", 194 | 1, 195 | 9, 196 | ], 197 | Array [ 198 | "string", 199 | "foo/bar", 200 | 1, 201 | 10, 202 | 1, 203 | 16, 204 | ], 205 | Array [ 206 | "'", 207 | "'", 208 | 1, 209 | 17, 210 | ], 211 | Array [ 212 | ";", 213 | ";", 214 | 1, 215 | 18, 216 | ], 217 | Array [ 218 | "newline", 219 | " 220 | ", 221 | 2, 222 | 0, 223 | ], 224 | ] 225 | `; 226 | 227 | exports[`@import should tokenize a @import without quotes 1`] = ` 228 | Array [ 229 | Array [ 230 | "@", 231 | "@", 232 | 1, 233 | 1, 234 | ], 235 | Array [ 236 | "ident", 237 | "import", 238 | 1, 239 | 2, 240 | 1, 241 | 7, 242 | ], 243 | Array [ 244 | "space", 245 | " ", 246 | ], 247 | Array [ 248 | "ident", 249 | "foo", 250 | 1, 251 | 9, 252 | 1, 253 | 11, 254 | ], 255 | Array [ 256 | ";", 257 | ";", 258 | 1, 259 | 12, 260 | ], 261 | Array [ 262 | "newline", 263 | " 264 | ", 265 | 2, 266 | 0, 267 | ], 268 | ] 269 | `; 270 | 271 | exports[`@import should tokenize a @import without quotes and slash 1`] = ` 272 | Array [ 273 | Array [ 274 | "@", 275 | "@", 276 | 1, 277 | 1, 278 | ], 279 | Array [ 280 | "ident", 281 | "import", 282 | 1, 283 | 2, 284 | 1, 285 | 7, 286 | ], 287 | Array [ 288 | "space", 289 | " ", 290 | ], 291 | Array [ 292 | "ident", 293 | "foo", 294 | 1, 295 | 9, 296 | 1, 297 | 11, 298 | ], 299 | Array [ 300 | "/", 301 | "/", 302 | 1, 303 | 12, 304 | ], 305 | Array [ 306 | "ident", 307 | "bar", 308 | 1, 309 | 13, 310 | 1, 311 | 15, 312 | ], 313 | Array [ 314 | ";", 315 | ";", 316 | 1, 317 | 16, 318 | ], 319 | Array [ 320 | "newline", 321 | " 322 | ", 323 | 2, 324 | 0, 325 | ], 326 | ] 327 | `; 328 
-------------------------------------------------------------------------------- /src/tokenize-comment.js: --------------------------------------------------------------------------------
import Input from './input';
import tokenizeString from './tokenize-string';
import tokenizeInterpolant from './tokenize-interpolant';

// Char codes compared against charCodeAt() results in the scanner loop.
let newline = '\n'.charCodeAt(0),
    space = ' '.charCodeAt(0),
    feed = '\f'.charCodeAt(0),
    tab = '\t'.charCodeAt(0),
    cr = '\r'.charCodeAt(0),
    hash = '#'.charCodeAt(0),
    backslash = '\\'.charCodeAt(0),
    slash = '/'.charCodeAt(0),
    openCurly = '{'.charCodeAt(0),
    closeCurly = '}'.charCodeAt(0),
    asterisk = '*'.charCodeAt(0),
    // Word terminators inside a comment body; also stops before "*/" and "#{".
    // NOTE: /g regex — lastIndex is stateful and is reset before every use below.
    wordEnd = /[ \n\t\r\(\)\{\},:;@!'"\\]|\*(?=\/)|#(?={)/g;

/**
 * Tokenize the INTERIOR of a `/* ... *\/` comment, starting just after the
 * opening `/*`, until the closing `*\/` is seen (the `*\/` itself is left for
 * the caller to consume and emit as `endComment`).
 *
 * @param {Input} input - wrapper whose `.css` holds the full source text.
 * @param {number} [l] - current 1-based line (defaults to 1).
 * @param {number} [p] - index in `css` to start scanning at (defaults to 0).
 * @param {number} [o] - index of the last seen newline; columns are computed
 *                       as `pos - offset` (defaults to -1 so column 1 = index 0).
 * @returns {{tokens: Array, line: number, pos: number, offset: number}}
 *          Tokens plus the scanner state the caller resumes from.
 */
export default function tokenize(input, l, p, o) {
    let tokens = [];
    let css = input.css.valueOf();

    let code, next, lines, last, content, escape,
        nextLine, nextOffset, escaped, escapePos,
        inInterpolant, inComment, inString;

    let length = css.length;
    let offset = o || -1;
    let line = l || 1;
    let pos = p || 0;

    loop:
    while ( pos < length ) {
        code = css.charCodeAt(pos);

        // Track line/offset so token columns stay relative to the current line.
        if ( code === newline ) {
            offset = pos;
            line += 1;
        }

        switch ( code ) {
            case space:
            case tab:
            case cr:
            case feed:
                // Coalesce a run of non-newline whitespace into one 'space' token.
                next = pos;
                do {
                    next += 1;
                    code = css.charCodeAt(next);
                    if ( code === newline ) {
                        offset = next;
                        line += 1;
                    }
                } while ( code === space ||
                          code === tab ||
                          code === cr ||
                          code === feed );

                tokens.push(['space', css.slice(pos, next)]);
                pos = next - 1;   // loop's pos++ lands on the first non-space char
                break;

            case newline:
                tokens.push(['newline', '\n', line, pos - offset]);
                break;

            case closeCurly:
                tokens.push(['endInterpolant', '}', line, pos - offset]);
                break;

            case backslash:
                // Collapse a run of backslashes; `escape` tracks whether the
                // run leaves the FOLLOWING char escaped (odd count).
                next = pos;
                escape = true;
                while ( css.charCodeAt(next + 1) === backslash ) {
                    next += 1;
                    escape = !escape;
                }
                code = css.charCodeAt(next + 1);
                // Fold the escaped char into the word unless it is one of the
                // delimiters handled by their own cases above.
                if ( escape && (code !== slash &&
                                code !== space &&
                                code !== newline &&
                                code !== tab &&
                                code !== cr &&
                                code !== feed ) ) {
                    next += 1;
                }
                tokens.push(['word', css.slice(pos, next + 1),
                    line, pos - offset,
                    line, next - offset
                ]);
                pos = next;
                break;

            default:

                // End of the comment: back up one so the caller's loop re-reads
                // the '*' and emits the `endComment` token itself.
                if ( code === asterisk && css.charCodeAt(pos + 1) === slash ) {
                    next = pos;
                    pos = next - 1;
                    break loop;
                }

                // Interpolation inside a comment: recurse, then adopt the
                // child scanner's position/line/offset.
                if ( code === hash && css.charCodeAt(pos + 1) === openCurly ) {
                    tokens.push(['startInterpolant', '#{', line, pos + 1 - offset]);
                    next = pos + 1;

                    let { tokens: t, line: l, pos: p, offset: o } = tokenizeInterpolant(input, line, next + 1, offset);
                    tokens = tokens.concat(t);
                    next = p;
                    line = l;
                    offset = o;

                    pos = next;
                    break;
                }

                // Plain word: scan from pos+1 to the next delimiter.
                // lastIndex === 0 means "no match" (regex wrapped) → word runs to EOF.
                wordEnd.lastIndex = pos + 1;
                wordEnd.test(css);
                if ( wordEnd.lastIndex === 0 ) {
                    next = css.length - 1;
                } else {
                    next = wordEnd.lastIndex - 2;
                }

                tokens.push(['word', css.slice(pos, next + 1),
                    line, pos - offset,
                    line, next - offset
                ]);

                pos = next;

                break;
        }

        pos++;
    }

    return { tokens, line, pos, offset };
}
-------------------------------------------------------------------------------- /src/tokenize-interpolant.js: --------------------------------------------------------------------------------
import Input from './input';
import tokenizeString from './tokenize-string';
import tokenizeComment from './tokenize-comment';
import tokenizeInterpolant from './tokenize-interpolant';

let singleQuote = "'".charCodeAt(0),
    doubleQuote = '"'.charCodeAt(0),
    dollar = '$'.charCodeAt(0),
    hash = '#'.charCodeAt(0),
10 | backslash = '\\'.charCodeAt(0), 11 | slash = '/'.charCodeAt(0), 12 | newline = '\n'.charCodeAt(0), 13 | space = ' '.charCodeAt(0), 14 | feed = '\f'.charCodeAt(0), 15 | tab = '\t'.charCodeAt(0), 16 | cr = '\r'.charCodeAt(0), 17 | openBracket = '('.charCodeAt(0), 18 | closeBracket = ')'.charCodeAt(0), 19 | openCurly = '{'.charCodeAt(0), 20 | closeCurly = '}'.charCodeAt(0), 21 | semicolon = ';'.charCodeAt(0), 22 | asterisk = '*'.charCodeAt(0), 23 | colon = ':'.charCodeAt(0), 24 | at = '@'.charCodeAt(0), 25 | comma = ','.charCodeAt(0), 26 | plus = '+'.charCodeAt(0), 27 | minus = '-'.charCodeAt(0), 28 | decComb = '>'.charCodeAt(0), 29 | adjComb = '~'.charCodeAt(0), 30 | number = /[+-]?(\d+(\.\d+)?|\.\d+)|(e[+-]\d+)/gi, 31 | sQuoteEnd = /(.*?)[^\\](?=((#{)|'))/gm, 32 | dQuoteEnd = /(.*?)[^\\](?=((#{)|"))/gm, 33 | wordEnd = /[ \n\t\r\(\)\{\},:;@!'"\\]|\/(?=\*)|#(?={)/g, 34 | ident = /-?([a-z_]|\\[^\\])([a-z-_0-9]|\\[^\\])*/gi; 35 | 36 | export default function tokenize(input, l, p, o) { 37 | let tokens = []; 38 | let css = input.css.valueOf(); 39 | 40 | let code, next, quote, lines, last, content, escape, 41 | nextLine, nextOffset, escaped, escapePos, 42 | inInterpolant, inComment, inString; 43 | 44 | let length = css.length; 45 | let offset = o || -1; 46 | let line = l || 1; 47 | let pos = p || 0; 48 | 49 | loop: 50 | while ( pos < length ) { 51 | code = css.charCodeAt(pos); 52 | 53 | if ( code === newline ) { 54 | offset = pos; 55 | line += 1; 56 | } 57 | 58 | switch ( code ) { 59 | case space: 60 | case tab: 61 | case cr: 62 | case feed: 63 | next = pos; 64 | do { 65 | next += 1; 66 | code = css.charCodeAt(next); 67 | if ( code === newline ) { 68 | offset = next; 69 | line += 1; 70 | } 71 | } while ( code === space || 72 | code === tab || 73 | code === cr || 74 | code === feed ); 75 | 76 | tokens.push(['space', css.slice(pos, next)]); 77 | pos = next - 1; 78 | break; 79 | 80 | case newline: 81 | tokens.push(['newline', '\n', line, pos - offset]); 82 | break; 83 | 
84 | case plus: 85 | tokens.push(['+', '+', line, pos - offset]); 86 | break; 87 | 88 | case minus: 89 | tokens.push(['-', '-', line, pos - offset]); 90 | break; 91 | 92 | case decComb: 93 | tokens.push(['>', '>', line, pos - offset]); 94 | break; 95 | 96 | case adjComb: 97 | tokens.push(['~', '~', line, pos - offset]); 98 | break; 99 | 100 | case openCurly: 101 | tokens.push(['{', '{', line, pos - offset]); 102 | break; 103 | 104 | case closeCurly: 105 | tokens.push(['endInterpolant', '}', line, pos - offset]); 106 | break loop; 107 | 108 | case comma: 109 | tokens.push([',', ',', line, pos - offset]); 110 | break; 111 | 112 | case dollar: 113 | tokens.push(['$', '$', line, pos - offset]); 114 | break; 115 | 116 | case colon: 117 | tokens.push([':', ':', line, pos - offset]); 118 | break; 119 | 120 | case semicolon: 121 | tokens.push([';', ';', line, pos - offset]); 122 | break; 123 | 124 | case openBracket: 125 | tokens.push(['(', '(', line, pos - offset]); 126 | break; 127 | 128 | case closeBracket: 129 | tokens.push([')', ')', line, pos - offset]); 130 | break; 131 | 132 | case singleQuote: 133 | case doubleQuote: 134 | quote = code === singleQuote ? 
"'" : '"'; 135 | tokens.push([quote, quote, line, pos - offset]); 136 | next = pos + 1; 137 | 138 | let { tokens: t, line: l, pos: p, offset: o } = tokenizeString(input, line, next, offset, quote); 139 | tokens = tokens.concat(t); 140 | next = p; 141 | line = l; 142 | offset = o; 143 | 144 | pos = next; 145 | break; 146 | 147 | case at: 148 | tokens.push(['@', '@', line, pos - offset]); 149 | break; 150 | 151 | case backslash: 152 | next = pos; 153 | escape = true; 154 | while ( css.charCodeAt(next + 1) === backslash ) { 155 | next += 1; 156 | escape = !escape; 157 | } 158 | code = css.charCodeAt(next + 1); 159 | if ( escape && (code !== space && 160 | code !== newline && 161 | code !== tab && 162 | code !== cr && 163 | code !== feed ) ) { 164 | next += 1; 165 | } 166 | tokens.push(['word', css.slice(pos, next + 1), 167 | line, pos - offset, 168 | line, next - offset 169 | ]); 170 | pos = next; 171 | break; 172 | 173 | default: 174 | ident.lastIndex = pos; 175 | number.lastIndex = pos; 176 | wordEnd.lastIndex = pos; 177 | 178 | if ( code === slash && css.charCodeAt(pos + 1) === asterisk ) { 179 | inComment = true; 180 | tokens.push(['startComment', '/*', line, pos + 1 - offset]); 181 | next = pos + 1; 182 | 183 | let { tokens: t, line: l, pos: p, offset: o } = tokenizeComment(input, line, next + 1, offset); 184 | tokens = tokens.concat(t); 185 | next = p; 186 | line = l; 187 | offset = o; 188 | 189 | pos = next; 190 | break; 191 | } 192 | 193 | if ( code === asterisk && css.charCodeAt(pos + 1) !== slash) { 194 | tokens.push(['*', '*', line, pos - offset]); 195 | break; 196 | } 197 | 198 | if ( inComment && code === asterisk && css.charCodeAt(pos + 1) === slash ) { 199 | inComment = false; 200 | tokens.push(['endComment', '*/', line, pos + 1 - offset]); 201 | pos += 2; 202 | break; 203 | } 204 | 205 | if ( code === slash && css.charCodeAt(pos + 1) !== slash ) { 206 | tokens.push(['/', '/', line, pos - offset]); 207 | pos += 2; 208 | break; 209 | } 210 | 211 | if ( 
code === hash && css.charCodeAt(pos + 1) === openCurly ) { 212 | inInterpolant = true; 213 | tokens.push(['startInterpolant', '#{', line, pos + 1 - offset]); 214 | next = pos + 1; 215 | 216 | let { tokens: t, pos: p } = tokenizeInterpolant(input, line, next + 1); 217 | tokens = tokens.concat(t); 218 | next = p; 219 | 220 | pos = next; 221 | break; 222 | } 223 | 224 | if ( code === slash && css.charCodeAt(pos + 1) === slash ) { 225 | next = css.indexOf('\n\n', pos + 2); 226 | next = next > 0 ? next : css.length; 227 | 228 | tokens.push(['scssComment', css.slice(pos, next), 229 | line, pos - offset, 230 | line, next - offset 231 | ]); 232 | 233 | pos = next; 234 | break; 235 | } 236 | 237 | if ( ident.test(css) && ( ident.lastIndex = pos || 1 ) && ident.exec(css).index === pos ) { 238 | next = ident.lastIndex - 1; 239 | 240 | tokens.push(['ident', css.slice(pos, next + 1), 241 | line, pos - offset, 242 | line, next - offset 243 | ]); 244 | 245 | pos = next; 246 | break; 247 | } 248 | 249 | if ( number.test(css) && ( number.lastIndex = pos || 1) && number.exec(css).index === pos ) { 250 | next = number.lastIndex - 1; 251 | 252 | tokens.push(['number', css.slice(pos, next + 1), 253 | line, pos - offset, 254 | line, next - offset 255 | ]); 256 | 257 | pos = next; 258 | break; 259 | } 260 | 261 | wordEnd.lastIndex = pos + 1; 262 | wordEnd.test(css); 263 | if ( wordEnd.lastIndex === 0 ) { 264 | next = css.length - 1; 265 | } else { 266 | next = wordEnd.lastIndex - 2; 267 | } 268 | 269 | tokens.push(['word', css.slice(pos, next + 1), 270 | line, pos - offset, 271 | line, next - offset 272 | ]); 273 | 274 | pos = next; 275 | 276 | break; 277 | } 278 | 279 | pos++; 280 | } 281 | 282 | return { tokens, line, pos, offset }; 283 | } 284 | -------------------------------------------------------------------------------- /src/tokenize.js: -------------------------------------------------------------------------------- 1 | import Input from './input'; 2 | import tokenizeString from 
'./tokenize-string';
import tokenizeComment from './tokenize-comment';
import tokenizeInterpolant from './tokenize-interpolant';

// Char codes and scanner regexes shared by the main loop.
// NOTE: the regexes are /g (stateful lastIndex) and are reset before each use.
let singleQuote = "'".charCodeAt(0),
    doubleQuote = '"'.charCodeAt(0),
    dollar = '$'.charCodeAt(0),
    hash = '#'.charCodeAt(0),
    backslash = '\\'.charCodeAt(0),
    slash = '/'.charCodeAt(0),
    newline = '\n'.charCodeAt(0),
    space = ' '.charCodeAt(0),
    feed = '\f'.charCodeAt(0),
    tab = '\t'.charCodeAt(0),
    cr = '\r'.charCodeAt(0),
    openBracket = '('.charCodeAt(0),
    closeBracket = ')'.charCodeAt(0),
    openCurly = '{'.charCodeAt(0),
    closeCurly = '}'.charCodeAt(0),
    semicolon = ';'.charCodeAt(0),
    asterisk = '*'.charCodeAt(0),
    colon = ':'.charCodeAt(0),
    at = '@'.charCodeAt(0),
    comma = ','.charCodeAt(0),
    plus = '+'.charCodeAt(0),
    minus = '-'.charCodeAt(0),
    decComb = '>'.charCodeAt(0),
    adjComb = '~'.charCodeAt(0),
    number = /[+-]?(\d+(\.\d+)?|\.\d+)|(e[+-]\d+)/gi,
    sQuoteEnd = /(.*?)[^\\](?=((#{)|'))/gm,
    dQuoteEnd = /(.*?)[^\\](?=((#{)|"))/gm,
    wordEnd = /[ \n\t\r\(\)\{\},:;@!'"\\]|\/(?=\*)|#(?={)/g,
    ident = /-?([a-z_]|\\[^\\])([a-z-_0-9]|\\[^\\])*/gi;

/**
 * Top-level SCSS tokenizer. Scans `input.css` and returns a flat array of
 * tokens of the form [type, value] or [type, value, line, col] or
 * [type, value, startLine, startCol, endLine, endCol]. Columns are computed
 * as `pos - offset`, where `offset` is the index of the last newline
 * (initially -1 so index 0 is column 1). Strings, block-comment bodies and
 * `#{}` interpolants are delegated to the sibling tokenizers, which return
 * the position/line/offset this loop resumes from.
 *
 * @param {Input} input - wrapper whose `.css` holds the source text.
 * @param {number} [l] - starting 1-based line (defaults to 1).
 * @param {number} [p] - starting index in `css` (defaults to 0).
 * @returns {Array} the token list.
 */
export default function tokenize(input, l, p) {
    let tokens = [];
    let css = input.css.valueOf();

    let code, next, quote, lines, last, content, escape,
        nextLine, nextOffset, escaped, escapePos,
        inInterpolant, inComment, inString;

    let length = css.length;
    let offset = -1;
    let line = l || 1;
    let pos = p || 0;

    while ( pos < length ) {
        code = css.charCodeAt(pos);

        if ( code === newline ) {
            offset = pos;
            line += 1;
        }

        switch ( code ) {
            case space:
            case tab:
            case cr:
            case feed:
                // Coalesce a run of non-newline whitespace into one token
                // (no position info — matches the snapshot format).
                next = pos;
                do {
                    next += 1;
                    code = css.charCodeAt(next);
                    if ( code === newline ) {
                        offset = next;
                        line += 1;
                    }
                } while ( code === space ||
                          code === tab ||
                          code === cr ||
                          code === feed );

                tokens.push(['space', css.slice(pos, next)]);
                pos = next - 1;
                break;

            case newline:
                tokens.push(['newline', '\n', line, pos - offset]);
                break;

            case plus:
                tokens.push(['+', '+', line, pos - offset]);
                break;

            case minus:
                tokens.push(['-', '-', line, pos - offset]);
                break;

            case decComb:
                tokens.push(['>', '>', line, pos - offset]);
                break;

            case adjComb:
                tokens.push(['~', '~', line, pos - offset]);
                break;

            case openCurly:
                tokens.push(['{', '{', line, pos - offset]);
                break;

            case closeCurly:
                // A '}' closes an interpolant only if one is open; otherwise
                // it is an ordinary block close.
                if (inInterpolant) {
                    inInterpolant = false;
                    tokens.push(['endInterpolant', '}', line, pos - offset]);
                } else {
                    tokens.push(['}', '}', line, pos - offset]);
                }
                break;

            case comma:
                tokens.push([',', ',', line, pos - offset]);
                break;

            case dollar:
                tokens.push(['$', '$', line, pos - offset]);
                break;

            case colon:
                tokens.push([':', ':', line, pos - offset]);
                break;

            case semicolon:
                tokens.push([';', ';', line, pos - offset]);
                break;

            case openBracket:
                tokens.push(['(', '(', line, pos - offset]);
                break;

            case closeBracket:
                tokens.push([')', ')', line, pos - offset]);
                break;

            case singleQuote:
            case doubleQuote:
                // Emit the opening quote, then delegate the body (and closing
                // quote) to the string tokenizer; adopt its scanner state.
                quote = code === singleQuote ? "'" : '"';
                tokens.push([quote, quote, line, pos - offset]);
                next = pos + 1;

                let { tokens: t, line: l, pos: p, offset: o } = tokenizeString(input, line, next, offset, quote);
                tokens = tokens.concat(t);
                next = p;
                line = l;
                offset = o;

                pos = next;
                break;

            case at:
                tokens.push(['@', '@', line, pos - offset]);
                break;

            case backslash:
                // Collapse a backslash run; odd count escapes the next char,
                // which is then folded into the word (unless whitespace).
                next = pos;
                escape = true;
                while ( css.charCodeAt(next + 1) === backslash ) {
                    next += 1;
                    escape = !escape;
                }
                code = css.charCodeAt(next + 1);
                if ( escape && (code !== space &&
                                code !== newline &&
                                code !== tab &&
                                code !== cr &&
                                code !== feed ) ) {
                    next += 1;
                }
                tokens.push(['word', css.slice(pos, next + 1),
                    line, pos - offset,
                    line, next - offset
                ]);
                pos = next;
                break;

            default:
                ident.lastIndex = pos;
                number.lastIndex = pos;
                wordEnd.lastIndex = pos;

                // Block comment opener: the comment tokenizer stops just
                // before '*\/'; the endComment branch below then consumes it.
                if ( code === slash && css.charCodeAt(pos + 1) === asterisk ) {
                    inComment = true;
                    tokens.push(['startComment', '/*', line, pos + 1 - offset]);
                    next = pos + 1;

                    let { tokens: t, line: l, pos: p, offset: o } = tokenizeComment(input, line, next + 1, offset);
                    tokens = tokens.concat(t);
                    next = p;
                    line = l;
                    offset = o;

                    pos = next;
                    break;
                }

                if ( code === asterisk && css.charCodeAt(pos + 1) !== slash) {
                    tokens.push(['*', '*', line, pos - offset]);
                    break;
                }

                if ( inComment && code === asterisk && css.charCodeAt(pos + 1) === slash ) {
                    inComment = false;
                    tokens.push(['endComment', '*/', line, pos + 1 - offset]);
                    // NOTE(review): `pos += 2` here plus the loop's `pos++`
                    // advances 3 from the '*', which appears to skip the one
                    // character immediately after '*\/' (e.g. a trailing
                    // newline) — confirm against the comment snapshots before
                    // changing.
                    pos += 2;
                    break;
                }

                if ( code === slash && css.charCodeAt(pos + 1) !== slash ) {
                    tokens.push(['/', '/', line, pos - offset]);
                    break;
                }

                // `#{` opener: delegate the interpolant body; its closing '}'
                // is emitted by the callee as endInterpolant.
                if ( code === hash && css.charCodeAt(pos + 1) === openCurly ) {
                    inInterpolant = true;
                    tokens.push(['startInterpolant', '#{', line, pos + 1 - offset]);
                    next = pos + 1;

                    let { tokens: t, line: l, pos: p, offset: o } = tokenizeInterpolant(input, line, next + 1, offset);
                    tokens = tokens.concat(t);
                    next = p;
                    line = l;
                    offset = o;

                    pos = next;
                    break;
                }

                // `//` silent comment: runs to the next newline (exclusive),
                // or to end of input when no newline follows.
                if ( code === slash && css.charCodeAt(pos + 1) === slash ) {
                    next = css.indexOf('\n', pos + 2);
                    next = (next > 0 ? next : css.length) - 1;

                    tokens.push(['scssComment', css.slice(pos, next + 1),
                        line, pos - offset,
                        line, next - offset
                    ]);

                    pos = next;
                    break;
                }

                // NOTE(review): `pos || 1` keeps the && chain truthy when
                // pos > 0, but at pos === 0 it sets lastIndex to 1, so an
                // ident starting at index 0 falls through to the 'word'
                // branch below — presumably masked by fixtures never starting
                // with a bare ident; verify before relying on 'ident' tokens
                // at file start.
                if ( ident.test(css) && ( ident.lastIndex = pos || 1 ) && ident.exec(css).index === pos ) {
                    next = ident.lastIndex - 1;

                    tokens.push(['ident', css.slice(pos, next + 1),
                        line, pos - offset,
                        line, next - offset
                    ]);

                    pos = next;
                    break;
                }

                if ( number.test(css) && ( number.lastIndex = pos || 1 ) && number.exec(css).index === pos ) {
                    next = number.lastIndex - 1;

                    tokens.push(['number', css.slice(pos, next + 1),
                        line, pos - offset,
                        line, next - offset
                    ]);

                    pos = next;
                    break;
                }

                // Fallback: bare word up to the next delimiter; lastIndex === 0
                // means no delimiter found → word runs to end of input.
                wordEnd.lastIndex = pos + 1;
                wordEnd.test(css);
                if ( wordEnd.lastIndex === 0 ) {
                    next = css.length - 1;
                } else {
                    next = wordEnd.lastIndex - 2;
                }

                tokens.push(['word', css.slice(pos, next + 1),
                    line, pos - offset,
                    line, next - offset
                ]);

                pos = next;

                break;
        }

        pos++;
    }

    return tokens;
}
--------------------------------------------------------------------------------