// Acorn is a tiny, fast JavaScript parser written in JavaScript.
//
// Acorn was written by Marijn Haverbeke and various contributors and
// released under an MIT license. The Unicode regexps (for identifiers
// and whitespace) were taken from [Esprima](http://esprima.org) by
// Ariya Hidayat.
//
// Git repositories for Acorn are available at
//
//     http://marijnhaverbeke.nl/git/acorn
//     https://github.com/marijnh/acorn.git
//
// Please use the [github bug tracker][ghbt] to report issues.
//
// [ghbt]: https://github.com/marijnh/acorn/issues
//
// This file defines the main parser interface. The library also comes
// with an [error-tolerant parser][dammit] and an
// [abstract syntax tree walker][walk], defined in other files.
//
// [dammit]: acorn_loose.js
// [walk]: util/walk.js

(function(root, mod) {
  if (typeof exports == "object" && typeof module == "object") return mod(exports); // CommonJS
  if (typeof define == "function" && define.amd) return define(["exports"], mod); // AMD
  mod(root.acorn || (root.acorn = {})); // Plain browser env
})(this, function(exports) {
  "use strict";

  exports.version = "0.12.1";

  // The main exported interface (under `self.acorn` when in the
  // browser) is a `parse` function that takes a code string and
  // returns an abstract syntax tree as specified by the [Mozilla
  // parser API][api], with the caveat that inline XML is not
  // recognized.
  //
  // [api]: https://developer.mozilla.org/en-US/docs/SpiderMonkey/Parser_API

  var options, input, inputLen, sourceFile;

  exports.parse = function(inpt, opts) {
    input = String(inpt); inputLen = input.length;
    setOptions(opts);
    initTokenState();
    var startPos = options.locations ? [tokPos, curPosition()] : tokPos;
    initParserState();
    return parseTopLevel(options.program || startNodeAt(startPos));
  };

  // A second optional argument can be given to further configure
  // the parser process. These options are recognized:

  var defaultOptions = exports.defaultOptions = {
    // `ecmaVersion` indicates the ECMAScript version to parse. Must
    // be either 3, 5, or 6. This influences support for strict
    // mode, the set of reserved words, support for getters and
    // setters, and other features.
    ecmaVersion: 5,
    // Turn on `strictSemicolons` to prevent the parser from doing
    // automatic semicolon insertion.
    strictSemicolons: false,
    // When `allowTrailingCommas` is false, the parser will not allow
    // trailing commas in array and object literals.
    allowTrailingCommas: true,
    // By default, reserved words are not enforced. Enable
    // `forbidReserved` to enforce them. When this option has the
    // value "everywhere", reserved words and keywords can also not be
    // used as property names.
    forbidReserved: false,
    // When enabled, a return at the top level is not considered an
    // error.
    allowReturnOutsideFunction: false,
    // When enabled, import/export statements are not constrained to
    // appearing at the top of the program.
    allowImportExportEverywhere: false,
    // When enabled, a hashbang directive at the beginning of the file
    // is allowed and treated as a line comment.
    allowHashBang: false,
    // When `locations` is on, `loc` properties holding objects with
    // `start` and `end` properties in `{line, column}` form (with
    // line being 1-based and column 0-based) will be attached to the
    // nodes.
    locations: false,
    // A function can be passed as the `onToken` option, which will
    // cause Acorn to call that function with an object in the same
    // format as `tokenize()` returns. Note that you are not
    // allowed to call the parser from the callback—that will
    // corrupt its internal state.
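    // For example, a caller could collect every token like this (a
    // hypothetical usage sketch, not part of the library; `code` is
    // assumed to hold the source text):
    //
    //     var tokens = [];
    //     acorn.parse(code, {onToken: function(t) { tokens.push(t); }});
    //
    // Passing an array as `onToken` has the same effect; `setOptions`
    // below wraps such an array in a callback that pushes onto it.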
    onToken: null,
    // A function can be passed as the `onComment` option, which will
    // cause Acorn to call that function with `(block, text, start,
    // end)` parameters whenever a comment is skipped. `block` is a
    // boolean indicating whether this is a block (`/* */`) comment,
    // `text` is the content of the comment, and `start` and `end` are
    // character offsets that denote the start and end of the comment.
    // When the `locations` option is on, two more parameters are
    // passed, the full `{line, column}` locations of the start and
    // end of the comment. Note that you are not allowed to call the
    // parser from the callback—that will corrupt its internal state.
    onComment: null,
    // Nodes have their start and end character offsets recorded in
    // `start` and `end` properties (directly on the node, rather than
    // the `loc` object, which holds line/column data). To also add a
    // [semi-standardized][range] `range` property holding a `[start,
    // end]` array with the same numbers, set the `ranges` option to
    // `true`.
    //
    // [range]: https://bugzilla.mozilla.org/show_bug.cgi?id=745678
    ranges: false,
    // It is possible to parse multiple files into a single AST by
    // passing the tree produced by parsing the first file as the
    // `program` option in subsequent parses. This will add the
    // toplevel forms of the parsed file to the `Program` (top) node
    // of an existing parse tree.
    program: null,
    // When `locations` is on, you can pass this to record the source
    // file in every node's `loc` object.
    sourceFile: null,
    // This value, if given, is stored in every node, whether
    // `locations` is on or off.
    directSourceFile: null,
    // When enabled, parenthesized expressions are represented by
    // (non-standard) `ParenthesizedExpression` nodes.
    preserveParens: false
  };

  // This function tries to parse a single expression at a given
  // offset in a string. Useful for parsing mixed-language formats
  // that embed JavaScript expressions.

  exports.parseExpressionAt = function(inpt, pos, opts) {
    input = String(inpt); inputLen = input.length;
    setOptions(opts);
    initTokenState(pos);
    initParserState();
    return parseExpression();
  };

  var isArray = function (obj) {
    return Object.prototype.toString.call(obj) === "[object Array]";
  };

  function setOptions(opts) {
    options = {};
    for (var opt in defaultOptions)
      options[opt] = opts && has(opts, opt) ? opts[opt] : defaultOptions[opt];
    sourceFile = options.sourceFile || null;
    if (isArray(options.onToken)) {
      var tokens = options.onToken;
      options.onToken = function (token) {
        tokens.push(token);
      };
    }
    if (isArray(options.onComment)) {
      var comments = options.onComment;
      options.onComment = function (block, text, start, end, startLoc, endLoc) {
        var comment = {
          type: block ? 'Block' : 'Line',
          value: text,
          start: start,
          end: end
        };
        if (options.locations) {
          comment.loc = new SourceLocation();
          comment.loc.start = startLoc;
          comment.loc.end = endLoc;
        }
        if (options.ranges)
          comment.range = [start, end];
        comments.push(comment);
      };
    }
    isKeyword = options.ecmaVersion >= 6 ? isEcma6Keyword : isEcma5AndLessKeyword;
  }

  // The `getLineInfo` function is mostly useful when the
  // `locations` option is off (for performance reasons) and you
  // want to find the line/column position for a given character
  // offset. `input` should be the code string that the offset points
  // into.
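  // For example (a hypothetical usage sketch, not part of the library):
  //
  //     var code = "var a;\nvar b$;";
  //     acorn.getLineInfo(code, code.indexOf("$"));
  //     // returns {line: 2, column: 5}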
var getLineInfo = exports.getLineInfo = function(input, offset) { for (var line = 1, cur = 0;;) { lineBreak.lastIndex = cur; var match = lineBreak.exec(input); if (match && match.index < offset) { ++line; cur = match.index + match[0].length; } else break; } return {line: line, column: offset - cur}; }; function Token() { this.type = tokType; this.value = tokVal; this.start = tokStart; this.end = tokEnd; if (options.locations) { this.loc = new SourceLocation(); this.loc.end = tokEndLoc; } if (options.ranges) this.range = [tokStart, tokEnd]; } exports.Token = Token; // Acorn is organized as a tokenizer and a recursive-descent parser. // The `tokenize` export provides an interface to the tokenizer. // Because the tokenizer is optimized for being efficiently used by // the Acorn parser itself, this interface is somewhat crude and not // very modular. Performing another parse or call to `tokenize` will // reset the internal state, and invalidate existing tokenizers. exports.tokenize = function(inpt, opts) { input = String(inpt); inputLen = input.length; setOptions(opts); initTokenState(); skipSpace(); function getToken() { lastEnd = tokEnd; readToken(); return new Token(); } getToken.jumpTo = function(pos, exprAllowed) { tokPos = pos; if (options.locations) { tokCurLine = 1; tokLineStart = lineBreak.lastIndex = 0; var match; while ((match = lineBreak.exec(input)) && match.index < pos) { ++tokCurLine; tokLineStart = match.index + match[0].length; } } tokExprAllowed = !!exprAllowed; skipSpace(); }; getToken.current = function() { return new Token(); }; if (typeof Symbol !== 'undefined') { getToken[Symbol.iterator] = function () { return { next: function () { var token = getToken(); return { done: token.type === _eof, value: token }; } }; }; } getToken.options = options; return getToken; }; // State is kept in (closure-)global variables. We already saw the // `options`, `input`, and `inputLen` variables above. // The current position of the tokenizer in the input. var tokPos; // The start and end offsets of the current token. var tokStart, tokEnd; // When `options.locations` is true, these hold objects // containing the tokens start and end line/column pairs. var tokStartLoc, tokEndLoc; // The type and value of the current token. Token types are objects, // named by variables against which they can be compared, and // holding properties that describe them (indicating, for example, // the precedence of an infix operator, and the original name of a // keyword token). The kind of value that's held in `tokVal` depends // on the type of the token. For literals, it is the literal value, // for operators, the operator name, and so on. var tokType, tokVal; // Internal state for the tokenizer. To distinguish between division // operators and regular expressions, it remembers whether the last // token was one that is allowed to be followed by an expression. In // some cases, notably after ')' or '}' tokens, the situation // depends on the context before the matching opening bracket, so // tokContext keeps a stack of information about current bracketed // forms. var tokContext, tokExprAllowed; // When `options.locations` is true, these are used to keep // track of the current line, and know when a new line has been // entered. var tokCurLine, tokLineStart; // These store the position of the previous token, which is useful // when finishing a node and assigning its `end` position. var lastStart, lastEnd, lastEndLoc; // This is the parser's state. 
`inFunction` is used to reject // `return` statements outside of functions, `inGenerator` to // reject `yield`s outside of generators, `labels` to verify // that `break` and `continue` have somewhere to jump to, and // `strict` indicates whether strict mode is on. var inFunction, inGenerator, labels, strict; function initParserState() { lastStart = lastEnd = tokPos; if (options.locations) lastEndLoc = curPosition(); inFunction = inGenerator = false; labels = []; skipSpace(); readToken(); } // This function is used to raise exceptions on parse errors. It // takes an offset integer (into the current `input`) to indicate // the location of the error, attaches the position to the end // of the error message, and then raises a `SyntaxError` with that // message. function raise(pos, message) { var loc = getLineInfo(input, pos); message += " (" + loc.line + ":" + loc.column + ")"; var err = new SyntaxError(message); err.pos = pos; err.loc = loc; err.raisedAt = tokPos; throw err; } function fullCharCodeAtPos() { var code = input.charCodeAt(tokPos); if (code <= 0xd7ff || code >= 0xe000) return code; var next = input.charCodeAt(tokPos + 1); return (code << 10) + next - 0x35fdc00; } function skipChar(code) { if (code <= 0xffff) tokPos++; else tokPos += 2; } // Reused empty array added for node fields that are always empty. var empty = []; // ## Token types // The assignment of fine-grained, information-carrying type objects // allows the tokenizer to store the information it has about a // token in a way that is very cheap for the parser to look up. // All token type variables start with an underscore, to make them // easy to recognize. // These are the general types. The `type` property is only used to // make them recognizeable when debugging. var _num = {type: "num"}, _regexp = {type: "regexp"}, _string = {type: "string"}; var _name = {type: "name"}, _eof = {type: "eof"}; // Keyword tokens. The `keyword` property (also used in keyword-like // operators) indicates that the token originated from an // identifier-like word, which is used when parsing property names. // // The `beforeExpr` property is used to disambiguate between regular // expressions and divisions. It is set on all token types that can // be followed by an expression (thus, a slash after them would be a // regular expression). // // `isLoop` marks a keyword as starting a loop, which is important // to know when parsing a label, in order to allow or disallow // continue jumps to that label. var _break = {keyword: "break"}, _case = {keyword: "case", beforeExpr: true}, _catch = {keyword: "catch"}; var _continue = {keyword: "continue"}, _debugger = {keyword: "debugger"}, _default = {keyword: "default"}; var _do = {keyword: "do", isLoop: true}, _else = {keyword: "else", beforeExpr: true}; var _finally = {keyword: "finally"}, _for = {keyword: "for", isLoop: true}, _function = {keyword: "function"}; var _if = {keyword: "if"}, _return = {keyword: "return", beforeExpr: true}, _switch = {keyword: "switch"}; var _throw = {keyword: "throw", beforeExpr: true}, _try = {keyword: "try"}, _var = {keyword: "var"}; var _let = {keyword: "let"}, _const = {keyword: "const"}; var _while = {keyword: "while", isLoop: true}, _with = {keyword: "with"}, _new = {keyword: "new", beforeExpr: true}; var _this = {keyword: "this"}; var _class = {keyword: "class"}, _extends = {keyword: "extends", beforeExpr: true}; var _export = {keyword: "export"}, _import = {keyword: "import"}; var _yield = {keyword: "yield", beforeExpr: true}; // The keywords that denote values. 
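  // (For instance, the parser turns a `null` token into a `Literal`
  // node whose `value` comes from `atomValue`; as a hypothetical
  // check, not part of the library:
  // `acorn.parse("null;").body[0].expression.value === null`.)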
var _null = {keyword: "null", atomValue: null}, _true = {keyword: "true", atomValue: true}; var _false = {keyword: "false", atomValue: false}; // Some keywords are treated as regular operators. `in` sometimes // (when parsing `for`) needs to be tested against specifically, so // we assign a variable name to it for quick comparing. var _in = {keyword: "in", binop: 7, beforeExpr: true}; // Map keyword names to token types. var keywordTypes = {"break": _break, "case": _case, "catch": _catch, "continue": _continue, "debugger": _debugger, "default": _default, "do": _do, "else": _else, "finally": _finally, "for": _for, "function": _function, "if": _if, "return": _return, "switch": _switch, "throw": _throw, "try": _try, "var": _var, "let": _let, "const": _const, "while": _while, "with": _with, "null": _null, "true": _true, "false": _false, "new": _new, "in": _in, "instanceof": {keyword: "instanceof", binop: 7, beforeExpr: true}, "this": _this, "typeof": {keyword: "typeof", prefix: true, beforeExpr: true}, "void": {keyword: "void", prefix: true, beforeExpr: true}, "delete": {keyword: "delete", prefix: true, beforeExpr: true}, "class": _class, "extends": _extends, "export": _export, "import": _import, "yield": _yield}; // Punctuation token types. Again, the `type` property is purely for debugging. var _bracketL = {type: "[", beforeExpr: true}, _bracketR = {type: "]"}, _braceL = {type: "{", beforeExpr: true}; var _braceR = {type: "}"}, _parenL = {type: "(", beforeExpr: true}, _parenR = {type: ")"}; var _comma = {type: ",", beforeExpr: true}, _semi = {type: ";", beforeExpr: true}; var _colon = {type: ":", beforeExpr: true}, _dot = {type: "."}, _question = {type: "?", beforeExpr: true}; var _arrow = {type: "=>", beforeExpr: true}, _template = {type: "template"}; var _ellipsis = {type: "...", beforeExpr: true}; var _backQuote = {type: "`"}, _dollarBraceL = {type: "${", beforeExpr: true}; // Operators. These carry several kinds of properties to help the // parser use them properly (the presence of these properties is // what categorizes them as operators). // // `binop`, when present, specifies that this operator is a binary // operator, and will refer to its precedence. // // `prefix` and `postfix` mark the operator as a prefix or postfix // unary operator. `isUpdate` specifies that the node produced by // the operator should be of type UpdateExpression rather than // simply UnaryExpression (`++` and `--`). // // `isAssign` marks all of `=`, `+=`, `-=` etcetera, which act as // binary operators with a very low precedence, that should result // in AssignmentExpression nodes. var _slash = {binop: 10, beforeExpr: true}, _eq = {isAssign: true, beforeExpr: true}; var _assign = {isAssign: true, beforeExpr: true}; var _incDec = {postfix: true, prefix: true, isUpdate: true}, _prefix = {prefix: true, beforeExpr: true}; var _logicalOR = {binop: 1, beforeExpr: true}; var _logicalAND = {binop: 2, beforeExpr: true}; var _bitwiseOR = {binop: 3, beforeExpr: true}; var _bitwiseXOR = {binop: 4, beforeExpr: true}; var _bitwiseAND = {binop: 5, beforeExpr: true}; var _equality = {binop: 6, beforeExpr: true}; var _relational = {binop: 7, beforeExpr: true}; var _bitShift = {binop: 8, beforeExpr: true}; var _plusMin = {binop: 9, prefix: true, beforeExpr: true}; var _modulo = {binop: 10, beforeExpr: true}; // '*' may be multiply or have special meaning in ES6 var _star = {binop: 10, beforeExpr: true}; // Provide access to the token types for external users of the // tokenizer. 
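  // For example, an external consumer might drive the tokenizer and
  // compare against these types like this (a hypothetical sketch, not
  // part of the library):
  //
  //     var next = acorn.tokenize("return /x/");
  //     next().type === acorn.tokTypes._return // keyword token
  //     next().type === acorn.tokTypes.regexp  // `return` is beforeExpr, so
  //                                            // `/x/` reads as a regexp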
exports.tokTypes = {bracketL: _bracketL, bracketR: _bracketR, braceL: _braceL, braceR: _braceR, parenL: _parenL, parenR: _parenR, comma: _comma, semi: _semi, colon: _colon, dot: _dot, ellipsis: _ellipsis, question: _question, slash: _slash, eq: _eq, name: _name, eof: _eof, num: _num, regexp: _regexp, string: _string, arrow: _arrow, template: _template, star: _star, assign: _assign, backQuote: _backQuote, dollarBraceL: _dollarBraceL}; for (var kw in keywordTypes) exports.tokTypes["_" + kw] = keywordTypes[kw]; // This is a trick taken from Esprima. It turns out that, on // non-Chrome browsers, to check whether a string is in a set, a // predicate containing a big ugly `switch` statement is faster than // a regular expression, and on Chrome the two are about on par. // This function uses `eval` (non-lexical) to produce such a // predicate from a space-separated string of words. // // It starts by sorting the words by length. function makePredicate(words) { words = words.split(" "); var f = "", cats = []; out: for (var i = 0; i < words.length; ++i) { for (var j = 0; j < cats.length; ++j) if (cats[j][0].length == words[i].length) { cats[j].push(words[i]); continue out; } cats.push([words[i]]); } function compareTo(arr) { if (arr.length == 1) return f += "return str === " + JSON.stringify(arr[0]) + ";"; f += "switch(str){"; for (var i = 0; i < arr.length; ++i) f += "case " + JSON.stringify(arr[i]) + ":"; f += "return true}return false;"; } // When there are more than three length categories, an outer // switch first dispatches on the lengths, to save on comparisons. if (cats.length > 3) { cats.sort(function(a, b) {return b.length - a.length;}); f += "switch(str.length){"; for (var i = 0; i < cats.length; ++i) { var cat = cats[i]; f += "case " + cat[0].length + ":"; compareTo(cat); } f += "}"; // Otherwise, simply generate a flat `switch` statement. } else { compareTo(words); } return new Function("str", f); } // The ECMAScript 3 reserved word list. var isReservedWord3 = makePredicate("abstract boolean byte char class double enum export extends final float goto implements import int interface long native package private protected public short static super synchronized throws transient volatile"); // ECMAScript 5 reserved words. var isReservedWord5 = makePredicate("class enum extends super const export import"); // The additional reserved words in strict mode. var isStrictReservedWord = makePredicate("implements interface let package private protected public static yield"); // The forbidden variable names in strict mode. var isStrictBadIdWord = makePredicate("eval arguments"); // And the keywords. var ecma5AndLessKeywords = "break case catch continue debugger default do else finally for function if return switch throw try var while with null true false instanceof typeof void delete new in this"; var isEcma5AndLessKeyword = makePredicate(ecma5AndLessKeywords); var isEcma6Keyword = makePredicate(ecma5AndLessKeywords + " let const class extends export import yield"); var isKeyword = isEcma5AndLessKeyword; // ## Character categories // Big ugly regular expressions that match characters in the // whitespace, identifier, and identifier-start categories. These // are only applied when a character is found to actually have a // code point above 128. // Generated by `tools/generate-identifier-regex.js`. 
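  // A hypothetical illustration (not part of the library) of how these
  // categories are used by `isIdentifierStart`, defined further down:
  // ASCII characters are handled by plain range checks, and only code
  // points above 128 fall through to the regexps.
  //
  //     acorn.isIdentifierStart("π".charCodeAt(0)) // true (U+03C0)
  //     acorn.isIdentifierStart("·".charCodeAt(0)) // false (U+00B7 is valid
  //                                                // only as a later character)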
var nonASCIIwhitespace = /[\u1680\u180e\u2000-\u200a\u202f\u205f\u3000\ufeff]/; var nonASCIIidentifierStartChars = "\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0-\u08b2\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309b-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua7ad\ua7b0\ua7b1\ua7f7-\ua801\ua803-\ua
805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab5f\uab64\uab65\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc"; var nonASCIIidentifierChars = "\u200c\u200d\xb7\u0300-\u036f\u0387\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u064b-\u0669\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7\u06e8\u06ea-\u06ed\u06f0-\u06f9\u0711\u0730-\u074a\u07a6-\u07b0\u07c0-\u07c9\u07eb-\u07f3\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08e4-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09cb-\u09cd\u09d7\u09e2\u09e3\u09e6-\u09ef\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b62\u0b63\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c00-\u0c03\u0c3e-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62\u0c63\u0c66-\u0c6f\u0c81-\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2\u0ce3\u0ce6-\u0cef\u0d01-\u0d03\u0d3e-\u0d44\u0d46-\u0d48\u0d4a-\u0d4d\u0d57\u0d62\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2\u0df3\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0e50-\u0e59\u0eb1\u0eb4-\u0eb9\u0ebb\u0ebc\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e\u0f3f\u0f71-\u0f84\u0f86\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102b-\u103e\u1040-\u1049\u1056-\u1059\u105e-\u1060\u1062-\u1064\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u1369-\u1371\u1712-\u1714\u1732-\u1734\u1752\u1753\u1772\u1773\u17b4-\u17d3\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u18a9\u1920-\u192b\u1930-\u193b\u1946-\u194f\u19b0-\u19c0\u19c8\u19c9\u19d0-\u19da\u1a17-\u1a1b\u1a55-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1ab0-\u1abd\u1b00-\u1b04\u1b34-\u1b44\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1b82\u1ba1-\u1bad\u1bb0-\u1bb9\u1be6-\u1bf3\u1c24-\u1c37\u1c40-\u1c49\u1c50-\u1c59\u1cd0-\u1cd2\u1cd4-\u1ce8\u1ced\u1cf2-\u1cf4\u1cf8\u1cf9\u1dc0-\u1df5\u1dfc-\u1dff\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302f\u3099\u309a\ua620-\ua629\ua66f\ua674-\ua67d\ua69f\ua6f0\ua6f1\ua802\ua806\ua80b\ua823-\ua827\ua880\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8e0-\ua8f1\ua900-\ua909\ua926-\ua92d\ua947-\ua953\ua980-\ua983\ua9b3-\ua9c0\ua9d0-\ua9d9\ua9e5\ua9f0-\ua9f9\uaa29-\uaa36\uaa43\uaa4c\uaa4d\uaa50-\uaa59\uaa7b-\uaa7d\uaab0\uaab2-\uaab4\uaab7\uaab8\uaabe\uaabf\uaac1\uaaeb-\uaaef\uaaf5\uaaf6\uabe3-\uabea\uabec\uabed\uabf0-\uabf9\ufb1e\ufe00-\ufe0f\ufe20-\ufe2d\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f"; var nonASCIIidentifierStart = new RegExp("[" + nonASCIIidentifierStartChars + "]"); var nonASCIIidentifier = new RegExp("[" + 
nonASCIIidentifierStartChars + nonASCIIidentifierChars + "]"); nonASCIIidentifierStartChars = nonASCIIidentifierChars = null; // These are a run-length and offset encoded representation of the // >0xffff code points that are a valid part of identifiers. The // offset starts at 0x10000, and each pair of numbers represents an // offset to the next range, and then a size of the range. They were // generated by tools/generate-identifier-regex.js var astralIdentifierStartCodes = [0,11,2,25,2,18,2,1,2,14,3,13,35,122,70,52,268,28,4,48,48,31,17,26,6,37,11,29,3,35,5,7,2,4,43,157,99,39,9,51,157,310,10,21,11,7,153,5,3,0,2,43,2,1,4,0,3,22,11,22,10,30,98,21,11,25,71,55,7,1,65,0,16,3,2,2,2,26,45,28,4,28,36,7,2,27,28,53,11,21,11,18,14,17,111,72,955,52,76,44,33,24,27,35,42,34,4,0,13,47,15,3,22,0,38,17,2,24,133,46,39,7,3,1,3,21,2,6,2,1,2,4,4,0,32,4,287,47,21,1,2,0,185,46,82,47,21,0,60,42,502,63,32,0,449,56,1288,920,104,110,2962,1070,13266,568,8,30,114,29,19,47,17,3,32,20,6,18,881,68,12,0,67,12,16481,1,3071,106,6,12,4,8,8,9,5991,84,2,70,2,1,3,0,3,1,3,3,2,11,2,0,2,6,2,64,2,3,3,7,2,6,2,27,2,3,2,4,2,0,4,6,2,339,3,24,2,24,2,30,2,24,2,30,2,24,2,30,2,24,2,30,2,24,2,7,4149,196,1340,3,2,26,2,1,2,0,3,0,2,9,2,3,2,0,2,0,7,0,5,0,2,0,2,0,2,2,2,1,2,0,3,0,2,0,2,0,2,0,2,0,2,1,2,0,3,3,2,6,2,3,2,3,2,0,2,9,2,16,6,2,2,4,2,16,4421,42710,42,4148,12,221,16355,541]; var astralIdentifierCodes = [509,0,227,0,150,4,294,9,1368,2,2,1,6,3,41,2,5,0,166,1,1306,2,54,14,32,9,16,3,46,10,54,9,7,2,37,13,2,9,52,0,13,2,49,13,16,9,83,11,168,11,6,9,8,2,57,0,2,6,3,1,3,2,10,0,11,1,3,6,4,4,316,19,13,9,214,6,3,8,112,16,16,9,82,12,9,9,535,9,20855,9,135,4,60,6,26,9,1016,45,17,3,19723,1,5319,4,4,5,9,7,3,6,31,3,149,2,1418,49,4305,6,792618,239]; // This has a complexity linear to the value of the code. The // assumption is that looking up astral identifier characters is // rare. function isInAstralSet(code, set) { var pos = 0x10000; for (var i = 0; i < set.length; i += 2) { pos += set[i]; if (pos > code) return false; pos += set[i + 1]; if (pos >= code) return true; } } // Whether a single character denotes a newline. var newline = /[\n\r\u2028\u2029]/; function isNewLine(code) { return code === 10 || code === 13 || code === 0x2028 || code == 0x2029; } // Matches a whole line break (where CRLF is considered a single // line break). Used to count lines. var lineBreak = /\r\n|[\n\r\u2028\u2029]/g; // Test whether a given character code starts an identifier. var isIdentifierStart = exports.isIdentifierStart = function(code, astral) { if (code < 65) return code === 36; if (code < 91) return true; if (code < 97) return code === 95; if (code < 123) return true; if (code <= 0xffff) return code >= 0xaa && nonASCIIidentifierStart.test(String.fromCharCode(code)); if (astral === false) return false; return isInAstralSet(code, astralIdentifierStartCodes); }; // Test whether a given character is part of an identifier. var isIdentifierChar = exports.isIdentifierChar = function(code, astral) { if (code < 48) return code === 36; if (code < 58) return true; if (code < 65) return false; if (code < 91) return true; if (code < 97) return code === 95; if (code < 123) return true; if (code <= 0xffff) return code >= 0xaa && nonASCIIidentifier.test(String.fromCharCode(code)); if (astral === false) return false; return isInAstralSet(code, astralIdentifierStartCodes) || isInAstralSet(code, astralIdentifierCodes); }; // ## Tokenizer // These are used when `options.locations` is on, for the // `tokStartLoc` and `tokEndLoc` properties. 
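  // For instance, with `locations` enabled, the token for the
  // identifier `answer` in "var answer;" gets a `loc` built from two
  // such positions (a hypothetical illustration, not part of the
  // library):
  //
  //     loc: {start: {line: 1, column: 4}, end: {line: 1, column: 10}}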
function Position(line, col) { this.line = line; this.column = col; } Position.prototype.offset = function(n) { return new Position(this.line, this.column + n); }; function curPosition() { return new Position(tokCurLine, tokPos - tokLineStart); } // Reset the token state. Used at the start of a parse. function initTokenState(pos) { if (pos) { tokPos = pos; tokLineStart = Math.max(0, input.lastIndexOf("\n", pos)); tokCurLine = input.slice(0, tokLineStart).split(newline).length; } else { tokCurLine = 1; tokPos = tokLineStart = 0; } tokType = _eof; tokContext = [b_stat]; tokExprAllowed = true; strict = false; if (tokPos === 0 && options.allowHashBang && input.slice(0, 2) === '#!') { skipLineComment(2); } } // The algorithm used to determine whether a regexp can appear at a // given point in the program is loosely based on sweet.js' approach. // See https://github.com/mozilla/sweet.js/wiki/design var b_stat = {token: "{", isExpr: false}, b_expr = {token: "{", isExpr: true}, b_tmpl = {token: "${", isExpr: true}; var p_stat = {token: "(", isExpr: false}, p_expr = {token: "(", isExpr: true}; var q_tmpl = {token: "`", isExpr: true}, f_expr = {token: "function", isExpr: true}; function curTokContext() { return tokContext[tokContext.length - 1]; } function braceIsBlock(prevType) { var parent; if (prevType === _colon && (parent = curTokContext()).token == "{") return !parent.isExpr; if (prevType === _return) return newline.test(input.slice(lastEnd, tokStart)); if (prevType === _else || prevType === _semi || prevType === _eof) return true; if (prevType == _braceL) return curTokContext() === b_stat; return !tokExprAllowed; } // Called at the end of every token. Sets `tokEnd`, `tokVal`, and // maintains `tokContext` and `tokExprAllowed`, and skips the space // after the token, so that the next one's `tokStart` will point at // the right position. function finishToken(type, val) { tokEnd = tokPos; if (options.locations) tokEndLoc = curPosition(); var prevType = tokType, preserveSpace = false; tokType = type; tokVal = val; // Update context info if (type === _parenR || type === _braceR) { var out = tokContext.pop(); if (out === b_tmpl) { preserveSpace = true; } else if (out === b_stat && curTokContext() === f_expr) { tokContext.pop(); tokExprAllowed = false; } else { tokExprAllowed = !(out && out.isExpr); } } else if (type === _braceL) { tokContext.push(braceIsBlock(prevType) ? b_stat : b_expr); tokExprAllowed = true; } else if (type === _dollarBraceL) { tokContext.push(b_tmpl); tokExprAllowed = true; } else if (type == _parenL) { var statementParens = prevType === _if || prevType === _for || prevType === _with || prevType === _while; tokContext.push(statementParens ? 
p_stat : p_expr); tokExprAllowed = true; } else if (type == _incDec) { // tokExprAllowed stays unchanged } else if (type.keyword && prevType == _dot) { tokExprAllowed = false; } else if (type == _function) { if (curTokContext() !== b_stat) { tokContext.push(f_expr); } tokExprAllowed = false; } else if (type === _backQuote) { if (curTokContext() === q_tmpl) { tokContext.pop(); } else { tokContext.push(q_tmpl); preserveSpace = true; } tokExprAllowed = false; } else { tokExprAllowed = type.beforeExpr; } if (!preserveSpace) skipSpace(); } function skipBlockComment() { var startLoc = options.onComment && options.locations && curPosition(); var start = tokPos, end = input.indexOf("*/", tokPos += 2); if (end === -1) raise(tokPos - 2, "Unterminated comment"); tokPos = end + 2; if (options.locations) { lineBreak.lastIndex = start; var match; while ((match = lineBreak.exec(input)) && match.index < tokPos) { ++tokCurLine; tokLineStart = match.index + match[0].length; } } if (options.onComment) options.onComment(true, input.slice(start + 2, end), start, tokPos, startLoc, options.locations && curPosition()); } function skipLineComment(startSkip) { var start = tokPos; var startLoc = options.onComment && options.locations && curPosition(); var ch = input.charCodeAt(tokPos+=startSkip); while (tokPos < inputLen && ch !== 10 && ch !== 13 && ch !== 8232 && ch !== 8233) { ++tokPos; ch = input.charCodeAt(tokPos); } if (options.onComment) options.onComment(false, input.slice(start + startSkip, tokPos), start, tokPos, startLoc, options.locations && curPosition()); } // Called at the start of the parse and after every token. Skips // whitespace and comments, and. function skipSpace() { while (tokPos < inputLen) { var ch = input.charCodeAt(tokPos); if (ch === 32) { // ' ' ++tokPos; } else if (ch === 13) { ++tokPos; var next = input.charCodeAt(tokPos); if (next === 10) { ++tokPos; } if (options.locations) { ++tokCurLine; tokLineStart = tokPos; } } else if (ch === 10 || ch === 8232 || ch === 8233) { ++tokPos; if (options.locations) { ++tokCurLine; tokLineStart = tokPos; } } else if (ch > 8 && ch < 14) { ++tokPos; } else if (ch === 47) { // '/' var next = input.charCodeAt(tokPos + 1); if (next === 42) { // '*' skipBlockComment(); } else if (next === 47) { // '/' skipLineComment(2); } else break; } else if (ch === 160) { // '\xa0' ++tokPos; } else if (ch >= 5760 && nonASCIIwhitespace.test(String.fromCharCode(ch))) { ++tokPos; } else { break; } } } // ### Token reading // This is the function that is called to fetch the next token. It // is somewhat obscure, because it works in character codes rather // than characters, and because operator parsing has been inlined // into it. // // All in the name of speed. // function readToken_dot() { var next = input.charCodeAt(tokPos + 1); if (next >= 48 && next <= 57) return readNumber(true); var next2 = input.charCodeAt(tokPos + 2); if (options.ecmaVersion >= 6 && next === 46 && next2 === 46) { // 46 = dot '.' tokPos += 3; return finishToken(_ellipsis); } else { ++tokPos; return finishToken(_dot); } } function readToken_slash() { // '/' var next = input.charCodeAt(tokPos + 1); if (tokExprAllowed) {++tokPos; return readRegexp();} if (next === 61) return finishOp(_assign, 2); return finishOp(_slash, 1); } function readToken_mult_modulo(code) { // '%*' var next = input.charCodeAt(tokPos + 1); if (next === 61) return finishOp(_assign, 2); return finishOp(code === 42 ? 
_star : _modulo, 1); } function readToken_pipe_amp(code) { // '|&' var next = input.charCodeAt(tokPos + 1); if (next === code) return finishOp(code === 124 ? _logicalOR : _logicalAND, 2); if (next === 61) return finishOp(_assign, 2); return finishOp(code === 124 ? _bitwiseOR : _bitwiseAND, 1); } function readToken_caret() { // '^' var next = input.charCodeAt(tokPos + 1); if (next === 61) return finishOp(_assign, 2); return finishOp(_bitwiseXOR, 1); } function readToken_plus_min(code) { // '+-' var next = input.charCodeAt(tokPos + 1); if (next === code) { if (next == 45 && input.charCodeAt(tokPos + 2) == 62 && newline.test(input.slice(lastEnd, tokPos))) { // A `-->` line comment skipLineComment(3); skipSpace(); return readToken(); } return finishOp(_incDec, 2); } if (next === 61) return finishOp(_assign, 2); return finishOp(_plusMin, 1); } function readToken_lt_gt(code) { // '<>' var next = input.charCodeAt(tokPos + 1); var size = 1; if (next === code) { size = code === 62 && input.charCodeAt(tokPos + 2) === 62 ? 3 : 2; if (input.charCodeAt(tokPos + size) === 61) return finishOp(_assign, size + 1); return finishOp(_bitShift, size); } if (next == 33 && code == 60 && input.charCodeAt(tokPos + 2) == 45 && input.charCodeAt(tokPos + 3) == 45) { // `