
tools: replace closure-linter with eslint

PR-URL: https://github.com/iojs/io.js/pull/1539
Fixes: https://github.com/iojs/io.js/issues/1253
Reviewed-By: Jeremiah Senkpiel <fishrock123@rocketmail.com>
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
Reviewed-By: Roman Reiss <me@silverwind.io>
Reviewed-By: Chris Dickinson <christopher.s.dickinson@gmail.com>
Reviewed-By: Johan Bergström <bugs@bergstroem.nu>
Reviewed-By: Fedor Indutny <fedor.indutny@gmail.com>
v2.0.2
Yosuke Furukawa 10 years ago
commit f9dd34d301
  1     .eslintignore
  91    .eslintrc
  5     Makefile
  6     tools/closure_linter/AUTHORS
  176   tools/closure_linter/LICENSE
  9     tools/closure_linter/README
  16    tools/closure_linter/build/lib/closure_linter/__init__.py
  248   tools/closure_linter/build/lib/closure_linter/aliaspass.py
  191   tools/closure_linter/build/lib/closure_linter/aliaspass_test.py
  108   tools/closure_linter/build/lib/closure_linter/checker.py
  192   tools/closure_linter/build/lib/closure_linter/checkerbase.py
  578   tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py
  873   tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py
  16    tools/closure_linter/build/lib/closure_linter/common/__init__.py
  65    tools/closure_linter/build/lib/closure_linter/common/error.py
  46    tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py
  61    tools/closure_linter/build/lib/closure_linter/common/errorhandler.py
  52    tools/closure_linter/build/lib/closure_linter/common/erroroutput.py
  115   tools/closure_linter/build/lib/closure_linter/common/filetestcase.py
  170   tools/closure_linter/build/lib/closure_linter/common/htmlutil.py
  39    tools/closure_linter/build/lib/closure_linter/common/lintrunner.py
  60    tools/closure_linter/build/lib/closure_linter/common/matcher.py
  126   tools/closure_linter/build/lib/closure_linter/common/position.py
  190   tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py
  185   tools/closure_linter/build/lib/closure_linter/common/tokenizer.py
  145   tools/closure_linter/build/lib/closure_linter/common/tokens.py
  113   tools/closure_linter/build/lib/closure_linter/common/tokens_test.py
  844   tools/closure_linter/build/lib/closure_linter/ecmalintrules.py
  574   tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py
  95    tools/closure_linter/build/lib/closure_linter/error_check.py
  618   tools/closure_linter/build/lib/closure_linter/error_fixer.py
  57    tools/closure_linter/build/lib/closure_linter/error_fixer_test.py
  66    tools/closure_linter/build/lib/closure_linter/errorrecord.py
  72    tools/closure_linter/build/lib/closure_linter/errorrules.py
  117   tools/closure_linter/build/lib/closure_linter/errorrules_test.py
  154   tools/closure_linter/build/lib/closure_linter/errors.py
  66    tools/closure_linter/build/lib/closure_linter/fixjsstyle.py
  615   tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py
  121   tools/closure_linter/build/lib/closure_linter/full_test.py
  319   tools/closure_linter/build/lib/closure_linter/gjslint.py
  617   tools/closure_linter/build/lib/closure_linter/indentation.py
  754   tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py
  150   tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py
  278   tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py
  463   tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py
  153   tools/closure_linter/build/lib/closure_linter/javascripttokens.py
  74    tools/closure_linter/build/lib/closure_linter/not_strict_test.py
  329   tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py
  155   tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py
  198   tools/closure_linter/build/lib/closure_linter/runner.py
  101   tools/closure_linter/build/lib/closure_linter/runner_test.py
  206   tools/closure_linter/build/lib/closure_linter/scopeutil.py
  222   tools/closure_linter/build/lib/closure_linter/scopeutil_test.py
  1294  tools/closure_linter/build/lib/closure_linter/statetracker.py
  123   tools/closure_linter/build/lib/closure_linter/statetracker_test.py
  67    tools/closure_linter/build/lib/closure_linter/strict_test.py
  94    tools/closure_linter/build/lib/closure_linter/testutil.py
  697   tools/closure_linter/build/lib/closure_linter/tokenutil.py
  297   tools/closure_linter/build/lib/closure_linter/tokenutil_test.py
  401   tools/closure_linter/build/lib/closure_linter/typeannotation.py
  232   tools/closure_linter/build/lib/closure_linter/typeannotation_test.py
  10    tools/closure_linter/closure_linter.egg-info/PKG-INFO
  63    tools/closure_linter/closure_linter.egg-info/SOURCES.txt
  1     tools/closure_linter/closure_linter.egg-info/dependency_links.txt
  4     tools/closure_linter/closure_linter.egg-info/entry_points.txt
  1     tools/closure_linter/closure_linter.egg-info/requires.txt
  1     tools/closure_linter/closure_linter.egg-info/top_level.txt
  16    tools/closure_linter/closure_linter/__init__.py
  248   tools/closure_linter/closure_linter/aliaspass.py
  191   tools/closure_linter/closure_linter/aliaspass_test.py
  108   tools/closure_linter/closure_linter/checker.py
  192   tools/closure_linter/closure_linter/checkerbase.py
  578   tools/closure_linter/closure_linter/closurizednamespacesinfo.py
  873   tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py
  16    tools/closure_linter/closure_linter/common/__init__.py
  65    tools/closure_linter/closure_linter/common/error.py
  46    tools/closure_linter/closure_linter/common/erroraccumulator.py
  61    tools/closure_linter/closure_linter/common/errorhandler.py
  52    tools/closure_linter/closure_linter/common/erroroutput.py
  115   tools/closure_linter/closure_linter/common/filetestcase.py
  170   tools/closure_linter/closure_linter/common/htmlutil.py
  39    tools/closure_linter/closure_linter/common/lintrunner.py
  60    tools/closure_linter/closure_linter/common/matcher.py
  126   tools/closure_linter/closure_linter/common/position.py
  190   tools/closure_linter/closure_linter/common/simplefileflags.py
  185   tools/closure_linter/closure_linter/common/tokenizer.py
  145   tools/closure_linter/closure_linter/common/tokens.py
  113   tools/closure_linter/closure_linter/common/tokens_test.py
  844   tools/closure_linter/closure_linter/ecmalintrules.py
  574   tools/closure_linter/closure_linter/ecmametadatapass.py
  95    tools/closure_linter/closure_linter/error_check.py
  618   tools/closure_linter/closure_linter/error_fixer.py
  57    tools/closure_linter/closure_linter/error_fixer_test.py
  66    tools/closure_linter/closure_linter/errorrecord.py
  72    tools/closure_linter/closure_linter/errorrules.py
  117   tools/closure_linter/closure_linter/errorrules_test.py
  154   tools/closure_linter/closure_linter/errors.py
  66    tools/closure_linter/closure_linter/fixjsstyle.py
  615   tools/closure_linter/closure_linter/fixjsstyle_test.py
  121   tools/closure_linter/closure_linter/full_test.py

1  .eslintignore

@@ -0,0 +1 @@
lib/punycode.js

91  .eslintrc

@@ -0,0 +1,91 @@
env:
  node: true

# enable ECMAScript features
ecmaFeatures:
  blockBindings: true
  templateStrings: true
  octalLiterals: true
  binaryLiterals: true

rules:
  # Possible Errors
  # list: https://github.com/eslint/eslint/tree/master/docs/rules#possible-errors
  ## disallow debugger statements
  no-debugger: 2
  ## disallow duplicate function arguments
  no-dupe-args: 2
  ## disallow duplicate object keys
  no-dupe-keys: 2
  ## disallow duplicate case labels in a switch statement
  no-duplicate-case: 2
  ## disallow reassignment of exception parameters in catch clauses
  no-ex-assign: 2
  ## disallow reserved words (e.g. enum, class) as object keys
  no-reserved-keys: 2
  ## disallow unreachable code
  no-unreachable: 2
  ## require typeof comparisons against valid strings,
  ## catching typos like typeof foo === 'strnig'
  valid-typeof: 2

  # Best Practices
  # list: https://github.com/eslint/eslint/tree/master/docs/rules#best-practices
  ## require a "falls through" comment on switch-case fallthrough
  no-fallthrough: 2

  # Stylistic Issues
  # list: https://github.com/eslint/eslint/tree/master/docs/rules#stylistic-issues
  ## use single quotes; double quotes are allowed to avoid escaping
  quotes:
    - 2
    - "single"
    - "avoid-escape"
  ## two-space indentation
  indent:
    - 2
    - 2
  ## require a space after commas
  comma-spacing: 2
  ## require semicolons
  semi: 2
  ## require spaces around infix operators, e.g. var sum = 1 + 1;
  space-infix-ops: 2
  ## require a space after return, throw, and case
  space-return-throw-case: 2
  ## require parentheses when invoking a constructor with new
  new-parens: 2
  ## maximum line length of 80 characters
  max-len:
    - 2
    - 80
    - 2

  # Strict Mode
  # list: https://github.com/eslint/eslint/tree/master/docs/rules#strict-mode
  ## require 'use strict' at the top of the file (global scope)
  strict:
    - 2
    - "global"

# globally scoped methods and vars
globals:
  DTRACE_HTTP_CLIENT_REQUEST: true
  LTTNG_HTTP_CLIENT_REQUEST: true
  COUNTER_HTTP_CLIENT_REQUEST: true
  DTRACE_HTTP_CLIENT_RESPONSE: true
  LTTNG_HTTP_CLIENT_RESPONSE: true
  COUNTER_HTTP_CLIENT_RESPONSE: true
  DTRACE_HTTP_SERVER_REQUEST: true
  LTTNG_HTTP_SERVER_REQUEST: true
  COUNTER_HTTP_SERVER_REQUEST: true
  DTRACE_HTTP_SERVER_RESPONSE: true
  LTTNG_HTTP_SERVER_RESPONSE: true
  COUNTER_HTTP_SERVER_RESPONSE: true
  DTRACE_NET_STREAM_END: true
  LTTNG_NET_STREAM_END: true
  COUNTER_NET_SERVER_CONNECTION_CLOSE: true
  DTRACE_NET_SERVER_CONNECTION: true
  LTTNG_NET_SERVER_CONNECTION: true
  COUNTER_NET_SERVER_CONNECTION: true
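
For illustration only (this snippet is not part of the commit, and the variable names are made up): under the configuration above, var sum=1+1 trips space-infix-ops, a string in double quotes trips quotes, a missing semicolon trips semi, and typeof foo === 'strnig' trips valid-typeof. A compliant file looks like:

'use strict'; // strict: [2, "global"] wants the directive at file scope

var sum = 1 + 1; // spaces around infix operators, statement ends with ;
var greeting = 'hello'; // single quotes (double only to avoid escaping)
if (typeof greeting === 'string' && sum === 2) {
  console.log(greeting); // two-space indentation inside blocks
}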

5  Makefile

@@ -386,11 +386,8 @@ bench-idle:
 	sleep 1
 	./$(NODE_EXE) benchmark/idle_clients.js &
 
-jslintfix:
-	PYTHONPATH=tools/closure_linter/:tools/gflags/ $(PYTHON) tools/closure_linter/closure_linter/fixjsstyle.py --strict --nojsdoc -r lib/ -r src/ --exclude_files lib/punycode.js
-
 jslint:
-	PYTHONPATH=tools/closure_linter/:tools/gflags/ $(PYTHON) tools/closure_linter/closure_linter/gjslint.py --unix_mode --strict --nojsdoc -r lib/ -r src/ --exclude_files lib/punycode.js
+	./$(NODE_EXE) tools/eslint/bin/eslint.js src/*.js lib/*.js --reset --quiet
 
 CPPLINT_EXCLUDE ?=
 CPPLINT_EXCLUDE += src/node_lttng.cc
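
In practice, make jslint now runs the eslint bundled at tools/eslint/bin/eslint.js through the freshly built node binary, so linting no longer needs Python, gflags, or a closure_linter checkout; jslintfix is removed without a replacement, since no eslint-based fixer is wired up in this change. The rest of the commit deletes the now-unused closure_linter tree, shown below.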

6  tools/closure_linter/AUTHORS

@@ -1,6 +0,0 @@
# This is a list of contributors to the Closure Linter.
# Names should be added to this file like so:
# Name or Organization <email address>
Google Inc.

176  tools/closure_linter/LICENSE

@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

9  tools/closure_linter/README

@@ -1,9 +0,0 @@
This repository contains the Closure Linter - a style checker for JavaScript.
To install the application, run
python ./setup.py install
After installing, you get two helper applications installed into /usr/local/bin:
gjslint.py - runs the linter and checks for errors
fixjsstyle.py - tries to fix errors automatically

16  tools/closure_linter/build/lib/closure_linter/__init__.py

@@ -1,16 +0,0 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package indicator for gjslint."""

248  tools/closure_linter/build/lib/closure_linter/aliaspass.py

@@ -1,248 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Pass that scans for goog.scope aliases and lint/usage errors."""

# Allow non-Google copyright
# pylint: disable=g-bad-file-header

__author__ = ('nnaze@google.com (Nathan Naze)')

from closure_linter import ecmametadatapass
from closure_linter import errors
from closure_linter import javascripttokens
from closure_linter import scopeutil
from closure_linter import tokenutil
from closure_linter.common import error


# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass,
# and related classes onto it.


def _GetAliasForIdentifier(identifier, alias_map):
  """Returns the aliased_symbol name for an identifier.

  Example usage:
    >>> alias_map = {'MyClass': 'goog.foo.MyClass'}
    >>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map)
    'goog.foo.MyClass.prototype.action'

    >>> _GetAliasForIdentifier('MyClass.prototype.action', {})
    None

  Args:
    identifier: The identifier.
    alias_map: A dictionary mapping a symbol to an alias.

  Returns:
    The aliased symbol name or None if not found.
  """
  ns = identifier.split('.', 1)[0]
  aliased_symbol = alias_map.get(ns)
  if aliased_symbol:
    return aliased_symbol + identifier[len(ns):]


def _SetTypeAlias(js_type, alias_map):
  """Updates the alias for identifiers in a type.

  Args:
    js_type: A typeannotation.TypeAnnotation instance.
    alias_map: A dictionary mapping a symbol to an alias.
  """
  aliased_symbol = _GetAliasForIdentifier(js_type.identifier, alias_map)
  if aliased_symbol:
    js_type.alias = aliased_symbol
  for sub_type in js_type.IterTypes():
    _SetTypeAlias(sub_type, alias_map)


class AliasPass(object):
  """Pass to identify goog.scope() usages.

  Identifies goog.scope() usages and finds lint/usage errors. Notes any
  aliases of symbols in Closurized namespaces (that is, reassignments
  such as "var MyClass = goog.foo.MyClass;") and annotates identifiers
  when they're using an alias (so they may be expanded to the full symbol
  later -- that "MyClass.prototype.action" refers to
  "goog.foo.MyClass.prototype.action" when expanded).
  """

  def __init__(self, closurized_namespaces=None, error_handler=None):
    """Creates a new pass.

    Args:
      closurized_namespaces: A set of Closurized namespaces (e.g. 'goog').
      error_handler: An error handler to report lint errors to.
    """
    self._error_handler = error_handler

    # If we have namespaces, freeze the set.
    if closurized_namespaces:
      closurized_namespaces = frozenset(closurized_namespaces)

    self._closurized_namespaces = closurized_namespaces

  def Process(self, start_token):
    """Runs the pass on a token stream.

    Args:
      start_token: The first token in the stream.
    """
    if start_token is None:
      return

    # TODO(nnaze): Add more goog.scope usage checks.
    self._CheckGoogScopeCalls(start_token)

    # If we have closurized namespaces, identify aliased identifiers.
    if self._closurized_namespaces:
      context = start_token.metadata.context
      root_context = context.GetRoot()
      self._ProcessRootContext(root_context)

  def _CheckGoogScopeCalls(self, start_token):
    """Check goog.scope calls for lint/usage errors."""

    def IsScopeToken(token):
      return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and
              token.string == 'goog.scope')

    # Find all the goog.scope tokens in the file.
    scope_tokens = [t for t in start_token if IsScopeToken(t)]

    for token in scope_tokens:
      scope_context = token.metadata.context
      if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and
              scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
        self._MaybeReportError(
            error.Error(errors.INVALID_USE_OF_GOOG_SCOPE,
                        'goog.scope call not in global scope', token))

    # There should be only one goog.scope reference. Register errors for
    # every instance after the first.
    for token in scope_tokens[1:]:
      self._MaybeReportError(
          error.Error(errors.EXTRA_GOOG_SCOPE_USAGE,
                      'More than one goog.scope call in file.', token))

  def _MaybeReportError(self, err):
    """Report an error to the handler (if registered)."""
    if self._error_handler:
      self._error_handler.HandleError(err)

  @classmethod
  def _YieldAllContexts(cls, context):
    """Yields all contexts that are contained by the given context."""
    yield context
    for child_context in context.children:
      for descendent_child in cls._YieldAllContexts(child_context):
        yield descendent_child

  @staticmethod
  def _IsTokenInParentBlock(token, parent_block):
    """Determines whether the given token is contained by the given block.

    Args:
      token: A token.
      parent_block: An EcmaContext.

    Returns:
      Whether the token is in a context that is or is a child of the given
      parent_block context.
    """
    context = token.metadata.context
    while context:
      if context is parent_block:
        return True
      context = context.parent
    return False

  def _ProcessRootContext(self, root_context):
    """Processes all goog.scope blocks under the root context."""
    assert root_context.type is ecmametadatapass.EcmaContext.ROOT

    # Process aliases in statements in the root scope for goog.module-style
    # aliases.
    global_alias_map = {}
    for context in root_context.children:
      if context.type == ecmametadatapass.EcmaContext.STATEMENT:
        for statement_child in context.children:
          if statement_child.type == ecmametadatapass.EcmaContext.VAR:
            match = scopeutil.MatchModuleAlias(statement_child)
            if match:
              # goog.require aliases cannot use further aliases, the symbol is
              # the second part of match, directly.
              symbol = match[1]
              if scopeutil.IsInClosurizedNamespace(
                  symbol, self._closurized_namespaces):
                global_alias_map[match[0]] = symbol

    # Process each block to find aliases.
    for context in root_context.children:
      self._ProcessBlock(context, global_alias_map)

  def _ProcessBlock(self, context, global_alias_map):
    """Scans a goog.scope block to find aliases and mark alias tokens."""
    alias_map = global_alias_map.copy()

    # Iterate over every token in the context. Each token points to one
    # context, but multiple tokens may point to the same context. We only
    # want to check each context once, so keep track of those we've seen.
    seen_contexts = set()
    token = context.start_token
    while token and self._IsTokenInParentBlock(token, context):
      token_context = token.metadata.context if token.metadata else None

      # Check to see if this token is an alias.
      if token_context and token_context not in seen_contexts:
        seen_contexts.add(token_context)

        # If this is an alias statement in the goog.scope block.
        if (token_context.type == ecmametadatapass.EcmaContext.VAR and
            scopeutil.IsGoogScopeBlock(token_context.parent.parent)):
          match = scopeutil.MatchAlias(token_context)

          # If this is an alias, remember it in the map.
          if match:
            alias, symbol = match
            symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol
            if scopeutil.IsInClosurizedNamespace(
                symbol, self._closurized_namespaces):
              alias_map[alias] = symbol

      # If this token is an identifier that matches an alias,
      # mark the token as an alias to the original symbol.
      if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or
          token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER):
        identifier = tokenutil.GetIdentifierForToken(token)
        if identifier:
          aliased_symbol = _GetAliasForIdentifier(identifier, alias_map)
          if aliased_symbol:
            token.metadata.aliased_symbol = aliased_symbol

      elif token.type == javascripttokens.JavaScriptTokenType.DOC_FLAG:
        flag = token.attached_object
        if flag and flag.HasType() and flag.jstype:
          _SetTypeAlias(flag.jstype, alias_map)

      token = token.next  # Get next token.

191  tools/closure_linter/build/lib/closure_linter/aliaspass_test.py

@@ -1,191 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for the aliaspass module."""

# Allow non-Google copyright
# pylint: disable=g-bad-file-header

__author__ = ('nnaze@google.com (Nathan Naze)')

import unittest as googletest

from closure_linter import aliaspass
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import testutil
from closure_linter.common import erroraccumulator


def _GetTokenByLineAndString(start_token, string, line_number):
  for token in start_token:
    if token.line_number == line_number and token.string == string:
      return token


class AliasPassTest(googletest.TestCase):

  def testInvalidGoogScopeCall(self):
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT)

    error_accumulator = erroraccumulator.ErrorAccumulator()
    alias_pass = aliaspass.AliasPass(
        error_handler=error_accumulator)
    alias_pass.Process(start_token)

    alias_errors = error_accumulator.GetErrors()
    self.assertEquals(1, len(alias_errors))

    alias_error = alias_errors[0]
    self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code)
    self.assertEquals('goog.scope', alias_error.token.string)

  def testAliasedIdentifiers(self):
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
    alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
    alias_pass.Process(start_token)

    alias_token = _GetTokenByLineAndString(start_token, 'Event', 4)
    self.assertTrue(alias_token.metadata.is_alias_definition)

    my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9)
    self.assertIsNone(my_class_token.metadata.aliased_symbol)

    component_token = _GetTokenByLineAndString(start_token, 'Component', 17)
    self.assertEquals('goog.ui.Component',
                      component_token.metadata.aliased_symbol)

    event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17)
    self.assertEquals('goog.events.Event.Something',
                      event_token.metadata.aliased_symbol)

    non_closurized_token = _GetTokenByLineAndString(
        start_token, 'NonClosurizedClass', 18)
    self.assertIsNone(non_closurized_token.metadata.aliased_symbol)

    long_start_token = _GetTokenByLineAndString(start_token, 'Event', 24)
    self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod',
                      long_start_token.metadata.aliased_symbol)

  def testAliasedDoctypes(self):
    """Tests that aliases are correctly expanded within type annotations."""
    start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
    tracker = javascriptstatetracker.JavaScriptStateTracker()
    tracker.DocFlagPass(start_token, error_handler=None)

    alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
    alias_pass.Process(start_token)

    flag_token = _GetTokenByLineAndString(start_token, '@type', 22)
    self.assertEquals(
        'goog.events.Event.<goog.ui.Component,Array<myproject.foo.MyClass>>',
        repr(flag_token.attached_object.jstype))

  def testModuleAlias(self):
    start_token = testutil.TokenizeSourceAndRunEcmaPass("""
goog.module('goog.test');
var Alias = goog.require('goog.Alias');
Alias.use();
""")
    alias_pass = aliaspass.AliasPass(set(['goog']))
    alias_pass.Process(start_token)
    alias_token = _GetTokenByLineAndString(start_token, 'Alias', 3)
    self.assertTrue(alias_token.metadata.is_alias_definition)

  def testMultipleGoogScopeCalls(self):
    start_token = testutil.TokenizeSourceAndRunEcmaPass(
        _TEST_MULTIPLE_SCOPE_SCRIPT)

    error_accumulator = erroraccumulator.ErrorAccumulator()

    alias_pass = aliaspass.AliasPass(
        set(['goog', 'myproject']),
        error_handler=error_accumulator)
    alias_pass.Process(start_token)

    alias_errors = error_accumulator.GetErrors()
    self.assertEquals(3, len(alias_errors))

    error = alias_errors[0]
    self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code)
    self.assertEquals(7, error.token.line_number)

    error = alias_errors[1]
    self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
    self.assertEquals(7, error.token.line_number)

    error = alias_errors[2]
    self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
    self.assertEquals(11, error.token.line_number)


_TEST_ALIAS_SCRIPT = """
goog.scope(function() {
var events = goog.events; // scope alias
var Event = events.
    Event; // nested multiline scope alias

// This should not be registered as an aliased identifier because
// it appears before the alias.
var myClass = new MyClass();

var Component = goog.ui.Component; // scope alias
var MyClass = myproject.foo.MyClass; // scope alias

// Scope alias of non-Closurized namespace.
var NonClosurizedClass = aaa.bbb.NonClosurizedClass;

var component = new Component(Event.Something);
var nonClosurized = NonClosurizedClass();

/**
 * A created namespace with a really long identifier.
 * @type {events.Event.<Component,Array<MyClass>}
 */
Event.
    MultilineIdentifier.
        someMethod = function() {};
});
"""

_TEST_SCOPE_SCRIPT = """
function foo () {
  // This goog.scope call is invalid.
  goog.scope(function() {

  });
}
"""

_TEST_MULTIPLE_SCOPE_SCRIPT = """
goog.scope(function() {
  // do nothing
});

function foo() {
  var test = goog.scope; // We should not see goog.scope mentioned.
}

// This goog.scope invalid. There can be only one.
goog.scope(function() {

});
"""


if __name__ == '__main__':
  googletest.main()

108  tools/closure_linter/build/lib/closure_linter/checker.py

@@ -1,108 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Core methods for checking JS files for common style guide violations."""

__author__ = ('robbyw@google.com (Robert Walker)',
              'ajp@google.com (Andy Perelson)')

import gflags as flags

from closure_linter import aliaspass
from closure_linter import checkerbase
from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptlintrules


flags.DEFINE_list('closurized_namespaces', '',
                  'Namespace prefixes, used for testing of '
                  'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
                  'Fully qualified namespaces that should not be reported '
                  'as extra by the linter.')


class JavaScriptStyleChecker(checkerbase.CheckerBase):
  """Checker that applies JavaScriptLintRules."""

  def __init__(self, state_tracker, error_handler):
    """Initialize a JavaScriptStyleChecker object.

    Args:
      state_tracker: State tracker.
      error_handler: Error handler to pass all errors to.
    """
    self._namespaces_info = None
    self._alias_pass = None
    if flags.FLAGS.closurized_namespaces:
      self._namespaces_info = (
          closurizednamespacesinfo.ClosurizedNamespacesInfo(
              flags.FLAGS.closurized_namespaces,
              flags.FLAGS.ignored_extra_namespaces))

      self._alias_pass = aliaspass.AliasPass(
          flags.FLAGS.closurized_namespaces, error_handler)

    checkerbase.CheckerBase.__init__(
        self,
        error_handler=error_handler,
        lint_rules=javascriptlintrules.JavaScriptLintRules(
            self._namespaces_info),
        state_tracker=state_tracker)

  def Check(self, start_token, limited_doc_checks=False, is_html=False,
            stop_token=None):
    """Checks a token stream for lint warnings/errors.

    Adds a separate pass for computing dependency information based on
    goog.require and goog.provide statements prior to the main linting pass.

    Args:
      start_token: The first token in the token stream.
      limited_doc_checks: Whether to perform limited checks.
      is_html: Whether this token stream is HTML.
      stop_token: If given, checks should stop at this token.
    """
    self._lint_rules.Initialize(self, limited_doc_checks, is_html)

    self._state_tracker.DocFlagPass(start_token, self._error_handler)

    if self._alias_pass:
      self._alias_pass.Process(start_token)

    # To maximize the amount of errors that get reported before a parse error
    # is displayed, don't run the dependency pass if a parse error exists.
    if self._namespaces_info:
      self._namespaces_info.Reset()
      self._ExecutePass(start_token, self._DependencyPass, stop_token)

    self._ExecutePass(start_token, self._LintPass, stop_token)

    # If we have a stop_token, we didn't end up reading the whole file and,
    # thus, don't call Finalize to do end-of-file checks.
    if not stop_token:
      self._lint_rules.Finalize(self._state_tracker)

  def _DependencyPass(self, token):
    """Processes an individual token for dependency information.

    Used to encapsulate the logic needed to process an individual token so
    that it can be passed to _ExecutePass.

    Args:
      token: The token to process.
    """
    self._namespaces_info.ProcessToken(token, self._state_tracker)

192  tools/closure_linter/build/lib/closure_linter/checkerbase.py

@@ -1,192 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Base classes for writing checkers that operate on tokens."""

# Allow non-Google copyright
# pylint: disable=g-bad-file-header

__author__ = ('robbyw@google.com (Robert Walker)',
              'ajp@google.com (Andy Perelson)',
              'jacobr@google.com (Jacob Richman)')

from closure_linter import errorrules
from closure_linter.common import error


class LintRulesBase(object):
  """Base class for all classes defining the lint rules for a language."""

  def __init__(self):
    self.__checker = None

  def Initialize(self, checker, limited_doc_checks, is_html):
    """Initializes to prepare to check a file.

    Args:
      checker: Class to report errors to.
      limited_doc_checks: Whether doc checking is relaxed for this file.
      is_html: Whether the file is an HTML file with extracted contents.
    """
    self.__checker = checker
    self._limited_doc_checks = limited_doc_checks
    self._is_html = is_html

  def _HandleError(self, code, message, token, position=None,
                   fix_data=None):
    """Call the HandleError function for the checker we are associated with."""
    if errorrules.ShouldReportError(code):
      self.__checker.HandleError(code, message, token, position, fix_data)

  def _SetLimitedDocChecks(self, limited_doc_checks):
    """Sets whether doc checking is relaxed for this file.

    Args:
      limited_doc_checks: Whether doc checking is relaxed for this file.
    """
    self._limited_doc_checks = limited_doc_checks

  def CheckToken(self, token, parser_state):
    """Checks a token, given the current parser_state, for warnings and errors.

    Args:
      token: The current token under consideration.
      parser_state: Object that indicates the parser state in the page.

    Raises:
      TypeError: If not overridden.
    """
    raise TypeError('Abstract method CheckToken not implemented')

  def Finalize(self, parser_state):
    """Perform all checks that need to occur after all lines are processed.

    Args:
      parser_state: State of the parser after parsing all tokens.

    Raises:
      TypeError: If not overridden.
    """
    raise TypeError('Abstract method Finalize not implemented')


class CheckerBase(object):
  """This class handles checking a LintRules object against a file."""

  def __init__(self, error_handler, lint_rules, state_tracker):
    """Initialize a checker object.

    Args:
      error_handler: Object that handles errors.
      lint_rules: LintRules object defining lint errors given a token
        and state_tracker object.
      state_tracker: Object that tracks the current state in the token stream.
    """
    self._error_handler = error_handler
    self._lint_rules = lint_rules
    self._state_tracker = state_tracker

    self._has_errors = False

  def HandleError(self, code, message, token, position=None,
                  fix_data=None):
    """Prints out the given error message including a line number.

    Args:
      code: The error code.
      message: The error to print.
      token: The token where the error occurred, or None if it was a
        file-wide issue.
      position: The position of the error, defaults to None.
      fix_data: Metadata used for fixing the error.
    """
    self._has_errors = True
    self._error_handler.HandleError(
        error.Error(code, message, token, position, fix_data))

  def HasErrors(self):
    """Returns true if the style checker has found any errors.

    Returns:
      True if the style checker has found any errors.
    """
    return self._has_errors

  def Check(self, start_token, limited_doc_checks=False, is_html=False,
            stop_token=None):
    """Checks a token stream, reporting errors to the error reporter.

    Args:
      start_token: First token in token stream.
      limited_doc_checks: Whether doc checking is relaxed for this file.
      is_html: Whether the file being checked is an HTML file with extracted
        contents.
      stop_token: If given, check should stop at this token.
    """
    self._lint_rules.Initialize(self, limited_doc_checks, is_html)
    self._ExecutePass(start_token, self._LintPass, stop_token=stop_token)
    self._lint_rules.Finalize(self._state_tracker)

  def _LintPass(self, token):
    """Checks an individual token for lint warnings/errors.

    Used to encapsulate the logic needed to check an individual token so that
    it can be passed to _ExecutePass.

    Args:
      token: The token to check.
    """
    self._lint_rules.CheckToken(token, self._state_tracker)

  def _ExecutePass(self, token, pass_function, stop_token=None):
    """Calls the given function for every token in the given token stream.

    As each token is passed to the given function, state is kept up to date
    and, depending on the error_trace flag, errors are either caught and
    reported, or allowed to bubble up so developers can see the full stack
    trace. If a parse error is specified, the pass will proceed as normal
    until the token causing the parse error is reached.

    Args:
      token: The first token in the token stream.
      pass_function: The function to call for each token in the token stream.
      stop_token: The last token to check (if given).

    Raises:
      Exception: If any error occurred while calling the given function.
    """
    self._state_tracker.Reset()
    while token:
      # When we are looking at a token and decided to delete the whole line,
      # we will delete all of them in the "HandleToken()" below. So the
      # current token and subsequent ones may already be deleted here. The
      # way we delete a token does not wipe out the previous and next
      # pointers of the deleted token. So we need to check the token itself
      # to make sure it is not deleted.
      if not token.is_deleted:
        # End the pass at the stop token.
        if stop_token and token is stop_token:
          return

        self._state_tracker.HandleToken(
            token, self._state_tracker.GetLastNonSpaceToken())
        pass_function(token)
        self._state_tracker.HandleAfterToken(token)

      token = token.next

578  tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo.py

@@ -1,578 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for computing dependency information for closurized JavaScript files.
Closurized JavaScript files express dependencies using goog.require and
goog.provide statements. In order for the linter to detect when a statement is
missing or unnecessary, all identifiers in the JavaScript file must first be
processed to determine if they constitute the creation or usage of a dependency.
"""
import re
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType
DEFAULT_EXTRA_NAMESPACES = [
'goog.testing.asserts',
'goog.testing.jsunit',
]
class UsedNamespace(object):
"""A type for information about a used namespace."""
def __init__(self, namespace, identifier, token, alias_definition):
"""Initializes the instance.
Args:
namespace: the namespace of an identifier used in the file
identifier: the complete identifier
token: the token that uses the namespace
alias_definition: a boolean stating whether the namespace is only to used
for an alias definition and should not be required.
"""
self.namespace = namespace
self.identifier = identifier
self.token = token
self.alias_definition = alias_definition
def GetLine(self):
return self.token.line_number
def __repr__(self):
return 'UsedNamespace(%s)' % ', '.join(
['%s=%s' % (k, repr(v)) for k, v in self.__dict__.iteritems()])
class ClosurizedNamespacesInfo(object):
"""Dependency information for closurized JavaScript files.
Processes token streams for dependency creation or usage and provides logic
for determining if a given require or provide statement is unnecessary or if
there are missing require or provide statements.
"""
def __init__(self, closurized_namespaces, ignored_extra_namespaces):
"""Initializes an instance the ClosurizedNamespacesInfo class.
Args:
closurized_namespaces: A list of namespace prefixes that should be
processed for dependency information. Non-matching namespaces are
ignored.
ignored_extra_namespaces: A list of namespaces that should not be reported
as extra regardless of whether they are actually used.
"""
self._closurized_namespaces = closurized_namespaces
self._ignored_extra_namespaces = (ignored_extra_namespaces +
DEFAULT_EXTRA_NAMESPACES)
self.Reset()
def Reset(self):
"""Resets the internal state to prepare for processing a new file."""
# A list of goog.provide tokens in the order they appeared in the file.
self._provide_tokens = []
# A list of goog.require tokens in the order they appeared in the file.
self._require_tokens = []
# Namespaces that are already goog.provided.
self._provided_namespaces = []
# Namespaces that are already goog.required.
self._required_namespaces = []
# Note that created_namespaces and used_namespaces contain both namespaces
# and identifiers because there are many existing cases where a method or
# constant is provided directly instead of its namespace. Ideally, these
# two lists would only have to contain namespaces.
# A list of tuples where the first element is the namespace of an identifier
# created in the file, the second is the identifier itself and the third is
# the line number where it's created.
self._created_namespaces = []
# A list of UsedNamespace instances.
self._used_namespaces = []
# A list of seemingly-unnecessary namespaces that are goog.required() and
# annotated with @suppress {extraRequire}.
self._suppressed_requires = []
# A list of goog.provide tokens which are duplicates.
self._duplicate_provide_tokens = []
# A list of goog.require tokens which are duplicates.
self._duplicate_require_tokens = []
# Whether this file is in a goog.scope. Someday, we may add support
# for checking scopified namespaces, but for now let's just fail
# in a more reasonable way.
self._scopified_file = False
# TODO(user): Handle the case where there are 2 different requires
# that can satisfy the same dependency, but only one is necessary.
def GetProvidedNamespaces(self):
"""Returns the namespaces which are already provided by this file.
Returns:
A list of strings where each string is a 'namespace' corresponding to an
existing goog.provide statement in the file being checked.
"""
return set(self._provided_namespaces)
def GetRequiredNamespaces(self):
"""Returns the namespaces which are already required by this file.
Returns:
A list of strings where each string is a 'namespace' corresponding to an
existing goog.require statement in the file being checked.
"""
return set(self._required_namespaces)
def IsExtraProvide(self, token):
"""Returns whether the given goog.provide token is unnecessary.
Args:
token: A goog.provide token.
Returns:
True if the given token corresponds to an unnecessary goog.provide
statement, otherwise False.
"""
namespace = tokenutil.GetStringAfterToken(token)
if self.GetClosurizedNamespace(namespace) is None:
return False
if token in self._duplicate_provide_tokens:
return True
# TODO(user): There's probably a faster way to compute this.
for created_namespace, created_identifier, _ in self._created_namespaces:
if namespace == created_namespace or namespace == created_identifier:
return False
return True
def IsExtraRequire(self, token):
"""Returns whether the given goog.require token is unnecessary.
Args:
token: A goog.require token.
Returns:
True if the given token corresponds to an unnecessary goog.require
statement, otherwise False.
"""
namespace = tokenutil.GetStringAfterToken(token)
if self.GetClosurizedNamespace(namespace) is None:
return False
if namespace in self._ignored_extra_namespaces:
return False
if token in self._duplicate_require_tokens:
return True
if namespace in self._suppressed_requires:
return False
# If the namespace contains a component that is initial caps, then that
# must be the last component of the namespace.
parts = namespace.split('.')
if len(parts) > 1 and parts[-2][0].isupper():
return True
# TODO(user): There's probably a faster way to compute this.
for ns in self._used_namespaces:
if (not ns.alias_definition and (
namespace == ns.namespace or namespace == ns.identifier)):
return False
return True
def GetMissingProvides(self):
"""Returns the dict of missing provided namespaces for the current file.
Returns:
Returns a dictionary of key as string and value as integer where each
string(key) is a namespace that should be provided by this file, but is
not and integer(value) is first line number where it's defined.
"""
missing_provides = dict()
for namespace, identifier, line_number in self._created_namespaces:
if (not self._IsPrivateIdentifier(identifier) and
namespace not in self._provided_namespaces and
identifier not in self._provided_namespaces and
namespace not in self._required_namespaces and
namespace not in missing_provides):
missing_provides[namespace] = line_number
return missing_provides
def GetMissingRequires(self):
"""Returns the dict of missing required namespaces for the current file.
For each non-private identifier used in the file, find either a
goog.require, goog.provide or a created identifier that satisfies it.
goog.require statements can satisfy the identifier by requiring either the
namespace of the identifier or the identifier itself. goog.provide
statements can satisfy the identifier by providing the namespace of the
identifier. A created identifier can only satisfy the used identifier if
it matches it exactly (necessary since things can be defined on a
namespace in more than one file). Note that provided namespaces should be
a subset of created namespaces, but we check both because in some cases we
can't always detect the creation of the namespace.
Returns:
Returns a dictionary of key as string and value integer where each
string(key) is a namespace that should be required by this file, but is
not and integer(value) is first line number where it's used.
"""
external_dependencies = set(self._required_namespaces)
# Assume goog namespace is always available.
external_dependencies.add('goog')
# goog.module is treated as a builtin, too (for goog.module.get).
external_dependencies.add('goog.module')
created_identifiers = set()
for unused_namespace, identifier, unused_line_number in (
self._created_namespaces):
created_identifiers.add(identifier)
missing_requires = dict()
illegal_alias_statements = dict()
def ShouldRequireNamespace(namespace, identifier):
"""Checks if a namespace would normally be required."""
return (
not self._IsPrivateIdentifier(identifier) and
namespace not in external_dependencies and
namespace not in self._provided_namespaces and
identifier not in external_dependencies and
identifier not in created_identifiers and
namespace not in missing_requires)
# First check all the used identifiers where we know that their namespace
# needs to be provided (unless they are optional).
for ns in self._used_namespaces:
namespace = ns.namespace
identifier = ns.identifier
if (not ns.alias_definition and
ShouldRequireNamespace(namespace, identifier)):
missing_requires[namespace] = ns.GetLine()
# Now that all required namespaces are known, we can check if the alias
# definitions (that are likely being used for typeannotations that don't
# need explicit goog.require statements) are already covered. If not
# the user shouldn't use the alias.
for ns in self._used_namespaces:
if (not ns.alias_definition or
not ShouldRequireNamespace(ns.namespace, ns.identifier)):
continue
if self._FindNamespace(ns.identifier, self._provided_namespaces,
created_identifiers, external_dependencies,
missing_requires):
continue
namespace = ns.identifier.rsplit('.', 1)[0]
illegal_alias_statements[namespace] = ns.token
return missing_requires, illegal_alias_statements
def _FindNamespace(self, identifier, *namespaces_list):
"""Finds the namespace of an identifier given a list of other namespaces.
Args:
identifier: An identifier whose parent needs to be defined.
e.g. for goog.bar.foo we search something that provides
goog.bar.
*namespaces_list: var args of iterables of namespace identifiers
Returns:
The namespace that the given identifier is part of or None.
"""
identifier = identifier.rsplit('.', 1)[0]
identifier_prefix = identifier + '.'
for namespaces in namespaces_list:
for namespace in namespaces:
if namespace == identifier or namespace.startswith(identifier_prefix):
return namespace
return None
def _IsPrivateIdentifier(self, identifier):
"""Returns whether the given identifier is private."""
pieces = identifier.split('.')
for piece in pieces:
if piece.endswith('_'):
return True
return False
def IsFirstProvide(self, token):
"""Returns whether token is the first provide token."""
return self._provide_tokens and token == self._provide_tokens[0]
def IsFirstRequire(self, token):
"""Returns whether token is the first require token."""
return self._require_tokens and token == self._require_tokens[0]
def IsLastProvide(self, token):
"""Returns whether token is the last provide token."""
return self._provide_tokens and token == self._provide_tokens[-1]
def IsLastRequire(self, token):
"""Returns whether token is the last require token."""
return self._require_tokens and token == self._require_tokens[-1]
def ProcessToken(self, token, state_tracker):
"""Processes the given token for dependency information.
Args:
token: The token to process.
state_tracker: The JavaScript state tracker.
"""
# Note that this method is in the critical path for the linter and has been
# optimized for performance in the following ways:
# - Tokens are checked by type first to minimize the number of function
# calls necessary to determine if action needs to be taken for the token.
    # - The most common token types are checked for first.
    # - The number of function calls has been minimized (thus the length of
    #   this function).
if token.type == TokenType.IDENTIFIER:
# TODO(user): Consider saving the whole identifier in metadata.
whole_identifier_string = tokenutil.GetIdentifierForToken(token)
if whole_identifier_string is None:
      # We only want to process the identifier one time. If the whole
      # identifier string is None, this token was part of a multi-token
      # identifier but was not its first token.
return
# In the odd case that a goog.require is encountered inside a function,
# just ignore it (e.g. dynamic loading in test runners).
if token.string == 'goog.require' and not state_tracker.InFunction():
self._require_tokens.append(token)
namespace = tokenutil.GetStringAfterToken(token)
if namespace in self._required_namespaces:
self._duplicate_require_tokens.append(token)
else:
self._required_namespaces.append(namespace)
# If there is a suppression for the require, add a usage for it so it
# gets treated as a regular goog.require (i.e. still gets sorted).
if self._HasSuppression(state_tracker, 'extraRequire'):
self._suppressed_requires.append(namespace)
self._AddUsedNamespace(state_tracker, namespace, token)
elif token.string == 'goog.provide':
self._provide_tokens.append(token)
namespace = tokenutil.GetStringAfterToken(token)
if namespace in self._provided_namespaces:
self._duplicate_provide_tokens.append(token)
else:
self._provided_namespaces.append(namespace)
# If there is a suppression for the provide, add a creation for it so it
# gets treated as a regular goog.provide (i.e. still gets sorted).
if self._HasSuppression(state_tracker, 'extraProvide'):
self._AddCreatedNamespace(state_tracker, namespace, token.line_number)
elif token.string == 'goog.scope':
self._scopified_file = True
elif token.string == 'goog.setTestOnly':
      # Since the message is optional, we only scan the current line for it.
for t in tokenutil.GetAllTokensInSameLine(token):
if t.type == TokenType.STRING_TEXT:
message = t.string
if re.match(r'^\w+(\.\w+)+$', message):
# This looks like a namespace. If it's a Closurized namespace,
# consider it created.
base_namespace = message.split('.', 1)[0]
if base_namespace in self._closurized_namespaces:
self._AddCreatedNamespace(state_tracker, message,
token.line_number)
break
else:
jsdoc = state_tracker.GetDocComment()
if token.metadata and token.metadata.aliased_symbol:
whole_identifier_string = token.metadata.aliased_symbol
elif (token.string == 'goog.module.get' and
not self._HasSuppression(state_tracker, 'extraRequire')):
# Cannot use _AddUsedNamespace as this is not an identifier, but
# already the entire namespace that's required.
namespace = tokenutil.GetStringAfterToken(token)
namespace = UsedNamespace(namespace, namespace, token,
alias_definition=False)
self._used_namespaces.append(namespace)
if jsdoc and jsdoc.HasFlag('typedef'):
self._AddCreatedNamespace(state_tracker, whole_identifier_string,
token.line_number,
namespace=self.GetClosurizedNamespace(
whole_identifier_string))
else:
is_alias_definition = (token.metadata and
token.metadata.is_alias_definition)
self._AddUsedNamespace(state_tracker, whole_identifier_string,
token, is_alias_definition)
elif token.type == TokenType.SIMPLE_LVALUE:
identifier = token.values['identifier']
start_token = tokenutil.GetIdentifierStart(token)
if start_token and start_token != token:
# Multi-line identifier being assigned. Get the whole identifier.
identifier = tokenutil.GetIdentifierForToken(start_token)
else:
start_token = token
# If an alias is defined on the start_token, use it instead.
if (start_token and
start_token.metadata and
start_token.metadata.aliased_symbol and
not start_token.metadata.is_alias_definition):
identifier = start_token.metadata.aliased_symbol
if identifier:
namespace = self.GetClosurizedNamespace(identifier)
if state_tracker.InFunction():
self._AddUsedNamespace(state_tracker, identifier, token)
elif namespace and namespace != 'goog':
self._AddCreatedNamespace(state_tracker, identifier,
token.line_number, namespace=namespace)
elif token.type == TokenType.DOC_FLAG:
flag = token.attached_object
flag_type = flag.flag_type
if flag and flag.HasType() and flag.jstype:
is_interface = state_tracker.GetDocComment().HasFlag('interface')
if flag_type == 'implements' or (flag_type == 'extends'
and is_interface):
identifier = flag.jstype.alias or flag.jstype.identifier
self._AddUsedNamespace(state_tracker, identifier, token)
# Since we process doctypes only for implements and extends, the
# type is a simple one and we don't need any iteration for subtypes.
def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
namespace=None):
"""Adds the namespace of an identifier to the list of created namespaces.
If the identifier is annotated with a 'missingProvide' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: The identifier to add.
line_number: Line number where namespace is created.
namespace: The namespace of the identifier or None if the identifier is
also the namespace.
"""
if not namespace:
namespace = identifier
if self._HasSuppression(state_tracker, 'missingProvide'):
return
self._created_namespaces.append([namespace, identifier, line_number])
def _AddUsedNamespace(self, state_tracker, identifier, token,
is_alias_definition=False):
"""Adds the namespace of an identifier to the list of used namespaces.
If the identifier is annotated with a 'missingRequire' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: An identifier which has been used.
token: The token in which the namespace is used.
is_alias_definition: If the used namespace is part of an alias_definition.
        Aliased symbols need their parent namespace to be available; if it is
        not yet required through another symbol, an error will be thrown.
"""
if self._HasSuppression(state_tracker, 'missingRequire'):
return
namespace = self.GetClosurizedNamespace(identifier)
    # b/5362203 If it's a variable in scope then it's not a required namespace.
if namespace and not state_tracker.IsVariableInScope(namespace):
namespace = UsedNamespace(namespace, identifier, token,
is_alias_definition)
self._used_namespaces.append(namespace)
def _HasSuppression(self, state_tracker, suppression):
jsdoc = state_tracker.GetDocComment()
return jsdoc and suppression in jsdoc.suppressions
def GetClosurizedNamespace(self, identifier):
"""Given an identifier, returns the namespace that identifier is from.
Args:
identifier: The identifier to extract a namespace from.
Returns:
The namespace the given identifier resides in, or None if one could not
be found.
"""
if identifier.startswith('goog.global'):
# Ignore goog.global, since it is, by definition, global.
return None
parts = identifier.split('.')
for namespace in self._closurized_namespaces:
if not identifier.startswith(namespace + '.'):
continue
      # The namespace for a class is the shortest prefix ending in a class
      # name, which starts with a capital letter but is not an all-caps word.
#
# We ultimately do not want to allow requiring or providing of inner
# classes/enums. Instead, a file should provide only the top-level class
# and users should require only that.
namespace = []
for part in parts:
if part == 'prototype' or part.isupper():
return '.'.join(namespace)
namespace.append(part)
if part[0].isupper():
return '.'.join(namespace)
      # At this point, we know there's no class or enum, so the namespace is
      # just the identifier with the last part removed; apply, inherits, and
      # call are also stripped.
if parts[-1] in ('apply', 'inherits', 'call'):
parts.pop()
parts.pop()
# If the last part ends with an underscore, it is a private variable,
# method, or enum. The namespace is whatever is before it.
if parts and parts[-1].endswith('_'):
parts.pop()
return '.'.join(parts)
return None
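def _ExampleGetClosurizedNamespace():
  """Illustrative sketch, not part of the original module: namespace
  extraction for a few identifiers under a hypothetical 'package' root."""
  info = ClosurizedNamespacesInfo(['package'], [])
  assert info.GetClosurizedNamespace('package.Foo.methodName') == 'package.Foo'
  assert info.GetClosurizedNamespace('package.sub.methodName') == 'package.sub'
  assert info.GetClosurizedNamespace('goog.global.x') is None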

873
tools/closure_linter/build/lib/closure_linter/closurizednamespacesinfo_test.py

@@ -1,873 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for ClosurizedNamespacesInfo."""
import unittest as googletest
from closure_linter import aliaspass
from closure_linter import closurizednamespacesinfo
from closure_linter import ecmametadatapass
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType
def _ToLineDict(illegal_alias_stmts):
"""Replaces tokens with the respective line number."""
return {k: v.line_number for k, v in illegal_alias_stmts.iteritems()}
class ClosurizedNamespacesInfoTest(googletest.TestCase):
"""Tests for ClosurizedNamespacesInfo."""
_test_cases = {
'goog.global.anything': None,
'package.CONSTANT': 'package',
'package.methodName': 'package',
'package.subpackage.methodName': 'package.subpackage',
'package.subpackage.methodName.apply': 'package.subpackage',
'package.ClassName.something': 'package.ClassName',
'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
'package.ClassName.CONSTANT': 'package.ClassName',
'package.namespace.CONSTANT.methodName': 'package.namespace',
'package.ClassName.inherits': 'package.ClassName',
'package.ClassName.apply': 'package.ClassName',
'package.ClassName.methodName.apply': 'package.ClassName',
'package.ClassName.methodName.call': 'package.ClassName',
'package.ClassName.prototype.methodName': 'package.ClassName',
'package.ClassName.privateMethod_': 'package.ClassName',
'package.className.privateProperty_': 'package.className',
'package.className.privateProperty_.methodName': 'package.className',
'package.ClassName.PrivateEnum_': 'package.ClassName',
'package.ClassName.prototype.methodName.apply': 'package.ClassName',
'package.ClassName.property.subProperty': 'package.ClassName',
'package.className.prototype.something.somethingElse': 'package.className'
}
def testGetClosurizedNamespace(self):
"""Tests that the correct namespace is returned for various identifiers."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'], ignored_extra_namespaces=[])
for identifier, expected_namespace in self._test_cases.items():
actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
self.assertEqual(
expected_namespace,
actual_namespace,
'expected namespace "' + str(expected_namespace) +
'" for identifier "' + str(identifier) + '" but was "' +
str(actual_namespace) + '"')
def testIgnoredExtraNamespaces(self):
"""Tests that ignored_extra_namespaces are ignored."""
token = self._GetRequireTokens('package.Something')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'],
ignored_extra_namespaces=['package.Something'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should be valid since it is in ignored namespaces.')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be invalid since it is not in ignored namespaces.')
def testIsExtraProvide_created(self):
"""Tests that provides for created namespaces are not extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_createdIdentifier(self):
"""Tests that provides for created identifiers are not extra."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_notCreated(self):
"""Tests that provides for non-created namespaces are extra."""
input_lines = ['goog.provide(\'package.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is not created.')
def testIsExtraProvide_notCreatedMultipartClosurizedNamespace(self):
"""Tests that provides for non-created namespaces are extra."""
input_lines = ['goog.provide(\'multi.part.namespace.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['multi.part'])
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is not created.')
def testIsExtraProvide_duplicate(self):
"""Tests that providing a namespace twice makes the second one extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
# Advance to the second goog.provide token.
token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is already provided.')
def testIsExtraProvide_notClosurized(self):
"""Tests that provides of non-closurized namespaces are not extra."""
input_lines = ['goog.provide(\'notclosurized.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_used(self):
"""Tests that requires for used namespaces are not extra."""
input_lines = [
'goog.require(\'package.Foo\');',
'var x = package.Foo.methodName();'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is used.')
def testIsExtraRequire_usedIdentifier(self):
"""Tests that requires for used methods on classes are extra."""
input_lines = [
'goog.require(\'package.Foo.methodName\');',
'var x = package.Foo.methodName();'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should require the package, not the method specifically.')
def testIsExtraRequire_notUsed(self):
"""Tests that requires for unused namespaces are extra."""
input_lines = ['goog.require(\'package.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be extra since it is not used.')
def testIsExtraRequire_notUsedMultiPartClosurizedNamespace(self):
"""Tests unused require with multi-part closurized namespaces."""
input_lines = ['goog.require(\'multi.part.namespace.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['multi.part'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be extra since it is not used.')
def testIsExtraRequire_notClosurized(self):
"""Tests that requires of non-closurized namespaces are not extra."""
input_lines = ['goog.require(\'notclosurized.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_objectOnClass(self):
"""Tests that requiring an object on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'The whole class, not the object, should be required.')
def testIsExtraRequire_constantOnClass(self):
"""Tests that requiring a constant on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.CONSTANT\');',
'var x = package.Foo.CONSTANT',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'The class, not the constant, should be required.')
def testIsExtraRequire_constantNotOnClass(self):
"""Tests that requiring a constant not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.CONSTANT\');',
'var x = package.subpackage.CONSTANT',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Constants can be required except on classes.')
def testIsExtraRequire_methodNotOnClass(self):
"""Tests that requiring a method not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.method\');',
'var x = package.subpackage.method()',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Methods can be required except on classes.')
def testIsExtraRequire_defaults(self):
"""Tests that there are no warnings about extra requires for test utils"""
input_lines = ['goog.require(\'goog.testing.jsunit\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['goog'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is for testing.')
def testGetMissingProvides_provided(self):
"""Tests that provided functions don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
namespaces_info = self._GetNamespacesInfoForScript(
input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedIdentifier(self):
"""Tests that provided identifiers don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedParentIdentifier(self):
"""Tests that provided identifiers on a class don't cause a missing provide
on objects attached to that class."""
input_lines = [
'goog.provide(\'package.foo.ClassName\');',
'package.foo.ClassName.methodName = function() {};',
'package.foo.ClassName.ObjectName = 1;',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_unprovided(self):
"""Tests that unprovided functions cause a missing provide."""
input_lines = ['package.Foo = function() {};']
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_provides = namespaces_info.GetMissingProvides()
self.assertEquals(1, len(missing_provides))
missing_provide = missing_provides.popitem()
self.assertEquals('package.Foo', missing_provide[0])
self.assertEquals(1, missing_provide[1])
def testGetMissingProvides_privatefunction(self):
"""Tests that unprovided private functions don't cause a missing provide."""
input_lines = ['package.Foo_ = function() {};']
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_required(self):
"""Tests that required namespaces don't cause a missing provide."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName = function() {};'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_required(self):
"""Tests that required namespaces don't cause a missing require."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_requiredIdentifier(self):
"""Tests that required namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_requiredNamespace(self):
"""Tests that required namespaces satisfy the namespace."""
input_lines = [
'goog.require(\'package.soy.fooTemplate\');',
'render(package.soy.fooTemplate);'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_requiredParentClass(self):
"""Tests that requiring a parent class of an object is sufficient to prevent
a missing require on that object."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();',
'package.Foo.methodName(package.Foo.ObjectName);'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_unrequired(self):
"""Tests that unrequired namespaces cause a missing require."""
input_lines = ['package.Foo();']
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(1, len(missing_requires))
missing_req = missing_requires.popitem()
self.assertEquals('package.Foo', missing_req[0])
self.assertEquals(1, missing_req[1])
def testGetMissingRequires_provided(self):
"""Tests that provided namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_created(self):
"""Tests that created namespaces do not satisfy usage of an identifier."""
input_lines = [
'package.Foo = function();',
'package.Foo.methodName();',
'package.Foo.anotherMethodName1();',
'package.Foo.anotherMethodName2();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(1, len(missing_requires))
missing_require = missing_requires.popitem()
self.assertEquals('package.Foo', missing_require[0])
# Make sure line number of first occurrence is reported
self.assertEquals(2, missing_require[1])
def testGetMissingRequires_createdIdentifier(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.methodName = function();',
'package.Foo.methodName();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_implements(self):
"""Tests that a parametrized type requires the correct identifier."""
input_lines = [
'/** @constructor @implements {package.Bar<T>} */',
'package.Foo = function();',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertItemsEqual({'package.Bar': 1}, missing_requires)
def testGetMissingRequires_objectOnClass(self):
"""Tests that we should require a class, not the object on the class."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(1, len(missing_requires),
'The whole class, not the object, should be required.')
def testGetMissingRequires_variableWithSameName(self):
"""Tests that we should not goog.require variables and parameters.
b/5362203 Variables in scope are not missing namespaces.
"""
input_lines = [
'goog.provide(\'Foo\');',
'Foo.A = function();',
'Foo.A.prototype.method = function(ab) {',
' if (ab) {',
' var docs;',
' var lvalue = new Obj();',
' // Variable in scope hence not goog.require here.',
' docs.foo.abc = 1;',
' lvalue.next();',
' }',
' // Since js is function scope this should also not goog.require.',
' docs.foo.func();',
' // Its not a variable in scope hence goog.require.',
' dummy.xyz.reset();',
' return this.method2();',
'};',
'Foo.A.prototype.method1 = function(docs, abcd, xyz) {',
' // Parameter hence not goog.require.',
' docs.nodes.length = 2;',
' lvalue.abc.reset();',
'};'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo',
'docs',
'lvalue',
'dummy'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(2, len(missing_requires))
self.assertItemsEqual(
{'dummy.xyz': 14,
'lvalue.abc': 20}, missing_requires)
def testIsFirstProvide(self):
"""Tests operation of the isFirstProvide method."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsFirstProvide(token))
def testGetWholeIdentifierString(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.',
' veryLong.',
' identifier;'
]
token = testutil.TokenizeSource(input_lines)
self.assertEquals('package.Foo.veryLong.identifier',
tokenutil.GetIdentifierForToken(token))
self.assertEquals(None,
tokenutil.GetIdentifierForToken(token.next))
def testScopified(self):
"""Tests that a goog.scope call is noticed."""
input_lines = [
'goog.scope(function() {',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
self.assertTrue(namespaces_info._scopified_file)
def testScope_unusedAlias(self):
"""Tests that an unused alias symbol is illegal."""
input_lines = [
'goog.scope(function() {',
'var Event = goog.events.Event;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
def testScope_usedMultilevelAlias(self):
"""Tests that an used alias symbol in a deep namespace is ok."""
input_lines = [
'goog.require(\'goog.Events\');',
'goog.scope(function() {',
'var Event = goog.Events.DeepNamespace.Event;',
'Event();',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
def testScope_usedAlias(self):
"""Tests that aliased symbols result in correct requires."""
input_lines = [
'goog.scope(function() {',
'var Event = goog.events.Event;',
'var dom = goog.dom;',
'Event(dom.classes.get);',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, illegal_alias_stmts)
self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4},
missing_requires)
def testModule_alias(self):
"""Tests that goog.module style aliases are supported."""
input_lines = [
'goog.module(\'test.module\');',
'var Unused = goog.require(\'goog.Unused\');',
'var AliasedClass = goog.require(\'goog.AliasedClass\');',
'var x = new AliasedClass();',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
namespaceToken = self._GetRequireTokens('goog.AliasedClass')
self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
'AliasedClass should be marked as used')
unusedToken = self._GetRequireTokens('goog.Unused')
self.assertTrue(namespaces_info.IsExtraRequire(unusedToken),
'Unused should be marked as not used')
def testModule_aliasInScope(self):
"""Tests that goog.module style aliases are supported."""
input_lines = [
'goog.module(\'test.module\');',
'var AliasedClass = goog.require(\'goog.AliasedClass\');',
'goog.scope(function() {',
'var x = new AliasedClass();',
'});',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
namespaceToken = self._GetRequireTokens('goog.AliasedClass')
self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
'AliasedClass should be marked as used')
def testModule_getAlwaysProvided(self):
"""Tests that goog.module.get is recognized as a built-in."""
input_lines = [
'goog.provide(\'test.MyClass\');',
'goog.require(\'goog.someModule\');',
'goog.scope(function() {',
'var someModule = goog.module.get(\'goog.someModule\');',
'test.MyClass = function() {};',
'});',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
self.assertEquals({}, namespaces_info.GetMissingRequires()[0])
def testModule_requireForGet(self):
"""Tests that goog.module.get needs a goog.require call."""
input_lines = [
'goog.provide(\'test.MyClass\');',
'function foo() {',
' var someModule = goog.module.get(\'goog.someModule\');',
' someModule.doSth();',
'}',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
self.assertEquals({'goog.someModule': 3},
namespaces_info.GetMissingRequires()[0])
def testScope_usedTypeAlias(self):
"""Tests aliased symbols in type annotations."""
input_lines = [
'goog.scope(function() {',
'var Event = goog.events.Event;',
'/** @type {Event} */;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
def testScope_partialAlias_typeOnly(self):
"""Tests a partial alias only used in type annotations.
In this example, some goog.events namespace would need to be required
so that evaluating goog.events.bar doesn't throw an error.
"""
input_lines = [
'goog.scope(function() {',
'var bar = goog.events.bar;',
'/** @type {bar.Foo} */;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
def testScope_partialAlias(self):
"""Tests a partial alias in conjunction with a type annotation.
In this example, the partial alias is already defined by another type,
therefore the doc-only type doesn't need to be required.
"""
input_lines = [
'goog.scope(function() {',
'var bar = goog.events.bar;',
'/** @type {bar.Event} */;',
        'bar.EventType();',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({'goog.events.bar.EventType': 4}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
def testScope_partialAliasRequires(self):
"""Tests partial aliases with correct requires."""
input_lines = [
'goog.require(\'goog.events.bar.EventType\');',
'goog.scope(function() {',
'var bar = goog.events.bar;',
'/** @type {bar.Event} */;',
        'bar.EventType();',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
def testScope_partialAliasRequiresBoth(self):
"""Tests partial aliases with correct requires."""
input_lines = [
'goog.require(\'goog.events.bar.Event\');',
'goog.require(\'goog.events.bar.EventType\');',
'goog.scope(function() {',
'var bar = goog.events.bar;',
'/** @type {bar.Event} */;',
        'bar.EventType();',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
event_token = self._GetRequireTokens('goog.events.bar.Event')
self.assertTrue(namespaces_info.IsExtraRequire(event_token))
def testScope_partialAliasNoSubtypeRequires(self):
"""Tests that partial aliases don't yield subtype requires (regression)."""
input_lines = [
'goog.provide(\'goog.events.Foo\');',
'goog.scope(function() {',
'goog.events.Foo = {};',
        'var Foo = goog.events.Foo;',
        'Foo.CssName_ = {};',
        'var CssName_ = Foo.CssName_;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
def testScope_aliasNamespace(self):
"""Tests that an unused alias namespace is not required when available.
    In the example, goog.events.Bar is not required because the namespace
    goog.events is already available through the require of goog.events.Foo.
"""
input_lines = [
'goog.require(\'goog.events.Foo\');',
'goog.scope(function() {',
'var Bar = goog.events.Bar;',
'/** @type {Bar} */;',
'goog.events.Foo;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
def testScope_aliasNamespaceIllegal(self):
"""Tests that an unused alias namespace is not required when available."""
input_lines = [
'goog.scope(function() {',
'var Bar = goog.events.Bar;',
'/** @type {Bar} */;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
def testScope_provides(self):
"""Tests that aliased symbols result in correct provides."""
input_lines = [
'goog.scope(function() {',
'goog.bar = {};',
'var bar = goog.bar;',
'bar.Foo = {};',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_provides = namespaces_info.GetMissingProvides()
self.assertEquals({'goog.bar.Foo': 4}, missing_provides)
_, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, illegal_alias_stmts)
def testSetTestOnlyNamespaces(self):
"""Tests that a namespace in setTestOnly makes it a valid provide."""
namespaces_info = self._GetNamespacesInfoForScript([
'goog.setTestOnly(\'goog.foo.barTest\');'
], ['goog'])
token = self._GetProvideTokens('goog.foo.barTest')
self.assertFalse(namespaces_info.IsExtraProvide(token))
token = self._GetProvideTokens('goog.foo.bazTest')
self.assertTrue(namespaces_info.IsExtraProvide(token))
def testSetTestOnlyComment(self):
"""Ensure a comment in setTestOnly does not cause a created namespace."""
namespaces_info = self._GetNamespacesInfoForScript([
'goog.setTestOnly(\'this is a comment\');'
], ['goog'])
self.assertEquals(
[], namespaces_info._created_namespaces,
'A comment in setTestOnly should not modify created namespaces.')
def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None):
_, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
script, closurized_namespaces)
return namespaces_info
def _GetStartTokenAndNamespacesInfoForScript(
self, script, closurized_namespaces):
token = testutil.TokenizeSource(script)
return token, self._GetInitializedNamespacesInfo(
token, closurized_namespaces, [])
def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
ignored_extra_namespaces):
"""Returns a namespaces info initialized with the given token stream."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=closurized_namespaces,
ignored_extra_namespaces=ignored_extra_namespaces)
state_tracker = javascriptstatetracker.JavaScriptStateTracker()
ecma_pass = ecmametadatapass.EcmaMetaDataPass()
ecma_pass.Process(token)
state_tracker.DocFlagPass(token, error_handler=None)
alias_pass = aliaspass.AliasPass(closurized_namespaces)
alias_pass.Process(token)
while token:
state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
namespaces_info.ProcessToken(token, state_tracker)
state_tracker.HandleAfterToken(token)
token = token.next
return namespaces_info
def _GetProvideTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return testutil.TokenizeSource([line_text])
def _GetRequireTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return testutil.TokenizeSource([line_text])
if __name__ == '__main__':
googletest.main()

16
tools/closure_linter/build/lib/closure_linter/common/__init__.py

@@ -1,16 +0,0 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package indicator for gjslint.common."""

65
tools/closure_linter/build/lib/closure_linter/common/error.py

@@ -1,65 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error object commonly used in linters."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class Error(object):
"""Object representing a style error."""
def __init__(self, code, message, token=None, position=None, fix_data=None):
"""Initialize the error object.
Args:
code: The numeric error code.
message: The error message string.
token: The tokens.Token where the error occurred.
position: The position of the error within the token.
fix_data: Data to be used in autofixing. Codes with fix_data are:
GOOG_REQUIRES_NOT_ALPHABETIZED - List of string value tokens that are
class names in goog.requires calls.
"""
self.code = code
self.message = message
self.token = token
self.position = position
if token:
self.start_index = token.start_index
else:
self.start_index = 0
self.fix_data = fix_data
if self.position:
self.start_index += self.position.start
def Compare(a, b):
"""Compare two error objects, by source code order.
Args:
a: First error object.
b: Second error object.
Returns:
A Negative/0/Positive number when a is before/the same as/after b.
"""
line_diff = a.token.line_number - b.token.line_number
if line_diff:
return line_diff
return a.start_index - b.start_index
Compare = staticmethod(Compare)
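def _ExampleCompare():
  """Illustrative sketch, not part of the original module: sorting errors
  into source order with Compare. _FakeToken is a hypothetical stand-in for
  tokens.Token."""
  class _FakeToken(object):
    def __init__(self, line_number, start_index):
      self.line_number = line_number
      self.start_index = start_index
  errors = [Error(2, 'second', _FakeToken(9, 0)),
            Error(1, 'first', _FakeToken(3, 0))]
  errors.sort(Error.Compare)  # Python 2 cmp-style sort, as in this codebase.
  return [e.message for e in errors]  # ['first', 'second']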

46
tools/closure_linter/build/lib/closure_linter/common/erroraccumulator.py

@@ -1,46 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error handler class that accumulates an array of errors."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import errorhandler
class ErrorAccumulator(errorhandler.ErrorHandler):
"""Error handler object that accumulates errors in a list."""
def __init__(self):
self._errors = []
def HandleError(self, error):
"""Append the error to the list.
Args:
error: The error object
"""
self._errors.append(error)
def GetErrors(self):
"""Returns the accumulated errors.
Returns:
A sequence of errors.
"""
return self._errors
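def _ExampleAccumulate():
  """Illustrative sketch, not part of the original module: accumulating and
  reading back errors. Plain strings stand in for real error objects."""
  accumulator = ErrorAccumulator()
  accumulator.HandleError('first error')
  accumulator.HandleError('second error')
  return accumulator.GetErrors()  # ['first error', 'second error']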

61
tools/closure_linter/build/lib/closure_linter/common/errorhandler.py

@@ -1,61 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for a linter error handler.
Error handlers aggregate a set of errors from multiple files and can optionally
perform some action based on the reported errors, for example, logging the error
or automatically fixing it.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class ErrorHandler(object):
"""Error handler interface."""
def __init__(self):
if self.__class__ == ErrorHandler:
raise NotImplementedError('class ErrorHandler is abstract')
def HandleFile(self, filename, first_token):
"""Notifies this ErrorHandler that subsequent errors are in filename.
Args:
filename: The file being linted.
first_token: The first token of the file.
"""
def HandleError(self, error):
"""Append the error to the list.
Args:
error: The error object
"""
def FinishFile(self):
"""Finishes handling the current file.
Should be called after all errors in a file have been handled.
"""
def GetErrors(self):
"""Returns the accumulated errors.
Returns:
A sequence of errors.
"""

52
tools/closure_linter/build/lib/closure_linter/common/erroroutput.py

@@ -1,52 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to format errors."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'nnaze@google.com (Nathan Naze)')
def GetUnixErrorOutput(filename, error, new_error=False):
"""Get a output line for an error in UNIX format."""
line = ''
if error.token:
line = '%d' % error.token.line_number
error_code = '%04d' % error.code
if new_error:
error_code = 'New Error ' + error_code
return '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
def GetErrorOutput(error, new_error=False):
"""Get a output line for an error in regular format."""
line = ''
if error.token:
line = 'Line %d, ' % error.token.line_number
code = 'E:%04d' % error.code
error_message = error.message
if new_error:
error_message = 'New Error ' + error_message
  return '%s%s: %s' % (line, code, error_message)
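def _ExampleErrorOutput():
  """Illustrative sketch, not part of the original module: the two output
  formats for a single error. The stub classes are hypothetical stand-ins."""
  class _Token(object):
    line_number = 12
  class _Error(object):
    token = _Token()
    code = 220
    message = 'No docs found for member'
  error = _Error()
  assert GetUnixErrorOutput('foo.js', error) == (
      'foo.js:12:(0220) No docs found for member')
  assert GetErrorOutput(error) == 'Line 12, E:0220: No docs found for member'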

115
tools/closure_linter/build/lib/closure_linter/common/filetestcase.py

@@ -1,115 +0,0 @@
#!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test case that runs a checker on a file, matching errors against annotations.
Runs the given checker on the given file, accumulating all errors. The list
of errors is then matched against those annotated in the file. Based heavily
on devtools/javascript/gpylint/full_test.py.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import re
import gflags as flags
import unittest as googletest
from closure_linter.common import erroraccumulator
class AnnotatedFileTestCase(googletest.TestCase):
"""Test case to run a linter against a single file."""
  # Matches an error identifier made of all-caps letters and underscores.
_MESSAGE = {'msg': '[A-Z][A-Z_]+'}
# Matches a //, followed by an optional line number with a +/-, followed by a
# list of message IDs. Used to extract expected messages from testdata files.
# TODO(robbyw): Generalize to use different commenting patterns.
_EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)
def __init__(self, filename, lint_callable, converter):
"""Create a single file lint test case.
Args:
filename: Filename to test.
lint_callable: Callable that lints a file. This is usually runner.Run().
converter: Function taking an error string and returning an error code.
"""
googletest.TestCase.__init__(self, 'runTest')
self._filename = filename
self._messages = []
self._lint_callable = lint_callable
self._converter = converter
def setUp(self):
flags.FLAGS.dot_on_next_line = True
def tearDown(self):
flags.FLAGS.dot_on_next_line = False
def shortDescription(self):
"""Provides a description for the test."""
return 'Run linter on %s' % self._filename
def runTest(self):
"""Runs the test."""
try:
filename = self._filename
stream = open(filename)
except IOError as ex:
raise IOError('Could not find testdata resource for %s: %s' %
(self._filename, ex))
expected = self._GetExpectedMessages(stream)
got = self._ProcessFileAndGetMessages(filename)
self.assertEqual(expected, got)
def _GetExpectedMessages(self, stream):
"""Parse a file and get a sorted list of expected messages."""
messages = []
for i, line in enumerate(stream):
match = self._EXPECTED_RE.search(line)
if match:
line = match.group('line')
msg_ids = match.group('msgs')
if line is None:
line = i + 1
elif line.startswith('+') or line.startswith('-'):
line = i + 1 + int(line)
else:
line = int(line)
for msg_id in msg_ids.split(','):
# Ignore a spurious message from the license preamble.
if msg_id != 'WITHOUT':
messages.append((line, self._converter(msg_id.strip())))
stream.seek(0)
messages.sort()
return messages
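  def _ExampleExpectedAnnotation(self):
    """Illustrative sketch, not part of the original case: the annotation
    format _EXPECTED_RE recognizes. The message ID is a hypothetical example."""
    match = self._EXPECTED_RE.search('var x = 1  // +1: MISSING_SEMICOLON')
    assert match.group('line') == '+1'
    assert match.group('msgs') == 'MISSING_SEMICOLON'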
def _ProcessFileAndGetMessages(self, filename):
"""Trap gjslint's output parse it to get messages added."""
error_accumulator = erroraccumulator.ErrorAccumulator()
self._lint_callable(filename, error_accumulator)
errors = error_accumulator.GetErrors()
# Convert to expected tuple format.
error_msgs = [(error.token.line_number, error.code) for error in errors]
error_msgs.sort()
return error_msgs

170
tools/closure_linter/build/lib/closure_linter/common/htmlutil.py

@@ -1,170 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with HTML."""
__author__ = ('robbyw@google.com (Robert Walker)')
import cStringIO
import formatter
import htmllib
import HTMLParser
import re
class ScriptExtractor(htmllib.HTMLParser):
"""Subclass of HTMLParser that extracts script contents from an HTML file.
Also inserts appropriate blank lines so that line numbers in the extracted
code match the line numbers in the original HTML.
"""
def __init__(self):
"""Initialize a ScriptExtractor."""
htmllib.HTMLParser.__init__(self, formatter.NullFormatter())
self._in_script = False
self._text = ''
def start_script(self, attrs):
"""Internal handler for the start of a script tag.
Args:
attrs: The attributes of the script tag, as a list of tuples.
"""
for attribute in attrs:
if attribute[0].lower() == 'src':
# Skip script tags with a src specified.
return
self._in_script = True
def end_script(self):
"""Internal handler for the end of a script tag."""
self._in_script = False
def handle_data(self, data):
"""Internal handler for character data.
Args:
data: The character data from the HTML file.
"""
if self._in_script:
# If the last line contains whitespace only, i.e. is just there to
# properly align a </script> tag, strip the whitespace.
if data.rstrip(' \t') != data.rstrip(' \t\n\r\f'):
data = data.rstrip(' \t')
self._text += data
else:
self._AppendNewlines(data)
def handle_comment(self, data):
"""Internal handler for HTML comments.
Args:
data: The text of the comment.
"""
self._AppendNewlines(data)
def _AppendNewlines(self, data):
"""Count the number of newlines in the given string and append them.
This ensures line numbers are correct for reported errors.
Args:
data: The data to count newlines in.
"""
# We append 'x' to both sides of the string to ensure that splitlines
# gives us an accurate count.
for i in xrange(len(('x' + data + 'x').splitlines()) - 1):
self._text += '\n'
def GetScriptLines(self):
"""Return the extracted script lines.
Returns:
The extracted script lines as a list of strings.
"""
return self._text.splitlines()
def GetScriptLines(f):
"""Extract script tag contents from the given HTML file.
Args:
f: The HTML file.
Returns:
Lines in the HTML file that are from script tags.
"""
extractor = ScriptExtractor()
# The HTML parser chokes on text like Array.<!string>, so we patch
# that bug by replacing the < with &lt; - escaping all text inside script
  # tags would be better, but it's a bit of a catch-22.
contents = f.read()
contents = re.sub(r'<([^\s\w/])',
lambda x: '&lt;%s' % x.group(1),
contents)
extractor.feed(contents)
extractor.close()
return extractor.GetScriptLines()
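def _ExampleGetScriptLines():
  """Illustrative sketch, not part of the original module: extracting script
  lines from a small HTML snippet."""
  html = '<html>\n<script>\nvar x = 1;\n</script>\n</html>'
  # Non-script content becomes blank lines so line numbers stay aligned.
  return GetScriptLines(cStringIO.StringIO(html))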
def StripTags(str):
"""Returns the string with HTML tags stripped.
Args:
str: An html string.
Returns:
The html string with all tags stripped. If there was a parse error, returns
the text successfully parsed so far.
"""
# Brute force approach to stripping as much HTML as possible. If there is a
# parsing error, don't strip text before parse error position, and continue
# trying from there.
final_text = ''
finished = False
while not finished:
try:
strip = _HtmlStripper()
strip.feed(str)
strip.close()
str = strip.get_output()
final_text += str
finished = True
    except HTMLParser.HTMLParseError as e:
final_text += str[:e.offset]
str = str[e.offset + 1:]
return final_text
class _HtmlStripper(HTMLParser.HTMLParser):
"""Simple class to strip tags from HTML.
Does so by doing nothing when encountering tags, and appending character data
to a buffer when that is encountered.
"""
def __init__(self):
self.reset()
self.__output = cStringIO.StringIO()
def handle_data(self, d):
self.__output.write(d)
def get_output(self):
return self.__output.getvalue()
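def _ExampleStripTags():
  """Illustrative sketch, not part of the original module: StripTags keeps
  character data and drops the markup."""
  return StripTags('<b>bold</b> and plain')  # 'bold and plain'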

39
tools/closure_linter/build/lib/closure_linter/common/lintrunner.py

@@ -1,39 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for a lint running wrapper."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class LintRunner(object):
"""Interface for a lint running wrapper."""
def __init__(self):
if self.__class__ == LintRunner:
raise NotImplementedError('class LintRunner is abstract')
def Run(self, filenames, error_handler):
"""Run a linter on the given filenames.
Args:
filenames: The filenames to check
error_handler: An ErrorHandler object
Returns:
The error handler, which may have been used to collect error info.
"""

60
tools/closure_linter/build/lib/closure_linter/common/matcher.py

@@ -1,60 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based JavaScript matcher classes."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import position
from closure_linter.common import tokens
# Shorthand
Token = tokens.Token
Position = position.Position
class Matcher(object):
"""A token matcher.
Specifies a pattern to match, the type of token it represents, what mode the
token changes to, and what mode the token applies to.
Modes allow more advanced grammars to be incorporated, and are also necessary
to tokenize line by line. We can have different patterns apply to different
modes - i.e. looking for documentation while in comment mode.
Attributes:
regex: The regular expression representing this matcher.
type: The type of token indicated by a successful match.
result_mode: The mode to move to after a successful match.
"""
def __init__(self, regex, token_type, result_mode=None, line_start=False):
"""Create a new matcher template.
Args:
regex: The regular expression to match.
token_type: The type of token a successful match indicates.
result_mode: What mode to change to after a successful match. Defaults to
None, which means to not change the current mode.
line_start: Whether this matcher should only match string at the start
of a line.
"""
self.regex = regex
self.type = token_type
self.result_mode = result_mode
self.line_start = line_start
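def _ExampleCommentMatcher():
  """Illustrative sketch, not part of the original module: a Matcher for
  single-line comments. The token type string is a hypothetical stand-in for
  a real token type constant."""
  import re
  return Matcher(re.compile(r'//[^\n]*'), 'COMMENT')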

126
tools/closure_linter/build/lib/closure_linter/common/position.py

@@ -1,126 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent positions within strings."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class Position(object):
"""Object representing a segment of a string.
Attributes:
start: The index into the string where the segment starts.
length: The length of the string segment.
"""
def __init__(self, start, length):
"""Initialize the position object.
Args:
start: The start index.
length: The number of characters to include.
"""
self.start = start
self.length = length
def Get(self, string):
"""Returns this range of the given string.
Args:
string: The string to slice.
Returns:
The string within the range specified by this object.
"""
return string[self.start:self.start + self.length]
def Set(self, target, source):
"""Sets this range within the target string to the source string.
Args:
target: The target string.
source: The source string.
Returns:
The resulting string.
"""
return target[:self.start] + source + target[self.start + self.length:]
def AtEnd(string):
"""Create a Position representing the end of the given string.
Args:
string: The string to represent the end of.
Returns:
The created Position object.
"""
return Position(len(string), 0)
AtEnd = staticmethod(AtEnd)
def IsAtEnd(self, string):
"""Returns whether this position is at the end of the given string.
Args:
string: The string to test for the end of.
Returns:
Whether this position is at the end of the given string.
"""
return self.start == len(string) and self.length == 0
def AtBeginning():
"""Create a Position representing the beginning of any string.
Returns:
The created Position object.
"""
return Position(0, 0)
AtBeginning = staticmethod(AtBeginning)
def IsAtBeginning(self):
"""Returns whether this position is at the beginning of any string.
Returns:
Whether this position is at the beginning of any string.
"""
return self.start == 0 and self.length == 0
def All(string):
"""Create a Position representing the entire string.
Args:
string: The string to represent the entirety of.
Returns:
The created Position object.
"""
return Position(0, len(string))
All = staticmethod(All)
def Index(index):
"""Returns a Position object for the specified index.
Args:
index: The index to select, inclusively.
Returns:
The created Position object.
"""
return Position(index, 1)
Index = staticmethod(Index)
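The factory methods compose naturally with Get and Set. A short worked example (the values in the comments follow directly from the definitions above):

line = 'var x = 1;'
Position(4, 1).Get(line)                # 'x'
Position(4, 1).Set(line, 'renamed')     # 'var renamed = 1;'
Position.Index(4).Get(line)             # 'x'
Position.All(line).Get(line)            # 'var x = 1;'
Position.AtEnd(line).IsAtEnd(line)      # True
Position.AtBeginning().IsAtBeginning()  # True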

190
tools/closure_linter/build/lib/closure_linter/common/simplefileflags.py

@@ -1,190 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Determines the list of files to be checked from command line arguments."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import glob
import os
import re
import gflags as flags
FLAGS = flags.FLAGS
flags.DEFINE_multistring(
'recurse',
None,
'Recurse in to the subdirectories of the given path',
short_name='r')
flags.DEFINE_list(
'exclude_directories',
['_demos'],
'Exclude the specified directories (only applicable along with -r or '
'--presubmit)',
short_name='e')
flags.DEFINE_list(
'exclude_files',
['deps.js'],
'Exclude the specified files',
short_name='x')
def MatchesSuffixes(filename, suffixes):
"""Returns whether the given filename matches one of the given suffixes.
Args:
filename: Filename to check.
suffixes: Sequence of suffixes to check.
Returns:
Whether the given filename matches one of the given suffixes.
"""
suffix = filename[filename.rfind('.'):]
return suffix in suffixes
def _GetUserSpecifiedFiles(argv, suffixes):
"""Returns files to be linted, specified directly on the command line.
Can handle the '*' wildcard in filenames, but no other wildcards.
Args:
argv: Sequence of command line arguments. The second and following arguments
are assumed to be files that should be linted.
suffixes: Expected suffixes for the file type being checked.
Returns:
A sequence of files to be linted.
"""
files = argv[1:] or []
all_files = []
lint_files = []
# Perform any necessary globs.
for f in files:
if f.find('*') != -1:
for result in glob.glob(f):
all_files.append(result)
else:
all_files.append(f)
for f in all_files:
if MatchesSuffixes(f, suffixes):
lint_files.append(f)
return lint_files
def _GetRecursiveFiles(suffixes):
"""Returns files to be checked specified by the --recurse flag.
Args:
suffixes: Expected suffixes for the file type being checked.
Returns:
A list of files to be checked.
"""
lint_files = []
# Perform any requested recursion.
if FLAGS.recurse:
for start in FLAGS.recurse:
for root, subdirs, files in os.walk(start):
for f in files:
if MatchesSuffixes(f, suffixes):
lint_files.append(os.path.join(root, f))
return lint_files
def GetAllSpecifiedFiles(argv, suffixes):
"""Returns all files specified by the user on the commandline.
Args:
argv: Sequence of command line arguments. The second and following arguments
are assumed to be files that should be linted.
suffixes: Expected suffixes for the file type
Returns:
A list of all files specified directly or indirectly (via flags) on the
command line by the user.
"""
files = _GetUserSpecifiedFiles(argv, suffixes)
if FLAGS.recurse:
files += _GetRecursiveFiles(suffixes)
return FilterFiles(files)
def FilterFiles(files):
"""Filters the list of files to be linted be removing any excluded files.
Filters out files excluded using --exclude_files and --exclude_directories.
Args:
files: Sequence of files that needs filtering.
Returns:
Filtered list of files to be linted.
"""
num_files = len(files)
ignore_dirs_regexs = []
for ignore in FLAGS.exclude_directories:
ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore))
result_files = []
for f in files:
add_file = True
for exclude in FLAGS.exclude_files:
if f.endswith('/' + exclude) or f == exclude:
add_file = False
break
for ignore in ignore_dirs_regexs:
if ignore.search(f):
# Break out of ignore loop so we don't add to
# filtered files.
add_file = False
break
if add_file:
# Convert everything to absolute paths so we can easily remove duplicates
# using a set.
result_files.append(os.path.abspath(f))
skipped = num_files - len(result_files)
if skipped:
print 'Skipping %d file(s).' % skipped
return set(result_files)
def GetFileList(argv, file_type, suffixes):
"""Parse the flags and return the list of files to check.
Args:
argv: Sequence of command line arguments.
file_type: A name for the type of file being checked, e.g. 'JavaScript'
(currently unused by this function).
suffixes: Sequence of acceptable suffixes for the file type.
Returns:
The list of files to check.
"""
return sorted(GetAllSpecifiedFiles(argv, suffixes))
def IsEmptyArgumentList(argv):
return not (len(argv[1:]) or FLAGS.recurse)
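A sketch of how a lint tool's entry point might consume this module; main() and the '.js' suffix list here are illustrative, not part of this file:

import sys

def main(argv=None):
  if argv is None:
    argv = FLAGS(sys.argv)  # gflags parses flags and returns remaining args.
  if IsEmptyArgumentList(argv):
    print 'No files specified; pass filenames or use --recurse.'
    return 1
  for filename in GetFileList(argv, 'JavaScript', ['.js']):
    print filename  # A real tool would run its checker here.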

185
tools/closure_linter/build/lib/closure_linter/common/tokenizer.py

@@ -1,185 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based lexer."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import tokens
# Shorthand
Type = tokens.TokenType
class Tokenizer(object):
"""General purpose tokenizer.
Attributes:
mode: The latest mode of the tokenizer. This allows patterns to distinguish
if they are mid-comment, mid-parameter list, etc.
matchers: Dictionary of modes to sequences of matchers that define the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
def __init__(self, starting_mode, matchers, default_types):
"""Initialize the tokenizer.
Args:
starting_mode: Mode to start in.
matchers: Dictionary of modes to sequences of matchers that defines the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
self.__starting_mode = starting_mode
self.matchers = matchers
self.default_types = default_types
def TokenizeFile(self, file):
"""Tokenizes the given file.
Args:
file: An iterable that yields one line of the file at a time.
Returns:
The first token in the file
"""
# The current mode.
self.mode = self.__starting_mode
# The first token in the stream.
self.__first_token = None
# The last token added to the token stream.
self.__last_token = None
# The current line number.
self.__line_number = 0
for line in file:
self.__line_number += 1
self.__TokenizeLine(line)
return self.__first_token
def _CreateToken(self, string, token_type, line, line_number, values=None):
"""Creates a new Token object (or subclass).
Args:
string: The string of input the token represents.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
Returns:
The newly created Token object.
"""
return tokens.Token(string, token_type, line, line_number, values,
line_number)
def __TokenizeLine(self, line):
"""Tokenizes the given line.
Args:
line: The contents of the line.
"""
string = line.rstrip('\n\r\f')
line_number = self.__line_number
self.__start_index = 0
if not string:
self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
return
normal_token = ''
index = 0
while index < len(string):
for matcher in self.matchers[self.mode]:
if matcher.line_start and index > 0:
continue
match = matcher.regex.match(string, index)
if match:
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line,
line_number))
normal_token = ''
# Add the match.
self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
line_number, match.groupdict()))
# Change the mode to the correct one for after this match.
self.mode = matcher.result_mode or self.mode
# Shorten the string to be matched.
index = match.end()
break
else:
# If the for loop finishes naturally (i.e. no matches) we just add the
# first character to the string of consecutive non match characters.
# These will constitute a NORMAL token.
if string:
normal_token += string[index:index + 1]
index += 1
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line, line_number))
def __CreateNormalToken(self, mode, string, line, line_number):
"""Creates a normal token.
Args:
mode: The current mode.
string: The string to tokenize.
line: The line of text.
line_number: The line number within the file.
Returns:
A Token object, of the default type for the current mode.
"""
type = Type.NORMAL
if mode in self.default_types:
type = self.default_types[mode]
return self._CreateToken(string, type, line, line_number)
def __AddToken(self, token):
"""Add the given token to the token stream.
Args:
token: The token to add.
"""
# Store the first token, or point the previous token to this one.
if not self.__first_token:
self.__first_token = token
else:
self.__last_token.next = token
# Establish the doubly linked list
token.previous = self.__last_token
self.__last_token = token
# Compute the character indices
token.start_index = self.__start_index
self.__start_index += token.length
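A self-contained sketch of driving this class, using this module's Type shorthand and a single-mode toy matcher table (the real JavaScript tables are built in javascripttokenizer.py):

import re
from closure_linter.common import matcher

_MODE = 'text'
toy_tokenizer = Tokenizer(
    _MODE,
    {_MODE: [matcher.Matcher(re.compile(r'\s+'), Type.WHITESPACE)]},
    {_MODE: Type.NORMAL})
first = toy_tokenizer.TokenizeFile(['var x;\n'])
print [(t.type, t.string) for t in first]
# [('normal', 'var'), ('whitespace', ' '), ('normal', 'x;')]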

145
tools/closure_linter/build/lib/closure_linter/common/tokens.py

@@ -1,145 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent tokens and positions within them."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class TokenType(object):
"""Token types common to all languages."""
NORMAL = 'normal'
WHITESPACE = 'whitespace'
BLANK_LINE = 'blank line'
class Token(object):
"""Token class for intelligent text splitting.
The token class represents a string of characters and an identifying type.
Attributes:
type: The type of token.
string: The characters the token comprises.
length: The length of the token.
line: The text of the line the token is found in.
line_number: The number of the line the token is found in.
values: Dictionary of values returned from the token's regex match.
previous: The token before this one.
next: The token after this one.
start_index: The character index in the line where this token starts.
attached_object: Object containing more information about this token.
metadata: Object containing metadata about this token. Must be added by
a separate metadata pass.
"""
def __init__(self, string, token_type, line, line_number, values=None,
orig_line_number=None):
"""Creates a new Token object.
Args:
string: The string of input the token contains.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
orig_line_number: The line number of the original file this token comes
from. This should be only set during the tokenization process. For newly
created error fix tokens after that, it should be None.
"""
self.type = token_type
self.string = string
self.length = len(string)
self.line = line
self.line_number = line_number
self.orig_line_number = orig_line_number
self.values = values
self.is_deleted = False
# These parts can only be computed when the file is fully tokenized
self.previous = None
self.next = None
self.start_index = None
# This part is set in statetracker.py
# TODO(robbyw): Wrap this in to metadata
self.attached_object = None
# This part is set in *metadatapass.py
self.metadata = None
def IsFirstInLine(self):
"""Tests if this token is the first token in its line.
Returns:
Whether the token is the first token in its line.
"""
return not self.previous or self.previous.line_number != self.line_number
def IsLastInLine(self):
"""Tests if this token is the last token in its line.
Returns:
Whether the token is the last token in its line.
"""
return not self.next or self.next.line_number != self.line_number
def IsType(self, token_type):
"""Tests if this token is of the given type.
Args:
token_type: The type to test for.
Returns:
True if the type of this token matches the type passed in.
"""
return self.type == token_type
def IsAnyType(self, *token_types):
"""Tests if this token is any of the given types.
Args:
token_types: The types to check. Also accepts a single array.
Returns:
True if the type of this token is any of the types passed in.
"""
if not isinstance(token_types[0], basestring):
return self.type in token_types[0]
else:
return self.type in token_types
def __repr__(self):
return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
self.values, self.line_number,
self.metadata)
def __iter__(self):
"""Returns a token iterator."""
node = self
while node:
yield node
node = node.next
def __reversed__(self):
"""Returns a reverse-direction token iterator."""
node = self
while node:
yield node
node = node.previous
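The values dict carries named captures from a matcher's regex, as the constructor docstring describes. A short sketch (the pattern and type string are illustrative):

import re

match = re.match(r'function\s+(?P<name>\w+)', 'function foo() {}')
token = Token(match.group(), 'function declaration',
              'function foo() {}', 1, values=match.groupdict())
print token.values['name']  # foo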

113
tools/closure_linter/build/lib/closure_linter/common/tokens_test.py

@@ -1,113 +0,0 @@
#!/usr/bin/env python
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'nnaze@google.com (Nathan Naze)'
import unittest as googletest
from closure_linter.common import tokens
def _CreateDummyToken():
return tokens.Token('foo', None, 1, 1)
def _CreateDummyTokens(count):
dummy_tokens = []
for _ in xrange(count):
dummy_tokens.append(_CreateDummyToken())
return dummy_tokens
def _SetTokensAsNeighbors(neighbor_tokens):
for i in xrange(len(neighbor_tokens)):
prev_index = i - 1
next_index = i + 1
if prev_index >= 0:
neighbor_tokens[i].previous = neighbor_tokens[prev_index]
if next_index < len(neighbor_tokens):
neighbor_tokens[i].next = neighbor_tokens[next_index]
class TokensTest(googletest.TestCase):
def testIsFirstInLine(self):
# First token in file (has no previous).
self.assertTrue(_CreateDummyToken().IsFirstInLine())
a, b = _CreateDummyTokens(2)
_SetTokensAsNeighbors([a, b])
# Tokens on same line
a.line_number = 30
b.line_number = 30
self.assertFalse(b.IsFirstInLine())
# Tokens on different lines
b.line_number = 31
self.assertTrue(b.IsFirstInLine())
def testIsLastInLine(self):
# Last token in file (has no next).
self.assertTrue(_CreateDummyToken().IsLastInLine())
a, b = _CreateDummyTokens(2)
_SetTokensAsNeighbors([a, b])
# Tokens on same line
a.line_number = 30
b.line_number = 30
self.assertFalse(a.IsLastInLine())
b.line_number = 31
self.assertTrue(a.IsLastInLine())
def testIsType(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertTrue(a.IsType('fakeType1'))
self.assertFalse(a.IsType('fakeType2'))
def testIsAnyType(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))
def testRepr(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))
def testIter(self):
dummy_tokens = _CreateDummyTokens(5)
_SetTokensAsNeighbors(dummy_tokens)
a, b, c, d, e = dummy_tokens
i = iter(a)
self.assertListEqual([a, b, c, d, e], list(i))
def testReverseIter(self):
dummy_tokens = _CreateDummyTokens(5)
_SetTokensAsNeighbors(dummy_tokens)
a, b, c, d, e = dummy_tokens
ri = reversed(e)
self.assertListEqual([e, d, c, b, a], list(ri))
if __name__ == '__main__':
googletest.main()

844
tools/closure_linter/build/lib/closure_linter/ecmalintrules.py

@@ -1,844 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking EcmaScript files for common style guide violations.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
import gflags as flags
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errorrules
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
FLAGS = flags.FLAGS
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
# TODO(user): When flipping this to True, remove logic from unit tests
# that overrides this flag.
flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be '
'placed on the next line for wrapped expressions')
# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
# TODO(robbyw): Ensure inline jsDoc is in {}
# TODO(robbyw): Check for valid JS types in parameter docs
# Shorthand
Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
"""EmcaScript lint style checking rules.
Can be used to find common style errors in JavaScript, ActionScript and other
Ecma like scripting languages. Style checkers for Ecma scripting languages
should inherit from this style checker.
Please do not add any state to EcmaScriptLintRules or to any subclasses.
All state should be added to the StateTracker subclass used for a particular
language.
"""
# Initialized in the constructor, after the flags have been parsed.
max_line_length = -1
# Static constants.
MISSING_PARAMETER_SPACE = re.compile(r',\S')
EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
ENDS_WITH_SPACE = re.compile(r'\s$')
ILLEGAL_TAB = re.compile(r'\t')
# Regex used to split up complex types to check for invalid use of ? and |.
TYPE_SPLIT = re.compile(r'[,<>()]')
# Regex for form of author lines after the @author tag.
AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
# Acceptable tokens to remove for line too long testing.
LONG_LINE_IGNORE = frozenset(
['*', '//', '@see'] +
['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
'@fileoverview', '@param', '@return', '@returns'])
def __init__(self):
"""Initialize this lint rule object."""
checkerbase.LintRulesBase.__init__(self)
if EcmaScriptLintRules.max_line_length == -1:
EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initialize this lint rule object before parsing a new file."""
checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
is_html)
self._indentation = indentation.IndentationRules()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a @param tag."""
raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
def _CheckLineLength(self, last_token, state):
"""Checks whether the line is too long.
Args:
last_token: The last token in the line.
state: parser_state object that indicates the current state in the page
"""
# Start from the last token so that we have the flag object attached to
# all DOC_FLAG tokens.
line_number = last_token.line_number
token = last_token
# Build a representation of the string where spaces indicate potential
# line-break locations.
line = []
while token and token.line_number == line_number:
if state.IsTypeToken(token):
line.insert(0, 'x' * len(token.string))
elif token.type in (Type.IDENTIFIER, Type.OPERATOR):
# Dots are acceptable places to wrap (may be tokenized as identifiers).
line.insert(0, token.string.replace('.', ' '))
else:
line.insert(0, token.string)
token = token.previous
line = ''.join(line)
line = line.rstrip('\n\r\f')
try:
length = len(unicode(line, 'utf-8'))
except (LookupError, UnicodeDecodeError):
# Unknown encoding. The line length may be wrong, as was originally the
# case for utf-8 (see bug 1735846). For now just accept the default
# length, but as we find problems we can either add test for other
# possible encodings or return without an error to protect against
# false positives at the cost of more false negatives.
length = len(line)
if length > EcmaScriptLintRules.max_line_length:
# If the line matches one of the exceptions, then it's ok.
for long_line_regexp in self.GetLongLineExceptions():
if long_line_regexp.match(last_token.line):
return
# If the line consists of only one "word", or multiple words but all
# except one are ignorable, then it's ok.
parts = set(line.split())
# We allow two "words" (type and name) when the line contains @param
max_parts = 1
if '@param' in parts:
max_parts = 2
# Custom tags like @requires may have url like descriptions, so ignore
# the tag, similar to how we handle @see.
custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
> max_parts):
self._HandleError(
errors.LINE_TOO_LONG,
'Line too long (%d characters).' % len(line), last_token)
def _CheckJsDocType(self, token, js_type):
"""Checks the given type for style errors.
Args:
token: The DOC_FLAG token for the flag whose type to check.
js_type: The flag's typeannotation.TypeAnnotation instance.
"""
if not js_type: return
if js_type.type_group and len(js_type.sub_types) == 2:
identifiers = [t.identifier for t in js_type.sub_types]
if 'null' in identifiers:
# Don't warn if the identifier is a template type (e.g. {TYPE|null}).
if not identifiers[0].isupper() and not identifiers[1].isupper():
self._HandleError(
errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
'Prefer "?Type" to "Type|null": "%s"' % js_type, token)
# TODO(user): We should report an error for wrong usage of '?' and '|'
# e.g. {?number|string|null} etc.
for sub_type in js_type.IterTypes():
self._CheckJsDocType(token, sub_type)
def _CheckForMissingSpaceBeforeToken(self, token):
"""Checks for a missing space at the beginning of a token.
Reports a MISSING_SPACE error if the token does not begin with a space,
the previous token does not end with a space, and the previous token is on
the same line as the token.
Args:
token: The token being checked
"""
# TODO(user): Check if too many spaces?
if (len(token.string) == len(token.string.lstrip()) and
token.previous and token.line_number == token.previous.line_number and
len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
self._HandleError(
errors.MISSING_SPACE,
'Missing space before "%s"' % token.string,
token,
position=Position.AtBeginning())
def _CheckOperator(self, token):
"""Checks an operator for spacing and line style.
Args:
token: The operator token.
"""
last_code = token.metadata.last_code
if not self._ExpectSpaceBeforeOperator(token):
if (token.previous and token.previous.type == Type.WHITESPACE and
last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
last_code.line_number == token.line_number):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
token.previous, position=Position.All(token.previous.string))
elif (token.previous and
not token.previous.IsComment() and
not tokenutil.IsDot(token) and
token.previous.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.MISSING_SPACE,
'Missing space before "%s"' % token.string, token,
position=Position.AtBeginning())
# Check wrapping of operators.
next_code = tokenutil.GetNextCodeToken(token)
is_dot = tokenutil.IsDot(token)
wrapped_before = last_code and last_code.line_number != token.line_number
wrapped_after = next_code and next_code.line_number != token.line_number
if FLAGS.dot_on_next_line and is_dot and wrapped_after:
self._HandleError(
errors.LINE_ENDS_WITH_DOT,
'"." must go on the following line',
token)
if (not is_dot and wrapped_before and
not token.metadata.IsUnaryOperator()):
self._HandleError(
errors.LINE_STARTS_WITH_OPERATOR,
'Binary operator must go on previous line "%s"' % token.string,
token)
def _IsLabel(self, token):
# A ':' token is considered part of a label if it occurs in a case
# statement, a plain label, or an object literal, i.e. is not part of a
# ternary.
return (token.string == ':' and
token.metadata.context.type in (Context.LITERAL_ELEMENT,
Context.CASE_BLOCK,
Context.STATEMENT))
def _ExpectSpaceBeforeOperator(self, token):
"""Returns whether a space should appear before the given operator token.
Args:
token: The operator token.
Returns:
Whether there should be a space before the token.
"""
if token.string == ',' or token.metadata.IsUnaryPostOperator():
return False
if tokenutil.IsDot(token):
return False
# Colons appear in labels, object literals, the cases of a switch
# statement, and the ternary operator. Only the ternary operator should
# have a space before the colon.
if self._IsLabel(token):
return False
if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
return False
return True
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
# Store some convenience variables
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
last_non_space_token = state.GetLastNonSpaceToken()
token_type = token.type
# Process the line change.
if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
self._HandleError(*indentation_error)
if last_in_line:
self._CheckLineLength(token, state)
if token_type == Type.PARAMETERS:
# Find missing spaces in parameter lists.
if self.MISSING_PARAMETER_SPACE.search(token.string):
fix_data = ', '.join([s.strip() for s in token.string.split(',')])
self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
token, position=None, fix_data=fix_data.strip())
# Find extra spaces at the beginning of parameter lists. Make sure
# we aren't at the beginning of a continuing multi-line list.
if not first_in_line:
space_count = len(token.string) - len(token.string.lstrip())
if space_count:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
token, position=Position(0, space_count))
elif (token_type == Type.START_BLOCK and
token.metadata.context.type == Context.BLOCK):
self._CheckForMissingSpaceBeforeToken(token)
elif token_type == Type.END_BLOCK:
last_code = token.metadata.last_code
if state.InFunction() and state.IsFunctionClose():
if state.InTopLevelFunction():
# A semicolon should not be included at the end of a function
# declaration.
if not state.InAssignedFunction():
if not last_in_line and token.next.type == Type.SEMICOLON:
self._HandleError(
errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
'Illegal semicolon after function declaration',
token.next, position=Position.All(token.next.string))
# A semicolon should be included at the end of a function expression
# that is not immediately called or used by a dot operator.
if (state.InAssignedFunction() and token.next
and token.next.type != Type.SEMICOLON):
next_token = tokenutil.GetNextCodeToken(token)
is_immediately_used = (next_token.type == Type.START_PAREN or
tokenutil.IsDot(next_token))
if not is_immediately_used:
self._HandleError(
errors.MISSING_SEMICOLON_AFTER_FUNCTION,
'Missing semicolon after function assigned to a variable',
token, position=Position.AtEnd(token.string))
if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
'Interface methods cannot contain code', last_code)
elif (state.IsBlockClose() and
token.next and token.next.type == Type.SEMICOLON):
if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
and last_code.metadata.context.type != Context.OBJECT_LITERAL):
self._HandleError(
errors.REDUNDANT_SEMICOLON,
'No semicolon is required to end a code block',
token.next, position=Position.All(token.next.string))
elif token_type == Type.SEMICOLON:
if token.previous and token.previous.type == Type.WHITESPACE:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before ";"',
token.previous, position=Position.All(token.previous.string))
if token.next and token.next.line_number == token.line_number:
if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
# TODO(robbyw): Error about no multi-statement lines.
pass
elif token.next.type not in (
Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
self._HandleError(
errors.MISSING_SPACE,
'Missing space after ";" in for statement',
token.next,
position=Position.AtBeginning())
last_code = token.metadata.last_code
if last_code and last_code.type == Type.SEMICOLON:
# Allow a single double semi colon in for loops for cases like:
# for (;;) { }.
# NOTE(user): This is not a perfect check, and will not throw an error
# for cases like: for (var i = 0;; i < n; i++) {}, but then your code
# probably won't work either.
for_token = tokenutil.CustomSearch(
last_code,
lambda token: token.type == Type.KEYWORD and token.string == 'for',
end_func=lambda token: token.type == Type.SEMICOLON,
distance=None,
reverse=True)
if not for_token:
self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
token, position=Position.All(token.string))
elif token_type == Type.START_PAREN:
# Ensure that there is a space between a keyword and an opening
# parenthesis, unless the keyword is being invoked like a member function.
if (token.previous and token.previous.type == Type.KEYWORD and
(not token.previous.metadata or
not token.previous.metadata.last_code or
not token.previous.metadata.last_code.string or
token.previous.metadata.last_code.string[-1:] != '.')):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
token, position=Position.AtBeginning())
elif token.previous and token.previous.type == Type.WHITESPACE:
before_space = token.previous.previous
# Ensure that there is no extra space before a function invocation,
# even if the function being invoked happens to be a keyword.
if (before_space and before_space.line_number == token.line_number and
(before_space.type == Type.IDENTIFIER or
(before_space.type == Type.KEYWORD and before_space.metadata and
before_space.metadata.last_code and
before_space.metadata.last_code.string and
before_space.metadata.last_code.string[-1:] == '.'))):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "("',
token.previous, position=Position.All(token.previous.string))
elif token_type == Type.START_BRACKET:
self._HandleStartBracket(token, last_non_space_token)
elif token_type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
# beginning of a line.
if (token.previous and token.previous.type == Type.WHITESPACE and
not token.previous.IsFirstInLine() and
not (last_non_space_token and last_non_space_token.line_number ==
token.line_number and
last_non_space_token.type == Type.SEMICOLON)):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' %
token.string, token.previous,
position=Position.All(token.previous.string))
elif token_type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
if token.next:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string,
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace',
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
token, position=Position.All(token.string))
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if last_in_line:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
token, position=Position.All(token.string))
elif not first_in_line and not token.next.IsComment():
if token.length > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after "%s"' %
token.previous.string, token,
position=Position(1, len(token.string) - 1))
elif token_type == Type.OPERATOR:
self._CheckOperator(token)
elif token_type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'bug':
# TODO(robbyw): Check for exactly 1 space on the left.
string = token.next.string.lstrip()
string = string.split(' ', 1)[0]
if not string.isdigit():
self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
'@bug should be followed by a bug number', token)
elif flag.flag_type == 'suppress':
if flag.type is None:
# A syntactically invalid suppress tag will get tokenized as a normal
# flag, indicating an error.
self._HandleError(
errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
else:
for suppress_type in flag.jstype.IterIdentifiers():
if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(
errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % suppress_type, token)
elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
result = self.AUTHOR_SPEC.match(string)
if not result:
self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
'Author tag line should be of the form: '
'@author foo@somewhere.com (Your Name)',
token.next)
else:
# Check spacing between email address and name. Do this before
# checking earlier spacing so positions are easier to calculate for
# autofixing.
num_spaces = len(result.group(2))
if num_spaces < 1:
self._HandleError(errors.MISSING_SPACE,
'Missing space after email address',
token.next, position=Position(result.start(2), 0))
elif num_spaces > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after email address',
token.next,
position=Position(result.start(2) + 1, num_spaces - 1))
# Check for extra spaces before email address. Can't be too few, if
# not at least one we wouldn't match @author tag.
num_spaces = len(result.group(1))
if num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space before email address',
token.next, position=Position(1, num_spaces - 1))
elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
not self._limited_doc_checks):
if flag.flag_type == 'param':
if flag.name is None:
self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
'Missing name in @param tag', token)
if not flag.description or flag.description is None:
flag_name = token.type
if 'name' in token.values:
flag_name = '@' + token.values['name']
if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
self._HandleError(
errors.MISSING_JSDOC_TAG_DESCRIPTION,
'Missing description in %s tag' % flag_name, token)
else:
self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
if flag.HasType():
if flag.type_start_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.type_start_token)
if flag.jstype and not flag.jstype.IsEmpty():
self._CheckJsDocType(token, flag.jstype)
if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(
errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
token.values['name'] not in FLAGS.custom_jsdoc_tags):
self._HandleError(
errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token)
if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
token.values['name'] == 'inheritDoc' and
token_type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc',
token)
elif token_type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
if ((not state.InFunction() or state.InConstructor()) and
state.InTopLevel() and not state.InObjectLiteralDescendant()):
jsdoc = state.GetDocComment()
if not state.HasDocComment(identifier):
# Only test for documentation on identifiers with .s in them to
# avoid checking things like simple variables. We don't require
# documenting assignments to .prototype itself (bug 1880803).
if (not state.InConstructor() and
identifier.find('.') != -1 and not
identifier.endswith('.prototype') and not
self._limited_doc_checks):
comment = state.GetLastComment()
if not (comment and comment.lower().count('jsdoc inherited')):
self._HandleError(
errors.MISSING_MEMBER_DOCUMENTATION,
"No docs found for member '%s'" % identifier,
token)
elif jsdoc and (not state.InConstructor() or
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
# Can have a private class which inherits documentation from a
# public superclass.
#
# @inheritDoc is deprecated in favor of using @override, and they are
# handled identically here.
if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token)
if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
('underscore' not in jsdoc.suppressions) and not
((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
('accessControls' in jsdoc.suppressions))):
self._HandleError(
errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
self._HandleError(
errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
elif (jsdoc.HasFlag('private') and
not self.InExplicitlyTypedLanguage()):
# It is convention to hide public fields in some ECMA
# implementations from documentation using the @private tag.
self._HandleError(
errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
# These flags are only legal on localizable message definitions;
# such variables always begin with the prefix MSG_.
for f in ('desc', 'hidden', 'meaning'):
if (jsdoc.HasFlag(f)
and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1):
self._HandleError(
errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @%s JsDoc' % (identifier, f),
token)
# Check for illegally assigning live objects as prototype property values.
index = identifier.find('.prototype.')
# Ignore anything with additional .s after the prototype.
if index != -1 and identifier.find('.', index + 11) == -1:
equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
if next_code and (
next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
next_code.IsOperator('new')):
self._HandleError(
errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
'Member %s cannot have a non-primitive value' % identifier,
token)
elif token_type == Type.END_PARAMETERS:
# Find extra space at the end of parameter lists. We check the token
# prior to the current one when it is a closing paren.
if (token.previous and token.previous.type == Type.PARAMETERS
and self.ENDS_WITH_SPACE.search(token.previous.string)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
token.previous)
jsdoc = state.GetDocComment()
if state.GetFunction().is_interface:
if token.previous and token.previous.type == Type.PARAMETERS:
self._HandleError(
errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
'Interface constructor cannot have parameters',
token.previous)
elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
and not jsdoc.InheritsDocumentation()
and not state.InObjectLiteralDescendant() and not
jsdoc.IsInvalidated()):
distance, edit = jsdoc.CompareParameters(state.GetParams())
if distance:
params_iter = iter(state.GetParams())
docs_iter = iter(jsdoc.ordered_params)
for op in edit:
if op == 'I':
# Insertion.
# Parsing doc comments is the same for all languages
# but some languages care about parameters that don't have
# doc comments and some languages don't care.
# Languages that don't allow variables to be typed, such as
# JavaScript, care, but languages that allow variables to be typed,
# such as ActionScript or Java, don't care.
if not self._limited_doc_checks:
self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
'Found docs for non-existing parameter: "%s"' %
docs_iter.next(), token)
elif op == 'S':
# Substitution
if not self._limited_doc_checks:
self._HandleError(
errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
params_iter.next()
docs_iter.next()
elif token_type == Type.STRING_TEXT:
# If this is the first token after the start of the string, but it's at
# the end of a line, we know we have a multi-line string.
if token.previous.type in (
Type.SINGLE_QUOTE_STRING_START,
Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
self._HandleError(errors.MULTI_LINE_STRING,
'Multi-line strings are not allowed', token)
# This check is orthogonal to the ones above, and repeats some types, so
# it is a plain if and not an elif.
if token.type in Type.COMMENT_TYPES:
if self.ILLEGAL_TAB.search(token.string):
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in comment "%s"' % token.string, token)
trimmed = token.string.rstrip()
if last_in_line and token.string != trimmed:
# Check for extra whitespace at the end of a line.
self._HandleError(
errors.EXTRA_SPACE, 'Extra space at end of line', token,
position=Position(len(trimmed), len(token.string) - len(trimmed)))
# This check is also orthogonal since it is based on metadata.
if token.metadata.is_implied_semicolon:
self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token)
def _HandleStartBracket(self, token, last_non_space_token):
"""Handles a token that is an open bracket.
Args:
token: The token to handle.
last_non_space_token: The last token that was not a space.
"""
if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, position=Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
# In the future, when rules for computing exactly how many spaces
# lines should be indented are added, we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not token.IsFirstInLine() and token.previous and
token.previous.type not in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, position=Position.AtBeginning())
def Finalize(self, state):
"""Perform all checks that need to occur after all lines are processed.
Args:
state: State of the parser after parsing all tokens
Raises:
TypeError: If not overridden.
"""
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
if state.GetLastLine() and not (
state.GetLastLine().isspace() or
state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
self._HandleError(
errors.FILE_MISSING_NEWLINE,
'File does not end with new line. (%s)' % state.GetLastLine(),
last_non_space_token)
try:
self._indentation.Finalize()
except Exception, e:
self._HandleError(
errors.FILE_DOES_NOT_PARSE,
str(e),
last_non_space_token)
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit.
Returns:
A list of regexps, used as matches (rather than searches).
"""
return []
def InExplicitlyTypedLanguage(self):
"""Returns whether this ecma implementation is explicitly typed."""
return False
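A minimal sketch of the subclassing contract described above (the class name and the reuse of MISSING_PARAMETER_DOCUMENTATION here are illustrative; the real JavaScript subclass is javascriptlintrules.JavaScriptLintRules):

class ToyScriptLintRules(EcmaScriptLintRules):
  """Hypothetical checker for an Ecma-like language."""

  def HandleMissingParameterDoc(self, token, param_name):
    """Reports a missing @param tag instead of raising TypeError."""
    self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
                      'Missing docs for parameter: "%s"' % param_name, token)

  def InExplicitlyTypedLanguage(self):
    return False  # Keeps the @private checks enabled, as for JavaScript.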

574
tools/closure_linter/build/lib/closure_linter/ecmametadatapass.py

@@ -1,574 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata pass for annotating tokens in EcmaScript files."""
__author__ = ('robbyw@google.com (Robert Walker)')
from closure_linter import javascripttokens
from closure_linter import tokenutil
TokenType = javascripttokens.JavaScriptTokenType
class ParseError(Exception):
"""Exception indicating a parse error at the given token.
Attributes:
token: The token where the parse error occurred.
"""
def __init__(self, token, message=None):
"""Initialize a parse error at the given token with an optional message.
Args:
token: The token where the parse error occurred.
message: A message describing the parse error.
"""
Exception.__init__(self, message)
self.token = token
class EcmaContext(object):
"""Context object for EcmaScript languages.
Attributes:
type: The context type.
start_token: The token where this context starts.
end_token: The token where this context ends.
parent: The parent context.
"""
# The root context.
ROOT = 'root'
# A block of code.
BLOCK = 'block'
# A pseudo-block of code for a given case or default section.
CASE_BLOCK = 'case_block'
# Block of statements in a for loop's parentheses.
FOR_GROUP_BLOCK = 'for_block'
# An implied block of code for one-line if, while, and for statements.
IMPLIED_BLOCK = 'implied_block'
# An index into an array or object.
INDEX = 'index'
# An array literal in [].
ARRAY_LITERAL = 'array_literal'
# An object literal in {}.
OBJECT_LITERAL = 'object_literal'
# An individual element in an array or object literal.
LITERAL_ELEMENT = 'literal_element'
# The portion of a ternary statement between ? and :
TERNARY_TRUE = 'ternary_true'
# The portion of a ternary statement after :
TERNARY_FALSE = 'ternary_false'
# The entire switch statement. This will contain a GROUP with the variable
# and a BLOCK with the code.
# Since that BLOCK is not a normal block, it cannot contain statements
# except for case and default.
SWITCH = 'switch'
# A normal comment.
COMMENT = 'comment'
# A JsDoc comment.
DOC = 'doc'
# An individual statement.
STATEMENT = 'statement'
# Code within parentheses.
GROUP = 'group'
# Parameter names in a function declaration.
PARAMETERS = 'parameters'
# A set of variable declarations appearing after the 'var' keyword.
VAR = 'var'
# Context types that are blocks.
BLOCK_TYPES = frozenset([
ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK])
def __init__(self, context_type, start_token, parent=None):
"""Initializes the context object.
Args:
context_type: The context type.
start_token: The token where this context starts.
parent: The parent context.
Attributes:
type: The context type.
start_token: The token where this context starts.
end_token: The token where this context ends.
parent: The parent context.
children: The child contexts of this context, in order.
"""
self.type = context_type
self.start_token = start_token
self.end_token = None
self.parent = None
self.children = []
if parent:
parent.AddChild(self)
def __repr__(self):
"""Returns a string representation of the context object."""
stack = []
context = self
while context:
stack.append(context.type)
context = context.parent
return 'Context(%s)' % ' > '.join(stack)
def AddChild(self, child):
"""Adds a child to this context and sets child's parent to this context.
Args:
child: A child EcmaContext. The child's parent will be set to this
context.
"""
child.parent = self
self.children.append(child)
self.children.sort(EcmaContext._CompareContexts)
def GetRoot(self):
"""Get the root context that contains this context, if any."""
context = self
while context:
if context.type is EcmaContext.ROOT:
return context
context = context.parent
@staticmethod
def _CompareContexts(context1, context2):
"""Sorts contexts 1 and 2 by start token document position."""
return tokenutil.Compare(context1.start_token, context2.start_token)
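# Illustrative parent chain (start tokens omitted): nesting a STATEMENT in
# a BLOCK under ROOT makes repr() print innermost-first, e.g.
#   root = EcmaContext(EcmaContext.ROOT, start_token)
#   block = EcmaContext(EcmaContext.BLOCK, start_token, parent=root)
#   stmt = EcmaContext(EcmaContext.STATEMENT, start_token, parent=block)
#   repr(stmt)  # 'Context(statement > block > root)'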
class EcmaMetaData(object):
"""Token metadata for EcmaScript languages.
Attributes:
last_code: The last code token to appear before this one.
context: The context this token appears in.
operator_type: The operator type, will be one of the *_OPERATOR constants
defined below.
aliased_symbol: The full symbol being identified, as a string (e.g. an
'XhrIo' alias for 'goog.net.XhrIo'). Only applicable to identifier
tokens. This is set in aliaspass.py and is a best guess.
is_alias_definition: True if the symbol is part of an alias definition.
If so, these symbols won't be counted towards goog.requires/provides.
"""
UNARY_OPERATOR = 'unary'
UNARY_POST_OPERATOR = 'unary_post'
BINARY_OPERATOR = 'binary'
TERNARY_OPERATOR = 'ternary'
def __init__(self):
"""Initializes a token metadata object."""
self.last_code = None
self.context = None
self.operator_type = None
self.is_implied_semicolon = False
self.is_implied_block = False
self.is_implied_block_close = False
self.aliased_symbol = None
self.is_alias_definition = False
def __repr__(self):
"""Returns a string representation of the context object."""
parts = ['%r' % self.context]
if self.operator_type:
parts.append('optype: %r' % self.operator_type)
if self.is_implied_semicolon:
parts.append('implied;')
if self.aliased_symbol:
parts.append('alias for: %s' % self.aliased_symbol)
return 'MetaData(%s)' % ', '.join(parts)
def IsUnaryOperator(self):
return self.operator_type in (EcmaMetaData.UNARY_OPERATOR,
EcmaMetaData.UNARY_POST_OPERATOR)
def IsUnaryPostOperator(self):
return self.operator_type == EcmaMetaData.UNARY_POST_OPERATOR
class EcmaMetaDataPass(object):
"""A pass that iterates over all tokens and builds metadata about them."""
def __init__(self):
"""Initialize the meta data pass object."""
self.Reset()
def Reset(self):
"""Resets the metadata pass to prepare for the next file."""
self._token = None
self._context = None
self._AddContext(EcmaContext.ROOT)
self._last_code = None
def _CreateContext(self, context_type):
"""Overridable by subclasses to create the appropriate context type."""
return EcmaContext(context_type, self._token, self._context)
def _CreateMetaData(self):
"""Overridable by subclasses to create the appropriate metadata type."""
return EcmaMetaData()
def _AddContext(self, context_type):
"""Adds a context of the given type to the context stack.
Args:
context_type: The type of context to create
"""
self._context = self._CreateContext(context_type)
def _PopContext(self):
"""Moves up one level in the context stack.
Returns:
The former context.
Raises:
ParseError: If the root context is popped.
"""
top_context = self._context
top_context.end_token = self._token
self._context = top_context.parent
if self._context:
return top_context
else:
raise ParseError(self._token)
def _PopContextType(self, *stop_types):
"""Pops the context stack until a context of the given type is popped.
Args:
*stop_types: The types of context to pop to - stops at the first match.
Returns:
The context object of the given type that was popped.
"""
last = None
while not last or last.type not in stop_types:
last = self._PopContext()
return last
def _EndStatement(self):
"""Process the end of a statement."""
self._PopContextType(EcmaContext.STATEMENT)
if self._context.type == EcmaContext.IMPLIED_BLOCK:
self._token.metadata.is_implied_block_close = True
self._PopContext()
def _ProcessContext(self):
"""Process the context at the current token.
Returns:
The context that should be assigned to the current token, or None if
the current context after this method should be used.
Raises:
ParseError: When the token appears in an invalid context.
"""
token = self._token
token_type = token.type
if self._context.type in EcmaContext.BLOCK_TYPES:
# Whenever we're in a block, we add a statement context. We make an
# exception for switch statements since they can only contain case: and
# default: and therefore don't directly contain statements.
# The statement context we add here may be immediately removed in some
# cases, but that causes no harm.
parent = self._context.parent
if not parent or parent.type != EcmaContext.SWITCH:
self._AddContext(EcmaContext.STATEMENT)
elif self._context.type == EcmaContext.ARRAY_LITERAL:
self._AddContext(EcmaContext.LITERAL_ELEMENT)
if token_type == TokenType.START_PAREN:
if self._last_code and self._last_code.IsKeyword('for'):
# for loops contain multiple statements in the group unlike while,
# switch, if, etc.
self._AddContext(EcmaContext.FOR_GROUP_BLOCK)
else:
self._AddContext(EcmaContext.GROUP)
elif token_type == TokenType.END_PAREN:
result = self._PopContextType(EcmaContext.GROUP,
EcmaContext.FOR_GROUP_BLOCK)
keyword_token = result.start_token.metadata.last_code
# keyword_token will not exist if the open paren is the first token of
# the file, for example if all code is wrapped in an immediately invoked
# anonymous function.
if keyword_token and keyword_token.string in ('if', 'for', 'while'):
next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
if next_code.type != TokenType.START_BLOCK:
# Check for do-while.
is_do_while = False
pre_keyword_token = keyword_token.metadata.last_code
if (pre_keyword_token and
pre_keyword_token.type == TokenType.END_BLOCK):
start_block_token = pre_keyword_token.metadata.context.start_token
is_do_while = start_block_token.metadata.last_code.string == 'do'
# If it's not do-while, it's an implied block.
if not is_do_while:
self._AddContext(EcmaContext.IMPLIED_BLOCK)
token.metadata.is_implied_block = True
return result
# else (not else if) with no open brace after it should be considered the
# start of an implied block, similar to the case with if, for, and while
# above.
elif (token_type == TokenType.KEYWORD and
token.string == 'else'):
next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
if (next_code.type != TokenType.START_BLOCK and
(next_code.type != TokenType.KEYWORD or next_code.string != 'if')):
self._AddContext(EcmaContext.IMPLIED_BLOCK)
token.metadata.is_implied_block = True
elif token_type == TokenType.START_PARAMETERS:
self._AddContext(EcmaContext.PARAMETERS)
elif token_type == TokenType.END_PARAMETERS:
return self._PopContextType(EcmaContext.PARAMETERS)
elif token_type == TokenType.START_BRACKET:
if (self._last_code and
self._last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
self._AddContext(EcmaContext.INDEX)
else:
self._AddContext(EcmaContext.ARRAY_LITERAL)
elif token_type == TokenType.END_BRACKET:
return self._PopContextType(EcmaContext.INDEX, EcmaContext.ARRAY_LITERAL)
elif token_type == TokenType.START_BLOCK:
if (self._last_code.type in (TokenType.END_PAREN,
TokenType.END_PARAMETERS) or
self._last_code.IsKeyword('else') or
self._last_code.IsKeyword('do') or
self._last_code.IsKeyword('try') or
self._last_code.IsKeyword('finally') or
(self._last_code.IsOperator(':') and
self._last_code.metadata.context.type == EcmaContext.CASE_BLOCK)):
# else, do, try, and finally all might have no () before {.
# Also, handle the bizarre syntax case 10: {...}.
self._AddContext(EcmaContext.BLOCK)
else:
self._AddContext(EcmaContext.OBJECT_LITERAL)
elif token_type == TokenType.END_BLOCK:
context = self._PopContextType(EcmaContext.BLOCK,
EcmaContext.OBJECT_LITERAL)
if self._context.type == EcmaContext.SWITCH:
# The end of the block also means the end of the switch statement it
# applies to.
return self._PopContext()
return context
elif token.IsKeyword('switch'):
self._AddContext(EcmaContext.SWITCH)
elif (token_type == TokenType.KEYWORD and
token.string in ('case', 'default') and
self._context.type != EcmaContext.OBJECT_LITERAL):
# Pop up to but not including the switch block.
while self._context.parent.type != EcmaContext.SWITCH:
self._PopContext()
if self._context.parent is None:
raise ParseError(token, 'Encountered case/default statement '
'without switch statement')
elif token.IsOperator('?'):
self._AddContext(EcmaContext.TERNARY_TRUE)
elif token.IsOperator(':'):
if self._context.type == EcmaContext.OBJECT_LITERAL:
self._AddContext(EcmaContext.LITERAL_ELEMENT)
elif self._context.type == EcmaContext.TERNARY_TRUE:
self._PopContext()
self._AddContext(EcmaContext.TERNARY_FALSE)
# Handle nested ternary statements like:
# foo = bar ? baz ? 1 : 2 : 3
# When we encounter the second ":" the context is
# ternary_false > ternary_true > statement > root
elif (self._context.type == EcmaContext.TERNARY_FALSE and
self._context.parent.type == EcmaContext.TERNARY_TRUE):
self._PopContext() # Leave current ternary false context.
self._PopContext() # Leave current parent ternary true
self._AddContext(EcmaContext.TERNARY_FALSE)
elif self._context.parent.type == EcmaContext.SWITCH:
self._AddContext(EcmaContext.CASE_BLOCK)
elif token.IsKeyword('var'):
self._AddContext(EcmaContext.VAR)
elif token.IsOperator(','):
while self._context.type not in (EcmaContext.VAR,
EcmaContext.ARRAY_LITERAL,
EcmaContext.OBJECT_LITERAL,
EcmaContext.STATEMENT,
EcmaContext.PARAMETERS,
EcmaContext.GROUP):
self._PopContext()
elif token_type == TokenType.SEMICOLON:
self._EndStatement()
def Process(self, first_token):
"""Processes the token stream starting with the given token."""
self._token = first_token
while self._token:
self._ProcessToken()
if self._token.IsCode():
self._last_code = self._token
self._token = self._token.next
try:
self._PopContextType(EcmaContext.ROOT)
except ParseError:
# Ignore the "popped to root" error.
pass
def _ProcessToken(self):
"""Process the given token."""
token = self._token
token.metadata = self._CreateMetaData()
context = (self._ProcessContext() or self._context)
token.metadata.context = context
token.metadata.last_code = self._last_code
# Determine the operator type of the token, if applicable.
if token.type == TokenType.OPERATOR:
token.metadata.operator_type = self._GetOperatorType(token)
# Determine if there is an implied semicolon after the token.
if token.type != TokenType.SEMICOLON:
next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
# A statement like if (x) does not need a semicolon after it
is_implied_block = self._context.type == EcmaContext.IMPLIED_BLOCK
is_last_code_in_line = token.IsCode() and (
not next_code or next_code.line_number != token.line_number)
is_continued_operator = (token.type == TokenType.OPERATOR and
not token.metadata.IsUnaryPostOperator())
is_continued_dot = token.string == '.'
next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR
is_end_of_block = (
token.type == TokenType.END_BLOCK and
token.metadata.context.type != EcmaContext.OBJECT_LITERAL)
is_multiline_string = token.type == TokenType.STRING_TEXT
is_continued_var_decl = (token.IsKeyword('var') and
next_code and
(next_code.type in [TokenType.IDENTIFIER,
TokenType.SIMPLE_LVALUE]) and
token.line_number < next_code.line_number)
next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK
if (is_last_code_in_line and
self._StatementCouldEndInContext() and
not is_multiline_string and
not is_end_of_block and
not is_continued_var_decl and
not is_continued_operator and
not is_continued_dot and
not next_code_is_operator and
not is_implied_block and
not next_code_is_block):
token.metadata.is_implied_semicolon = True
self._EndStatement()
def _StatementCouldEndInContext(self):
"""Returns if the current statement (if any) may end in this context."""
# In the basic statement or variable declaration context, statement can
# always end in this context.
if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR):
return True
# End of a ternary false branch inside a statement can also be the
# end of the statement, for example:
# var x = foo ? foo.bar() : null
# In this case the statement ends after the null, when the context stack
# looks like ternary_false > var > statement > root.
if (self._context.type == EcmaContext.TERNARY_FALSE and
self._context.parent.type in (EcmaContext.STATEMENT, EcmaContext.VAR)):
return True
# In all other contexts like object and array literals, ternary true, etc.
# the statement can't yet end.
return False
def _GetOperatorType(self, token):
"""Returns the operator type of the given operator token.
Args:
token: The token to get arity for.
Returns:
The type of the operator. One of the *_OPERATOR constants defined in
EcmaMetaData.
"""
if token.string == '?':
return EcmaMetaData.TERNARY_OPERATOR
if token.string in TokenType.UNARY_OPERATORS:
return EcmaMetaData.UNARY_OPERATOR
last_code = token.metadata.last_code
if not last_code or last_code.type == TokenType.END_BLOCK:
return EcmaMetaData.UNARY_OPERATOR
if (token.string in TokenType.UNARY_POST_OPERATORS and
last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
return EcmaMetaData.UNARY_POST_OPERATOR
if (token.string in TokenType.UNARY_OK_OPERATORS and
last_code.type not in TokenType.EXPRESSION_ENDER_TYPES and
last_code.string not in TokenType.UNARY_POST_OPERATORS):
return EcmaMetaData.UNARY_OPERATOR
return EcmaMetaData.BINARY_OPERATOR
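# A minimal standalone sketch of the arity heuristic _GetOperatorType applies
# above: an ambiguous operator such as '-' is binary only when the previous
# code token can end an expression. The token-type names below are
# illustrative stand-ins, not the linter's real TokenType constants.
EXPRESSION_ENDERS = frozenset(
    ['identifier', 'number', 'string', 'end_paren', 'end_bracket'])
def GuessArity(operator, last_code_type):
  """Classifies an operator as 'unary' or 'binary' from its left context."""
  if operator in ('!', '~', 'typeof', 'delete'):
    return 'unary'
  if last_code_type in EXPRESSION_ENDERS:
    return 'binary'  # e.g. 'a - b'
  return 'unary'  # e.g. 'return -b'
assert GuessArity('-', 'identifier') == 'binary'
assert GuessArity('-', 'keyword') == 'unary'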

95
tools/closure_linter/build/lib/closure_linter/error_check.py

@@ -1,95 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specific JSLint errors checker."""
import gflags as flags
FLAGS = flags.FLAGS
class Rule(object):
"""Different rules to check."""
# Documentation for specific rules goes in the flag definitions.
BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
INDENTATION = 'indentation'
WELL_FORMED_AUTHOR = 'well_formed_author'
NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
BRACES_AROUND_TYPE = 'braces_around_type'
OPTIONAL_TYPE_MARKER = 'optional_type_marker'
VARIABLE_ARG_MARKER = 'variable_arg_marker'
UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
UNUSED_LOCAL_VARIABLES = 'unused_local_variables'
# Rule to raise all known errors.
ALL = 'all'
# All rules that are to be checked when using the strict flag, i.e. the rules
# that are specific to the stricter Closure style.
CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL,
INDENTATION,
WELL_FORMED_AUTHOR,
NO_BRACES_AROUND_INHERIT_DOC,
BRACES_AROUND_TYPE,
OPTIONAL_TYPE_MARKER,
VARIABLE_ARG_MARKER])
flags.DEFINE_boolean('strict', False,
'Whether to validate against the stricter Closure style. '
'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')
flags.DEFINE_multistring('jslint_error', [],
'List of specific lint errors to check. Here is a list'
' of accepted values:\n'
' - ' + Rule.ALL + ': enables all following errors.\n'
' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates '
'number of blank lines between blocks at top level.\n'
' - ' + Rule.INDENTATION + ': checks correct '
'indentation of code.\n'
' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
'@author JsDoc tags.\n'
' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
'forbids braces around @inheritdoc JsDoc tags.\n'
' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
'around types in JsDoc tags.\n'
' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
'use of optional marker = in param types.\n'
' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
'unused private variables.\n'
' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for '
'unused local variables.\n')
def ShouldCheck(rule):
"""Returns whether the optional rule should be checked.
Computes the different flags (strict, jslint_error) to find out if
this specific rule should be checked.
Args:
rule: Name of the rule (see Rule).
Returns:
True if the rule should be checked according to the flags, otherwise False.
"""
if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error:
return True
# Checks strict rules.
return FLAGS.strict and rule in Rule.CLOSURE_RULES
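# The decision table above is easier to see with the gflags lookups made
# explicit; this standalone sketch mirrors ShouldCheck with plain arguments
# (the function and parameter names here are illustrative, not part of the
# original module).
def _ShouldCheckExplicit(rule, jslint_error=(), strict=False,
                         closure_rules=Rule.CLOSURE_RULES):
  if rule in jslint_error or Rule.ALL in jslint_error:
    return True
  return strict and rule in closure_rules
assert _ShouldCheckExplicit(Rule.INDENTATION, jslint_error=[Rule.ALL])
assert _ShouldCheckExplicit(Rule.INDENTATION, strict=True)
assert not _ShouldCheckExplicit(Rule.INDENTATION)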

618
tools/closure_linter/build/lib/closure_linter/error_fixer.py

@@ -1,618 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main class responsible for automatically fixing simple style violations."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = 'robbyw@google.com (Robert Walker)'
import re
import gflags as flags
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import errorhandler
# Shorthand
Token = javascripttokens.JavaScriptToken
Type = javascripttokens.JavaScriptTokenType
END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')
# Regex to represent common mistake inverting author name and email as
# @author User Name (user@company)
INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
r'(?P<name>[^(]+)'
r'(?P<whitespace_after_name>\s+)'
r'\('
r'(?P<email>[^\s]+@[^)\s]+)'
r'\)'
r'(?P<trailing_characters>.*)')
FLAGS = flags.FLAGS
flags.DEFINE_boolean('disable_indentation_fixing', False,
'Whether to disable automatic fixing of indentation.')
flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to '
'fix. Defaults to all supported error codes when empty. '
'See errors.py for a list of error codes.')
class ErrorFixer(errorhandler.ErrorHandler):
"""Object that fixes simple style errors."""
def __init__(self, external_file=None):
"""Initialize the error fixer.
Args:
external_file: If included, all output will be directed to this file
instead of overwriting the files the errors are found in.
"""
errorhandler.ErrorHandler.__init__(self)
self._file_name = None
self._file_token = None
self._external_file = external_file
try:
self._fix_error_codes = set([errors.ByName(error.upper()) for error in
FLAGS.fix_error_codes])
except KeyError as ke:
raise ValueError('Unknown error code ' + ke.args[0])
def HandleFile(self, filename, first_token):
"""Notifies this ErrorPrinter that subsequent errors are in filename.
Args:
filename: The name of the file about to be checked.
first_token: The first token in the file.
"""
self._file_name = filename
self._file_is_html = filename.endswith('.html') or filename.endswith('.htm')
self._file_token = first_token
self._file_fix_count = 0
self._file_changed_lines = set()
def _AddFix(self, tokens):
"""Adds the fix to the internal count.
Args:
tokens: The token or sequence of tokens changed to fix an error.
"""
self._file_fix_count += 1
if hasattr(tokens, 'line_number'):
self._file_changed_lines.add(tokens.line_number)
else:
for token in tokens:
self._file_changed_lines.add(token.line_number)
def _FixJsDocPipeNull(self, js_type):
"""Change number|null or null|number to ?number.
Args:
js_type: The typeannotation.TypeAnnotation instance to fix.
"""
# Recurse into all sub_types if the error was at a deeper level.
map(self._FixJsDocPipeNull, js_type.IterTypes())
if js_type.type_group and len(js_type.sub_types) == 2:
# Find and remove the null sub_type:
sub_type = None
for sub_type in js_type.sub_types:
if sub_type.identifier == 'null':
map(tokenutil.DeleteToken, sub_type.tokens)
self._AddFix(sub_type.tokens)
break
else:
return
first_token = js_type.FirstToken()
question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line,
first_token.line_number)
tokenutil.InsertTokenBefore(question_mark, first_token)
js_type.tokens.insert(0, question_mark)
js_type.tokens.remove(sub_type)
js_type.or_null = True
# Now also remove the separator, which is in the parent's token list,
# either before or after the sub_type; there is exactly one. Scan for it.
for token in js_type.tokens:
if (token and isinstance(token, Token) and
token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'):
tokenutil.DeleteToken(token)
self._AddFix(token)
break
def HandleError(self, error):
"""Attempts to fix the error.
Args:
error: The error object
"""
code = error.code
token = error.token
if self._fix_error_codes and code not in self._fix_error_codes:
return
if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
self._FixJsDocPipeNull(token.attached_object.jstype)
elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
iterator = token.attached_object.type_end_token
if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
iterator = iterator.previous
ending_space = len(iterator.string) - len(iterator.string.rstrip())
iterator.string = '%s=%s' % (iterator.string.rstrip(),
' ' * ending_space)
# Create a new flag object with updated type info.
token.attached_object = javascriptstatetracker.JsDocFlag(token)
self._AddFix(token)
elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
iterator = token.attached_object.type_start_token
if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
iterator = iterator.next
starting_space = len(iterator.string) - len(iterator.string.lstrip())
iterator.string = '%s...%s' % (' ' * starting_space,
iterator.string.lstrip())
# Create a new flag object with updated type info.
token.attached_object = javascriptstatetracker.JsDocFlag(token)
self._AddFix(token)
elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
errors.MISSING_SEMICOLON):
semicolon_token = Token(';', Type.SEMICOLON, token.line,
token.line_number)
tokenutil.InsertTokenAfter(semicolon_token, token)
token.metadata.is_implied_semicolon = False
semicolon_token.metadata.is_implied_semicolon = False
self._AddFix(token)
elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
errors.REDUNDANT_SEMICOLON,
errors.COMMA_AT_END_OF_LITERAL):
self._DeleteToken(token)
self._AddFix(token)
elif code == errors.INVALID_JSDOC_TAG:
if token.string == '@returns':
token.string = '@return'
self._AddFix(token)
elif code == errors.FILE_MISSING_NEWLINE:
# This error is fixed implicitly by the way we restore the file
self._AddFix(token)
elif code == errors.MISSING_SPACE:
if error.fix_data:
token.string = error.fix_data
self._AddFix(token)
elif error.position:
if error.position.IsAtBeginning():
tokenutil.InsertSpaceTokenAfter(token.previous)
elif error.position.IsAtEnd(token.string):
tokenutil.InsertSpaceTokenAfter(token)
else:
token.string = error.position.Set(token.string, ' ')
self._AddFix(token)
elif code == errors.EXTRA_SPACE:
if error.position:
token.string = error.position.Set(token.string, '')
self._AddFix(token)
elif code == errors.MISSING_LINE:
if error.position.IsAtBeginning():
tokenutil.InsertBlankLineAfter(token.previous)
else:
tokenutil.InsertBlankLineAfter(token)
self._AddFix(token)
elif code == errors.EXTRA_LINE:
self._DeleteToken(token)
self._AddFix(token)
elif code == errors.WRONG_BLANK_LINE_COUNT:
if not token.previous:
# TODO(user): Add an insertBefore method to tokenutil.
return
num_lines = error.fix_data
should_delete = False
if num_lines < 0:
num_lines *= -1
should_delete = True
for unused_i in xrange(1, num_lines + 1):
if should_delete:
# TODO(user): DeleteToken should update line numbers.
self._DeleteToken(token.previous)
else:
tokenutil.InsertBlankLineAfter(token.previous)
self._AddFix(token)
elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
if end_quote:
single_quote_start = Token(
"'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
single_quote_end = Token(
"'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
token.line_number)
tokenutil.InsertTokenAfter(single_quote_start, token)
tokenutil.InsertTokenAfter(single_quote_end, end_quote)
self._DeleteToken(token)
self._DeleteToken(end_quote)
self._AddFix([token, end_quote])
elif code == errors.MISSING_BRACES_AROUND_TYPE:
fixed_tokens = []
start_token = token.attached_object.type_start_token
if start_token.type != Type.DOC_START_BRACE:
leading_space = (
len(start_token.string) - len(start_token.string.lstrip()))
if leading_space:
start_token = tokenutil.SplitToken(start_token, leading_space)
# Fix case where start and end token were the same.
if token.attached_object.type_end_token == start_token.previous:
token.attached_object.type_end_token = start_token
new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
start_token.line_number)
tokenutil.InsertTokenAfter(new_token, start_token.previous)
token.attached_object.type_start_token = new_token
fixed_tokens.append(new_token)
end_token = token.attached_object.type_end_token
if end_token.type != Type.DOC_END_BRACE:
# If the start token was a brace, the end token will be a
# FLAG_ENDING_TYPE token, if there wasn't a starting brace then
# the end token is the last token of the actual type.
last_type = end_token
if not fixed_tokens:
last_type = end_token.previous
while last_type.string.isspace():
last_type = last_type.previous
# If there was no starting brace then a lone end brace wouldn't have
# been type end token. Now that we've added any missing start brace,
# see if the last effective type token was an end brace.
if last_type.type != Type.DOC_END_BRACE:
trailing_space = (len(last_type.string) -
len(last_type.string.rstrip()))
if trailing_space:
tokenutil.SplitToken(last_type,
len(last_type.string) - trailing_space)
new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
last_type.line_number)
tokenutil.InsertTokenAfter(new_token, last_type)
token.attached_object.type_end_token = new_token
fixed_tokens.append(new_token)
self._AddFix(fixed_tokens)
elif code == errors.LINE_STARTS_WITH_OPERATOR:
# Remove whitespace following the operator so the line starts clean.
self._StripSpace(token, before=False)
# Remove the operator.
tokenutil.DeleteToken(token)
self._AddFix(token)
insertion_point = tokenutil.GetPreviousCodeToken(token)
# Insert a space between the previous token and the new operator.
space = Token(' ', Type.WHITESPACE, insertion_point.line,
insertion_point.line_number)
tokenutil.InsertTokenAfter(space, insertion_point)
# Insert the operator on the end of the previous line.
new_token = Token(token.string, token.type, insertion_point.line,
insertion_point.line_number)
tokenutil.InsertTokenAfter(new_token, space)
self._AddFix(new_token)
elif code == errors.LINE_ENDS_WITH_DOT:
# Remove whitespace preceding the operator to remove trailing whitespace.
self._StripSpace(token, before=True)
# Remove the dot.
tokenutil.DeleteToken(token)
self._AddFix(token)
insertion_point = tokenutil.GetNextCodeToken(token)
# Insert the dot at the beginning of the next line of code.
new_token = Token(token.string, token.type, insertion_point.line,
insertion_point.line_number)
tokenutil.InsertTokenBefore(new_token, insertion_point)
self._AddFix(new_token)
elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
require_start_token = error.fix_data
sorter = requireprovidesorter.RequireProvideSorter()
sorter.FixRequires(require_start_token)
self._AddFix(require_start_token)
elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
provide_start_token = error.fix_data
sorter = requireprovidesorter.RequireProvideSorter()
sorter.FixProvides(provide_start_token)
self._AddFix(provide_start_token)
elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
if token.previous.string == '{' and token.next.string == '}':
self._DeleteToken(token.previous)
self._DeleteToken(token.next)
self._AddFix([token])
elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
match = INVERTED_AUTHOR_SPEC.match(token.string)
if match:
token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
match.group('email'),
match.group('whitespace_after_name'),
match.group('name'),
match.group('trailing_characters'))
self._AddFix(token)
elif (code == errors.WRONG_INDENTATION and
not FLAGS.disable_indentation_fixing):
token = tokenutil.GetFirstTokenInSameLine(token)
actual = error.position.start
expected = error.position.length
# Handle cases where the first token is a parameter but has leading spaces.
if (len(token.string.lstrip()) == len(token.string) - actual and
token.string.lstrip()):
token.string = token.string.lstrip()
actual = 0
if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
token.string = token.string.lstrip() + (' ' * expected)
self._AddFix([token])
else:
# We need to add indentation.
new_token = Token(' ' * expected, Type.WHITESPACE,
token.line, token.line_number)
# Note that we'll never need to add indentation at the first line,
# since it will never be indented. Therefore it's safe to assume
# token.previous exists.
tokenutil.InsertTokenAfter(new_token, token.previous)
self._AddFix([token])
elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
errors.MISSING_END_OF_SCOPE_COMMENT]:
# Only fix cases where }); is found with no trailing content on the line
# other than a comment. Value of 'token' is set to } for this error.
if (token.type == Type.END_BLOCK and
token.next.type == Type.END_PAREN and
token.next.next.type == Type.SEMICOLON):
current_token = token.next.next.next
removed_tokens = []
while current_token and current_token.line_number == token.line_number:
if current_token.IsAnyType(Type.WHITESPACE,
Type.START_SINGLE_LINE_COMMENT,
Type.COMMENT):
removed_tokens.append(current_token)
current_token = current_token.next
else:
return
if removed_tokens:
self._DeleteTokens(removed_tokens[0], len(removed_tokens))
whitespace_token = Token(' ', Type.WHITESPACE, token.line,
token.line_number)
start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
token.line, token.line_number)
comment_token = Token(' goog.scope', Type.COMMENT, token.line,
token.line_number)
insertion_tokens = [whitespace_token, start_comment_token,
comment_token]
tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
self._AddFix(removed_tokens + insertion_tokens)
elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
num_delete_tokens = len(tokens_in_line)
# If the line being deleted is both preceded and followed by blank lines,
# delete one of the blank lines as well.
if (tokens_in_line[0].previous and tokens_in_line[-1].next
and tokens_in_line[0].previous.type == Type.BLANK_LINE
and tokens_in_line[-1].next.type == Type.BLANK_LINE):
num_delete_tokens += 1
self._DeleteTokens(tokens_in_line[0], num_delete_tokens)
self._AddFix(tokens_in_line)
elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
missing_namespaces = error.fix_data[0]
need_blank_line = error.fix_data[1] or (not token.previous)
insert_location = Token('', Type.NORMAL, '', token.line_number - 1)
dummy_first_token = insert_location
tokenutil.InsertTokenBefore(insert_location, token)
# If inserting a blank line, check that a blank line does not already
# exist before the token, to avoid extra blank lines.
if (need_blank_line and insert_location.previous
and insert_location.previous.type != Type.BLANK_LINE):
tokenutil.InsertBlankLineAfter(insert_location)
insert_location = insert_location.next
for missing_namespace in missing_namespaces:
new_tokens = self._GetNewRequireOrProvideTokens(
code == errors.MISSING_GOOG_PROVIDE,
missing_namespace, insert_location.line_number + 1)
tokenutil.InsertLineAfter(insert_location, new_tokens)
insert_location = new_tokens[-1]
self._AddFix(new_tokens)
# If inserting a blank line, check that a blank line does not already
# exist after the token, to avoid extra blank lines.
if (need_blank_line and insert_location.next
and insert_location.next.type != Type.BLANK_LINE):
tokenutil.InsertBlankLineAfter(insert_location)
tokenutil.DeleteToken(dummy_first_token)
def _StripSpace(self, token, before):
"""Strip whitespace tokens either preceding or following the given token.
Args:
token: The token.
before: If true, strip space before the token, if false, after it.
"""
token = token.previous if before else token.next
while token and token.type == Type.WHITESPACE:
tokenutil.DeleteToken(token)
token = token.previous if before else token.next
def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
"""Returns a list of tokens to create a goog.require/provide statement.
Args:
is_provide: True if getting tokens for a provide, False for require.
namespace: The required or provided namespaces to get tokens for.
line_number: The line number the new require or provide statement will be
on.
Returns:
Tokens to create a new goog.require or goog.provide statement.
"""
string = 'goog.require'
if is_provide:
string = 'goog.provide'
line_text = string + '(\'' + namespace + '\');\n'
return [
Token(string, Type.IDENTIFIER, line_text, line_number),
Token('(', Type.START_PAREN, line_text, line_number),
Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
Token(namespace, Type.STRING_TEXT, line_text, line_number),
Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
Token(')', Type.END_PAREN, line_text, line_number),
Token(';', Type.SEMICOLON, line_text, line_number)
]
def _DeleteToken(self, token):
"""Deletes the specified token from the linked list of tokens.
Updates instance variables pointing to tokens such as _file_token if
they reference the deleted token.
Args:
token: The token to delete.
"""
if token == self._file_token:
self._file_token = token.next
tokenutil.DeleteToken(token)
def _DeleteTokens(self, token, token_count):
"""Deletes the given number of tokens starting with the given token.
Updates instance variables pointing to tokens such as _file_token if
they reference the deleted token.
Args:
token: The first token to delete.
token_count: The total number of tokens to delete.
"""
if token == self._file_token:
for unused_i in xrange(token_count):
self._file_token = self._file_token.next
tokenutil.DeleteTokens(token, token_count)
def FinishFile(self):
"""Called when the current file has finished style checking.
Used to go back and fix any errors in the file. It currently supports both
js and html files. For js files it does a simple dump of all tokens, but in
order to support html files we need to merge the original file back together
with the new token set. This works because the tokenized html file is the
original html file with all non-js lines kept but blanked out, with one
blank line token per line of html.
"""
if self._file_fix_count:
# Get the original file content for html.
if self._file_is_html:
f = open(self._file_name, 'r')
original_lines = f.readlines()
f.close()
f = self._external_file
if not f:
error_noun = 'error' if self._file_fix_count == 1 else 'errors'
print 'Fixed %d %s in %s' % (
self._file_fix_count, error_noun, self._file_name)
f = open(self._file_name, 'w')
token = self._file_token
# Find the first token that has not been deleted.
while token.is_deleted:
token = token.next
# If something got inserted before first token (e.g. due to sorting)
# then move to start. Bug 8398202.
while token.previous:
token = token.previous
char_count = 0
line = ''
while token:
line += token.string
char_count += len(token.string)
if token.IsLastInLine():
# We distinguish whether a blank line in html came from the stripped
# original file or from a newly added error fix by looking at the
# "orig_line_number" field on the token. It is only set in the
# tokenizer, so for all error fixes the value should be None.
if (line or not self._file_is_html or
token.orig_line_number is None):
f.write(line)
f.write('\n')
else:
f.write(original_lines[token.orig_line_number - 1])
line = ''
if char_count > 80 and token.line_number in self._file_changed_lines:
print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
token.line_number, self._file_name)
char_count = 0
token = token.next
if not self._external_file:
# Close the file if we created it
f.close()
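# The INVERTED_AUTHOR_SPEC fix above can be exercised on its own; this sketch
# reuses the module's regex to rewrite an inverted @author line into the
# 'email (Name)' form. The sample input is made up for illustration.
def _FixInvertedAuthor(line):
  match = INVERTED_AUTHOR_SPEC.match(line)
  if not match:
    return line
  return '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
                           match.group('email'),
                           match.group('whitespace_after_name'),
                           match.group('name'),
                           match.group('trailing_characters'))
assert _FixInvertedAuthor(' Jane Doe (jane@example.com)') == (
    ' jane@example.com (Jane Doe)')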

57
tools/closure_linter/build/lib/closure_linter/error_fixer_test.py

@@ -1,57 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the error_fixer module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
import unittest as googletest
from closure_linter import error_fixer
from closure_linter import testutil
class ErrorFixerTest(googletest.TestCase):
"""Unit tests for error_fixer."""
def setUp(self):
self.error_fixer = error_fixer.ErrorFixer()
def testDeleteToken(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
second_token = start_token.next
self.error_fixer.HandleFile('test_file', start_token)
self.error_fixer._DeleteToken(start_token)
self.assertEqual(second_token, self.error_fixer._file_token)
def testDeleteTokens(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
fourth_token = start_token.next.next.next
self.error_fixer.HandleFile('test_file', start_token)
self.error_fixer._DeleteTokens(start_token, 3)
self.assertEqual(fourth_token, self.error_fixer._file_token)
_TEST_SCRIPT = """\
var x = 3;
"""
if __name__ == '__main__':
googletest.main()
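# The two tests above hinge on one invariant: when tokens are deleted from
# the doubly linked token list, _file_token must be re-pointed at the first
# surviving token. This toy list makes the invariant concrete; Node is an
# illustrative stand-in, not the linter's Token class.
class Node(object):
  def __init__(self, value):
    self.value, self.previous, self.next = value, None, None
def Link(values):
  nodes = [Node(v) for v in values]
  for a, b in zip(nodes, nodes[1:]):
    a.next, b.previous = b, a
  return nodes
def Delete(node):
  if node.previous:
    node.previous.next = node.next
  if node.next:
    node.next.previous = node.previous
_first, _second, _third = Link(['var', ' ', 'x'])
Delete(_first)        # like _DeleteToken(start_token) ...
_file_token = _second  # ... the fixer re-points _file_token at token.next
assert _file_token.previous is None and _file_token.next is _third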

66
tools/closure_linter/build/lib/closure_linter/errorrecord.py

@@ -1,66 +0,0 @@
#!/usr/bin/env python
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple, pickle-serializable class to represent a lint error."""
__author__ = 'nnaze@google.com (Nathan Naze)'
import gflags as flags
from closure_linter import errors
from closure_linter.common import erroroutput
FLAGS = flags.FLAGS
class ErrorRecord(object):
"""Record-keeping struct that can be serialized back from a process.
Attributes:
path: Path to the file.
error_string: Error string for the user.
new_error: Whether this is a "new error" (see errors.NEW_ERRORS).
"""
def __init__(self, path, error_string, new_error):
self.path = path
self.error_string = error_string
self.new_error = new_error
def MakeErrorRecord(path, error):
"""Make an error record with correctly formatted error string.
Errors cannot be serialized (pickled) across processes because of
their pointers to the complex token/context graph. We use an intermediary
serializable class to pass back just the relevant information.
Args:
path: Path of file the error was found in.
error: An error.Error instance.
Returns:
An ErrorRecord instance.
"""
new_error = error.code in errors.NEW_ERRORS
if FLAGS.unix_mode:
error_string = erroroutput.GetUnixErrorOutput(
path, error, new_error=new_error)
else:
error_string = erroroutput.GetErrorOutput(error, new_error=new_error)
return ErrorRecord(path, error_string, new_error)
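# The whole point of ErrorRecord is that it survives pickling, unlike a full
# error with its token/context pointers; a quick round trip makes that
# concrete. The record contents below are made up for illustration.
import pickle
_record = ErrorRecord('src/foo.js', 'Line 3, E:0010: Missing semicolon', False)
_clone = pickle.loads(pickle.dumps(_record))
assert (_clone.path, _clone.error_string, _clone.new_error) == (
    'src/foo.js', 'Line 3, E:0010: Missing semicolon', False)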

72
tools/closure_linter/build/lib/closure_linter/errorrules.py

@@ -1,72 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error rules class for Closure Linter."""
__author__ = 'robbyw@google.com (Robert Walker)'
import gflags as flags
from closure_linter import errors
FLAGS = flags.FLAGS
flags.DEFINE_boolean('jsdoc', True,
'Whether to report errors for missing JsDoc.')
flags.DEFINE_list('disable', None,
'Disable specific error. Usage Ex.: gjslint --disable 1,'
'0011 foo.js.')
flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
'without warning.', lower_bound=1)
disabled_error_nums = None
def GetMaxLineLength():
"""Returns allowed maximum length of line.
Returns:
Length of line allowed without any warning.
"""
return FLAGS.max_line_length
def ShouldReportError(error):
"""Whether the given error should be reported.
Returns:
True for all errors except missing documentation errors and disabled
errors. For missing documentation, it returns the value of the
jsdoc flag.
"""
global disabled_error_nums
if disabled_error_nums is None:
disabled_error_nums = []
if FLAGS.disable:
for error_str in FLAGS.disable:
error_num = 0
try:
error_num = int(error_str)
except ValueError:
pass
disabled_error_nums.append(error_num)
return ((FLAGS.jsdoc or error not in (
errors.MISSING_PARAMETER_DOCUMENTATION,
errors.MISSING_RETURN_DOCUMENTATION,
errors.MISSING_MEMBER_DOCUMENTATION,
errors.MISSING_PRIVATE,
errors.MISSING_JSDOC_TAG_THIS)) and
(not FLAGS.disable or error not in disabled_error_nums))
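# A pure-function restatement of ShouldReportError above, with the two flags
# passed explicitly rather than read from gflags globals; the function name
# and the inlined doc-error codes (210, 217, 220, 221, 225, from errors.py)
# are used here purely for illustration.
_DOC_ERRORS = frozenset([210, 217, 220, 221, 225])
def _ShouldReportExplicit(error, jsdoc=True, disable=()):
  if not jsdoc and error in _DOC_ERRORS:
    return False
  return error not in frozenset(int(e) for e in disable)
assert _ShouldReportExplicit(10)                      # MISSING_SEMICOLON
assert not _ShouldReportExplicit(210, jsdoc=False)    # doc error suppressed
assert not _ShouldReportExplicit(10, disable=['10'])  # explicitly disabled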

117
tools/closure_linter/build/lib/closure_linter/errorrules_test.py

@@ -1,117 +0,0 @@
#!/usr/bin/env python
# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Medium tests for the gjslint errorrules.
Currently it just verifies that warnings can't be disabled.
"""
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import erroraccumulator
flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
class ErrorRulesTest(googletest.TestCase):
"""Test case to for gjslint errorrules."""
def testNoMaxLineLengthFlagExists(self):
"""Tests that --max_line_length flag does not exists."""
self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict())
def testGetMaxLineLength(self):
"""Tests warning are reported for line greater than 80.
"""
# One line is > 100 characters and one is > 80 and < 100, so this should
# produce two line-too-long errors.
original = [
'goog.require(\'dummy.aa\');',
'',
'function a() {',
' dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
' + 14 + 15 + 16 + 17 + 18 + 19 + 20;',
' dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
' + 14 + 15 + 16 + 17 + 18;',
'}',
''
]
# Expect line too long.
expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG]
self._AssertErrors(original, expected)
def testNoDisableFlagExists(self):
"""Tests that --disable flag does not exists."""
self.assertTrue('disable' not in flags.FLAGS.FlagDict())
def testWarningsNotDisabled(self):
"""Tests warnings are reported when nothing is disabled.
"""
original = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.Dd\');',
'',
'function a() {',
' dummy.aa.i = 1;',
' dummy.Cc.i = 1;',
' dummy.Dd.i = 1;',
'}',
]
expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
errors.FILE_MISSING_NEWLINE]
self._AssertErrors(original, expected)
def _AssertErrors(self, original, expected_errors, include_header=True):
"""Asserts that the error fixer corrects original to expected."""
if include_header:
original = self._GetHeader() + original
# Trap gjslint's output and parse it to get the messages added.
error_accumulator = erroraccumulator.ErrorAccumulator()
runner.Run('testing.js', error_accumulator, source=original)
error_nums = [e.code for e in error_accumulator.GetErrors()]
error_nums.sort()
expected_errors.sort()
self.assertListEqual(error_nums, expected_errors)
def _GetHeader(self):
"""Returns a fake header for a JavaScript file."""
return [
'// Copyright 2011 Google Inc. All Rights Reserved.',
'',
'/**',
' * @fileoverview Fake file overview.',
' * @author fake@google.com (Fake Person)',
' */',
''
]
if __name__ == '__main__':
googletest.main()

154
tools/closure_linter/build/lib/closure_linter/errors.py

@@ -1,154 +0,0 @@
#!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error codes for JavaScript style checker."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
def ByName(name):
"""Get the error code for the given error name.
Args:
name: The name of the error
Returns:
The error code
"""
return globals()[name]
# "File-fatal" errors - these errors stop further parsing of a single file
FILE_NOT_FOUND = -1
FILE_DOES_NOT_PARSE = -2
# Spacing
EXTRA_SPACE = 1
MISSING_SPACE = 2
EXTRA_LINE = 3
MISSING_LINE = 4
ILLEGAL_TAB = 5
WRONG_INDENTATION = 6
WRONG_BLANK_LINE_COUNT = 7
# Semicolons
MISSING_SEMICOLON = 10
MISSING_SEMICOLON_AFTER_FUNCTION = 11
ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
REDUNDANT_SEMICOLON = 13
# Miscellaneous
ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
LINE_TOO_LONG = 110
LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121
LINE_ENDS_WITH_DOT = 122
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
UNUSED_PRIVATE_MEMBER = 132
UNUSED_LOCAL_VARIABLE = 133
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
EXTRA_GOOG_PROVIDE = 145
ALIAS_STMT_NEEDS_GOOG_REQUIRE = 146
# JsDoc
INVALID_JSDOC_TAG = 200
INVALID_USE_OF_DESC_TAG = 201
NO_BUG_NUMBER_AFTER_BUG_TAG = 202
MISSING_PARAMETER_DOCUMENTATION = 210
EXTRA_PARAMETER_DOCUMENTATION = 211
WRONG_PARAMETER_DOCUMENTATION = 212
MISSING_JSDOC_TAG_TYPE = 213
MISSING_JSDOC_TAG_DESCRIPTION = 214
MISSING_JSDOC_PARAM_NAME = 215
OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
MISSING_RETURN_DOCUMENTATION = 217
UNNECESSARY_RETURN_DOCUMENTATION = 218
MISSING_BRACES_AROUND_TYPE = 219
MISSING_MEMBER_DOCUMENTATION = 220
MISSING_PRIVATE = 221
EXTRA_PRIVATE = 222
INVALID_OVERRIDE_PRIVATE = 223
INVALID_INHERIT_DOC_PRIVATE = 224
MISSING_JSDOC_TAG_THIS = 225
UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
JSDOC_MISSING_OPTIONAL_TYPE = 232
JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_MISSING_VAR_ARGS_TYPE = 234
JSDOC_MISSING_VAR_ARGS_NAME = 235
JSDOC_DOES_NOT_PARSE = 236
# TODO(robbyw): Split this in to more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
INVALID_SUPPRESS_TYPE = 251
UNNECESSARY_SUPPRESS = 252
# File ending
FILE_MISSING_NEWLINE = 300
FILE_IN_BLOCK = 301
# Interfaces
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
# Comments
MISSING_END_OF_SCOPE_COMMENT = 500
MALFORMED_END_OF_SCOPE_COMMENT = 501
# goog.scope - Namespace aliasing
# TODO(nnaze) Add additional errors here and in aliaspass.py
INVALID_USE_OF_GOOG_SCOPE = 600
EXTRA_GOOG_SCOPE_USAGE = 601
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
# All ActionScript specific errors should have error number at least 1000.
FUNCTION_MISSING_RETURN_TYPE = 1132
PARAMETER_MISSING_TYPE = 1133
VAR_MISSING_TYPE = 1134
PARAMETER_MISSING_DEFAULT_VALUE = 1135
IMPORTS_NOT_ALPHABETIZED = 1140
IMPORT_CONTAINS_WILDCARD = 1141
UNUSED_IMPORT = 1142
INVALID_TRACE_SEVERITY_LEVEL = 1250
MISSING_TRACE_SEVERITY_LEVEL = 1251
MISSING_TRACE_MESSAGE = 1252
REMOVE_TRACE_BEFORE_SUBMIT = 1253
REMOVE_COMMENT_BEFORE_SUBMIT = 1254
# End of list of ActionScript specific errors.
NEW_ERRORS = frozenset([
# Errors added after 2.0.2:
WRONG_INDENTATION,
MISSING_SEMICOLON,
# Errors added after 2.3.9:
JSDOC_MISSING_VAR_ARGS_TYPE,
JSDOC_MISSING_VAR_ARGS_NAME,
# Errors added after 2.3.15:
ALIAS_STMT_NEEDS_GOOG_REQUIRE,
JSDOC_DOES_NOT_PARSE,
LINE_ENDS_WITH_DOT,
# Errors added after 2.3.17:
])
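# ByName above is a plain globals() lookup, so the name-to-code mapping can
# be exercised directly; an unknown name raises KeyError, which
# error_fixer.py converts into a ValueError for the user.
assert ByName('MISSING_SEMICOLON') == 10
assert ByName('LINE_TOO_LONG') == 110
try:
  ByName('NO_SUCH_ERROR')
  raise AssertionError('expected KeyError')
except KeyError:
  pass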

66
tools/closure_linter/build/lib/closure_linter/fixjsstyle.py

@@ -1,66 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatically fix simple style guide violations."""
__author__ = 'robbyw@google.com (Robert Walker)'
import StringIO
import sys
import gflags as flags
from closure_linter import error_fixer
from closure_linter import runner
from closure_linter.common import simplefileflags as fileflags
FLAGS = flags.FLAGS
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
flags.DEFINE_boolean('dry_run', False, 'Do not modify the file, only print it.')
def main(argv=None):
"""Main function.
Args:
argv: Sequence of command line arguments.
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
output_buffer = None
if FLAGS.dry_run:
output_buffer = StringIO.StringIO()
fixer = error_fixer.ErrorFixer(output_buffer)
# Check the list of files.
for filename in files:
runner.Run(filename, fixer)
if FLAGS.dry_run:
print output_buffer.getvalue()
if __name__ == '__main__':
main()
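# The --dry_run branch above swaps the real file for an in-memory StringIO
# buffer and prints it instead of writing to disk. The same pattern in
# miniature (Python 2, to match the module; the function below is an
# illustrative sketch, not part of the original):
def _WriteReport(out=None):
  """Writes to the given file-like object, or to a throwaway buffer."""
  buf = out or StringIO.StringIO()
  buf.write('fixed 2 errors\n')
  return buf
assert _WriteReport().getvalue() == 'fixed 2 errors\n'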

615
tools/closure_linter/build/lib/closure_linter/fixjsstyle_test.py

@@ -1,615 +0,0 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Medium tests for the gpylint auto-fixer."""
__author__ = 'robbyw@google.com (Robby Walker)'
import StringIO
import gflags as flags
import unittest as googletest
from closure_linter import error_fixer
from closure_linter import runner
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
class FixJsStyleTest(googletest.TestCase):
"""Test case to for gjslint auto-fixing."""
def setUp(self):
flags.FLAGS.dot_on_next_line = True
def tearDown(self):
flags.FLAGS.dot_on_next_line = False
def testFixJsStyle(self):
test_cases = [
['fixjsstyle.in.js', 'fixjsstyle.out.js'],
['indentation.js', 'fixjsstyle.indentation.out.js'],
['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html'],
['fixjsstyle.oplineend.in.js', 'fixjsstyle.oplineend.out.js']]
for [running_input_file, running_output_file] in test_cases:
print 'Checking %s vs %s' % (running_input_file, running_output_file)
input_filename = None
golden_filename = None
current_filename = None
try:
input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
current_filename = input_filename
golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
current_filename = golden_filename
except IOError as ex:
raise IOError('Could not find testdata resource for %s: %s' %
(current_filename, ex))
if running_input_file == 'fixjsstyle.in.js':
with open(input_filename) as f:
for line in f:
# Go to last line.
pass
self.assertTrue(line == line.rstrip(), '%s file should not end '
'with a new line.' % (input_filename))
# Autofix the file, sending output to a fake file.
actual = StringIO.StringIO()
runner.Run(input_filename, error_fixer.ErrorFixer(actual))
# Now compare the files.
actual.seek(0)
expected = open(golden_filename, 'r')
# Uncomment to generate new golden files and run
# open('/'.join(golden_filename.split('/')[4:]), 'w').write(actual.read())
# actual.seek(0)
self.assertEqual(actual.readlines(), expected.readlines())
def testAddProvideFirstLine(self):
"""Tests handling of case where goog.provide is added."""
original = [
'dummy.bb.cc = 1;',
]
expected = [
'goog.provide(\'dummy.bb\');',
'',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
def testAddRequireFirstLine(self):
"""Tests handling of case where goog.require is added."""
original = [
'a = dummy.bb.cc;',
]
expected = [
'goog.require(\'dummy.bb\');',
'',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
def testDeleteProvideAndAddProvideFirstLine(self):
"""Tests handling of case where goog.provide is deleted and added.
Bug 14832597.
"""
original = [
'goog.provide(\'dummy.aa\');',
'',
'dummy.bb.cc = 1;',
]
expected = [
'goog.provide(\'dummy.bb\');',
'',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'goog.provide(\'dummy.aa\');',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
def testDeleteProvideAndAddRequireFirstLine(self):
"""Tests handling where goog.provide is deleted and goog.require added.
Bug 14832597.
"""
original = [
'goog.provide(\'dummy.aa\');',
'',
'a = dummy.bb.cc;',
]
expected = [
'goog.require(\'dummy.bb\');',
'',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'goog.provide(\'dummy.aa\');',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
def testDeleteRequireAndAddRequireFirstLine(self):
"""Tests handling of case where goog.require is deleted and added.
Bug 14832597.
"""
original = [
'goog.require(\'dummy.aa\');',
'',
'a = dummy.bb.cc;',
]
expected = [
'goog.require(\'dummy.bb\');',
'',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'goog.require(\'dummy.aa\');',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
def testDeleteRequireAndAddProvideFirstLine(self):
"""Tests handling where goog.require is deleted and goog.provide added.
Bug 14832597.
"""
original = [
'goog.require(\'dummy.aa\');',
'',
'dummy.bb.cc = 1;',
]
expected = [
'goog.provide(\'dummy.bb\');',
'',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'goog.require(\'dummy.aa\');',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
def testMultipleProvideInsert(self):
original = [
'goog.provide(\'dummy.bb\');',
'goog.provide(\'dummy.dd\');',
'',
'dummy.aa.ff = 1;',
'dummy.bb.ff = 1;',
'dummy.cc.ff = 1;',
'dummy.dd.ff = 1;',
'dummy.ee.ff = 1;',
]
expected = [
'goog.provide(\'dummy.aa\');',
'goog.provide(\'dummy.bb\');',
'goog.provide(\'dummy.cc\');',
'goog.provide(\'dummy.dd\');',
'goog.provide(\'dummy.ee\');',
'',
'dummy.aa.ff = 1;',
'dummy.bb.ff = 1;',
'dummy.cc.ff = 1;',
'dummy.dd.ff = 1;',
'dummy.ee.ff = 1;',
]
self._AssertFixes(original, expected, include_header=False)
def testMultipleRequireInsert(self):
original = [
'goog.require(\'dummy.bb\');',
'goog.require(\'dummy.dd\');',
'',
'a = dummy.aa.ff;',
'b = dummy.bb.ff;',
'c = dummy.cc.ff;',
'd = dummy.dd.ff;',
'e = dummy.ee.ff;',
]
expected = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.bb\');',
'goog.require(\'dummy.cc\');',
'goog.require(\'dummy.dd\');',
'goog.require(\'dummy.ee\');',
'',
'a = dummy.aa.ff;',
'b = dummy.bb.ff;',
'c = dummy.cc.ff;',
'd = dummy.dd.ff;',
'e = dummy.ee.ff;',
]
self._AssertFixes(original, expected, include_header=False)
def testUnsortedRequires(self):
"""Tests handling of unsorted goog.require statements without header.
Bug 8398202.
"""
original = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.Dd\');',
'',
'function a() {',
' dummy.aa.i = 1;',
' dummy.Cc.i = 1;',
' dummy.Dd.i = 1;',
'}',
]
expected = [
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.Dd\');',
'goog.require(\'dummy.aa\');',
'',
'function a() {',
' dummy.aa.i = 1;',
' dummy.Cc.i = 1;',
' dummy.Dd.i = 1;',
'}',
]
self._AssertFixes(original, expected, include_header=False)
def testMissingExtraAndUnsortedRequires(self):
"""Tests handling of missing extra and unsorted goog.require statements."""
original = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.Dd\');',
'',
'var x = new dummy.Bb();',
'dummy.Cc.someMethod();',
'dummy.aa.someMethod();',
]
expected = [
'goog.require(\'dummy.Bb\');',
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.aa\');',
'',
'var x = new dummy.Bb();',
'dummy.Cc.someMethod();',
'dummy.aa.someMethod();',
]
self._AssertFixes(original, expected)
def testExtraRequireOnFirstLine(self):
"""Tests handling of extra goog.require statement on the first line.
There was a bug when fixjsstyle quits with an exception. It happened if
- the first line of the file is an extra goog.require() statement,
- goog.require() statements are not sorted.
"""
original = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.cc\');',
'goog.require(\'dummy.bb\');',
'',
'var x = new dummy.bb();',
'var y = new dummy.cc();',
]
expected = [
'goog.require(\'dummy.bb\');',
'goog.require(\'dummy.cc\');',
'',
'var x = new dummy.bb();',
'var y = new dummy.cc();',
]
self._AssertFixes(original, expected, include_header=False)
def testUnsortedProvides(self):
"""Tests handling of unsorted goog.provide statements without header.
Bug 8398202.
"""
original = [
'goog.provide(\'dummy.aa\');',
'goog.provide(\'dummy.Cc\');',
'goog.provide(\'dummy.Dd\');',
'',
'dummy.aa = function() {};',
'dummy.Cc = function() {};',
'dummy.Dd = function() {};',
]
expected = [
'goog.provide(\'dummy.Cc\');',
'goog.provide(\'dummy.Dd\');',
'goog.provide(\'dummy.aa\');',
'',
'dummy.aa = function() {};',
'dummy.Cc = function() {};',
'dummy.Dd = function() {};',
]
self._AssertFixes(original, expected, include_header=False)
def testMissingExtraAndUnsortedProvides(self):
"""Tests handling of missing extra and unsorted goog.provide statements."""
original = [
'goog.provide(\'dummy.aa\');',
'goog.provide(\'dummy.Cc\');',
'goog.provide(\'dummy.Dd\');',
'',
'dummy.Cc = function() {};',
'dummy.Bb = function() {};',
'dummy.aa.someMethod = function();',
]
expected = [
'goog.provide(\'dummy.Bb\');',
'goog.provide(\'dummy.Cc\');',
'goog.provide(\'dummy.aa\');',
'',
'dummy.Cc = function() {};',
'dummy.Bb = function() {};',
'dummy.aa.someMethod = function();',
]
self._AssertFixes(original, expected)
def testNoRequires(self):
"""Tests positioning of missing requires without existing requires."""
original = [
'goog.provide(\'dummy.Something\');',
'',
'dummy.Something = function() {};',
'',
'var x = new dummy.Bb();',
]
expected = [
'goog.provide(\'dummy.Something\');',
'',
'goog.require(\'dummy.Bb\');',
'',
'dummy.Something = function() {};',
'',
'var x = new dummy.Bb();',
]
self._AssertFixes(original, expected)
def testNoProvides(self):
"""Tests positioning of missing provides without existing provides."""
original = [
'goog.require(\'dummy.Bb\');',
'',
'dummy.Something = function() {};',
'',
'var x = new dummy.Bb();',
]
expected = [
'goog.provide(\'dummy.Something\');',
'',
'goog.require(\'dummy.Bb\');',
'',
'dummy.Something = function() {};',
'',
'var x = new dummy.Bb();',
]
self._AssertFixes(original, expected)
def testOutputOkayWhenFirstTokenIsDeleted(self):
"""Tests that autofix output is is correct when first token is deleted.
Regression test for bug 4581567
"""
original = ['"use strict";']
expected = ["'use strict';"]
self._AssertFixes(original, expected, include_header=False)
def testGoogScopeIndentation(self):
"""Tests Handling a typical end-of-scope indentation fix."""
original = [
'goog.scope(function() {',
' // TODO(brain): Take over the world.',
'}); // goog.scope',
]
expected = [
'goog.scope(function() {',
'// TODO(brain): Take over the world.',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testMissingEndOfScopeComment(self):
"""Tests Handling a missing comment at end of goog.scope."""
original = [
'goog.scope(function() {',
'});',
]
expected = [
'goog.scope(function() {',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testMissingEndOfScopeCommentWithOtherComment(self):
"""Tests handling an irrelevant comment at end of goog.scope."""
original = [
'goog.scope(function() {',
"}); // I don't belong here!",
]
expected = [
'goog.scope(function() {',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testMalformedEndOfScopeComment(self):
"""Tests Handling a malformed comment at end of goog.scope."""
original = [
'goog.scope(function() {',
'}); // goog.scope FTW',
]
expected = [
'goog.scope(function() {',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testEndsWithIdentifier(self):
"""Tests Handling case where script ends with identifier. Bug 7643404."""
original = [
'goog.provide(\'xyz\');',
'',
'abc'
]
expected = [
'goog.provide(\'xyz\');',
'',
'abc;'
]
self._AssertFixes(original, expected)
def testFileStartsWithSemicolon(self):
"""Tests handling files starting with semicolon.
b/10062516
"""
original = [
';goog.provide(\'xyz\');',
'',
'abc;'
]
expected = [
'goog.provide(\'xyz\');',
'',
'abc;'
]
self._AssertFixes(original, expected, include_header=False)
def testCodeStartsWithSemicolon(self):
"""Tests handling code in starting with semicolon after comments.
b/10062516
"""
original = [
';goog.provide(\'xyz\');',
'',
'abc;'
]
expected = [
'goog.provide(\'xyz\');',
'',
'abc;'
]
self._AssertFixes(original, expected)
def _AssertFixes(self, original, expected, include_header=True):
"""Asserts that the error fixer corrects original to expected."""
if include_header:
original = self._GetHeader() + original
expected = self._GetHeader() + expected
actual = StringIO.StringIO()
runner.Run('testing.js', error_fixer.ErrorFixer(actual), original)
actual.seek(0)
expected = [x + '\n' for x in expected]
self.assertListEqual(actual.readlines(), expected)
def _GetHeader(self):
"""Returns a fake header for a JavaScript file."""
return [
'// Copyright 2011 Google Inc. All Rights Reserved.',
'',
'/**',
' * @fileoverview Fake file overview.',
' * @author fake@google.com (Fake Person)',
' */',
''
]
if __name__ == '__main__':
googletest.main()

121
tools/closure_linter/build/lib/closure_linter/full_test.py

@@ -1,121 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full regression-type (Medium) tests for gjslint.
Tests every error that can be thrown by gjslint. Based heavily on
devtools/javascript/gpylint/full_test.py
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import error_check
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
flags.FLAGS.jslint_error = error_check.Rule.ALL
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
# TODO(user): Figure out how to list the directory.
_TEST_FILES = [
'all_js_wrapped.js',
'blank_lines.js',
'ends_with_block.js',
'empty_file.js',
'externs.js',
'externs_jsdoc.js',
'goog_scope.js',
'html_parse_error.html',
'indentation.js',
'interface.js',
'jsdoc.js',
'limited_doc_checks.js',
'minimal.js',
'other.js',
'provide_blank.js',
'provide_extra.js',
'provide_missing.js',
'require_alias.js',
'require_all_caps.js',
'require_blank.js',
'require_extra.js',
'require_function.js',
'require_function_missing.js',
'require_function_through_both.js',
'require_function_through_namespace.js',
'require_interface.js',
'require_interface_alias.js',
'require_interface_base.js',
'require_lower_case.js',
'require_missing.js',
'require_numeric.js',
'require_provide_blank.js',
'require_provide_missing.js',
'require_provide_ok.js',
'semicolon_missing.js',
'simple.html',
'spaces.js',
'tokenizer.js',
'unparseable.js',
'unused_local_variables.js',
'unused_private_members.js',
'utf8.html',
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(
filetestcase.AnnotatedFileTestCase(
resource_path,
runner.Run,
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')

319
tools/closure_linter/build/lib/closure_linter/gjslint.py

@@ -1,319 +0,0 @@
#!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checks JavaScript files for common style guide violations.
gjslint.py is designed to be used as a PRESUBMIT script to check for JavaScript
style guide violations. As of now, it checks for the following violations:
* Missing and extra spaces
* Lines longer than 80 characters
* Missing newline at end of file
* Missing semicolon after function declaration
* Valid JsDoc including parameter matching
Someday it will validate to the best of its ability against the entirety of the
JavaScript style guide.
This file is a front end that parses arguments and flags. The core of the code
is in tokenizer.py and checker.py.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'nnaze@google.com (Nathan Naze)',)
import errno
import itertools
import os
import platform
import re
import sys
import time
import gflags as flags
from closure_linter import errorrecord
from closure_linter import runner
from closure_linter.common import erroraccumulator
from closure_linter.common import simplefileflags as fileflags
# Attempt import of multiprocessing (should be available in Python 2.6 and up).
try:
# pylint: disable=g-import-not-at-top
import multiprocessing
except ImportError:
multiprocessing = None
FLAGS = flags.FLAGS
flags.DEFINE_boolean('unix_mode', False,
'Whether to emit warnings in standard unix format.')
flags.DEFINE_boolean('beep', True, 'Whether to beep when errors are found.')
flags.DEFINE_boolean('time', False, 'Whether to emit timing statistics.')
flags.DEFINE_boolean('quiet', False, 'Whether to minimize logged messages. '
'Most useful for per-file linting, such as that performed '
'by the presubmit linter service.')
flags.DEFINE_boolean('check_html', False,
'Whether to check javascript in html files.')
flags.DEFINE_boolean('summary', False,
'Whether to show an error count summary.')
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
flags.DEFINE_boolean('multiprocess',
platform.system() == 'Linux' and bool(multiprocessing),
'Whether to attempt parallelized linting using the '
'multiprocessing module. Enabled by default on Linux '
'if the multiprocessing module is present (Python 2.6+). '
'Otherwise disabled by default. '
'Disabling may make debugging easier.')
flags.ADOPT_module_key_flags(fileflags)
flags.ADOPT_module_key_flags(runner)
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
'--check_html', '--summary', '--quiet']
def _MultiprocessCheckPaths(paths):
"""Run _CheckPath over mutltiple processes.
Tokenization, passes, and checks are expensive operations. Running in a
single process, they can only run on one CPU/core. Instead,
shard out linting over all CPUs with multiprocessing to parallelize.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
pool = multiprocessing.Pool()
path_results = pool.imap(_CheckPath, paths)
for results in path_results:
for result in results:
yield result
# Force destruct before returning, as this can sometimes raise spurious
# "interrupted system call" (EINTR), which we can ignore.
try:
pool.close()
pool.join()
del pool
except OSError as err:
if err.errno != errno.EINTR:
raise err
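# Note: multiprocessing.Pool.imap yields results in the order the paths
# were submitted, so the error output ordering matches the serial
# _CheckPaths path below.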
def _CheckPaths(paths):
"""Run _CheckPath on all paths in one thread.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
for path in paths:
results = _CheckPath(path)
for record in results:
yield record
def _CheckPath(path):
"""Check a path and return any errors.
Args:
path: the path to check.
Returns:
A list of errorrecord.ErrorRecords for any found errors.
"""
error_handler = erroraccumulator.ErrorAccumulator()
runner.Run(path, error_handler)
make_error_record = lambda err: errorrecord.MakeErrorRecord(path, err)
return map(make_error_record, error_handler.GetErrors())
def _GetFilePaths(argv):
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
return fileflags.GetFileList(argv, 'JavaScript', suffixes)
# Error printing functions
def _PrintFileSummary(paths, records):
"""Print a detailed summary of the number of errors in each file."""
paths = list(paths)
paths.sort()
for path in paths:
path_errors = [e for e in records if e.path == path]
print '%s: %d' % (path, len(path_errors))
def _PrintFileSeparator(path):
print '----- FILE : %s -----' % path
def _PrintSummary(paths, error_records):
"""Print a summary of the number of errors and files."""
error_count = len(error_records)
all_paths = set(paths)
all_paths_count = len(all_paths)
if error_count == 0:
print '%d files checked, no errors found.' % all_paths_count
new_error_count = len([e for e in error_records if e.new_error])
error_paths = set([e.path for e in error_records])
error_paths_count = len(error_paths)
no_error_paths_count = all_paths_count - error_paths_count
if (error_count or new_error_count) and not FLAGS.quiet:
error_noun = 'error' if error_count == 1 else 'errors'
new_error_noun = 'error' if new_error_count == 1 else 'errors'
error_file_noun = 'file' if error_paths_count == 1 else 'files'
ok_file_noun = 'file' if no_error_paths_count == 1 else 'files'
print ('Found %d %s, including %d new %s, in %d %s (%d %s OK).' %
(error_count,
error_noun,
new_error_count,
new_error_noun,
error_paths_count,
error_file_noun,
no_error_paths_count,
ok_file_noun))
def _PrintErrorRecords(error_records):
"""Print error records strings in the expected format."""
current_path = None
for record in error_records:
if current_path != record.path:
current_path = record.path
if not FLAGS.unix_mode:
_PrintFileSeparator(current_path)
print record.error_string
def _FormatTime(t):
"""Formats a duration as a human-readable string.
Args:
t: A duration in seconds.
Returns:
A formatted duration string.
"""
if t < 1:
return '%dms' % round(t * 1000)
else:
return '%.2fs' % t
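# Illustrative examples: _FormatTime(0.25) returns '250ms' and
# _FormatTime(2.5) returns '2.50s'.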
def main(argv=None):
"""Main function.
Args:
argv: Sequence of command line arguments.
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
if FLAGS.time:
start_time = time.time()
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html:
suffixes += ['.html', '.htm']
paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
if FLAGS.multiprocess:
records_iter = _MultiprocessCheckPaths(paths)
else:
records_iter = _CheckPaths(paths)
records_iter, records_iter_copy = itertools.tee(records_iter, 2)
_PrintErrorRecords(records_iter_copy)
error_records = list(records_iter)
_PrintSummary(paths, error_records)
exit_code = 0
# If there are any errors
if error_records:
exit_code += 1
# If there are any new errors
if [r for r in error_records if r.new_error]:
exit_code += 2
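# Resulting exit status: 0 (no errors), 1 (errors, but none new) or
# 3 (errors including new ones); a status of 2 alone cannot occur, since
# any new error is also counted as an error above.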
if exit_code:
if FLAGS.summary:
_PrintFileSummary(paths, error_records)
if FLAGS.beep:
# Make a beep noise.
sys.stdout.write(chr(7))
# Write out instructions for using fixjsstyle script to fix some of the
# reported errors.
fix_args = []
for flag in sys.argv[1:]:
for f in GJSLINT_ONLY_FLAGS:
if flag.startswith(f):
break
else:
fix_args.append(flag)
if not FLAGS.quiet:
print """
Some of the errors reported by GJsLint may be auto-fixable using the script
fixjsstyle. Please double check any changes it makes and report any bugs. The
script can be run by executing:
fixjsstyle %s """ % ' '.join(fix_args)
if FLAGS.time:
print 'Done in %s.' % _FormatTime(time.time() - start_time)
sys.exit(exit_code)
if __name__ == '__main__':
main()

617
tools/closure_linter/build/lib/closure_linter/indentation.py

@@ -1,617 +0,0 @@
#!/usr/bin/env python
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for checking EcmaScript files for indentation issues."""
__author__ = ('robbyw@google.com (Robert Walker)')
import gflags as flags
from closure_linter import ecmametadatapass
from closure_linter import errors
from closure_linter import javascripttokens
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
flags.DEFINE_boolean('debug_indentation', False,
'Whether to print debugging information for indentation.')
# Shorthand
Context = ecmametadatapass.EcmaContext
Error = error.Error
Position = position.Position
Type = javascripttokens.JavaScriptTokenType
# The general approach:
#
# 1. Build a stack of tokens that can affect indentation.
# For each token, we determine if it is a block or continuation token.
# Some tokens need to be temporarily overwritten in case they are removed
# before the end of the line.
# Much of the work here is determining which tokens to keep on the stack
# at each point. Operators, for example, should be removed once their
# expression or line is gone, while parentheses must stay until the matching
# end parentheses is found.
#
# 2. Given that stack, determine the allowable indentations.
# Due to flexible indentation rules in JavaScript, there may be many
# allowable indentations for each stack. We follow the general
# "no false positives" approach of GJsLint and build the most permissive
# set possible.
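# Illustrative sketch (editor's example, not from the original source): for
#   foo(bar,
#       baz + 10);
# the '(' stays on the stack until its END_PAREN and yields a hard stop one
# column past it (hence the aligned 'baz'), while a trailing operator such
# as '+' is a transient continuation token popped again at the ';'.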
class TokenInfo(object):
"""Stores information about a token.
Attributes:
token: The token
is_block: Whether the token represents a block indentation.
is_transient: Whether the token should be automatically removed without
finding a matching end token.
overridden_by: TokenInfo for a token that overrides the indentation that
this token would require.
is_permanent_override: Whether the override on this token should persist
even after the overriding token is removed from the stack. For example:
x([
1],
2);
needs this to be set so the last line is not required to be a continuation
indent.
line_number: The effective line number of this token. Will either be the
actual line number or the one before it in the case of a mis-wrapped
operator.
"""
def __init__(self, token, is_block=False):
"""Initializes a TokenInfo object.
Args:
token: The token
is_block: Whether the token represents a block indentation.
"""
self.token = token
self.overridden_by = None
self.is_permanent_override = False
self.is_block = is_block
self.is_transient = not is_block and token.type not in (
Type.START_PAREN, Type.START_PARAMETERS)
self.line_number = token.line_number
def __repr__(self):
result = '\n %s' % self.token
if self.overridden_by:
result = '%s OVERRIDDEN [by "%s"]' % (
result, self.overridden_by.token.string)
result += ' {is_block: %s, is_transient: %s}' % (
self.is_block, self.is_transient)
return result
class IndentationRules(object):
"""EmcaScript indentation rules.
Can be used to find common indentation errors in JavaScript, ActionScript and
other Ecma like scripting languages.
"""
def __init__(self):
"""Initializes the IndentationRules checker."""
self._stack = []
# Map from line number to number of characters it is off in indentation.
self._start_index_offset = {}
def Finalize(self):
if self._stack:
old_stack = self._stack
self._stack = []
raise Exception('INTERNAL ERROR: indentation stack is not empty: %r' %
old_stack)
def CheckToken(self, token, state):
"""Checks a token for indentation errors.
Args:
token: The current token under consideration
state: Additional information about the current tree state
Returns:
An error array [error code, error string, error token] if the token is
improperly indented, or None if indentation is correct.
"""
token_type = token.type
indentation_errors = []
stack = self._stack
is_first = self._IsFirstNonWhitespaceTokenInLine(token)
# Add tokens that could decrease indentation before checking.
if token_type == Type.END_PAREN:
self._PopTo(Type.START_PAREN)
elif token_type == Type.END_PARAMETERS:
self._PopTo(Type.START_PARAMETERS)
elif token_type == Type.END_BRACKET:
self._PopTo(Type.START_BRACKET)
elif token_type == Type.END_BLOCK:
start_token = self._PopTo(Type.START_BLOCK)
# Check for required goog.scope comment.
if start_token:
goog_scope = tokenutil.GoogScopeOrNoneFromStartBlock(start_token.token)
if goog_scope is not None:
if not token.line.endswith('; // goog.scope\n'):
if (token.line.find('//') > -1 and
token.line.find('goog.scope') >
token.line.find('//')):
indentation_errors.append([
errors.MALFORMED_END_OF_SCOPE_COMMENT,
('Malformed end of goog.scope comment. Please use the '
'exact following syntax to close the scope:\n'
'}); // goog.scope'),
token,
Position(token.start_index, token.length)])
else:
indentation_errors.append([
errors.MISSING_END_OF_SCOPE_COMMENT,
('Missing comment for end of goog.scope which opened at line '
'%d. End the scope with:\n'
'}); // goog.scope' %
(start_token.line_number)),
token,
Position(token.start_index, token.length)])
elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
self._Add(self._PopTo(Type.START_BLOCK))
elif token_type == Type.SEMICOLON:
self._PopTransient()
if (is_first and
token_type not in (Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT)):
if flags.FLAGS.debug_indentation:
print 'Line #%d: stack %r' % (token.line_number, stack)
# Ignore lines that start in JsDoc since we don't check them properly yet.
# TODO(robbyw): Support checking JsDoc indentation.
# Ignore lines that start as multi-line strings since indentation is N/A.
# Ignore lines that start with operators since we report that already.
# Ignore lines with tabs since we report that already.
expected = self._GetAllowableIndentations()
actual = self._GetActualIndentation(token)
# Special case comments describing else, case, and default. Allow them
# to outdent to the parent block.
if token_type in Type.COMMENT_TYPES:
next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
if next_code and next_code.type == Type.END_BLOCK:
next_code = tokenutil.SearchExcept(next_code, Type.NON_CODE_TYPES)
if next_code and next_code.string in ('else', 'case', 'default'):
# TODO(robbyw): This almost certainly introduces false negatives.
expected |= self._AddToEach(expected, -2)
if actual >= 0 and actual not in expected:
expected = sorted(expected)
indentation_errors.append([
errors.WRONG_INDENTATION,
'Wrong indentation: expected any of {%s} but got %d' % (
', '.join('%d' % x for x in expected if x < 80), actual),
token,
Position(actual, expected[0])])
self._start_index_offset[token.line_number] = expected[0] - actual
# Add tokens that could increase indentation.
if token_type == Type.START_BRACKET:
self._Add(TokenInfo(
token=token,
is_block=token.metadata.context.type == Context.ARRAY_LITERAL))
elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
self._Add(TokenInfo(token=token, is_block=True))
elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
self._Add(TokenInfo(token=token, is_block=False))
elif token_type == Type.KEYWORD and token.string == 'return':
self._Add(TokenInfo(token))
elif not token.IsLastInLine() and (
token.IsAssignment() or token.IsOperator('?')):
self._Add(TokenInfo(token=token))
# Handle implied block closes.
if token.metadata.is_implied_block_close:
self._PopToImpliedBlock()
# Add some tokens only if they appear at the end of the line.
is_last = self._IsLastCodeInLine(token)
if is_last:
next_code_token = tokenutil.GetNextCodeToken(token)
# Increase required indentation if this is an overlong wrapped statement
# ending in an operator.
if token_type == Type.OPERATOR:
if token.string == ':':
if stack and stack[-1].token.string == '?':
# When a ternary : is on a different line than its '?', it doesn't
# add indentation.
if token.line_number == stack[-1].token.line_number:
self._Add(TokenInfo(token))
elif token.metadata.context.type == Context.CASE_BLOCK:
# Pop transient tokens from say, line continuations, e.g.,
# case x.
# y:
# Want to pop the transient 4 space continuation indent.
self._PopTransient()
# Starting the body of the case statement, which is a type of
# block.
self._Add(TokenInfo(token=token, is_block=True))
elif token.metadata.context.type == Context.LITERAL_ELEMENT:
# When in an object literal, acts as operator indicating line
# continuations.
self._Add(TokenInfo(token))
else:
# ':' might also be a statement label, no effect on indentation in
# this case.
pass
elif token.string != ',':
self._Add(TokenInfo(token))
else:
# The token is a comma.
if token.metadata.context.type == Context.VAR:
self._Add(TokenInfo(token))
elif token.metadata.context.type != Context.PARAMETERS:
self._PopTransient()
# Increase required indentation if this is the end of a statement that's
# continued with an operator on the next line (e.g. the '.').
elif (next_code_token and next_code_token.type == Type.OPERATOR and
not next_code_token.metadata.IsUnaryOperator()):
self._Add(TokenInfo(token))
elif token_type == Type.PARAMETERS and token.string.endswith(','):
# Parameter lists.
self._Add(TokenInfo(token))
elif token.IsKeyword('var'):
self._Add(TokenInfo(token))
elif token.metadata.is_implied_semicolon:
self._PopTransient()
elif token.IsAssignment():
self._Add(TokenInfo(token))
return indentation_errors
def _AddToEach(self, original, amount):
"""Returns a new set with the given amount added to each element.
Args:
original: The original set of numbers
amount: The amount to add to each element
Returns:
A new set containing each element of the original set added to the amount.
"""
return set([x + amount for x in original])
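# For example, _AddToEach(set([0, 2]), 4) returns set([4, 6]).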
_HARD_STOP_TYPES = (Type.START_PAREN, Type.START_PARAMETERS,
Type.START_BRACKET)
_HARD_STOP_STRINGS = ('return', '?')
def _IsHardStop(self, token):
"""Determines if the given token can have a hard stop after it.
Args:
token: token to examine
Returns:
Whether the token can have a hard stop after it.
Hard stops are indentations defined by the position of another token as in
indentation lined up with return, (, [, and ?.
"""
return (token.type in self._HARD_STOP_TYPES or
token.string in self._HARD_STOP_STRINGS or
token.IsAssignment())
def _GetAllowableIndentations(self):
"""Computes the set of allowable indentations.
Returns:
The set of allowable indentations, given the current stack.
"""
expected = set([0])
hard_stops = set([])
# Whether the tokens are still in the same continuation, meaning additional
# indentation is optional. As an example:
# x = 5 +
# 6 +
# 7;
# The second '+' does not add any required indentation.
in_same_continuation = False
for token_info in self._stack:
token = token_info.token
# Handle normal additive indentation tokens.
if not token_info.overridden_by and token.string != 'return':
if token_info.is_block:
expected = self._AddToEach(expected, 2)
hard_stops = self._AddToEach(hard_stops, 2)
in_same_continuation = False
elif in_same_continuation:
expected |= self._AddToEach(expected, 4)
hard_stops |= self._AddToEach(hard_stops, 4)
else:
expected = self._AddToEach(expected, 4)
hard_stops |= self._AddToEach(hard_stops, 4)
in_same_continuation = True
# Handle hard stops after (, [, return, =, and ?
if self._IsHardStop(token):
override_is_hard_stop = (token_info.overridden_by and
self._IsHardStop(
token_info.overridden_by.token))
if token.type == Type.START_PAREN and token.previous:
# For someFunction(...) we also allow indenting to the start column of
# the identifier + 4.
prev = token.previous
if (prev.type == Type.IDENTIFIER and
prev.line_number == token.line_number):
hard_stops.add(prev.start_index + 4)
if not override_is_hard_stop:
start_index = token.start_index
if token.line_number in self._start_index_offset:
start_index += self._start_index_offset[token.line_number]
if (token.type in (Type.START_PAREN, Type.START_PARAMETERS) and
not token_info.overridden_by):
hard_stops.add(start_index + 1)
elif token.string == 'return' and not token_info.overridden_by:
hard_stops.add(start_index + 7)
elif token.type == Type.START_BRACKET:
hard_stops.add(start_index + 1)
elif token.IsAssignment():
hard_stops.add(start_index + len(token.string) + 1)
elif token.IsOperator('?') and not token_info.overridden_by:
hard_stops.add(start_index + 2)
return (expected | hard_stops) or set([0])
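# Illustrative: with a single unoverridden continuation token on the stack
# (say a trailing '+'), expected grows from set([0]) to set([4]); hard
# stops, such as the position just past an '=' sign, are unioned in above.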
def _GetActualIndentation(self, token):
"""Gets the actual indentation of the line containing the given token.
Args:
token: Any token on the line.
Returns:
The actual indentation of the line containing the given token. Returns
-1 if this line should be ignored due to the presence of tabs.
"""
# Move to the first token in the line
token = tokenutil.GetFirstTokenInSameLine(token)
# If it is whitespace, it is the indentation.
if token.type == Type.WHITESPACE:
if token.string.find('\t') >= 0:
return -1
else:
return len(token.string)
elif token.type == Type.PARAMETERS:
return len(token.string) - len(token.string.lstrip())
else:
return 0
def _IsFirstNonWhitespaceTokenInLine(self, token):
"""Determines if the given token is the first non-space token on its line.
Args:
token: The token.
Returns:
True if the token is the first non-whitespace token on its line.
"""
if token.type in (Type.WHITESPACE, Type.BLANK_LINE):
return False
if token.IsFirstInLine():
return True
return (token.previous and token.previous.IsFirstInLine() and
token.previous.type == Type.WHITESPACE)
def _IsLastCodeInLine(self, token):
"""Determines if the given token is the last code token on its line.
Args:
token: The token.
Returns:
True if the token is the last code token on its line.
"""
if token.type in Type.NON_CODE_TYPES:
return False
start_token = token
while True:
token = token.next
if not token or token.line_number != start_token.line_number:
return True
if token.type not in Type.NON_CODE_TYPES:
return False
def _AllFunctionPropertyAssignTokens(self, start_token, end_token):
"""Checks if tokens are (likely) a valid function property assignment.
Args:
start_token: Start of the token range.
end_token: End of the token range.
Returns:
True if all tokens between start_token and end_token are legal tokens
within a function declaration and assignment into a property.
"""
for token in tokenutil.GetTokenRange(start_token, end_token):
fn_decl_tokens = (Type.FUNCTION_DECLARATION,
Type.PARAMETERS,
Type.START_PARAMETERS,
Type.END_PARAMETERS,
Type.END_PAREN)
if (token.type not in fn_decl_tokens and
token.IsCode() and
not tokenutil.IsIdentifierOrDot(token) and
not token.IsAssignment() and
not (token.type == Type.OPERATOR and token.string == ',')):
return False
return True
def _Add(self, token_info):
"""Adds the given token info to the stack.
Args:
token_info: The token information to add.
"""
if self._stack and self._stack[-1].token == token_info.token:
# Don't add the same token twice.
return
if token_info.is_block or token_info.token.type == Type.START_PAREN:
scope_token = tokenutil.GoogScopeOrNoneFromStartBlock(token_info.token)
token_info.overridden_by = TokenInfo(scope_token) if scope_token else None
if (token_info.token.type == Type.START_BLOCK and
token_info.token.metadata.context.type == Context.BLOCK):
# Handle function() {} assignments: their block contents get special
# treatment and are allowed to indent by just two spaces.
# For example
# long.long.name = function(
# a) {
# In this case the { and the = are on different lines. But the
# override should still apply for all previous stack tokens that are
# part of an assignment of a block.
has_assignment = any(x for x in self._stack if x.token.IsAssignment())
if has_assignment:
last_token = token_info.token.previous
for stack_info in reversed(self._stack):
if (last_token and
not self._AllFunctionPropertyAssignTokens(stack_info.token,
last_token)):
break
stack_info.overridden_by = token_info
stack_info.is_permanent_override = True
last_token = stack_info.token
index = len(self._stack) - 1
while index >= 0:
stack_info = self._stack[index]
stack_token = stack_info.token
if stack_info.line_number == token_info.line_number:
# In general, tokens only override each other when they are on
# the same line.
stack_info.overridden_by = token_info
if (token_info.token.type == Type.START_BLOCK and
(stack_token.IsAssignment() or
stack_token.type in (Type.IDENTIFIER, Type.START_PAREN))):
# Multi-line blocks have lasting overrides, as in:
# callFn({
# a: 10
# },
# 30);
# b/11450054. If a string is not closed properly then close_block
# could be None.
close_block = token_info.token.metadata.context.end_token
stack_info.is_permanent_override = close_block and (
close_block.line_number != token_info.token.line_number)
else:
break
index -= 1
self._stack.append(token_info)
def _Pop(self):
"""Pops the top token from the stack.
Returns:
The popped token info.
"""
token_info = self._stack.pop()
if token_info.token.type not in (Type.START_BLOCK, Type.START_BRACKET):
# Remove any temporary overrides.
self._RemoveOverrides(token_info)
else:
# For braces and brackets, which can be object and array literals, remove
# overrides when the literal is closed on the same line.
token_check = token_info.token
same_type = token_check.type
goal_type = None
if token_info.token.type == Type.START_BRACKET:
goal_type = Type.END_BRACKET
else:
goal_type = Type.END_BLOCK
line_number = token_info.token.line_number
count = 0
while token_check and token_check.line_number == line_number:
if token_check.type == goal_type:
count -= 1
if not count:
self._RemoveOverrides(token_info)
break
if token_check.type == same_type:
count += 1
token_check = token_check.next
return token_info
def _PopToImpliedBlock(self):
"""Pops the stack until an implied block token is found."""
while not self._Pop().token.metadata.is_implied_block:
pass
def _PopTo(self, stop_type):
"""Pops the stack until a token of the given type is popped.
Args:
stop_type: The type of token to pop to.
Returns:
The token info of the given type that was popped.
"""
last = None
while True:
last = self._Pop()
if last.token.type == stop_type:
break
return last
def _RemoveOverrides(self, token_info):
"""Marks any token that was overridden by this token as active again.
Args:
token_info: The token that is being removed from the stack.
"""
for stack_token in self._stack:
if (stack_token.overridden_by == token_info and
not stack_token.is_permanent_override):
stack_token.overridden_by = None
def _PopTransient(self):
"""Pops all transient tokens - i.e. not blocks, literals, or parens."""
while self._stack and self._stack[-1].is_transient:
self._Pop()

754
tools/closure_linter/build/lib/closure_linter/javascriptlintrules.py

@@ -1,754 +0,0 @@
#!/usr/bin/env python
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for checking JS files for common style guide violations.
These style guide violations apply only to JavaScript, not to other Ecma
scripting languages.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
from closure_linter import ecmalintrules
from closure_linter import error_check
from closure_linter import errors
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
# Shorthand
Error = error.Error
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
"""JavaScript lint rules that catch JavaScript specific style errors."""
def __init__(self, namespaces_info):
"""Initializes a JavaScriptLintRules instance."""
ecmalintrules.EcmaScriptLintRules.__init__(self)
self._namespaces_info = namespaces_info
self._declared_private_member_tokens = {}
self._declared_private_members = set()
self._used_private_members = set()
# A stack of dictionaries, one for each function scope entered. Each
# dictionary is keyed by an identifier that defines a local variable and has
# a token as its value.
self._unused_local_variables_by_scope = []
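# Sketch: after entering a nested function this stack might look like
# [{'outerVar': token}, {'innerVar': token}]; _MarkLocalVariableUsed below
# resolves identifiers innermost-first.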
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a param tag."""
self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
'Missing docs for parameter: "%s"' % param_name, token)
# pylint: disable=too-many-statements
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
# Call the base class's CheckToken function.
super(JavaScriptLintRules, self).CheckToken(token, state)
# Store some convenience variables
namespaces_info = self._namespaces_info
if error_check.ShouldCheck(Rule.UNUSED_LOCAL_VARIABLES):
self._CheckUnusedLocalVariables(token, state)
if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Find all assignments to private members.
if token.type == Type.SIMPLE_LVALUE:
identifier = token.string
if identifier.endswith('_') and not identifier.endswith('__'):
doc_comment = state.GetDocComment()
suppressed = doc_comment and (
'underscore' in doc_comment.suppressions or
'unusedPrivateMembers' in doc_comment.suppressions)
if not suppressed:
# Look for static members defined on a provided namespace.
if namespaces_info:
namespace = namespaces_info.GetClosurizedNamespace(identifier)
provided_namespaces = namespaces_info.GetProvidedNamespaces()
else:
namespace = None
provided_namespaces = set()
# Skip cases of this.something_.somethingElse_.
regex = re.compile(r'^this\.[a-zA-Z_]+$')
if namespace in provided_namespaces or regex.match(identifier):
variable = identifier.split('.')[-1]
self._declared_private_member_tokens[variable] = token
self._declared_private_members.add(variable)
elif not identifier.endswith('__'):
# Consider setting public members of private members to be a usage.
for piece in identifier.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
# Find all usages of private members.
if token.type == Type.IDENTIFIER:
for piece in token.string.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
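# For example, an identifier 'this.cache_.get' marks 'cache_' as a used
# private member via the split on '.' above.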
if token.type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'param' and flag.name_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.name_token)
if flag.type is not None and flag.name is not None:
if error_check.ShouldCheck(Rule.VARIABLE_ARG_MARKER):
# Check for variable arguments marker in type.
if flag.jstype.IsVarArgsType() and flag.name != 'var_args':
self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_NAME,
'Variable length argument %s must be renamed '
'to var_args.' % flag.name,
token)
elif not flag.jstype.IsVarArgsType() and flag.name == 'var_args':
self._HandleError(errors.JSDOC_MISSING_VAR_ARGS_TYPE,
'Variable length argument %s type must start '
'with \'...\'.' % flag.name,
token)
if error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER):
# Check for optional marker in type.
if (flag.jstype.opt_arg and
not flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
'Optional parameter name %s must be prefixed '
'with opt_.' % flag.name,
token)
elif (not flag.jstype.opt_arg and
flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
'Optional parameter %s type must end with =.' %
flag.name,
token)
if flag.flag_type in state.GetDocFlag().HAS_TYPE:
# Check for both missing type token and empty type braces '{}'
# Missing suppress types are reported separately and we allow enums,
# const, private, public and protected without types.
if (flag.flag_type not in state.GetDocFlag().CAN_OMIT_TYPE
and (not flag.jstype or flag.jstype.IsEmpty())):
self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
'Missing type in %s tag' % token.string, token)
elif flag.name_token and flag.type_end_token and tokenutil.Compare(
flag.type_end_token, flag.name_token) > 0:
self._HandleError(
errors.OUT_OF_ORDER_JSDOC_TAG_TYPE,
'Type should be immediately after %s tag' % token.string,
token)
elif token.type == Type.DOUBLE_QUOTE_STRING_START:
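# Scan the string's text tokens for an embedded single quote; the
# while/else below fires only when the loop finishes without break,
# i.e. no single quote was found and single-quoting is safe to suggest.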
next_token = token.next
while next_token.type == Type.STRING_TEXT:
if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
next_token.string):
break
next_token = next_token.next
else:
self._HandleError(
errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
'Single-quoted string preferred over double-quoted string.',
token,
position=Position.All(token.string))
elif token.type == Type.END_DOC_COMMENT:
doc_comment = state.GetDocComment()
# When @externs appears in a @fileoverview comment, it should trigger
# the same limited doc checks as a special filename like externs.js.
if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
self._SetLimitedDocChecks(True)
if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
not self._is_html and
state.InTopLevel() and
not state.InNonScopeBlock()):
# Check if we're in a fileoverview or constructor JsDoc.
is_constructor = (
doc_comment.HasFlag('constructor') or
doc_comment.HasFlag('interface'))
# @fileoverview is an optional tag, so if the docstring is the first
# token in the file treat it as a file level docstring.
is_file_level_comment = (
doc_comment.HasFlag('fileoverview') or
not doc_comment.start_token.previous)
# If the comment is not a file overview, and it does not immediately
# precede some code, skip it.
# NOTE: The tokenutil methods are not used here because of their
# behavior at the top of a file.
next_token = token.next
if (not next_token or
(not is_file_level_comment and
next_token.type in Type.NON_CODE_TYPES)):
return
# Don't require extra blank lines around suppression of extra
# goog.require errors.
if (doc_comment.SuppressionOnly() and
next_token.type == Type.IDENTIFIER and
next_token.string in ['goog.provide', 'goog.require']):
return
# Find the start of this block (include comments above the block, unless
# this is a file overview).
block_start = doc_comment.start_token
if not is_file_level_comment:
token = block_start.previous
while token and token.type in Type.COMMENT_TYPES:
block_start = token
token = token.previous
# Count the number of blank lines before this block.
blank_lines = 0
token = block_start.previous
while token and token.type in [Type.WHITESPACE, Type.BLANK_LINE]:
if token.type == Type.BLANK_LINE:
# A blank line.
blank_lines += 1
elif token.type == Type.WHITESPACE and not token.line.strip():
# A line with only whitespace on it.
blank_lines += 1
token = token.previous
# Log errors.
error_message = False
expected_blank_lines = 0
# Only need blank line before file overview if it is not the beginning
# of the file, e.g. copyright is first.
if is_file_level_comment and blank_lines == 0 and block_start.previous:
error_message = 'Should have a blank line before a file overview.'
expected_blank_lines = 1
elif is_constructor and blank_lines != 3:
error_message = (
'Should have 3 blank lines before a constructor/interface.')
expected_blank_lines = 3
elif (not is_file_level_comment and not is_constructor and
blank_lines != 2):
error_message = 'Should have 2 blank lines between top-level blocks.'
expected_blank_lines = 2
if error_message:
self._HandleError(
errors.WRONG_BLANK_LINE_COUNT, error_message,
block_start, position=Position.AtBeginning(),
fix_data=expected_blank_lines - blank_lines)
elif token.type == Type.END_BLOCK:
if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and
token.next.type == Type.START_PAREN)
function = state.GetFunction()
if not self._limited_doc_checks:
if (function.has_return and function.doc and
not is_immediately_called and
not function.doc.HasFlag('return') and
not function.doc.InheritsDocumentation() and
not function.doc.HasFlag('constructor')):
# Check for proper documentation of return value.
self._HandleError(
errors.MISSING_RETURN_DOCUMENTATION,
'Missing @return JsDoc in function with non-trivial return',
function.doc.end_token, position=Position.AtBeginning())
elif (not function.has_return and
not function.has_throw and
function.doc and
function.doc.HasFlag('return') and
not state.InInterfaceMethod()):
flag = function.doc.GetFlag('return')
valid_no_return_names = ['undefined', 'void', '*']
invalid_return = flag.jstype is None or not any(
sub_type.identifier in valid_no_return_names
for sub_type in flag.jstype.IterTypeGroup())
if invalid_return:
self._HandleError(
errors.UNNECESSARY_RETURN_DOCUMENTATION,
'Found @return JsDoc on function that returns nothing',
flag.flag_token, position=Position.AtBeginning())
# b/4073735. Method in object literal definition of prototype can
# safely reference 'this'.
prototype_object_literal = False
block_start = None
previous_code = None
previous_previous_code = None
# Search for cases where prototype is defined as object literal.
# previous_previous_code
# | previous_code
# | | block_start
# | | |
# a.b.prototype = {
# c : function() {
# this.d = 1;
# }
# }
# If in object literal, find first token of block so to find previous
# tokens to check above condition.
if state.InObjectLiteral():
block_start = state.GetCurrentBlockStart()
# If an object literal then get previous token (code type). For above
# case it should be '='.
if block_start:
previous_code = tokenutil.SearchExcept(block_start,
Type.NON_CODE_TYPES,
reverse=True)
# If previous token to block is '=' then get its previous token.
if previous_code and previous_code.IsOperator('='):
previous_previous_code = tokenutil.SearchExcept(previous_code,
Type.NON_CODE_TYPES,
reverse=True)
# If the variable/token before '=' ends with '.prototype', then this is
# the above case of a prototype defined with an object literal.
prototype_object_literal = (previous_previous_code and
previous_previous_code.string.endswith(
'.prototype'))
if (function.has_this and function.doc and
not function.doc.HasFlag('this') and
not function.is_constructor and
not function.is_interface and
'.prototype.' not in function.name and
not prototype_object_literal):
self._HandleError(
errors.MISSING_JSDOC_TAG_THIS,
'Missing @this JsDoc in function referencing "this". ('
'this usually means you are trying to reference "this" in '
'a static function, or you have forgotten to mark a '
'constructor with @constructor)',
function.doc.end_token, position=Position.AtBeginning())
elif token.type == Type.IDENTIFIER:
if token.string == 'goog.inherits' and not state.InFunction():
if state.GetLastNonSpaceToken().line_number == token.line_number:
self._HandleError(
errors.MISSING_LINE,
'Missing newline between constructor and goog.inherits',
token,
position=Position.AtBeginning())
extra_space = state.GetLastNonSpaceToken().next
while extra_space != token:
if extra_space.type == Type.BLANK_LINE:
self._HandleError(
errors.EXTRA_LINE,
'Extra line between constructor and goog.inherits',
extra_space)
extra_space = extra_space.next
# TODO(robbyw): Test the last function was a constructor.
# TODO(robbyw): Test correct @extends and @implements documentation.
elif (token.string == 'goog.provide' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.GetStringAfterToken(token)
# Report extra goog.provide statement.
if not namespace or namespaces_info.IsExtraProvide(token):
if not namespace:
msg = 'Empty namespace in goog.provide'
else:
msg = 'Unnecessary goog.provide: ' + namespace
# Hint to user if this is a Test namespace.
if namespace.endswith('Test'):
msg += (' *Test namespaces must be mentioned in the '
'goog.setTestOnly() call')
self._HandleError(
errors.EXTRA_GOOG_PROVIDE,
msg,
token, position=Position.AtBeginning())
if namespaces_info.IsLastProvide(token):
# Report missing provide statements after the last existing provide.
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetLastTokenInSameLine(token).next,
False)
# If there are no require statements, missing requires should be
# reported after the last provide.
if not namespaces_info.GetRequiredNamespaces():
missing_requires, illegal_alias_statements = (
namespaces_info.GetMissingRequires())
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
True)
if illegal_alias_statements:
self._ReportIllegalAliasStatement(illegal_alias_statements)
elif (token.string == 'goog.require' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.GetStringAfterToken(token)
# If there are no provide statements, missing provides should be
# reported before the first require.
if (namespaces_info.IsFirstRequire(token) and
not namespaces_info.GetProvidedNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetFirstTokenInSameLine(token),
True)
# Report extra goog.require statement.
if not namespace or namespaces_info.IsExtraRequire(token):
if not namespace:
msg = 'Empty namespace in goog.require'
else:
msg = 'Unnecessary goog.require: ' + namespace
self._HandleError(
errors.EXTRA_GOOG_REQUIRE,
msg,
token, position=Position.AtBeginning())
# Report missing goog.require statements.
if namespaces_info.IsLastRequire(token):
missing_requires, illegal_alias_statements = (
namespaces_info.GetMissingRequires())
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
False)
if illegal_alias_statements:
self._ReportIllegalAliasStatement(illegal_alias_statements)
elif token.type == Type.OPERATOR:
last_in_line = token.IsLastInLine()
# If the token is unary and appears to be used in a unary context
# it's ok. Otherwise, if it's at the end of the line or immediately
# before a comment, it's ok.
# Don't report an error before a start bracket - it will be reported
# by that token's space checks.
if (not token.metadata.IsUnaryOperator() and not last_in_line
and not token.next.IsComment()
and not token.next.IsOperator(',')
and not tokenutil.IsDot(token)
and token.next.type not in (Type.WHITESPACE, Type.END_PAREN,
Type.END_BRACKET, Type.SEMICOLON,
Type.START_BRACKET)):
self._HandleError(
errors.MISSING_SPACE,
'Missing space after "%s"' % token.string,
token,
position=Position.AtEnd(token.string))
elif token.type == Type.WHITESPACE:
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if not last_in_line and not first_in_line and not token.next.IsComment():
# Ensure there is no space after opening parentheses.
if (token.previous.type in (Type.START_PAREN, Type.START_BRACKET,
Type.FUNCTION_NAME)
or token.next.type == Type.START_PARAMETERS):
self._HandleError(
errors.EXTRA_SPACE,
'Extra space after "%s"' % token.previous.string,
token,
position=Position.All(token.string))
elif token.type == Type.SEMICOLON:
previous_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES,
reverse=True)
if not previous_token:
self._HandleError(
errors.REDUNDANT_SEMICOLON,
'Semicolon without any statement',
token,
position=Position.AtEnd(token.string))
elif (previous_token.type == Type.KEYWORD and
previous_token.string not in ['break', 'continue', 'return']):
self._HandleError(
errors.REDUNDANT_SEMICOLON,
('Semicolon after \'%s\' without any statement.'
' Looks like an error.' % previous_token.string),
token,
position=Position.AtEnd(token.string))
def _CheckUnusedLocalVariables(self, token, state):
"""Checks for unused local variables in function blocks.
Args:
token: The token to check.
state: The state tracker.
"""
# We don't use state.InFunction because that disregards scope functions.
in_function = state.FunctionDepth() > 0
if token.type == Type.SIMPLE_LVALUE or token.type == Type.IDENTIFIER:
if in_function:
identifier = token.string
# Check whether the previous token was var.
previous_code_token = tokenutil.CustomSearch(
token,
lambda t: t.type not in Type.NON_CODE_TYPES,
reverse=True)
if previous_code_token and previous_code_token.IsKeyword('var'):
# Add local variable declaration to the top of the unused locals
# stack.
self._unused_local_variables_by_scope[-1][identifier] = token
elif token.type == Type.IDENTIFIER:
# This covers most cases where the variable is used as an identifier.
self._MarkLocalVariableUsed(token.string)
elif token.type == Type.SIMPLE_LVALUE and '.' in identifier:
# This covers cases where a value is assigned to a property of the
# variable.
self._MarkLocalVariableUsed(token.string)
elif token.type == Type.START_BLOCK:
if in_function and state.IsFunctionOpen():
# Push a new map onto the stack
self._unused_local_variables_by_scope.append({})
elif token.type == Type.END_BLOCK:
if state.IsFunctionClose():
# Pop the stack and report any remaining locals as unused.
unused_local_variables = self._unused_local_variables_by_scope.pop()
for unused_token in unused_local_variables.values():
self._HandleError(
errors.UNUSED_LOCAL_VARIABLE,
'Unused local variable: %s.' % unused_token.string,
unused_token)
elif token.type == Type.DOC_FLAG:
# Flags that use aliased symbols should be counted.
flag = token.attached_object
js_type = flag and flag.jstype
if flag and flag.flag_type in state.GetDocFlag().HAS_TYPE and js_type:
self._MarkAliasUsed(js_type)
def _MarkAliasUsed(self, js_type):
"""Marks aliases in a type as used.
Recursively iterates over all subtypes in a jsdoc type annotation and
tracks usage of aliased symbols (which may be local variables).
Marks the local variable as used in the scope nearest to the current
scope that matches the given token.
Args:
js_type: The jsdoc type, a typeannotation.TypeAnnotation object.
"""
if js_type.alias:
self._MarkLocalVariableUsed(js_type.identifier)
for sub_type in js_type.IterTypes():
self._MarkAliasUsed(sub_type)
def _MarkLocalVariableUsed(self, identifier):
"""Marks the local variable as used in the relevant scope.
Marks the local variable in the scope nearest to the current scope that
matches the given identifier as used.
Args:
identifier: The identifier representing the potential usage of a local
variable.
"""
identifier = identifier.split('.', 1)[0]
# Find the first instance of the identifier in the stack of function scopes
# and mark it used.
for unused_local_variables in reversed(
self._unused_local_variables_by_scope):
if identifier in unused_local_variables:
del unused_local_variables[identifier]
break
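# For example, _MarkLocalVariableUsed('cache.entries') strips the
# identifier to 'cache' and removes it from the innermost enclosing scope
# that still lists it as unused.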
def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
"""Reports missing provide statements to the error handler.
Args:
missing_provides: A dictionary mapping each namespace (string) that should
be provided, but is not, to the first line number (integer) where it is
required.
token: The token where the error was detected (also where the new provides
will be inserted).
need_blank_line: Whether a blank line needs to be inserted after the new
provides are inserted. May be True, False, or None, where None
indicates that the insert location is unknown.
"""
missing_provides_msg = 'Missing the following goog.provide statements:\n'
missing_provides_msg += '\n'.join(['goog.provide(\'%s\');' % x for x in
sorted(missing_provides)])
missing_provides_msg += '\n'
missing_provides_msg += '\nFirst line where provided: \n'
missing_provides_msg += '\n'.join(
[' %s : line %d' % (x, missing_provides[x]) for x in
sorted(missing_provides)])
missing_provides_msg += '\n'
self._HandleError(
errors.MISSING_GOOG_PROVIDE,
missing_provides_msg,
token, position=Position.AtBeginning(),
fix_data=(missing_provides.keys(), need_blank_line))
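For illustration, with a hypothetical missing_provides dict of {'foo.Bar': 12, 'foo.Baz': 3}, the message assembled above comes out as:

Missing the following goog.provide statements:
goog.provide('foo.Bar');
goog.provide('foo.Baz');

First line where provided:
 foo.Bar : line 12
 foo.Baz : line 3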
def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
"""Reports missing require statements to the error handler.
Args:
missing_requires: A dictionary of string (key) and integer (value) where
each string (key) is a namespace that should be required but is not, and
each integer (value) is the first line number where it is required.
token: The token where the error was detected (also where the new requires
will be inserted).
need_blank_line: Whether a blank line needs to be inserted before the new
requires are inserted. May be True, False, or None, where None
indicates that the insert location is unknown.
"""
missing_requires_msg = 'Missing the following goog.require statements:\n'
missing_requires_msg += '\n'.join(['goog.require(\'%s\');' % x for x in
sorted(missing_requires)])
missing_requires_msg += '\n'
missing_requires_msg += '\nFirst line where required: \n'
missing_requires_msg += '\n'.join(
[' %s : line %d' % (x, missing_requires[x]) for x in
sorted(missing_requires)])
missing_requires_msg += '\n'
self._HandleError(
errors.MISSING_GOOG_REQUIRE,
missing_requires_msg,
token, position=Position.AtBeginning(),
fix_data=(missing_requires.keys(), need_blank_line))
def _ReportIllegalAliasStatement(self, illegal_alias_statements):
"""Reports alias statements that would need a goog.require."""
for namespace, token in illegal_alias_statements.iteritems():
self._HandleError(
errors.ALIAS_STMT_NEEDS_GOOG_REQUIRE,
'The alias definition would need the namespace \'%s\' which is not '
'required through any other symbol.' % namespace,
token, position=Position.AtBeginning())
def Finalize(self, state):
"""Perform all checks that need to occur after all lines are processed."""
# Call the base class's Finalize function.
super(JavaScriptLintRules, self).Finalize(state)
if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Report an error for any declared private member that was never used.
unused_private_members = (self._declared_private_members -
self._used_private_members)
for variable in unused_private_members:
token = self._declared_private_member_tokens[variable]
self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
'Unused private member: %s.' % token.string,
token)
# Clear state to prepare for the next file.
self._declared_private_member_tokens = {}
self._declared_private_members = set()
self._used_private_members = set()
namespaces_info = self._namespaces_info
if namespaces_info is not None:
# If there are no provide or require statements, missing provides and
# requires should be reported on line 1.
if (not namespaces_info.GetProvidedNamespaces() and
not namespaces_info.GetRequiredNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides, state.GetFirstToken(), None)
missing_requires, illegal_alias = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires, state.GetFirstToken(), None)
if illegal_alias:
self._ReportIllegalAliasStatement(illegal_alias)
self._CheckSortedRequiresProvides(state.GetFirstToken())
def _CheckSortedRequiresProvides(self, token):
"""Checks that all goog.require and goog.provide statements are sorted.
Note that this method needs to be run after missing statements are added to
preserve alphabetical order.
Args:
token: The first token in the token stream.
"""
sorter = requireprovidesorter.RequireProvideSorter()
first_provide_token = sorter.CheckProvides(token)
if first_provide_token:
new_order = sorter.GetFixedProvideString(first_provide_token)
self._HandleError(
errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
'goog.provide classes must be alphabetized. The correct code is:\n' +
new_order,
first_provide_token,
position=Position.AtBeginning(),
fix_data=first_provide_token)
first_require_token = sorter.CheckRequires(token)
if first_require_token:
new_order = sorter.GetFixedRequireString(first_require_token)
self._HandleError(
errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
'goog.require classes must be alphabetized. The correct code is:\n' +
new_order,
first_require_token,
position=Position.AtBeginning(),
fix_data=first_require_token)
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit.
Returns:
A list of regexps, used as matches (rather than searches).
"""
return [
re.compile(r'(var .+\s*=\s*)?goog\.require\(.+\);?\s*$'),
re.compile(r'goog\.(provide|module|setTestOnly)\(.+\);?\s*$'),
re.compile(r'[\s/*]*@visibility\s*{.*}[\s*/]*$'),
]
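These exception patterns can be exercised directly; per the docstring they are applied as matches, i.e. anchored at the start of the line (a small self-contained check):

import re

exceptions = [
    re.compile(r'(var .+\s*=\s*)?goog\.require\(.+\);?\s*$'),
    re.compile(r'goog\.(provide|module|setTestOnly)\(.+\);?\s*$'),
]

print(bool(exceptions[0].match("var foo = goog.require('a.long.namespace.Name');")))  # True
print(bool(exceptions[1].match("goog.provide('a.long.namespace.Name');")))            # True
print(bool(exceptions[0].match('var x = computeSomething();')))                       # False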

150
tools/closure_linter/build/lib/closure_linter/javascriptstatetracker.py

@@ -1,150 +0,0 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parser for JavaScript files."""
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class JsDocFlag(statetracker.DocFlag):
"""Javascript doc flag object.
Attributes:
flag_type: param, return, define, type, etc.
flag_token: The flag token.
type_start_token: The first token specifying the flag JS type,
including braces.
type_end_token: The last token specifying the flag JS type,
including braces.
type: The type spec string.
jstype: The type spec, a TypeAnnotation instance.
name_token: The token specifying the flag name.
name: The flag name.
description_start_token: The first token in the description.
description_end_token: The end token in the description.
description: The description.
"""
# Please keep these lists alphabetized.
# Some projects use the following extensions to JsDoc.
# TODO(robbyw): determine which of these, if any, should be illegal.
EXTENDED_DOC = frozenset([
'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
'meaning', 'provideGoog', 'throws'])
LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
class JavaScriptStateTracker(statetracker.StateTracker):
"""JavaScript state tracker.
Inherits from the core EcmaScript StateTracker adding extra state tracking
functionality needed for JavaScript.
"""
def __init__(self):
"""Initializes a JavaScript token stream state tracker."""
statetracker.StateTracker.__init__(self, JsDocFlag)
def Reset(self):
self._scope_depth = 0
self._block_stack = []
super(JavaScriptStateTracker, self).Reset()
def InTopLevel(self):
"""Compute whether we are at the top level in the class.
This function call is language specific. In some languages like
JavaScript, a function is top level if it is not inside any parentheses.
In languages such as ActionScript, a function is top level if it is directly
within a class.
Returns:
Whether we are at the top level in the class.
"""
return self._scope_depth == self.ParenthesesDepth()
def InFunction(self):
"""Returns true if the current token is within a function.
This js-specific override ignores goog.scope functions.
Returns:
True if the current token is within a function.
"""
return self._scope_depth != self.FunctionDepth()
def InNonScopeBlock(self):
"""Compute whether we are nested within a non-goog.scope block.
Returns:
True if the token is enclosed in a block that does not originate from a
goog.scope statement; False otherwise.
"""
return self._scope_depth != self.BlockDepth()
def GetBlockType(self, token):
"""Determine the block type given a START_BLOCK token.
Code blocks come after parameters, keywords like else, and closing parens.
Args:
token: The current token. Can be assumed to be type START_BLOCK
Returns:
Code block type for current token.
"""
last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, reverse=True)
if last_code.type in (Type.END_PARAMETERS, Type.END_PAREN,
Type.KEYWORD) and not last_code.IsKeyword('return'):
return self.CODE
else:
return self.OBJECT_LITERAL
def GetCurrentBlockStart(self):
"""Gets the start token of current block.
Returns:
Starting token of current block. None if not in block.
"""
if self._block_stack:
return self._block_stack[-1]
else:
return None
def HandleToken(self, token, last_non_space_token):
"""Handles the given token and updates state.
Args:
token: The token to handle.
last_non_space_token: The last non-space token encountered.
"""
if token.type == Type.START_BLOCK:
self._block_stack.append(token)
if token.type == Type.IDENTIFIER and token.string == 'goog.scope':
self._scope_depth += 1
if token.type == Type.END_BLOCK:
start_token = self._block_stack.pop()
if tokenutil.GoogScopeOrNoneFromStartBlock(start_token):
self._scope_depth -= 1
super(JavaScriptStateTracker, self).HandleToken(token,
last_non_space_token)
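A condensed sketch of the depth bookkeeping above, using plain counters instead of the token stream (ScopeDepths and its methods are illustrative, not part of the tracker):

class ScopeDepths(object):
    def __init__(self):
        self.block_depth = 0  # incremented for every '{'
        self.scope_depth = 0  # incremented only for goog.scope(function() {

    def open_block(self, from_goog_scope=False):
        self.block_depth += 1
        if from_goog_scope:
            self.scope_depth += 1

    def close_block(self, from_goog_scope=False):
        self.block_depth -= 1
        if from_goog_scope:
            self.scope_depth -= 1

    def in_non_scope_block(self):
        # Mirrors InNonScopeBlock: some open block is not a goog.scope wrapper.
        return self.scope_depth != self.block_depth

d = ScopeDepths()
d.open_block(from_goog_scope=True)  # goog.scope(function() {
print(d.in_non_scope_block())       # False: only the scope wrapper is open
d.open_block()                      # an ordinary block
print(d.in_non_scope_block())       # True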

278
tools/closure_linter/build/lib/closure_linter/javascriptstatetracker_test.py

@@ -1,278 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the javascriptstatetracker module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
_FUNCTION_SCRIPT = """\
var a = 3;
function foo(aaa, bbb, ccc) {
var b = 4;
}
/**
* JSDoc comment.
*/
var bar = function(ddd, eee, fff) {
};
/**
* Verify that nested functions get their proper parameters recorded.
*/
var baz = function(ggg, hhh, iii) {
var qux = function(jjj, kkk, lll) {
};
// make sure that entering a new block does not change baz' parameters.
{};
};
"""
class FunctionTest(googletest.TestCase):
def testFunctionParse(self):
functions, _ = testutil.ParseFunctionsAndComments(_FUNCTION_SCRIPT)
self.assertEquals(4, len(functions))
# First function
function = functions[0]
self.assertEquals(['aaa', 'bbb', 'ccc'], function.parameters)
start_token = function.start_token
end_token = function.end_token
self.assertEquals(
javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
function.start_token.type)
self.assertEquals('function', start_token.string)
self.assertEquals(3, start_token.line_number)
self.assertEquals(0, start_token.start_index)
self.assertEquals('}', end_token.string)
self.assertEquals(5, end_token.line_number)
self.assertEquals(0, end_token.start_index)
self.assertEquals('foo', function.name)
self.assertIsNone(function.doc)
# Second function
function = functions[1]
self.assertEquals(['ddd', 'eee', 'fff'], function.parameters)
start_token = function.start_token
end_token = function.end_token
self.assertEquals(
javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
function.start_token.type)
self.assertEquals('function', start_token.string)
self.assertEquals(11, start_token.line_number)
self.assertEquals(10, start_token.start_index)
self.assertEquals('}', end_token.string)
self.assertEquals(13, end_token.line_number)
self.assertEquals(0, end_token.start_index)
self.assertEquals('bar', function.name)
self.assertIsNotNone(function.doc)
# Check function JSDoc
doc = function.doc
doc_tokens = tokenutil.GetTokenRange(doc.start_token, doc.end_token)
comment_type = javascripttokens.JavaScriptTokenType.COMMENT
comment_tokens = filter(lambda t: t.type is comment_type, doc_tokens)
self.assertEquals('JSDoc comment.',
tokenutil.TokensToString(comment_tokens).strip())
# Third function
function = functions[2]
self.assertEquals(['ggg', 'hhh', 'iii'], function.parameters)
start_token = function.start_token
end_token = function.end_token
self.assertEquals(
javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
function.start_token.type)
self.assertEquals('function', start_token.string)
self.assertEquals(19, start_token.line_number)
self.assertEquals(10, start_token.start_index)
self.assertEquals('}', end_token.string)
self.assertEquals(24, end_token.line_number)
self.assertEquals(0, end_token.start_index)
self.assertEquals('baz', function.name)
self.assertIsNotNone(function.doc)
# Fourth function (inside third function)
function = functions[3]
self.assertEquals(['jjj', 'kkk', 'lll'], function.parameters)
start_token = function.start_token
end_token = function.end_token
self.assertEquals(
javascripttokens.JavaScriptTokenType.FUNCTION_DECLARATION,
function.start_token.type)
self.assertEquals('function', start_token.string)
self.assertEquals(20, start_token.line_number)
self.assertEquals(12, start_token.start_index)
self.assertEquals('}', end_token.string)
self.assertEquals(21, end_token.line_number)
self.assertEquals(2, end_token.start_index)
self.assertEquals('qux', function.name)
self.assertIsNone(function.doc)
class CommentTest(googletest.TestCase):
def testGetDescription(self):
comment = self._ParseComment("""
/**
* Comment targeting goog.foo.
*
* This is the second line.
* @param {number} foo The count of foo.
*/
target;""")
self.assertEqual(
'Comment targeting goog.foo.\n\nThis is the second line.',
comment.description)
def testCommentGetTarget(self):
self.assertCommentTarget('goog.foo', """
/**
* Comment targeting goog.foo.
*/
goog.foo = 6;
""")
self.assertCommentTarget('bar', """
/**
* Comment targeting bar.
*/
var bar = "Karate!";
""")
self.assertCommentTarget('doThing', """
/**
* Comment targeting doThing.
*/
function doThing() {};
""")
self.assertCommentTarget('this.targetProperty', """
goog.bar.Baz = function() {
/**
* Comment targeting targetProperty.
*/
this.targetProperty = 3;
};
""")
self.assertCommentTarget('goog.bar.prop', """
/**
* Comment targeting goog.bar.prop.
*/
goog.bar.prop;
""")
self.assertCommentTarget('goog.aaa.bbb', """
/**
* Comment targeting goog.aaa.bbb.
*/
(goog.aaa.bbb)
""")
self.assertCommentTarget('theTarget', """
/**
* Comment targeting symbol preceded by newlines, whitespace,
* and parens -- things we ignore.
*/
(theTarget)
""")
self.assertCommentTarget(None, """
/**
* @fileoverview File overview.
*/
(notATarget)
""")
self.assertCommentTarget(None, """
/**
* Comment that doesn't find a target.
*/
""")
self.assertCommentTarget('theTarget.is.split.across.lines', """
/**
* Comment that addresses a symbol split across lines.
*/
(theTarget.is.split
.across.lines)
""")
self.assertCommentTarget('theTarget.is.split.across.lines', """
/**
* Comment that addresses a symbol split across lines.
*/
(theTarget.is.split.
across.lines)
""")
def _ParseComment(self, script):
"""Parse a script that contains one comment and return it."""
_, comments = testutil.ParseFunctionsAndComments(script)
self.assertEquals(1, len(comments))
return comments[0]
def assertCommentTarget(self, target, script):
comment = self._ParseComment(script)
self.assertEquals(target, comment.GetTargetIdentifier())
if __name__ == '__main__':
googletest.main()

463
tools/closure_linter/build/lib/closure_linter/javascripttokenizer.py

@@ -1,463 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based JavaScript parsing classes."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import copy
import re
from closure_linter import javascripttokens
from closure_linter.common import matcher
from closure_linter.common import tokenizer
# Shorthand
Type = javascripttokens.JavaScriptTokenType
Matcher = matcher.Matcher
class JavaScriptModes(object):
"""Enumeration of the different matcher modes used for JavaScript."""
TEXT_MODE = 'text'
SINGLE_QUOTE_STRING_MODE = 'single_quote_string'
DOUBLE_QUOTE_STRING_MODE = 'double_quote_string'
BLOCK_COMMENT_MODE = 'block_comment'
DOC_COMMENT_MODE = 'doc_comment'
DOC_COMMENT_LEX_SPACES_MODE = 'doc_comment_spaces'
LINE_COMMENT_MODE = 'line_comment'
PARAMETER_MODE = 'parameter'
FUNCTION_MODE = 'function'
class JavaScriptTokenizer(tokenizer.Tokenizer):
"""JavaScript tokenizer.
Converts JavaScript code into an array of tokens.
"""
# Useful patterns for JavaScript parsing.
IDENTIFIER_CHAR = r'A-Za-z0-9_$'
# Number patterns based on:
# http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
MANTISSA = r"""
(\d+(?!\.)) | # Matches '10'
(\d+\.(?!\d)) | # Matches '10.'
(\d*\.\d+) # Matches '.5' or '10.5'
"""
DECIMAL_LITERAL = r'(%s)([eE][-+]?\d+)?' % MANTISSA
HEX_LITERAL = r'0[xX][0-9a-fA-F]+'
NUMBER = re.compile(r"""
((%s)|(%s))
""" % (HEX_LITERAL, DECIMAL_LITERAL), re.VERBOSE)
# Strings come in three parts - first we match the start of the string, then
# the contents, then the end. The contents consist of any character except a
# backslash or end of string, or a backslash followed by any character, or a
# backslash followed by end of line to support correct parsing of multi-line
# strings.
SINGLE_QUOTE = re.compile(r"'")
SINGLE_QUOTE_TEXT = re.compile(r"([^'\\]|\\(.|$))+")
DOUBLE_QUOTE = re.compile(r'"')
DOUBLE_QUOTE_TEXT = re.compile(r'([^"\\]|\\(.|$))+')
START_SINGLE_LINE_COMMENT = re.compile(r'//')
END_OF_LINE_SINGLE_LINE_COMMENT = re.compile(r'//$')
START_DOC_COMMENT = re.compile(r'/\*\*')
START_BLOCK_COMMENT = re.compile(r'/\*')
END_BLOCK_COMMENT = re.compile(r'\*/')
BLOCK_COMMENT_TEXT = re.compile(r'([^*]|\*(?!/))+')
# Comment text is anything that we are not going to parse into another special
# token like (inline) flags or end comments. Complicated regex to match
# most normal characters, and '*', '{', '}', and '@' when we are sure that
# it is safe. Expression [^*{\s]@ must come first, or the other options will
# match everything before @, and we won't match @'s that aren't part of flags
# like in email addresses in the @author tag.
DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+')
DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+')
# Match anything that is allowed in a type definition, except for tokens
# needed to parse it (and the lookahead assertion for "*/").
DOC_COMMENT_TYPE_TEXT = re.compile(r'([^*|!?=<>(){}:,\s]|\*(?!/))+')
# Match the prefix ' * ' that starts every line of jsdoc. Want to include
# spaces after the '*', but nothing else that occurs after a '*', and don't
# want to match the '*' in '*/'.
DOC_PREFIX = re.compile(r'\s*\*(\s+|(?!/))')
START_BLOCK = re.compile('{')
END_BLOCK = re.compile('}')
REGEX_CHARACTER_CLASS = r"""
\[ # Opening bracket
([^\]\\]|\\.)* # Anything but a ] or \,
# or a backslash followed by anything
\] # Closing bracket
"""
# We ensure the regex is followed by one of the above tokens to avoid
# incorrectly parsing something like x / y / z as x REGEX(/ y /) z
POST_REGEX_LIST = [
';', ',', r'\.', r'\)', r'\]', '$', r'\/\/', r'\/\*', ':', '}']
REGEX = re.compile(r"""
/ # opening slash
(?!\*) # not the start of a comment
(\\.|[^\[\/\\]|(%s))* # a backslash followed by anything,
# or anything but a / or [ or \,
# or a character class
/ # closing slash
[gimsx]* # optional modifiers
(?=\s*(%s))
""" % (REGEX_CHARACTER_CLASS, '|'.join(POST_REGEX_LIST)),
re.VERBOSE)
ANYTHING = re.compile(r'.*')
PARAMETERS = re.compile(r'[^\)]+')
CLOSING_PAREN_WITH_SPACE = re.compile(r'\)\s*')
FUNCTION_DECLARATION = re.compile(r'\bfunction\b')
OPENING_PAREN = re.compile(r'\(')
CLOSING_PAREN = re.compile(r'\)')
OPENING_BRACKET = re.compile(r'\[')
CLOSING_BRACKET = re.compile(r'\]')
# We omit these JS keywords from the list:
# function - covered by FUNCTION_DECLARATION.
# delete, in, instanceof, new, typeof - included as operators.
# this - included in identifiers.
# null, undefined - not included, should go in some "special constant" list.
KEYWORD_LIST = [
'break',
'case',
'catch',
'continue',
'default',
'do',
'else',
'finally',
'for',
'if',
'return',
'switch',
'throw',
'try',
'var',
'while',
'with',
]
# List of regular expressions to match as operators. Some notes: for our
# purposes, the comma behaves similarly enough to a normal operator that we
# include it here. r'\bin\b' actually matches 'in' surrounded by boundary
# characters - this may not match some very esoteric uses of the in operator.
# Operators that are subsets of larger operators must come later in this list
# for proper matching, e.g., '>>' must come AFTER '>>>'.
OPERATOR_LIST = [
',',
r'\+\+',
'===',
'!==',
'>>>=',
'>>>',
'==',
'>=',
'<=',
'!=',
'<<=',
'>>=',
'<<',
'>>',
'=>',
'>',
'<',
r'\+=',
r'\+',
'--',
r'\^=',
'-=',
'-',
'/=',
'/',
r'\*=',
r'\*',
'%=',
'%',
'&&',
r'\|\|',
'&=',
'&',
r'\|=',
r'\|',
'=',
'!',
':',
r'\?',
r'\^',
r'\bdelete\b',
r'\bin\b',
r'\binstanceof\b',
r'\bnew\b',
r'\btypeof\b',
r'\bvoid\b',
r'\.',
]
OPERATOR = re.compile('|'.join(OPERATOR_LIST))
WHITESPACE = re.compile(r'\s+')
SEMICOLON = re.compile(r';')
# Technically JavaScript identifiers can't contain '.', but we treat a set of
# nested identifiers as a single identifier, except for trailing dots.
NESTED_IDENTIFIER = r'[a-zA-Z_$]([%s]|\.[a-zA-Z_$])*' % IDENTIFIER_CHAR
IDENTIFIER = re.compile(NESTED_IDENTIFIER)
SIMPLE_LVALUE = re.compile(r"""
(?P<identifier>%s) # a valid identifier
(?=\s* # optional whitespace
\= # look ahead to equal sign
(?!=)) # not followed by equal
""" % NESTED_IDENTIFIER, re.VERBOSE)
# A doc flag is a @ sign followed by non-space characters that appears at the
# beginning of the line, after whitespace, or after a '{'. The look-behind
# check is necessary to not match someone@google.com as a flag.
DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)')
# To properly parse parameter names and complex doctypes containing
# whitespace, we need to tokenize whitespace into a token after certain
doctags. All statetracker.HAS_TYPE flags that are not listed here must not
contain any whitespace in their types.
DOC_FLAG_LEX_SPACES = re.compile(
r'(^|(?<=\s))@(?P<name>%s)\b' %
'|'.join([
'const',
'enum',
'extends',
'final',
'implements',
'param',
'private',
'protected',
'public',
'return',
'type',
'typedef'
]))
DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)')
DOC_TYPE_BLOCK_START = re.compile(r'[<(]')
DOC_TYPE_BLOCK_END = re.compile(r'[>)]')
DOC_TYPE_MODIFIERS = re.compile(r'[!?|,:=]')
# Star followed by non-slash, i.e. a star that does not end a comment.
# This is used for TYPE_GROUP below.
SAFE_STAR = r'(\*(?!/))'
COMMON_DOC_MATCHERS = [
# Find the end of the comment.
Matcher(END_BLOCK_COMMENT, Type.END_DOC_COMMENT,
JavaScriptModes.TEXT_MODE),
# Tokenize documented flags like @private.
Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
# Encountering a doc flag should leave lex spaces mode.
Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
# Tokenize braces so we can find types.
Matcher(START_BLOCK, Type.DOC_START_BRACE),
Matcher(END_BLOCK, Type.DOC_END_BRACE),
# And some more to parse types.
Matcher(DOC_TYPE_BLOCK_START, Type.DOC_TYPE_START_BLOCK),
Matcher(DOC_TYPE_BLOCK_END, Type.DOC_TYPE_END_BLOCK),
Matcher(DOC_TYPE_MODIFIERS, Type.DOC_TYPE_MODIFIER),
Matcher(DOC_COMMENT_TYPE_TEXT, Type.COMMENT),
Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]
# When text is not matched, it is given this default type based on mode.
# If unspecified in this map, the default default is Type.NORMAL.
JAVASCRIPT_DEFAULT_TYPES = {
JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
}
@classmethod
def BuildMatchers(cls):
"""Builds the token matcher group.
The token matcher group works as follows: it is a list of Matcher objects.
The matchers are tried in this order, and the first one to match is
returned. Hence the order is important, because matchers that come first
overrule matchers that come later.
Returns:
The completed token matcher group.
"""
# Match a keyword string followed by a non-identifier character in order to
# not match something like doSomething as do + Something.
keyword = re.compile('(%s)((?=[^%s])|$)' % (
'|'.join(cls.KEYWORD_LIST), cls.IDENTIFIER_CHAR))
return {
# Matchers for basic text mode.
JavaScriptModes.TEXT_MODE: [
# Check a big group - strings, starting comments, and regexes - all
# of which could be intertwined. 'string with /regex/',
# /regex with 'string'/, /* comment with /regex/ and string */ (and
# so on)
Matcher(cls.START_DOC_COMMENT, Type.START_DOC_COMMENT,
JavaScriptModes.DOC_COMMENT_MODE),
Matcher(cls.START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
JavaScriptModes.BLOCK_COMMENT_MODE),
Matcher(cls.END_OF_LINE_SINGLE_LINE_COMMENT,
Type.START_SINGLE_LINE_COMMENT),
Matcher(cls.START_SINGLE_LINE_COMMENT,
Type.START_SINGLE_LINE_COMMENT,
JavaScriptModes.LINE_COMMENT_MODE),
Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
Matcher(cls.REGEX, Type.REGEX),
# Next we check for start blocks appearing outside any of the items
# above.
Matcher(cls.START_BLOCK, Type.START_BLOCK),
Matcher(cls.END_BLOCK, Type.END_BLOCK),
# Then we search for function declarations.
Matcher(cls.FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
JavaScriptModes.FUNCTION_MODE),
# Next, we convert non-function related parens to tokens.
Matcher(cls.OPENING_PAREN, Type.START_PAREN),
Matcher(cls.CLOSING_PAREN, Type.END_PAREN),
# Next, we convert brackets to tokens.
Matcher(cls.OPENING_BRACKET, Type.START_BRACKET),
Matcher(cls.CLOSING_BRACKET, Type.END_BRACKET),
# Find numbers. This has to happen before operators because
# scientific notation numbers can have + and - in them.
Matcher(cls.NUMBER, Type.NUMBER),
# Find operators and simple assignments
Matcher(cls.SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
Matcher(cls.OPERATOR, Type.OPERATOR),
# Find key words and whitespace.
Matcher(keyword, Type.KEYWORD),
Matcher(cls.WHITESPACE, Type.WHITESPACE),
# Find identifiers.
Matcher(cls.IDENTIFIER, Type.IDENTIFIER),
# Finally, we convert semicolons to tokens.
Matcher(cls.SEMICOLON, Type.SEMICOLON)],
# Matchers for single quote strings.
JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
Matcher(cls.SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(cls.SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)],
# Matchers for double quote strings.
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
Matcher(cls.DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(cls.DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)],
# Matchers for block comments.
JavaScriptModes.BLOCK_COMMENT_MODE: [
# First we check for exiting a block comment.
Matcher(cls.END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
JavaScriptModes.TEXT_MODE),
# Match non-comment-ending text.
Matcher(cls.BLOCK_COMMENT_TEXT, Type.COMMENT)],
# Matchers for doc comments.
JavaScriptModes.DOC_COMMENT_MODE: cls.COMMON_DOC_MATCHERS + [
Matcher(cls.DOC_COMMENT_TEXT, Type.COMMENT)],
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: cls.COMMON_DOC_MATCHERS + [
Matcher(cls.WHITESPACE, Type.COMMENT),
Matcher(cls.DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
# Matchers for single line comments.
JavaScriptModes.LINE_COMMENT_MODE: [
# We greedy match until the end of the line in line comment mode.
Matcher(cls.ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
# Matchers for code after the function keyword.
JavaScriptModes.FUNCTION_MODE: [
# Must match open paren before anything else and move into parameter
# mode, otherwise everything inside the parameter list is parsed
# incorrectly.
Matcher(cls.OPENING_PAREN, Type.START_PARAMETERS,
JavaScriptModes.PARAMETER_MODE),
Matcher(cls.WHITESPACE, Type.WHITESPACE),
Matcher(cls.IDENTIFIER, Type.FUNCTION_NAME)],
# Matchers for function parameters
JavaScriptModes.PARAMETER_MODE: [
# When in function parameter mode, a closing paren is treated
# specially. Everything else is treated as lines of parameters.
Matcher(cls.CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
JavaScriptModes.TEXT_MODE),
Matcher(cls.PARAMETERS, Type.PARAMETERS,
JavaScriptModes.PARAMETER_MODE)]}
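The first-match-wins dispatch that consumes these matcher lists can be sketched in a few lines (simplified: the real Matcher also carries a result mode and a push flag):

import re

def first_match(matchers, line, pos):
    """Try (regex, token_type) pairs in order; list order encodes priority."""
    for regex, token_type in matchers:
        m = regex.match(line, pos)
        if m:
            return token_type, m.end()
    return 'normal', pos + 1  # default type when nothing matches

text_mode = [
    (re.compile(r'/\*\*'), 'start doc comment'),
    (re.compile(r'/\*'), 'start block comment'),  # must come after /**
]
print(first_match(text_mode, '/** jsdoc */', 0))  # ('start doc comment', 3)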
def __init__(self, parse_js_doc=True):
"""Create a tokenizer object.
Args:
parse_js_doc: Whether to do detailed parsing of javascript doc comments,
or simply treat them as normal comments. Defaults to parsing JsDoc.
"""
matchers = self.BuildMatchers()
if not parse_js_doc:
# Make a copy so the original doesn't get modified.
matchers = copy.deepcopy(matchers)
matchers[JavaScriptModes.DOC_COMMENT_MODE] = matchers[
JavaScriptModes.BLOCK_COMMENT_MODE]
tokenizer.Tokenizer.__init__(self, JavaScriptModes.TEXT_MODE, matchers,
self.JAVASCRIPT_DEFAULT_TYPES)
def _CreateToken(self, string, token_type, line, line_number, values=None):
"""Creates a new JavaScriptToken object.
Args:
string: The string of input the token contains.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
"""
return javascripttokens.JavaScriptToken(string, token_type, line,
line_number, values, line_number)
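Assuming the closure_linter package is importable, end-to-end use follows the same pattern as runner.py below: tokenize a file-like object, then walk the linked token stream (Python 2, like the module itself):

import StringIO

from closure_linter import javascripttokenizer

source = StringIO.StringIO('var x = 1;\n')
token = javascripttokenizer.JavaScriptTokenizer().TokenizeFile(source)
while token:
    print(token)  # the repr includes line number, type, and string
    token = token.next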

153
tools/closure_linter/build/lib/closure_linter/javascripttokens.py

@@ -1,153 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent JavaScript tokens."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import tokens
class JavaScriptTokenType(tokens.TokenType):
"""Enumeration of JavaScript token types, and useful sets of token types."""
NUMBER = 'number'
START_SINGLE_LINE_COMMENT = '//'
START_BLOCK_COMMENT = '/*'
START_DOC_COMMENT = '/**'
END_BLOCK_COMMENT = '*/'
END_DOC_COMMENT = 'doc */'
COMMENT = 'comment'
SINGLE_QUOTE_STRING_START = "'string"
SINGLE_QUOTE_STRING_END = "string'"
DOUBLE_QUOTE_STRING_START = '"string'
DOUBLE_QUOTE_STRING_END = 'string"'
STRING_TEXT = 'string'
START_BLOCK = '{'
END_BLOCK = '}'
START_PAREN = '('
END_PAREN = ')'
START_BRACKET = '['
END_BRACKET = ']'
REGEX = '/regex/'
FUNCTION_DECLARATION = 'function(...)'
FUNCTION_NAME = 'function functionName(...)'
START_PARAMETERS = 'startparams('
PARAMETERS = 'pa,ra,ms'
END_PARAMETERS = ')endparams'
SEMICOLON = ';'
DOC_FLAG = '@flag'
DOC_INLINE_FLAG = '{@flag ...}'
DOC_START_BRACE = 'doc {'
DOC_END_BRACE = 'doc }'
DOC_PREFIX = 'comment prefix: * '
DOC_TYPE_START_BLOCK = 'Type <'
DOC_TYPE_END_BLOCK = 'Type >'
DOC_TYPE_MODIFIER = 'modifier'
SIMPLE_LVALUE = 'lvalue='
KEYWORD = 'keyword'
OPERATOR = 'operator'
IDENTIFIER = 'identifier'
STRING_TYPES = frozenset([
SINGLE_QUOTE_STRING_START, SINGLE_QUOTE_STRING_END,
DOUBLE_QUOTE_STRING_START, DOUBLE_QUOTE_STRING_END, STRING_TEXT])
COMMENT_TYPES = frozenset([
START_SINGLE_LINE_COMMENT, COMMENT,
START_BLOCK_COMMENT, START_DOC_COMMENT,
END_BLOCK_COMMENT, END_DOC_COMMENT,
DOC_START_BRACE, DOC_END_BRACE,
DOC_FLAG, DOC_INLINE_FLAG, DOC_PREFIX,
DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
FLAG_DESCRIPTION_TYPES = frozenset([
DOC_INLINE_FLAG, COMMENT, DOC_START_BRACE, DOC_END_BRACE,
DOC_TYPE_START_BLOCK, DOC_TYPE_END_BLOCK, DOC_TYPE_MODIFIER])
FLAG_ENDING_TYPES = frozenset([DOC_FLAG, END_DOC_COMMENT])
NON_CODE_TYPES = COMMENT_TYPES | frozenset([
tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])
UNARY_OPERATORS = ['!', 'new', 'delete', 'typeof', 'void']
UNARY_OK_OPERATORS = ['--', '++', '-', '+'] + UNARY_OPERATORS
UNARY_POST_OPERATORS = ['--', '++']
# An expression ender is any token that can end an object - i.e. we could have
# x.y or [1, 2], or (10 + 9) or {a: 10}.
EXPRESSION_ENDER_TYPES = [tokens.TokenType.NORMAL, IDENTIFIER, NUMBER,
SIMPLE_LVALUE, END_BRACKET, END_PAREN, END_BLOCK,
SINGLE_QUOTE_STRING_END, DOUBLE_QUOTE_STRING_END]
class JavaScriptToken(tokens.Token):
"""JavaScript token subclass of Token, provides extra instance checks.
The following token types have data in attached_object:
- All JsDoc flags: a parser.JsDocFlag object.
"""
def IsKeyword(self, keyword):
"""Tests if this token is the given keyword.
Args:
keyword: The keyword to compare to.
Returns:
True if this token is a keyword token with the given name.
"""
return self.type == JavaScriptTokenType.KEYWORD and self.string == keyword
def IsOperator(self, operator):
"""Tests if this token is the given operator.
Args:
operator: The operator to compare to.
Returns:
True if this token is an operator token with the given name.
"""
return self.type == JavaScriptTokenType.OPERATOR and self.string == operator
def IsAssignment(self):
"""Tests if this token is an assignment operator.
Returns:
True if this token is an assignment operator.
"""
return (self.type == JavaScriptTokenType.OPERATOR and
self.string.endswith('=') and
self.string not in ('==', '!=', '>=', '<=', '===', '!=='))
def IsComment(self):
"""Tests if this token is any part of a comment.
Returns:
True if this token is any part of a comment.
"""
return self.type in JavaScriptTokenType.COMMENT_TYPES
def IsCode(self):
"""Tests if this token is code, as opposed to a comment or whitespace."""
return self.type not in JavaScriptTokenType.NON_CODE_TYPES
def __repr__(self):
return '<JavaScriptToken: %d, %s, "%s", %r, %r>' % (self.line_number,
self.type, self.string,
self.values,
self.metadata)
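IsAssignment relies purely on the operator's string, so the check is easy to exercise standalone:

def is_assignment(op):
    return (op.endswith('=') and
            op not in ('==', '!=', '>=', '<=', '===', '!=='))

for op in ['=', '+=', '>>>=', '==', '<=', '!==']:
    print('%-4s %s' % (op, is_assignment(op)))
# '=', '+=' and '>>>=' are assignments; the comparison operators are not.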

74
tools/closure_linter/build/lib/closure_linter/not_strict_test.py

@@ -1,74 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
runner.Run,
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')

329
tools/closure_linter/build/lib/closure_linter/requireprovidesorter.py

@@ -1,329 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains logic for sorting goog.provide and goog.require statements.
Closurized JavaScript files use goog.provide and goog.require statements at the
top of the file to manage dependencies. These statements should be sorted
alphabetically, however, it is common for them to be accompanied by inline
comments or suppression annotations. In order to sort these statements without
disrupting their comments and annotations, the association between statements
and comments/annotations must be maintained while sorting.
RequireProvideSorter: Handles checking/fixing of provide/require statements.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class RequireProvideSorter(object):
"""Checks for and fixes alphabetization of provide and require statements.
When alphabetizing, comments on the same line or comments directly above a
goog.provide or goog.require statement are associated with that statement and
stay with the statement as it gets sorted.
"""
def CheckProvides(self, token):
"""Checks alphabetization of goog.provide statements.
Iterates over tokens in given token stream, identifies goog.provide tokens,
and checks that they occur in alphabetical order by the object being
provided.
Args:
token: A token in the token stream before any goog.provide tokens.
Returns:
The first provide token in the token stream.
None is returned if all goog.provide statements are already sorted.
"""
provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
sorted_provide_strings = sorted(provide_strings)
if provide_strings != sorted_provide_strings:
return provide_tokens[0]
return None
def CheckRequires(self, token):
"""Checks alphabetization of goog.require statements.
Iterates over tokens in given token stream, identifies goog.require tokens,
and checks that they occur in alphabetical order by the dependency being
required.
Args:
token: A token in the token stream before any goog.require tokens.
Returns:
The first require token in the token stream.
None is returned if all goog.require statements are already sorted.
"""
require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
sorted_require_strings = sorted(require_strings)
if require_strings != sorted_require_strings:
return require_tokens[0]
return None
def FixProvides(self, token):
"""Sorts goog.provide statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.provide'))
def FixRequires(self, token):
"""Sorts goog.require statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.require'))
def _FixProvidesOrRequires(self, tokens):
"""Sorts goog.provide or goog.require statements.
Args:
tokens: A list of goog.provide or goog.require tokens in the order they
appear in the token stream. i.e. the first token in this list must
be the first goog.provide or goog.require token.
"""
strings = self._GetRequireOrProvideTokenStrings(tokens)
sorted_strings = sorted(strings)
# Make a separate pass to remove any blank lines between goog.require/
# goog.provide tokens.
first_token = tokens[0]
last_token = tokens[-1]
i = last_token
while i != first_token and i is not None:
if i.type is Type.BLANK_LINE:
tokenutil.DeleteToken(i)
i = i.previous
# A map from required/provided object name to tokens that make up the line
# it was on, including any comments immediately before it or after it on the
# same line.
tokens_map = self._GetTokensMap(tokens)
# Iterate over the map removing all tokens.
for name in tokens_map:
tokens_to_delete = tokens_map[name]
for i in tokens_to_delete:
tokenutil.DeleteToken(i)
# Save the token that starts the rest of the file. Sorted tokens will be
# inserted before this.
rest_of_file = tokens_map[strings[-1]][-1].next
# Re-add all tokens in the map in alphabetical order.
insert_after = tokens[0].previous
for string in sorted_strings:
for i in tokens_map[string]:
if rest_of_file:
tokenutil.InsertTokenBefore(i, rest_of_file)
else:
tokenutil.InsertTokenAfter(i, insert_after)
insert_after = i
def _GetRequireOrProvideTokens(self, token, token_string):
"""Gets all goog.provide or goog.require tokens in the given token stream.
Args:
token: The first token in the token stream.
token_string: One of 'goog.provide' or 'goog.require' to indicate which
tokens to find.
Returns:
A list of goog.provide or goog.require tokens in the order they appear in
the token stream.
"""
tokens = []
while token:
if token.type == Type.IDENTIFIER:
if token.string == token_string:
tokens.append(token)
elif token.string not in [
'goog.provide', 'goog.require', 'goog.setTestOnly']:
# These 3 identifiers are at the top of the file. So if any other
# identifier is encountered, return.
# TODO(user): Once it's decided what ordering goog.require
# should use, add 'goog.module' to the list above and implement the
# decision.
break
token = token.next
return tokens
def _GetRequireOrProvideTokenStrings(self, tokens):
"""Gets a list of strings corresponding to the given list of tokens.
The string will be the next string in the token stream after each token in
tokens. This is used to find the object being provided/required by a given
goog.provide or goog.require token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A list of object names that are being provided or required by the given
list of tokens. For example:
['object.a', 'object.c', 'object.b']
"""
token_strings = []
for token in tokens:
if not token.is_deleted:
name = tokenutil.GetStringAfterToken(token)
token_strings.append(name)
return token_strings
def _GetTokensMap(self, tokens):
"""Gets a map from object name to tokens associated with that object.
Starting from the goog.provide/goog.require token, searches backwards in the
token stream for any lines that start with a comment. These lines are
associated with the goog.provide/goog.require token. Also associates any
tokens on the same line as the goog.provide/goog.require token with that
token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A dictionary that maps object names to the tokens associated with the
goog.provide or goog.require of that object name. For example:
{
'object.a': [JavaScriptToken, JavaScriptToken, ...],
'object.b': [...]
}
The list of tokens includes any comment lines above the goog.provide or
goog.require statement and everything after the statement on the same
line. For example, all of the following would be associated with
'object.a':
/** @suppress {extraRequire} */
goog.require('object.a'); // Some comment.
"""
tokens_map = {}
for token in tokens:
object_name = tokenutil.GetStringAfterToken(token)
# If the previous line starts with a comment, presume that the comment
# relates to the goog.require or goog.provide and keep them together when
# sorting.
first_token = token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
while (previous_first_token and
previous_first_token.IsAnyType(Type.COMMENT_TYPES)):
first_token = previous_first_token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
first_token)
# Find the last token on the line.
last_token = tokenutil.GetLastTokenInSameLine(token)
all_tokens = self._GetTokenList(first_token, last_token)
tokens_map[object_name] = all_tokens
return tokens_map
def _GetTokenList(self, first_token, last_token):
"""Gets a list of all tokens from first_token to last_token, inclusive.
Args:
first_token: The first token to get.
last_token: The last token to get.
Returns:
A list of all tokens between first_token and last_token, including both
first_token and last_token.
Raises:
Exception: If the token stream ends before last_token is reached.
"""
token_list = []
token = first_token
while token != last_token:
if not token:
raise Exception('ran out of tokens')
token_list.append(token)
token = token.next
token_list.append(last_token)
return token_list
def GetFixedRequireString(self, token):
"""Get fixed/sorted order of goog.require statements.
Args:
token: The first token in the token stream.
Returns:
A string for correct sorted order of goog.require.
"""
return self._GetFixedRequireOrProvideString(
self._GetRequireOrProvideTokens(token, 'goog.require'))
def GetFixedProvideString(self, token):
"""Get fixed/sorted order of goog.provide statements.
Args:
token: The first token in the token stream.
Returns:
A string for correct sorted order of goog.provide.
"""
return self._GetFixedRequireOrProvideString(
self._GetRequireOrProvideTokens(token, 'goog.provide'))
def _GetFixedRequireOrProvideString(self, tokens):
"""Sorts goog.provide or goog.require statements.
Args:
tokens: A list of goog.provide or goog.require tokens in the order they
appear in the token stream. i.e. the first token in this list must
be the first goog.provide or goog.require token.
Returns:
A string for sorted goog.require or goog.provide statements
"""
# A map from required/provided object name to tokens that make up the line
# it was on, including any comments immediately before it or after it on the
# same line.
tokens_map = self._GetTokensMap(tokens)
sorted_strings = sorted(tokens_map.keys())
new_order = ''
for string in sorted_strings:
for i in tokens_map[string]:
new_order += i.string
if i.IsLastInLine():
new_order += '\n'
return new_order
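A simplified line-based sketch of the sort performed above (comment lines directly above a statement travel with it, as _GetTokensMap arranges for tokens; sort_requires is illustrative, not the module's API):

import re

def sort_requires(lines):
    groups, pending = [], []
    for line in lines:
        pending.append(line)
        m = re.search(r"goog\.require\('([^']+)'\)", line)
        if m:  # the require closes the group started by any comments above it
            groups.append((m.group(1), pending))
            pending = []
    ordered = []
    for _, group in sorted(groups):
        ordered.extend(group)
    return ordered + pending

print(sort_requires([
    "goog.require('pkg.Zebra');",
    '/** @suppress {extraRequire} */',
    "goog.require('pkg.Alpha');",
]))
# The comment stays attached to pkg.Alpha, which now sorts first.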

155
tools/closure_linter/build/lib/closure_linter/requireprovidesorter_test.py

@@ -1,155 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for RequireProvideSorter."""
import unittest as googletest
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import testutil
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType
class RequireProvideSorterTest(googletest.TestCase):
"""Tests for RequireProvideSorter."""
def testGetFixedProvideString(self):
"""Tests that fixed string constains proper comments also."""
input_lines = [
'goog.provide(\'package.xyz\');',
'/** @suppress {extraprovide} **/',
'goog.provide(\'package.abcd\');'
]
expected_lines = [
'/** @suppress {extraprovide} **/',
'goog.provide(\'package.abcd\');',
'goog.provide(\'package.xyz\');'
]
token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
sorter = requireprovidesorter.RequireProvideSorter()
fixed_provide_string = sorter.GetFixedProvideString(token)
self.assertEquals(expected_lines, fixed_provide_string.splitlines())
def testGetFixedRequireString(self):
"""Tests that fixed string constains proper comments also."""
input_lines = [
'goog.require(\'package.xyz\');',
'/** This is needed for scope. **/',
'goog.require(\'package.abcd\');'
]
expected_lines = [
'/** This is needed for scope. **/',
'goog.require(\'package.abcd\');',
'goog.require(\'package.xyz\');'
]
token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
sorter = requireprovidesorter.RequireProvideSorter()
fixed_require_string = sorter.GetFixedRequireString(token)
self.assertEquals(expected_lines, fixed_require_string.splitlines())
def testFixRequires_removeBlankLines(self):
"""Tests that blank lines are omitted in sorted goog.require statements."""
input_lines = [
'goog.provide(\'package.subpackage.Whatever\');',
'',
'goog.require(\'package.subpackage.ClassB\');',
'',
'goog.require(\'package.subpackage.ClassA\');'
]
expected_lines = [
'goog.provide(\'package.subpackage.Whatever\');',
'',
'goog.require(\'package.subpackage.ClassA\');',
'goog.require(\'package.subpackage.ClassB\');'
]
token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
sorter = requireprovidesorter.RequireProvideSorter()
sorter.FixRequires(token)
self.assertEquals(expected_lines, self._GetLines(token))
def fixRequiresTest_withTestOnly(self, position):
"""Regression-tests sorting even with a goog.setTestOnly statement.
Args:
position: The position in the list where to insert the goog.setTestOnly
statement. Will be used to test all possible combinations for
this test.
"""
input_lines = [
'goog.provide(\'package.subpackage.Whatever\');',
'',
'goog.require(\'package.subpackage.ClassB\');',
'goog.require(\'package.subpackage.ClassA\');'
]
expected_lines = [
'goog.provide(\'package.subpackage.Whatever\');',
'',
'goog.require(\'package.subpackage.ClassA\');',
'goog.require(\'package.subpackage.ClassB\');'
]
input_lines.insert(position, 'goog.setTestOnly();')
expected_lines.insert(position, 'goog.setTestOnly();')
token = testutil.TokenizeSourceAndRunEcmaPass(input_lines)
sorter = requireprovidesorter.RequireProvideSorter()
sorter.FixRequires(token)
self.assertEquals(expected_lines, self._GetLines(token))
def testFixRequires_withTestOnly(self):
"""Regression-tests sorting even after a goog.setTestOnly statement."""
# goog.setTestOnly at first line.
self.fixRequiresTest_withTestOnly(position=0)
# goog.setTestOnly after goog.provide.
self.fixRequiresTest_withTestOnly(position=1)
# goog.setTestOnly before goog.require.
self.fixRequiresTest_withTestOnly(position=2)
# goog.setTestOnly after goog.require.
self.fixRequiresTest_withTestOnly(position=4)
def _GetLines(self, token):
"""Returns an array of lines based on the specified token stream."""
lines = []
line = ''
while token:
line += token.string
if token.IsLastInLine():
lines.append(line)
line = ''
token = token.next
return lines
if __name__ == '__main__':
googletest.main()

198
tools/closure_linter/build/lib/closure_linter/runner.py

@@ -1,198 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main lint function. Tokenizes file, runs passes, and feeds to checker."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = 'nnaze@google.com (Nathan Naze)'
import traceback
import gflags as flags
from closure_linter import checker
from closure_linter import ecmalintrules
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer
from closure_linter.common import error
from closure_linter.common import htmlutil
from closure_linter.common import tokens
flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
'List of files with relaxed documentation checks. Will not '
'report errors for missing documentation, some missing '
'descriptions, or methods whose @return tags don\'t have a '
'matching return statement.')
flags.DEFINE_boolean('error_trace', False,
'Whether to show error exceptions.')
flags.ADOPT_module_key_flags(checker)
flags.ADOPT_module_key_flags(ecmalintrules)
flags.ADOPT_module_key_flags(error_check)
def _GetLastNonWhiteSpaceToken(start_token):
"""Get the last non-whitespace token in a token stream."""
ret_token = None
whitespace_tokens = frozenset([
tokens.TokenType.WHITESPACE, tokens.TokenType.BLANK_LINE])
for t in start_token:
if t.type not in whitespace_tokens:
ret_token = t
return ret_token
def _IsHtml(filename):
return filename.endswith('.html') or filename.endswith('.htm')
def _Tokenize(fileobj):
"""Tokenize a file.
Args:
fileobj: file-like object (or iterable lines) with the source.
Returns:
The first token in the token stream and the ending mode of the tokenizer.
"""
tokenizer = javascripttokenizer.JavaScriptTokenizer()
start_token = tokenizer.TokenizeFile(fileobj)
return start_token, tokenizer.mode
def _IsLimitedDocCheck(filename, limited_doc_files):
"""Whether this this a limited-doc file.
Args:
filename: The filename.
limited_doc_files: Iterable of strings. Suffixes of filenames that should
be limited doc check.
Returns:
Whether the file should get the limited doc check.
"""
for limited_doc_filename in limited_doc_files:
if filename.endswith(limited_doc_filename):
return True
return False
def Run(filename, error_handler, source=None):
"""Tokenize, run passes, and check the given file.
Args:
filename: The path of the file to check
error_handler: The error handler to report errors to.
source: A file-like object with the file source. If omitted, the file will
be read from the filename path.
"""
if not source:
try:
source = open(filename)
except IOError:
error_handler.HandleFile(filename, None)
error_handler.HandleError(
error.Error(errors.FILE_NOT_FOUND, 'File not found'))
error_handler.FinishFile()
return
if _IsHtml(filename):
source_file = htmlutil.GetScriptLines(source)
else:
source_file = source
token, tokenizer_mode = _Tokenize(source_file)
error_handler.HandleFile(filename, token)
# If we did not end in the basic mode, this is a failed parse.
if tokenizer_mode is not javascripttokenizer.JavaScriptModes.TEXT_MODE:
error_handler.HandleError(
error.Error(errors.FILE_IN_BLOCK,
'File ended in mode "%s".' % tokenizer_mode,
_GetLastNonWhiteSpaceToken(token)))
# Run the ECMA pass
error_token = None
ecma_pass = ecmametadatapass.EcmaMetaDataPass()
error_token = RunMetaDataPass(token, ecma_pass, error_handler, filename)
is_limited_doc_check = (
_IsLimitedDocCheck(filename, flags.FLAGS.limited_doc_files))
_RunChecker(token, error_handler,
is_limited_doc_check,
is_html=_IsHtml(filename),
stop_token=error_token)
error_handler.FinishFile()
def RunMetaDataPass(start_token, metadata_pass, error_handler, filename=''):
"""Run a metadata pass over a token stream.
Args:
start_token: The first token in a token stream.
metadata_pass: Metadata pass to run.
error_handler: The error handler to report errors to.
filename: Filename of the source.
Returns:
The token where the error occurred (if any).
"""
try:
metadata_pass.Process(start_token)
except ecmametadatapass.ParseError, parse_err:
if flags.FLAGS.error_trace:
traceback.print_exc()
error_token = parse_err.token
error_msg = str(parse_err)
error_handler.HandleError(
error.Error(errors.FILE_DOES_NOT_PARSE,
('Error parsing file at token "%s". Unable to '
'check the rest of file.'
'\nError "%s"' % (error_token, error_msg)), error_token))
return error_token
except Exception: # pylint: disable=broad-except
traceback.print_exc()
error_handler.HandleError(
error.Error(
errors.FILE_DOES_NOT_PARSE,
'Internal error in %s' % filename))
def _RunChecker(start_token, error_handler,
limited_doc_checks, is_html,
stop_token=None):
state_tracker = javascriptstatetracker.JavaScriptStateTracker()
style_checker = checker.JavaScriptStyleChecker(
state_tracker=state_tracker,
error_handler=error_handler)
style_checker.Check(start_token,
is_html=is_html,
limited_doc_checks=limited_doc_checks,
stop_token=stop_token)

101
tools/closure_linter/build/lib/closure_linter/runner_test.py

@ -1,101 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the runner module."""
__author__ = ('nnaze@google.com (Nathan Naze)')
import StringIO
import mox
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import error
from closure_linter.common import errorhandler
from closure_linter.common import tokens
class LimitedDocTest(googletest.TestCase):
def testIsLimitedDocCheck(self):
self.assertTrue(runner._IsLimitedDocCheck('foo_test.js', ['_test.js']))
self.assertFalse(runner._IsLimitedDocCheck('foo_bar.js', ['_test.js']))
self.assertTrue(runner._IsLimitedDocCheck(
'foo_moo.js', ['moo.js', 'quack.js']))
self.assertFalse(runner._IsLimitedDocCheck(
'foo_moo.js', ['woof.js', 'quack.js']))
class RunnerTest(googletest.TestCase):
def setUp(self):
self.mox = mox.Mox()
def testRunOnMissingFile(self):
mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)
def ValidateError(err):
return (isinstance(err, error.Error) and
err.code is errors.FILE_NOT_FOUND and
err.token is None)
mock_error_handler.HandleFile('does_not_exist.js', None)
mock_error_handler.HandleError(mox.Func(ValidateError))
mock_error_handler.FinishFile()
self.mox.ReplayAll()
runner.Run('does_not_exist.js', mock_error_handler)
self.mox.VerifyAll()
def testBadTokenization(self):
mock_error_handler = self.mox.CreateMock(errorhandler.ErrorHandler)
def ValidateError(err):
return (isinstance(err, error.Error) and
err.code is errors.FILE_IN_BLOCK and
err.token.string == '}')
mock_error_handler.HandleFile('foo.js', mox.IsA(tokens.Token))
mock_error_handler.HandleError(mox.Func(ValidateError))
mock_error_handler.HandleError(mox.IsA(error.Error))
mock_error_handler.FinishFile()
self.mox.ReplayAll()
source = StringIO.StringIO(_BAD_TOKENIZATION_SCRIPT)
runner.Run('foo.js', mock_error_handler, source)
self.mox.VerifyAll()
_BAD_TOKENIZATION_SCRIPT = """
function foo () {
var a = 3;
var b = 2;
return b + a; /* Comment not closed
}
"""
if __name__ == '__main__':
googletest.main()

206
tools/closure_linter/build/lib/closure_linter/scopeutil.py

@ -1,206 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to match goog.scope alias statements."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import itertools
from closure_linter import ecmametadatapass
from closure_linter import tokenutil
from closure_linter.javascripttokens import JavaScriptTokenType
def IsGoogScopeBlock(context):
"""Whether the given context is a goog.scope block.
This function only checks that the block is a function block inside
a goog.scope() call.
TODO(nnaze): Implement goog.scope checks that verify the call is
in the root context and contains only a single function literal.
Args:
context: An EcmaContext of type block.
Returns:
Whether the context is a goog.scope block.
"""
if context.type != ecmametadatapass.EcmaContext.BLOCK:
return False
if not _IsFunctionLiteralBlock(context):
return False
# Check that this function is contained by a group
# of form "goog.scope(...)".
parent = context.parent
if parent and parent.type is ecmametadatapass.EcmaContext.GROUP:
last_code_token = parent.start_token.metadata.last_code
if (last_code_token and
last_code_token.type is JavaScriptTokenType.IDENTIFIER and
last_code_token.string == 'goog.scope'):
return True
return False
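# Example (informal): in 'goog.scope(function() { var x; });' the block
# context of the function literal satisfies IsGoogScopeBlock, while the
# block in 'goog.scope(try{});' does not, since it is not a function
# literal block.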
def _IsFunctionLiteralBlock(block_context):
"""Check if a context is a function literal block (without parameters).
Example function literal block: 'function() {}'
Args:
block_context: An EcmaContext of type block.
Returns:
Whether this context is a function literal block.
"""
previous_code_tokens_iter = itertools.ifilter(
lambda token: token.type not in JavaScriptTokenType.NON_CODE_TYPES,
reversed(block_context.start_token))
# Ignore the current token
next(previous_code_tokens_iter, None)
# Grab the previous three tokens and put them in correct order.
previous_code_tokens = list(itertools.islice(previous_code_tokens_iter, 3))
previous_code_tokens.reverse()
# There aren't three previous tokens.
if len(previous_code_tokens) != 3:
return False
# Check that the previous three code tokens are "function ()"
previous_code_token_types = [token.type for token in previous_code_tokens]
if (previous_code_token_types == [
JavaScriptTokenType.FUNCTION_DECLARATION,
JavaScriptTokenType.START_PARAMETERS,
JavaScriptTokenType.END_PARAMETERS]):
return True
return False
def IsInClosurizedNamespace(symbol, closurized_namespaces):
"""Match a goog.scope alias.
Args:
symbol: An identifier like 'goog.events.Event'.
closurized_namespaces: Iterable of valid Closurized namespaces (strings).
Returns:
True if symbol is an identifier in a Closurized namespace, otherwise False.
"""
for ns in closurized_namespaces:
if symbol.startswith(ns + '.'):
return True
return False
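# Example (informal): IsInClosurizedNamespace('goog.events.Event', ['goog'])
# is True, while IsInClosurizedNamespace('aaa.bbb.NonClosurizedClass',
# ['goog', 'myproject']) is False.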
def _GetVarAssignmentTokens(context):
"""Returns the tokens from context if it is a var assignment.
Args:
context: An EcmaContext.
Returns:
If a var assignment, the tokens contained within it, without the trailing
semicolon.
"""
if context.type != ecmametadatapass.EcmaContext.VAR:
return
# Get the tokens in this statement.
if context.start_token and context.end_token:
statement_tokens = tokenutil.GetTokenRange(context.start_token,
context.end_token)
else:
return
# And now just those tokens that are actually code.
is_code_token = lambda t: t.type not in JavaScriptTokenType.NON_CODE_TYPES
code_tokens = filter(is_code_token, statement_tokens)
# Pop off the semicolon if present.
if code_tokens and code_tokens[-1].IsType(JavaScriptTokenType.SEMICOLON):
code_tokens.pop()
if len(code_tokens) < 4:
return
if (code_tokens[0].IsKeyword('var') and
code_tokens[1].IsType(JavaScriptTokenType.SIMPLE_LVALUE) and
code_tokens[2].IsOperator('=')):
return code_tokens
def MatchAlias(context):
"""Match an alias statement (some identifier assigned to a variable).
Example alias: var MyClass = proj.longNamespace.MyClass.
Args:
context: An EcmaContext of type EcmaContext.VAR.
Returns:
If a valid alias, returns a tuple of alias and symbol, otherwise None.
"""
code_tokens = _GetVarAssignmentTokens(context)
if code_tokens is None:
return
if all(tokenutil.IsIdentifierOrDot(t) for t in code_tokens[3:]):
# var Foo = bar.Foo;
alias, symbol = code_tokens[1], code_tokens[3]
# Mark both tokens as an alias definition to not count them as usages.
alias.metadata.is_alias_definition = True
symbol.metadata.is_alias_definition = True
return alias.string, tokenutil.GetIdentifierForToken(symbol)
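# Example (sketch, using the test utilities in this package): match a
# simple alias assignment.
#
#   from closure_linter import testutil
#   start_token = testutil.TokenizeSourceAndRunEcmaPass(
#       'var Component = goog.ui.Component;')
#   # Calling MatchAlias on the resulting VAR context yields
#   # ('Component', 'goog.ui.Component').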
def MatchModuleAlias(context):
"""Match an alias statement in a goog.module style import.
Example alias: var MyClass = goog.require('proj.longNamespace.MyClass').
Args:
context: An EcmaContext.
Returns:
If a valid alias, returns a tuple of alias and symbol, otherwise None.
"""
code_tokens = _GetVarAssignmentTokens(context)
if code_tokens is None:
return
if (code_tokens[3].IsType(JavaScriptTokenType.IDENTIFIER) and
code_tokens[3].string == 'goog.require'):
# var Foo = goog.require('bar.Foo');
alias = code_tokens[1]
symbol = tokenutil.GetStringAfterToken(code_tokens[3])
if symbol:
alias.metadata.is_alias_definition = True
return alias.string, symbol
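# Example (informal): for the statement
#   var MyClass = goog.require('proj.longNamespace.MyClass');
# MatchModuleAlias returns ('MyClass', 'proj.longNamespace.MyClass'), and
# it returns None when the goog.require argument is not a string literal.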

222
tools/closure_linter/build/lib/closure_linter/scopeutil_test.py

@ -1,222 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the scopeutil module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import scopeutil
from closure_linter import testutil
def _FindContexts(start_token):
"""Depth first search of all contexts referenced by a token stream.
Includes contexts' parents, which might not be directly referenced
by any token in the stream.
Args:
start_token: First token in the token stream.
Yields:
All contexts referenced by this token stream.
"""
seen_contexts = set()
# For each token, yield the context if we haven't seen it before.
for token in start_token:
token_context = token.metadata.context
contexts = [token_context]
# Also grab all the context's ancestors.
parent = token_context.parent
while parent:
contexts.append(parent)
parent = parent.parent
# Yield each of these contexts if we've not seen them.
for context in contexts:
if context not in seen_contexts:
yield context
seen_contexts.add(context)
def _FindFirstContextOfType(token, context_type):
"""Returns the first statement context."""
for context in _FindContexts(token):
if context.type == context_type:
return context
def _ParseAssignment(script):
start_token = testutil.TokenizeSourceAndRunEcmaPass(script)
statement = _FindFirstContextOfType(
start_token, ecmametadatapass.EcmaContext.VAR)
return statement
class StatementTest(googletest.TestCase):
def assertAlias(self, expected_match, script):
statement = _ParseAssignment(script)
match = scopeutil.MatchAlias(statement)
self.assertEquals(expected_match, match)
def assertModuleAlias(self, expected_match, script):
statement = _ParseAssignment(script)
match = scopeutil.MatchModuleAlias(statement)
self.assertEquals(expected_match, match)
def testSimpleAliases(self):
self.assertAlias(
('foo', 'goog.foo'),
'var foo = goog.foo;')
self.assertAlias(
('foo', 'goog.foo'),
'var foo = goog.foo') # No semicolon
def testAliasWithComment(self):
self.assertAlias(
('Component', 'goog.ui.Component'),
'var Component = /* comment */ goog.ui.Component;')
def testMultilineAlias(self):
self.assertAlias(
('Component', 'goog.ui.Component'),
'var Component = \n goog.ui.\n Component;')
def testNonSymbolAliasVarStatements(self):
self.assertAlias(None, 'var foo = 3;')
self.assertAlias(None, 'var foo = function() {};')
self.assertAlias(None, 'var foo = bar ? baz : qux;')
def testModuleAlias(self):
self.assertModuleAlias(
('foo', 'goog.foo'),
'var foo = goog.require("goog.foo");')
self.assertModuleAlias(
None,
'var foo = goog.require(notastring);')
class ScopeBlockTest(googletest.TestCase):
@staticmethod
def _GetBlocks(source):
start_token = testutil.TokenizeSourceAndRunEcmaPass(source)
for context in _FindContexts(start_token):
if context.type is ecmametadatapass.EcmaContext.BLOCK:
yield context
def assertNoBlocks(self, script):
blocks = list(self._GetBlocks(script))
self.assertEquals([], blocks)
def testNotBlocks(self):
# Ensure these are not considered blocks.
self.assertNoBlocks('goog.scope(if{});')
self.assertNoBlocks('goog.scope(for{});')
self.assertNoBlocks('goog.scope(switch{});')
self.assertNoBlocks('goog.scope(function foo{});')
def testNonScopeBlocks(self):
blocks = list(self._GetBlocks('goog.scope(try{});'))
self.assertEquals(1, len(blocks))
self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
blocks = list(self._GetBlocks('goog.scope(function(a,b){});'))
self.assertEquals(1, len(blocks))
self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
blocks = list(self._GetBlocks('goog.scope(try{} catch(){});'))
# Two blocks: try and catch.
self.assertEquals(2, len(blocks))
self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
blocks = list(self._GetBlocks('goog.scope(try{} catch(){} finally {});'))
self.assertEquals(3, len(blocks))
self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
self.assertFalse(scopeutil.IsGoogScopeBlock(blocks.pop()))
class AliasTest(googletest.TestCase):
def setUp(self):
self.start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
def testMatchAliasStatement(self):
matches = set()
for context in _FindContexts(self.start_token):
match = scopeutil.MatchAlias(context)
if match:
matches.add(match)
self.assertEquals(
set([('bar', 'baz'),
('foo', 'this.foo_'),
('Component', 'goog.ui.Component'),
('MyClass', 'myproject.foo.MyClass'),
('NonClosurizedClass', 'aaa.bbb.NonClosurizedClass')]),
matches)
def testMatchAliasStatement_withClosurizedNamespaces(self):
closurized_namespaces = frozenset(['goog', 'myproject'])
matches = set()
for context in _FindContexts(self.start_token):
match = scopeutil.MatchAlias(context)
if match:
unused_alias, symbol = match
if scopeutil.IsInClosurizedNamespace(symbol, closurized_namespaces):
matches.add(match)
self.assertEquals(
set([('MyClass', 'myproject.foo.MyClass'),
('Component', 'goog.ui.Component')]),
matches)
_TEST_SCRIPT = """
goog.scope(function() {
var Component = goog.ui.Component; // scope alias
var MyClass = myproject.foo.MyClass; // scope alias
// Scope alias of non-Closurized namespace.
var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
var foo = this.foo_; // non-scope object property alias
var bar = baz; // variable alias
var component = new Component();
});
"""
if __name__ == '__main__':
googletest.main()

1294
tools/closure_linter/build/lib/closure_linter/statetracker.py

File diff suppressed because it is too large

123
tools/closure_linter/build/lib/closure_linter/statetracker_test.py

@ -1,123 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the statetracker module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import testutil
class _FakeDocFlag(object):
def __repr__(self):
return '@%s %s' % (self.flag_type, self.name)
class IdentifierTest(googletest.TestCase):
def testJustIdentifier(self):
a = javascripttokens.JavaScriptToken(
'abc', javascripttokens.JavaScriptTokenType.IDENTIFIER, 'abc', 1)
st = statetracker.StateTracker()
st.HandleToken(a, None)
class DocCommentTest(googletest.TestCase):
@staticmethod
def _MakeDocFlagFake(flag_type, name=None):
flag = _FakeDocFlag()
flag.flag_type = flag_type
flag.name = name
return flag
def testDocFlags(self):
comment = statetracker.DocComment(None)
a = self._MakeDocFlagFake('param', 'foo')
comment.AddFlag(a)
b = self._MakeDocFlagFake('param', '')
comment.AddFlag(b)
c = self._MakeDocFlagFake('param', 'bar')
comment.AddFlag(c)
self.assertEquals(
['foo', 'bar'],
comment.ordered_params)
self.assertEquals(
[a, b, c],
comment.GetDocFlags())
def testInvalidate(self):
comment = statetracker.DocComment(None)
self.assertFalse(comment.invalidated)
self.assertFalse(comment.IsInvalidated())
comment.Invalidate()
self.assertTrue(comment.invalidated)
self.assertTrue(comment.IsInvalidated())
def testSuppressionOnly(self):
comment = statetracker.DocComment(None)
self.assertFalse(comment.SuppressionOnly())
comment.AddFlag(self._MakeDocFlagFake('suppress'))
self.assertTrue(comment.SuppressionOnly())
comment.AddFlag(self._MakeDocFlagFake('foo'))
self.assertFalse(comment.SuppressionOnly())
def testRepr(self):
comment = statetracker.DocComment(None)
comment.AddFlag(self._MakeDocFlagFake('param', 'foo'))
comment.AddFlag(self._MakeDocFlagFake('param', 'bar'))
self.assertEquals(
'<DocComment: [\'foo\', \'bar\'], [@param foo, @param bar]>',
repr(comment))
def testDocFlagParam(self):
comment = self._ParseComment("""
/**
* @param {string} [name] Name of customer.
*/""")
flag = comment.GetFlag('param')
self.assertEquals('string', flag.type)
self.assertEquals('string', flag.jstype.ToString())
self.assertEquals('[name]', flag.name)
def _ParseComment(self, script):
"""Parse a script that contains one comment and return it."""
_, comments = testutil.ParseFunctionsAndComments(script)
self.assertEquals(1, len(comments))
return comments[0]
if __name__ == '__main__':
googletest.main()

67
tools/closure_linter/build/lib/closure_linter/strict_test.py

@ -1,67 +0,0 @@
#!/usr/bin/env python
# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --strict.
Tests errors that can be thrown by gjslint when in strict mode.
"""
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import erroraccumulator
flags.FLAGS.strict = True
class StrictTest(unittest.TestCase):
"""Tests scenarios where strict generates warnings."""
def testUnclosedString(self):
"""Tests warnings are reported when nothing is disabled.
b/11450054.
"""
original = [
'bug = function() {',
' (\'foo\'\');',
'};',
'',
]
expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING,
errors.FILE_IN_BLOCK]
self._AssertErrors(original, expected)
def _AssertErrors(self, original, expected_errors):
"""Asserts that the error fixer corrects original to expected."""
# Trap gjslint's output parse it to get messages added.
error_accumulator = erroraccumulator.ErrorAccumulator()
runner.Run('testing.js', error_accumulator, source=original)
error_nums = [e.code for e in error_accumulator.GetErrors()]
error_nums.sort()
expected_errors.sort()
self.assertListEqual(error_nums, expected_errors)
if __name__ == '__main__':
googletest.main()

94
tools/closure_linter/build/lib/closure_linter/testutil.py

@ -1,94 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for testing gjslint components."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import StringIO
from closure_linter import ecmametadatapass
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer
def TokenizeSource(source):
"""Convert a source into a string of tokens.
Args:
source: A source file as a string or file-like object (iterates lines).
Returns:
The first token of the resulting token stream.
"""
if isinstance(source, basestring):
source = StringIO.StringIO(source)
tokenizer = javascripttokenizer.JavaScriptTokenizer()
return tokenizer.TokenizeFile(source)
def TokenizeSourceAndRunEcmaPass(source):
"""Tokenize a source and run the EcmaMetaDataPass on it.
Args:
source: A source file as a string or file-like object (iterates lines).
Returns:
The first token of the resulting token stream.
"""
start_token = TokenizeSource(source)
ecma_pass = ecmametadatapass.EcmaMetaDataPass()
ecma_pass.Process(start_token)
return start_token
def ParseFunctionsAndComments(source, error_handler=None):
"""Run the tokenizer and tracker and return comments and functions found.
Args:
source: A source file as a string or file-like object (iterates lines).
error_handler: An error handler.
Returns:
The functions and comments as a tuple.
"""
start_token = TokenizeSourceAndRunEcmaPass(source)
tracker = javascriptstatetracker.JavaScriptStateTracker()
if error_handler is not None:
tracker.DocFlagPass(start_token, error_handler)
functions = []
comments = []
for token in start_token:
tracker.HandleToken(token, tracker.GetLastNonSpaceToken())
function = tracker.GetFunction()
if function and function not in functions:
functions.append(function)
comment = tracker.GetDocComment()
if comment and comment not in comments:
comments.append(comment)
tracker.HandleAfterToken(token)
return functions, comments
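# Example (sketch): tokenize a snippet and pull out its functions and doc
# comments.
#
#   functions, comments = ParseFunctionsAndComments(
#       '/** @param {string} name */ function greet(name) {}')
#   # functions holds the tracked function, comments its doc comment.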

697
tools/closure_linter/build/lib/closure_linter/tokenutil.py

@ -1,697 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Token utility functions."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import copy
import StringIO
from closure_linter.common import tokens
from closure_linter.javascripttokens import JavaScriptToken
from closure_linter.javascripttokens import JavaScriptTokenType
# Shorthand
Type = tokens.TokenType
def GetFirstTokenInSameLine(token):
"""Returns the first token in the same line as token.
Args:
token: Any token in the line.
Returns:
The first token in the same line as token.
"""
while not token.IsFirstInLine():
token = token.previous
return token
def GetFirstTokenInPreviousLine(token):
"""Returns the first token in the previous line as token.
Args:
token: Any token in the line.
Returns:
The first token in the previous line, or None if the given token is on the
first line.
"""
first_in_line = GetFirstTokenInSameLine(token)
if first_in_line.previous:
return GetFirstTokenInSameLine(first_in_line.previous)
return None
def GetLastTokenInSameLine(token):
"""Returns the last token in the same line as token.
Args:
token: Any token in the line.
Returns:
The last token in the same line as token.
"""
while not token.IsLastInLine():
token = token.next
return token
def GetAllTokensInSameLine(token):
"""Returns all tokens in the same line as the given token.
Args:
token: Any token in the line.
Returns:
All tokens on the same line as the given token.
"""
first_token = GetFirstTokenInSameLine(token)
last_token = GetLastTokenInSameLine(token)
tokens_in_line = []
while first_token != last_token:
tokens_in_line.append(first_token)
first_token = first_token.next
tokens_in_line.append(last_token)
return tokens_in_line
def CustomSearch(start_token, func, end_func=None, distance=None,
reverse=False):
"""Returns the first token where func is True within distance of this token.
Args:
start_token: The token to start searching from
func: The function to call to test a token for applicability
end_func: The function to call to test a token to determine whether to abort
the search.
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token matching func within distance of this token, or None if no
such token is found.
"""
token = start_token
if reverse:
while token and (distance is None or distance > 0):
previous = token.previous
if previous:
if func(previous):
return previous
if end_func and end_func(previous):
return None
token = previous
if distance is not None:
distance -= 1
else:
while token and (distance is None or distance > 0):
next_token = token.next
if next_token:
if func(next_token):
return next_token
if end_func and end_func(next_token):
return None
token = next_token
if distance is not None:
distance -= 1
return None
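# Example (informal): find the nearest preceding code token, exactly as
# GetPreviousCodeToken below does:
#
#   CustomSearch(
#       token,
#       lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
#       reverse=True)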
def Search(start_token, token_types, distance=None, reverse=False):
"""Returns the first token of type in token_types within distance.
Args:
start_token: The token to start searching from
token_types: The allowable types of the token being searched for
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token, or
None if no such token is found.
"""
return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
None, distance, reverse)
def SearchExcept(start_token, token_types, distance=None, reverse=False):
"""Returns the first token not of any type in token_types within distance.
Args:
start_token: The token to start searching from
token_types: The unallowable types of the token being searched for
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token not of any type in token_types within distance of this
token, or None if no such token is found.
"""
return CustomSearch(start_token,
lambda token: not token.IsAnyType(token_types),
None, distance, reverse)
def SearchUntil(start_token, token_types, end_types, distance=None,
reverse=False):
"""Returns the first token of type in token_types before a token of end_type.
Args:
start_token: The token to start searching from.
token_types: The allowable types of the token being searched for.
end_types: Types of tokens to abort search if we find.
distance: The number of tokens to look through before failing search. Must
be positive. If unspecified, will search until the end of the token
chain
reverse: When true, search the tokens before this one instead of the tokens
after it
Returns:
The first token of any type in token_types within distance of this token
before any token of a type in end_types, or None if no such token is found.
"""
return CustomSearch(start_token, lambda token: token.IsAnyType(token_types),
lambda token: token.IsAnyType(end_types),
distance, reverse)
def DeleteToken(token):
"""Deletes the given token from the linked list.
Args:
token: The token to delete
"""
# When deleting a token, we do not update the deleted token itself to make
# sure the previous and next pointers are still pointing to tokens which are
# not deleted. Also it is very hard to keep track of all previously deleted
# tokens to update them when their pointers become invalid. So we add this
# flag so that any token linked-list iteration logic can safely skip deleted
# nodes when its current token is deleted.
token.is_deleted = True
if token.previous:
token.previous.next = token.next
if token.next:
token.next.previous = token.previous
following_token = token.next
while following_token and following_token.metadata.last_code == token:
following_token.metadata.last_code = token.metadata.last_code
following_token = following_token.next
def DeleteTokens(token, token_count):
"""Deletes the given number of tokens starting with the given token.
Args:
token: The token to start deleting at.
token_count: The total number of tokens to delete.
"""
for i in xrange(1, token_count):
DeleteToken(token.next)
DeleteToken(token)
def InsertTokenBefore(new_token, token):
"""Insert new_token before token.
Args:
new_token: A token to be added to the stream
token: A token already in the stream
"""
new_token.next = token
new_token.previous = token.previous
new_token.metadata = copy.copy(token.metadata)
if new_token.IsCode():
old_last_code = token.metadata.last_code
following_token = token
while (following_token and
following_token.metadata.last_code == old_last_code):
following_token.metadata.last_code = new_token
following_token = following_token.next
token.previous = new_token
if new_token.previous:
new_token.previous.next = new_token
if new_token.start_index is None:
if new_token.line_number == token.line_number:
new_token.start_index = token.start_index
else:
previous_token = new_token.previous
if previous_token:
new_token.start_index = (previous_token.start_index +
len(previous_token.string))
else:
new_token.start_index = 0
iterator = new_token.next
while iterator and iterator.line_number == new_token.line_number:
iterator.start_index += len(new_token.string)
iterator = iterator.next
def InsertTokenAfter(new_token, token):
"""Insert new_token after token.
Args:
new_token: A token to be added to the stream
token: A token already in the stream
"""
new_token.previous = token
new_token.next = token.next
new_token.metadata = copy.copy(token.metadata)
if token.IsCode():
new_token.metadata.last_code = token
if new_token.IsCode():
following_token = token.next
while following_token and following_token.metadata.last_code == token:
following_token.metadata.last_code = new_token
following_token = following_token.next
token.next = new_token
if new_token.next:
new_token.next.previous = new_token
if new_token.start_index is None:
if new_token.line_number == token.line_number:
new_token.start_index = token.start_index + len(token.string)
else:
new_token.start_index = 0
iterator = new_token.next
while iterator and iterator.line_number == new_token.line_number:
iterator.start_index += len(new_token.string)
iterator = iterator.next
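# Example (informal): inserting a new code token after 'var' in the stream
# 'var' -> ' ' re-links the previous/next pointers, derives the new token's
# start_index from the token before it, and updates metadata.last_code on
# the tokens that follow, as exercised by AssertInsertTokenAfterBefore in
# the tokenutil tests below.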
def InsertTokensAfter(new_tokens, token):
"""Insert multiple tokens after token.
Args:
new_tokens: An array of tokens to be added to the stream
token: A token already in the stream
"""
# TODO(user): It would be nicer to have InsertTokenAfter defer to here
# instead of vice-versa.
current_token = token
for new_token in new_tokens:
InsertTokenAfter(new_token, current_token)
current_token = new_token
def InsertSpaceTokenAfter(token):
"""Inserts a space token after the given token.
Args:
token: The token to insert a space token after
Returns:
The new space token.
"""
space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
token.line_number)
InsertTokenAfter(space_token, token)
return space_token
def InsertBlankLineAfter(token):
"""Inserts a blank line after the given token.
Args:
token: The token to insert a blank line after
Returns:
The new blank line token.
"""
blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
token.line_number + 1)
InsertLineAfter(token, [blank_token])
return blank_token
def InsertLineAfter(token, new_tokens):
"""Inserts a new line consisting of new_tokens after the given token.
Args:
token: The token to insert after.
new_tokens: The tokens that will make up the new line.
"""
insert_location = token
for new_token in new_tokens:
InsertTokenAfter(new_token, insert_location)
insert_location = new_token
# Update all subsequent line numbers.
next_token = new_tokens[-1].next
while next_token:
next_token.line_number += 1
next_token = next_token.next
def SplitToken(token, position):
"""Splits the token into two tokens at position.
Args:
token: The token to split
position: The position to split at. Will be the beginning of second token.
Returns:
The new second token.
"""
new_string = token.string[position:]
token.string = token.string[:position]
new_token = JavaScriptToken(new_string, token.type, token.line,
token.line_number)
InsertTokenAfter(new_token, token)
return new_token
def Compare(token1, token2):
"""Compares two tokens and determines their relative order.
Args:
token1: The first token to compare.
token2: The second token to compare.
Returns:
A negative integer, zero, or a positive integer as the first token is
before, equal, or after the second in the token stream.
"""
if token2.line_number != token1.line_number:
return token1.line_number - token2.line_number
else:
return token1.start_index - token2.start_index
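# Example (informal): Compare is a classic cmp-style function, so in
# Python 2 a list of tokens can be put into stream order with
# sorted(token_list, cmp=Compare).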
def GoogScopeOrNoneFromStartBlock(token):
"""Determines if the given START_BLOCK is part of a goog.scope statement.
Args:
token: A token of type START_BLOCK.
Returns:
The goog.scope function call token, or None if no such call exists.
"""
if token.type != JavaScriptTokenType.START_BLOCK:
return None
# Search for a goog.scope statement, which will be 5 tokens before the
# block. Illustration of the tokens found prior to the start block:
# goog.scope(function() {
# 5 4 3 21 ^
maybe_goog_scope = token
for unused_i in xrange(5):
maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
maybe_goog_scope.previous else None)
if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
return maybe_goog_scope
def GetTokenRange(start_token, end_token):
"""Returns a list of tokens between the two given, inclusive.
Args:
start_token: Start token in the range.
end_token: End token in the range.
Returns:
A list of tokens, in order, from start_token to end_token (including start
and end). Returns None if the tokens do not describe a valid range.
"""
token_range = []
token = start_token
while token:
token_range.append(token)
if token == end_token:
return token_range
token = token.next
def TokensToString(token_iterable):
"""Convert a number of tokens into a string.
Newlines will be inserted whenever the line_number of two neighboring
tokens differs.
Args:
token_iterable: The tokens to turn to a string.
Returns:
A string representation of the given tokens.
"""
buf = StringIO.StringIO()
token_list = list(token_iterable)
if not token_list:
return ''
line_number = token_list[0].line_number
for token in token_list:
while line_number < token.line_number:
line_number += 1
buf.write('\n')
if line_number > token.line_number:
line_number = token.line_number
buf.write('\n')
buf.write(token.string)
return buf.getvalue()
def GetPreviousCodeToken(token):
"""Returns the code token before the specified token.
Args:
token: A token.
Returns:
The code token before the specified token or None if no such token
exists.
"""
return CustomSearch(
token,
lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
reverse=True)
def GetNextCodeToken(token):
"""Returns the next code token after the specified token.
Args:
token: A token.
Returns:
The next code token after the specified token or None if no such token
exists.
"""
return CustomSearch(
token,
lambda t: t and t.type not in JavaScriptTokenType.NON_CODE_TYPES,
reverse=False)
def GetIdentifierStart(token):
"""Returns the first token in an identifier.
Given a token which is part of an identifier, returns the token at the start
of the identifier.
Args:
token: A token which is part of an identifier.
Returns:
The token at the start of the identifier or None if the identifier was not
of the form 'a.b.c' (e.g. "['a']['b'].c").
"""
start_token = token
previous_code_token = GetPreviousCodeToken(token)
while (previous_code_token and (
previous_code_token.IsType(JavaScriptTokenType.IDENTIFIER) or
IsDot(previous_code_token))):
start_token = previous_code_token
previous_code_token = GetPreviousCodeToken(previous_code_token)
if IsDot(start_token):
return None
return start_token
def GetIdentifierForToken(token):
"""Get the symbol specified by a token.
Given a token, this function additionally concatenates any parts of the
identifier that are split by whitespace or a newline.
The function will return None if the token is not the first token of an
identifier.
Args:
token: The first token of a symbol.
Returns:
The whole symbol, as a string.
"""
# Search backward to determine if this token is the first token of the
# identifier. If it is not the first token, return None to signal that this
# token should be ignored.
prev_token = token.previous
while prev_token:
if (prev_token.IsType(JavaScriptTokenType.IDENTIFIER) or
IsDot(prev_token)):
return None
if (prev_token.IsType(tokens.TokenType.WHITESPACE) or
prev_token.IsAnyType(JavaScriptTokenType.COMMENT_TYPES)):
prev_token = prev_token.previous
else:
break
# A "function foo()" declaration.
if token.type is JavaScriptTokenType.FUNCTION_NAME:
return token.string
# A "var foo" declaration (if the previous token is 'var')
previous_code_token = GetPreviousCodeToken(token)
if previous_code_token and previous_code_token.IsKeyword('var'):
return token.string
# Otherwise, this is potentially a namespaced (goog.foo.bar) identifier that
# could span multiple lines or be broken up by whitespace. We need
# to concatenate.
identifier_types = set([
JavaScriptTokenType.IDENTIFIER,
JavaScriptTokenType.SIMPLE_LVALUE
])
assert token.type in identifier_types
# Start with the first token
symbol_tokens = [token]
if token.next:
for t in token.next:
last_symbol_token = symbol_tokens[-1]
# A dot is part of the previous symbol.
if IsDot(t):
symbol_tokens.append(t)
continue
# An identifier is part of the previous symbol if the previous one was a
# dot.
if t.type in identifier_types:
if IsDot(last_symbol_token):
symbol_tokens.append(t)
continue
else:
break
# Skip any whitespace
if t.type in JavaScriptTokenType.NON_CODE_TYPES:
continue
# This is the end of the identifier. Stop iterating.
break
if symbol_tokens:
return ''.join([t.string for t in symbol_tokens])
def GetStringAfterToken(token):
"""Get string after token.
Args:
token: Search will be done after this token.
Returns:
The string found after the token, or None if there is none (an empty
string also returns None).
The search is limited to the end of the string because an empty string has
no Type.STRING_TEXT token, and we do not want to return the next string.
E.g.
a = '';
b = 'test';
When searching for the string after 'a', an unbounded search would return
'test', which is undesirable since there is an empty string before it.
This returns None when the string is empty or no string is found, as in
both cases there is no Type.STRING_TEXT token.
"""
string_token = SearchUntil(token, JavaScriptTokenType.STRING_TEXT,
[JavaScriptTokenType.SINGLE_QUOTE_STRING_END,
JavaScriptTokenType.DOUBLE_QUOTE_STRING_END])
if string_token:
return string_token.string
else:
return None
def IsDot(token):
"""Whether the token represents a "dot" operator (foo.bar)."""
return token.type is JavaScriptTokenType.OPERATOR and token.string == '.'
def IsIdentifierOrDot(token):
"""Whether the token is either an identifier or a '.'."""
return (token.type in [JavaScriptTokenType.IDENTIFIER,
JavaScriptTokenType.SIMPLE_LVALUE] or
IsDot(token))

297
tools/closure_linter/build/lib/closure_linter/tokenutil_test.py

@ -1,297 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the scopeutil module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
class FakeToken(object):
pass
class TokenUtilTest(googletest.TestCase):
def testGetTokenRange(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.next = b
b.next = c
c.next = d
self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
# This is an error as e does not come after a in the token chain.
self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
def testTokensToString(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.string = 'aaa'
b.string = 'bbb'
c.string = 'ccc'
d.string = 'ddd'
e.string = 'eee'
a.line_number = 5
b.line_number = 6
c.line_number = 6
d.line_number = 10
e.line_number = 11
self.assertEquals(
'aaa\nbbbccc\n\n\n\nddd\neee',
tokenutil.TokensToString([a, b, c, d, e]))
self.assertEquals(
'ddd\neee\naaa\nbbbccc',
tokenutil.TokensToString([d, e, a, b, c]),
'Neighboring tokens not in line_number order should have a newline '
'between them.')
def testGetPreviousCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
None,
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'.',
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
self.assertEquals(
'start1',
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('.')).string)
def testGetNextCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'.',
tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
self.assertEquals(
'end1',
tokenutil.GetNextCodeToken(_GetTokenStartingWith('.')).string)
self.assertEquals(
None,
tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
def testGetIdentifierStart(self):
tokens = testutil.TokenizeSource("""
start1 . // comment
prototype. /* another comment */
end1
['edge'][case].prototype.
end2 = function() {}
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
self.assertEquals(
None,
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
def testInsertTokenBefore(self):
self.AssertInsertTokenAfterBefore(False)
def testInsertTokenAfter(self):
self.AssertInsertTokenAfterBefore(True)
def AssertInsertTokenAfterBefore(self, after):
new_token = javascripttokens.JavaScriptToken(
'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
existing_token1 = javascripttokens.JavaScriptToken(
'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
existing_token1.start_index = 0
existing_token1.metadata = ecmametadatapass.EcmaMetaData()
existing_token2 = javascripttokens.JavaScriptToken(
' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
existing_token2.start_index = 3
existing_token2.metadata = ecmametadatapass.EcmaMetaData()
existing_token2.metadata.last_code = existing_token1
existing_token1.next = existing_token2
existing_token2.previous = existing_token1
if after:
tokenutil.InsertTokenAfter(new_token, existing_token1)
else:
tokenutil.InsertTokenBefore(new_token, existing_token2)
self.assertEquals(existing_token1, new_token.previous)
self.assertEquals(existing_token2, new_token.next)
self.assertEquals(new_token, existing_token1.next)
self.assertEquals(new_token, existing_token2.previous)
self.assertEquals(existing_token1, new_token.metadata.last_code)
self.assertEquals(new_token, existing_token2.metadata.last_code)
self.assertEquals(0, existing_token1.start_index)
self.assertEquals(3, new_token.start_index)
self.assertEquals(4, existing_token2.start_index)
def testGetIdentifierForToken(self):
tokens = testutil.TokenizeSource("""
start1.abc.def.prototype.
onContinuedLine
(start2.abc.def
.hij.klm
.nop)
start3.abc.def
.hij = function() {};
// An absurd multi-liner.
start4.abc.def.
hij.
klm = function() {};
start5 . aaa . bbb . ccc
shouldntBePartOfThePreviousSymbol
start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
var start7 = 42;
function start8() {
}
start9.abc. // why is there a comment here?
def /* another comment */
shouldntBePart
start10.abc // why is there a comment here?
.def /* another comment */
shouldntBePart
start11.abc. middle1.shouldNotBeIdentifier
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1.abc.def.prototype.onContinuedLine',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'start2.abc.def.hij.klm.nop',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
self.assertEquals(
'start3.abc.def.hij',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
self.assertEquals(
'start4.abc.def.hij.klm',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
self.assertEquals(
'start5.aaa.bbb.ccc',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
self.assertEquals(
'start6.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
self.assertEquals(
'start7',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
self.assertEquals(
'start8',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
self.assertEquals(
'start9.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
self.assertEquals(
'start10.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
self.assertIsNone(
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
if __name__ == '__main__':
googletest.main()

401
tools/closure_linter/build/lib/closure_linter/typeannotation.py

@ -1,401 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Closure typeannotation parsing and utilities."""
from closure_linter import errors
from closure_linter import javascripttokens
from closure_linter.common import error
# Shorthand
TYPE = javascripttokens.JavaScriptTokenType
class TypeAnnotation(object):
"""Represents a structured view of a closure type annotation.
Attributes:
identifier: The name of the type.
key_type: The name part before a colon.
sub_types: The list of sub_types used e.g. for Array.<>
or_null: The '?' annotation
not_null: The '!' annotation
type_group: If this is a grouping (a|b); does not include function(a).
return_type: The return type of a function definition.
alias: The actual type set by closurizednamespaceinfo if the identifier uses
an alias to shorten the name.
tokens: An ordered list of tokens used for this type. May contain
TypeAnnotation instances for sub_types, key_type or return_type.
"""
IMPLICIT_TYPE_GROUP = 2
NULLABILITY_UNKNOWN = 2
# Frequently used known non-nullable types.
NON_NULLABLE = frozenset([
'boolean', 'function', 'number', 'string', 'undefined'])
# Frequently used known nullable types.
NULLABLE_TYPE_WHITELIST = frozenset([
'Array', 'Document', 'Element', 'Function', 'Node', 'NodeList',
'Object'])
def __init__(self):
self.identifier = ''
self.sub_types = []
self.or_null = False
self.not_null = False
self.type_group = False
self.alias = None
self.key_type = None
self.record_type = False
self.opt_arg = False
self.return_type = None
self.tokens = []
def IsFunction(self):
"""Determines whether this is a function definition."""
return self.identifier == 'function'
def IsConstructor(self):
"""Determines whether this is a function definition for a constructor."""
key_type = self.sub_types and self.sub_types[0].key_type
return self.IsFunction() and key_type.identifier == 'new'
def IsRecordType(self):
"""Returns True if this type is a record type."""
return (self.record_type or
bool([t for t in self.sub_types if t.IsRecordType()]))
def IsVarArgsType(self):
"""Determines if the type is a var_args type, i.e. starts with '...'."""
return self.identifier.startswith('...') or (
self.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP and
self.sub_types[0].identifier.startswith('...'))
def IsEmpty(self):
"""Returns True if the type is empty."""
return not self.tokens
def IsUnknownType(self):
"""Returns True if this is the unknown type {?}."""
return (self.or_null
and not self.identifier
and not self.sub_types
and not self.return_type)
def Append(self, item):
"""Adds a sub_type to this type and finalizes it.
Args:
item: The TypeAnnotation item to append.
"""
# item is a TypeAnnotation instance, so pylint: disable=protected-access
self.sub_types.append(item._Finalize(self))
def __repr__(self):
"""Reconstructs the type definition."""
append = ''
if self.sub_types:
separator = (',' if not self.type_group else '|')
if self.identifier == 'function':
surround = '(%s)'
else:
surround = {False: '{%s}' if self.record_type else '<%s>',
True: '(%s)',
self.IMPLICIT_TYPE_GROUP: '%s'}[self.type_group]
append = surround % separator.join([repr(t) for t in self.sub_types])
if self.return_type:
append += ':%s' % repr(self.return_type)
append += '=' if self.opt_arg else ''
prefix = '' + ('?' if self.or_null else '') + ('!' if self.not_null else '')
keyword = '%s:' % repr(self.key_type) if self.key_type else ''
return keyword + prefix + '%s' % (self.alias or self.identifier) + append
def ToString(self):
"""Concats the type's tokens to form a string again."""
ret = []
for token in self.tokens:
if not isinstance(token, TypeAnnotation):
ret.append(token.string)
else:
ret.append(token.ToString())
return ''.join(ret)
def Dump(self, indent=''):
"""Dumps this type's structure for debugging purposes."""
result = []
for t in self.tokens:
if isinstance(t, TypeAnnotation):
result.append(indent + str(t) + ' =>\n' + t.Dump(indent + ' '))
else:
result.append(indent + str(t))
return '\n'.join(result)
def IterIdentifiers(self):
"""Iterates over all identifiers in this type and its subtypes."""
if self.identifier:
yield self.identifier
for subtype in self.IterTypes():
for identifier in subtype.IterIdentifiers():
yield identifier
def IterTypeGroup(self):
"""Iterates over all types in the type group including self.
Yields:
If this is an implicit or manual type group: all sub_types.
Otherwise: self.
E.g. for @type {Foo.<Bar>} this will yield only Foo.<Bar>,
for @type {Foo|(Bar|Sample)} this will yield Foo, Bar and Sample.
"""
if self.type_group:
for sub_type in self.sub_types:
for sub_type in sub_type.IterTypeGroup():
yield sub_type
else:
yield self
def IterTypes(self):
"""Iterates over each subtype as well as return and key types."""
if self.return_type:
yield self.return_type
if self.key_type:
yield self.key_type
for sub_type in self.sub_types:
yield sub_type
def GetNullability(self, modifiers=True):
"""Computes whether the type may be null.
Args:
modifiers: Whether the modifiers ? and ! should be considered in the
evaluation.
Returns:
True if the type allows null, False if the type is strictly non nullable
and NULLABILITY_UNKNOWN if the nullability cannot be determined.
"""
# Explicitly marked nullable types or 'null' are nullable.
if (modifiers and self.or_null) or self.identifier == 'null':
return True
# Explicitly marked non-nullable types or non-nullable base types:
if ((modifiers and self.not_null) or self.record_type
or self.identifier in self.NON_NULLABLE):
return False
# A type group is nullable if any of its elements are nullable.
if self.type_group:
maybe_nullable = False
for sub_type in self.sub_types:
nullability = sub_type.GetNullability()
if nullability == self.NULLABILITY_UNKNOWN:
maybe_nullable = nullability
elif nullability:
return True
return maybe_nullable
# Whitelisted types are nullable.
if self.identifier.rstrip('.') in self.NULLABLE_TYPE_WHITELIST:
return True
# All other types are unknown (most should be nullable, but
# enums are not and typedefs might not be).
return self.NULLABILITY_UNKNOWN
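# Example (informal): given the sets above, a type whose identifier is
# 'string' yields False (strictly non-nullable), 'Object' yields True
# (whitelisted), a '?'-modified type yields True, and an unknown bare
# identifier yields NULLABILITY_UNKNOWN.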
def WillAlwaysBeNullable(self):
"""Computes whether the ! flag is illegal for this type.
This is the case if this type or any of the subtypes is marked as
explicitly nullable.
Returns:
True if the ! flag would be illegal.
"""
if self.or_null or self.identifier == 'null':
return True
if self.type_group:
return bool([t for t in self.sub_types if t.WillAlwaysBeNullable()])
return False
def _Finalize(self, parent):
"""Fixes some parsing issues once the TypeAnnotation is complete."""
# Normalize functions whose definition ended up in the key type because
# they defined a return type after a colon.
if self.key_type and self.key_type.identifier == 'function':
current = self.key_type
current.return_type = self
self.key_type = None
# opt_arg never refers to the return type but to the function itself.
current.opt_arg = self.opt_arg
self.opt_arg = False
return current
# If a typedef just specified the key, it will not end up in the key type.
if parent.record_type and not self.key_type:
current = TypeAnnotation()
current.key_type = self
current.tokens.append(self)
return current
return self
def FirstToken(self):
"""Returns the first token used in this type or any of its subtypes."""
first = self.tokens[0]
return first.FirstToken() if isinstance(first, TypeAnnotation) else first
def Parse(token, token_end, error_handler):
"""Parses a type annotation and returns a TypeAnnotation object."""
return TypeAnnotationParser(error_handler).Parse(token.next, token_end)
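# Example (sketch): given the token range of a doc flag's type, e.g. the
# tokens between the braces of '@param {string|number} x', Parse returns a
# TypeAnnotation whose repr() is 'string|number' and whose sub_types hold
# the two alternatives.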
class TypeAnnotationParser(object):
"""A parser for type annotations constructing the TypeAnnotation object."""
def __init__(self, error_handler):
self._stack = []
self._error_handler = error_handler
self._closing_error = False
def Parse(self, token, token_end):
"""Parses a type annotation and returns a TypeAnnotation object."""
root = TypeAnnotation()
self._stack.append(root)
current = TypeAnnotation()
root.tokens.append(current)
while token and token != token_end:
if token.type in (TYPE.DOC_TYPE_START_BLOCK, TYPE.DOC_START_BRACE):
if token.string == '(':
if (current.identifier and
current.identifier not in ['function', '...']):
self.Error(token,
'Invalid identifier for (): "%s"' % current.identifier)
current.type_group = current.identifier != 'function'
elif token.string == '{':
current.record_type = True
current.tokens.append(token)
self._stack.append(current)
current = TypeAnnotation()
self._stack[-1].tokens.append(current)
elif token.type in (TYPE.DOC_TYPE_END_BLOCK, TYPE.DOC_END_BRACE):
prev = self._stack.pop()
prev.Append(current)
current = prev
# If an implicit type group was created, close it as well.
if prev.type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
prev = self._stack.pop()
prev.Append(current)
current = prev
current.tokens.append(token)
elif token.type == TYPE.DOC_TYPE_MODIFIER:
if token.string == '!':
current.tokens.append(token)
current.not_null = True
elif token.string == '?':
current.tokens.append(token)
current.or_null = True
elif token.string == ':':
current.tokens.append(token)
prev = current
current = TypeAnnotation()
prev.tokens.append(current)
current.key_type = prev
elif token.string == '=':
# For implicit type groups the '=' refers to the parent.
try:
if self._stack[-1].type_group == TypeAnnotation.IMPLICIT_TYPE_GROUP:
self._stack[-1].tokens.append(token)
self._stack[-1].opt_arg = True
else:
current.tokens.append(token)
current.opt_arg = True
except IndexError:
self.ClosingError(token)
elif token.string == '|':
# If a type group has explicitly been opened, do a normal append.
# Otherwise we have to open the type group and move the current
# type into it before appending.
if not self._stack[-1].type_group:
type_group = TypeAnnotation()
if current.key_type and current.key_type.identifier != 'function':
type_group.key_type = current.key_type
current.key_type = None
type_group.type_group = TypeAnnotation.IMPLICIT_TYPE_GROUP
# Fix the token order
prev = self._stack[-1].tokens.pop()
self._stack[-1].tokens.append(type_group)
type_group.tokens.append(prev)
self._stack.append(type_group)
self._stack[-1].tokens.append(token)
self.Append(current, error_token=token)
current = TypeAnnotation()
self._stack[-1].tokens.append(current)
elif token.string == ',':
self.Append(current, error_token=token)
current = TypeAnnotation()
self._stack[-1].tokens.append(token)
self._stack[-1].tokens.append(current)
else:
current.tokens.append(token)
self.Error(token, 'Invalid token')
elif token.type == TYPE.COMMENT:
current.tokens.append(token)
current.identifier += token.string.strip()
elif token.type in [TYPE.DOC_PREFIX, TYPE.WHITESPACE]:
current.tokens.append(token)
else:
current.tokens.append(token)
self.Error(token, 'Unexpected token')
token = token.next
self.Append(current, error_token=token)
try:
ret = self._stack.pop()
except IndexError:
self.ClosingError(token)
# The type is screwed up, but let's return something.
return current
if self._stack and (len(self._stack) != 1 or
ret.type_group != TypeAnnotation.IMPLICIT_TYPE_GROUP):
self.Error(token, 'Too many opening items.')
return ret if len(ret.sub_types) > 1 else ret.sub_types[0]
def Append(self, type_obj, error_token):
"""Appends a new TypeAnnotation object to the current parent."""
if self._stack:
self._stack[-1].Append(type_obj)
else:
self.ClosingError(error_token)
def ClosingError(self, token):
"""Reports an error about too many closing items, but only once."""
if not self._closing_error:
self._closing_error = True
self.Error(token, 'Too many closing items.')
def Error(self, token, message):
"""Calls the error_handler to post an error message."""
if self._error_handler:
self._error_handler.HandleError(error.Error(
errors.JSDOC_DOES_NOT_PARSE,
'Error parsing jsdoc type at token "%s" (column: %d): %s' %
(token.string, token.start_index, message), token))
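The type group branch of GetNullability above folds member results: any nullable member makes the group nullable, an unknown member only downgrades the answer to unknown, and otherwise the group is non-nullable. A minimal standalone sketch of that fold, assuming the sentinel value 2 for NULLABILITY_UNKNOWN (an assumption, not quoted from the file):
# Hedged sketch: the fold over sub-type nullabilities, in isolation.
NULLABILITY_UNKNOWN = 2  # assumed sentinel, distinct from True/False

def group_nullability(member_nullabilities):
    maybe_nullable = False
    for nullability in member_nullabilities:
        if nullability == NULLABILITY_UNKNOWN:
            maybe_nullable = nullability   # unknown is sticky...
        elif nullability:
            return True                    # ...but nullable dominates
    return maybe_nullable

assert group_nullability([False, True]) is True
assert group_nullability([False, False]) is False
assert group_nullability([NULLABILITY_UNKNOWN, False]) == NULLABILITY_UNKNOWN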

232
tools/closure_linter/build/lib/closure_linter/typeannotation_test.py

@@ -1,232 +0,0 @@
#!/usr/bin/env python
"""Unit tests for the typeannotation module."""
import unittest as googletest
from closure_linter import testutil
from closure_linter.common import erroraccumulator
CRAZY_TYPE = ('Array.<!function(new:X,{a:null},...(c|d)):'
'function(...(Object.<string>))>')
class TypeErrorException(Exception):
"""Exception for TypeErrors."""
def __init__(self, errors):
super(TypeErrorException, self).__init__()
self.errors = errors
class TypeParserTest(googletest.TestCase):
"""Tests for typeannotation parsing."""
def _ParseComment(self, script):
"""Parse a script that contains one comment and return it."""
accumulator = erroraccumulator.ErrorAccumulator()
_, comments = testutil.ParseFunctionsAndComments(script, accumulator)
if accumulator.GetErrors():
raise TypeErrorException(accumulator.GetErrors())
self.assertEquals(1, len(comments))
return comments[0]
def _ParseType(self, type_str):
"""Creates a comment to parse and returns the parsed type."""
comment = self._ParseComment('/** @type {%s} **/' % type_str)
return comment.GetDocFlags()[0].jstype
def assertProperReconstruction(self, type_str, matching_str=None):
"""Parses the type and asserts the its repr matches the type.
If matching_str is specified, it will assert that the repr matches this
string instead.
Args:
type_str: The type string to parse.
matching_str: A string the __repr__ of the parsed type should match.
Returns:
The parsed js_type.
"""
parsed_type = self._ParseType(type_str)
# Use listEqual assertion to more easily identify the difference
self.assertListEqual(list(matching_str or type_str),
list(repr(parsed_type)))
self.assertEquals(matching_str or type_str, repr(parsed_type))
# Newlines will be inserted by the file writer.
self.assertEquals(type_str.replace('\n', ''), parsed_type.ToString())
return parsed_type
def assertNullable(self, type_str, nullable=True):
parsed_type = self.assertProperReconstruction(type_str)
self.assertEquals(nullable, parsed_type.GetNullability(),
'"%s" should %sbe nullable' %
(type_str, '' if nullable else 'not '))
def assertNotNullable(self, type_str):
return self.assertNullable(type_str, nullable=False)
def testReconstruction(self):
self.assertProperReconstruction('*')
self.assertProperReconstruction('number')
self.assertProperReconstruction('(((number)))')
self.assertProperReconstruction('!number')
self.assertProperReconstruction('?!number')
self.assertProperReconstruction('number=')
self.assertProperReconstruction('number=!?', '?!number=')
self.assertProperReconstruction('number|?string')
self.assertProperReconstruction('(number|string)')
self.assertProperReconstruction('?(number|string)')
self.assertProperReconstruction('Object.<number,string>')
self.assertProperReconstruction('function(new:Object)')
self.assertProperReconstruction('function(new:Object):number')
self.assertProperReconstruction('function(new:Object,Element):number')
self.assertProperReconstruction('function(this:T,...)')
self.assertProperReconstruction('{a:?number}')
self.assertProperReconstruction('{a:?number,b:(number|string)}')
self.assertProperReconstruction('{c:{nested_element:*}|undefined}')
self.assertProperReconstruction('{handleEvent:function(?):?}')
self.assertProperReconstruction('function():?|null')
self.assertProperReconstruction('null|function():?|bar')
def testOptargs(self):
self.assertProperReconstruction('number=')
self.assertProperReconstruction('number|string=')
self.assertProperReconstruction('(number|string)=')
self.assertProperReconstruction('(number|string=)')
self.assertProperReconstruction('(number=|string)')
self.assertProperReconstruction('function(...):number=')
def testIndepth(self):
# Do a deeper check of the crazy identifier.
crazy = self.assertProperReconstruction(CRAZY_TYPE)
self.assertEquals('Array.', crazy.identifier)
self.assertEquals(1, len(crazy.sub_types))
func1 = crazy.sub_types[0]
func2 = func1.return_type
self.assertEquals('function', func1.identifier)
self.assertEquals('function', func2.identifier)
self.assertEquals(3, len(func1.sub_types))
self.assertEquals(1, len(func2.sub_types))
self.assertEquals('Object.', func2.sub_types[0].sub_types[0].identifier)
def testIterIdentifiers(self):
nested_identifiers = self._ParseType('(a|{b:(c|function(new:d):e)})')
for identifier in ('a', 'b', 'c', 'd', 'e'):
self.assertIn(identifier, nested_identifiers.IterIdentifiers())
def testIsEmpty(self):
self.assertTrue(self._ParseType('').IsEmpty())
self.assertFalse(self._ParseType('?').IsEmpty())
self.assertFalse(self._ParseType('!').IsEmpty())
self.assertFalse(self._ParseType('<?>').IsEmpty())
def testIsConstructor(self):
self.assertFalse(self._ParseType('').IsConstructor())
self.assertFalse(self._ParseType('Array.<number>').IsConstructor())
self.assertTrue(self._ParseType('function(new:T)').IsConstructor())
def testIsVarArgsType(self):
self.assertTrue(self._ParseType('...number').IsVarArgsType())
self.assertTrue(self._ParseType('...Object|Array').IsVarArgsType())
self.assertTrue(self._ParseType('...(Object|Array)').IsVarArgsType())
self.assertFalse(self._ParseType('Object|...Array').IsVarArgsType())
self.assertFalse(self._ParseType('(...Object|Array)').IsVarArgsType())
def testIsUnknownType(self):
self.assertTrue(self._ParseType('?').IsUnknownType())
self.assertTrue(self._ParseType('Foo.<?>').sub_types[0].IsUnknownType())
self.assertFalse(self._ParseType('?|!').IsUnknownType())
self.assertTrue(self._ParseType('?|!').sub_types[0].IsUnknownType())
self.assertFalse(self._ParseType('!').IsUnknownType())
long_type = 'function():?|{handleEvent:function(?=):?,sample:?}|?='
record = self._ParseType(long_type)
# First check that there's not just one type with 3 return types, but three
# top-level types.
self.assertEquals(3, len(record.sub_types))
# Now extract all unknown type instances and verify that they really are.
handle_event, sample = record.sub_types[1].sub_types
for i, sub_type in enumerate([
record.sub_types[0].return_type,
handle_event.return_type,
handle_event.sub_types[0],
sample,
record.sub_types[2]]):
self.assertTrue(sub_type.IsUnknownType(),
'Type %d should be the unknown type: %s\n%s' % (
i, sub_type.tokens, record.Dump()))
def testTypedefNames(self):
easy = self._ParseType('{a}')
self.assertTrue(easy.record_type)
easy = self.assertProperReconstruction('{a}', '{a:}').sub_types[0]
self.assertEquals('a', easy.key_type.identifier)
self.assertEquals('', easy.identifier)
easy = self.assertProperReconstruction('{a:b}').sub_types[0]
self.assertEquals('a', easy.key_type.identifier)
self.assertEquals('b', easy.identifier)
def assertTypeError(self, type_str):
"""Asserts that parsing the given type raises a linter error."""
self.assertRaises(TypeErrorException, self._ParseType, type_str)
def testParseBadTypes(self):
"""Tests that several errors in types don't break the parser."""
self.assertTypeError('<')
self.assertTypeError('>')
self.assertTypeError('Foo.<Bar')
self.assertTypeError('Foo.Bar>=')
self.assertTypeError('Foo.<Bar>>=')
self.assertTypeError('(')
self.assertTypeError(')')
self.assertTypeError('Foo.<Bar)>')
self._ParseType(':')
self._ParseType(':foo')
self.assertTypeError(':)foo')
self.assertTypeError('(a|{b:(c|function(new:d):e')
def testNullable(self):
self.assertNullable('null')
self.assertNullable('Object')
self.assertNullable('?string')
self.assertNullable('?number')
self.assertNotNullable('string')
self.assertNotNullable('number')
self.assertNotNullable('boolean')
self.assertNotNullable('function(Object)')
self.assertNotNullable('function(Object):Object')
self.assertNotNullable('function(?Object):?Object')
self.assertNotNullable('!Object')
self.assertNotNullable('boolean|string')
self.assertNotNullable('(boolean|string)')
self.assertNullable('(boolean|string|null)')
self.assertNullable('(?boolean)')
self.assertNullable('?(boolean)')
self.assertNullable('(boolean|Object)')
self.assertNotNullable('(boolean|(string|{a:}))')
def testSpaces(self):
"""Tests that spaces don't change the outcome."""
type_str = (' A < b | ( c | ? ! d e f ) > | '
'function ( x : . . . ) : { y : z = } ')
two_spaces = type_str.replace(' ', ' ')
no_spaces = type_str.replace(' ', '')
newlines = type_str.replace(' ', '\n * ')
self.assertProperReconstruction(no_spaces)
self.assertProperReconstruction(type_str, no_spaces)
self.assertProperReconstruction(two_spaces, no_spaces)
self.assertProperReconstruction(newlines, no_spaces)
if __name__ == '__main__':
googletest.main()

10
tools/closure_linter/closure_linter.egg-info/PKG-INFO

@@ -1,10 +0,0 @@
Metadata-Version: 1.0
Name: closure-linter
Version: 2.3.17
Summary: Closure Linter
Home-page: http://code.google.com/p/closure-linter
Author: The Closure Linter Authors
Author-email: opensource@google.com
License: Apache
Description: UNKNOWN
Platform: UNKNOWN

63
tools/closure_linter/closure_linter.egg-info/SOURCES.txt

@@ -1,63 +0,0 @@
README
setup.py
closure_linter/__init__.py
closure_linter/aliaspass.py
closure_linter/aliaspass_test.py
closure_linter/checker.py
closure_linter/checkerbase.py
closure_linter/closurizednamespacesinfo.py
closure_linter/closurizednamespacesinfo_test.py
closure_linter/ecmalintrules.py
closure_linter/ecmametadatapass.py
closure_linter/error_check.py
closure_linter/error_fixer.py
closure_linter/error_fixer_test.py
closure_linter/errorrecord.py
closure_linter/errorrules.py
closure_linter/errorrules_test.py
closure_linter/errors.py
closure_linter/fixjsstyle.py
closure_linter/fixjsstyle_test.py
closure_linter/full_test.py
closure_linter/gjslint.py
closure_linter/indentation.py
closure_linter/javascriptlintrules.py
closure_linter/javascriptstatetracker.py
closure_linter/javascriptstatetracker_test.py
closure_linter/javascripttokenizer.py
closure_linter/javascripttokens.py
closure_linter/not_strict_test.py
closure_linter/requireprovidesorter.py
closure_linter/requireprovidesorter_test.py
closure_linter/runner.py
closure_linter/runner_test.py
closure_linter/scopeutil.py
closure_linter/scopeutil_test.py
closure_linter/statetracker.py
closure_linter/statetracker_test.py
closure_linter/strict_test.py
closure_linter/testutil.py
closure_linter/tokenutil.py
closure_linter/tokenutil_test.py
closure_linter/typeannotation.py
closure_linter/typeannotation_test.py
closure_linter.egg-info/PKG-INFO
closure_linter.egg-info/SOURCES.txt
closure_linter.egg-info/dependency_links.txt
closure_linter.egg-info/entry_points.txt
closure_linter.egg-info/requires.txt
closure_linter.egg-info/top_level.txt
closure_linter/common/__init__.py
closure_linter/common/error.py
closure_linter/common/erroraccumulator.py
closure_linter/common/errorhandler.py
closure_linter/common/erroroutput.py
closure_linter/common/filetestcase.py
closure_linter/common/htmlutil.py
closure_linter/common/lintrunner.py
closure_linter/common/matcher.py
closure_linter/common/position.py
closure_linter/common/simplefileflags.py
closure_linter/common/tokenizer.py
closure_linter/common/tokens.py
closure_linter/common/tokens_test.py

1
tools/closure_linter/closure_linter.egg-info/dependency_links.txt

@@ -1 +0,0 @@

4
tools/closure_linter/closure_linter.egg-info/entry_points.txt

@@ -1,4 +0,0 @@
[console_scripts]
fixjsstyle = closure_linter.fixjsstyle:main
gjslint = closure_linter.gjslint:main
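(Editorial note, not part of the deleted file: entries of the form name = package.module:function are what made setuptools generate the gjslint and fixjsstyle console scripts, each dispatching to the listed main(); removing the package removes those executables as well.)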

1
tools/closure_linter/closure_linter.egg-info/requires.txt

@@ -1 +0,0 @@
python-gflags

1
tools/closure_linter/closure_linter.egg-info/top_level.txt

@@ -1 +0,0 @@
closure_linter

16
tools/closure_linter/closure_linter/__init__.py

@@ -1,16 +0,0 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package indicator for gjslint."""

248
tools/closure_linter/closure_linter/aliaspass.py

@@ -1,248 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pass that scans for goog.scope aliases and lint/usage errors."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
from closure_linter import ecmametadatapass
from closure_linter import errors
from closure_linter import javascripttokens
from closure_linter import scopeutil
from closure_linter import tokenutil
from closure_linter.common import error
# TODO(nnaze): Create a Pass interface and move this class, EcmaMetaDataPass,
# and related classes onto it.
def _GetAliasForIdentifier(identifier, alias_map):
"""Returns the aliased_symbol name for an identifier.
Example usage:
>>> alias_map = {'MyClass': 'goog.foo.MyClass'}
>>> _GetAliasForIdentifier('MyClass.prototype.action', alias_map)
'goog.foo.MyClass.prototype.action'
>>> _GetAliasForIdentifier('MyClass.prototype.action', {})
None
Args:
identifier: The identifier.
alias_map: A dictionary mapping a symbol to an alias.
Returns:
The aliased symbol name or None if not found.
"""
ns = identifier.split('.', 1)[0]
aliased_symbol = alias_map.get(ns)
if aliased_symbol:
return aliased_symbol + identifier[len(ns):]
def _SetTypeAlias(js_type, alias_map):
"""Updates the alias for identifiers in a type.
Args:
js_type: A typeannotation.TypeAnnotation instance.
alias_map: A dictionary mapping a symbol to an alias.
"""
aliased_symbol = _GetAliasForIdentifier(js_type.identifier, alias_map)
if aliased_symbol:
js_type.alias = aliased_symbol
for sub_type in js_type.IterTypes():
_SetTypeAlias(sub_type, alias_map)
class AliasPass(object):
"""Pass to identify goog.scope() usages.
Identifies goog.scope() usages and finds lint/usage errors. Notes any
aliases of symbols in Closurized namespaces (that is, reassignments
such as "var MyClass = goog.foo.MyClass;") and annotates identifiers
when they're using an alias (so they may be expanded to the full symbol
later -- that "MyClass.prototype.action" refers to
"goog.foo.MyClass.prototype.action" when expanded.).
"""
def __init__(self, closurized_namespaces=None, error_handler=None):
"""Creates a new pass.
Args:
closurized_namespaces: A set of Closurized namespaces (e.g. 'goog').
error_handler: An error handler to report lint errors to.
"""
self._error_handler = error_handler
# If we have namespaces, freeze the set.
if closurized_namespaces:
closurized_namespaces = frozenset(closurized_namespaces)
self._closurized_namespaces = closurized_namespaces
def Process(self, start_token):
"""Runs the pass on a token stream.
Args:
start_token: The first token in the stream.
"""
if start_token is None:
return
# TODO(nnaze): Add more goog.scope usage checks.
self._CheckGoogScopeCalls(start_token)
# If we have closurized namespaces, identify aliased identifiers.
if self._closurized_namespaces:
context = start_token.metadata.context
root_context = context.GetRoot()
self._ProcessRootContext(root_context)
def _CheckGoogScopeCalls(self, start_token):
"""Check goog.scope calls for lint/usage errors."""
def IsScopeToken(token):
return (token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER and
token.string == 'goog.scope')
# Find all the goog.scope tokens in the file
scope_tokens = [t for t in start_token if IsScopeToken(t)]
for token in scope_tokens:
scope_context = token.metadata.context
if not (scope_context.type == ecmametadatapass.EcmaContext.STATEMENT and
scope_context.parent.type == ecmametadatapass.EcmaContext.ROOT):
self._MaybeReportError(
error.Error(errors.INVALID_USE_OF_GOOG_SCOPE,
'goog.scope call not in global scope', token))
# There should be only one goog.scope reference. Register errors for
# every instance after the first.
for token in scope_tokens[1:]:
self._MaybeReportError(
error.Error(errors.EXTRA_GOOG_SCOPE_USAGE,
'More than one goog.scope call in file.', token))
def _MaybeReportError(self, err):
"""Report an error to the handler (if registered)."""
if self._error_handler:
self._error_handler.HandleError(err)
@classmethod
def _YieldAllContexts(cls, context):
"""Yields all contexts that are contained by the given context."""
yield context
for child_context in context.children:
for descendent_child in cls._YieldAllContexts(child_context):
yield descendent_child
@staticmethod
def _IsTokenInParentBlock(token, parent_block):
"""Determines whether the given token is contained by the given block.
Args:
token: A token
parent_block: An EcmaContext.
Returns:
Whether the token is in a context that is or is a child of the given
parent_block context.
"""
context = token.metadata.context
while context:
if context is parent_block:
return True
context = context.parent
return False
def _ProcessRootContext(self, root_context):
"""Processes all goog.scope blocks under the root context."""
assert root_context.type is ecmametadatapass.EcmaContext.ROOT
# Process aliases in statements in the root scope for goog.module-style
# aliases.
global_alias_map = {}
for context in root_context.children:
if context.type == ecmametadatapass.EcmaContext.STATEMENT:
for statement_child in context.children:
if statement_child.type == ecmametadatapass.EcmaContext.VAR:
match = scopeutil.MatchModuleAlias(statement_child)
if match:
# goog.require aliases cannot use further aliases, the symbol is
# the second part of match, directly.
symbol = match[1]
if scopeutil.IsInClosurizedNamespace(symbol,
self._closurized_namespaces):
global_alias_map[match[0]] = symbol
# Process each block to find aliases.
for context in root_context.children:
self._ProcessBlock(context, global_alias_map)
def _ProcessBlock(self, context, global_alias_map):
"""Scans a goog.scope block to find aliases and mark alias tokens."""
alias_map = global_alias_map.copy()
# Iterate over every token in the context. Each token points to one
# context, but multiple tokens may point to the same context. We only want
# to check each context once, so keep track of those we've seen.
seen_contexts = set()
token = context.start_token
while token and self._IsTokenInParentBlock(token, context):
token_context = token.metadata.context if token.metadata else None
# Check to see if this token is an alias.
if token_context and token_context not in seen_contexts:
seen_contexts.add(token_context)
# If this is an alias statement in the goog.scope block.
if (token_context.type == ecmametadatapass.EcmaContext.VAR and
scopeutil.IsGoogScopeBlock(token_context.parent.parent)):
match = scopeutil.MatchAlias(token_context)
# If this is an alias, remember it in the map.
if match:
alias, symbol = match
symbol = _GetAliasForIdentifier(symbol, alias_map) or symbol
if scopeutil.IsInClosurizedNamespace(symbol,
self._closurized_namespaces):
alias_map[alias] = symbol
# If this token is an identifier that matches an alias,
# mark the token as an alias to the original symbol.
if (token.type is javascripttokens.JavaScriptTokenType.SIMPLE_LVALUE or
token.type is javascripttokens.JavaScriptTokenType.IDENTIFIER):
identifier = tokenutil.GetIdentifierForToken(token)
if identifier:
aliased_symbol = _GetAliasForIdentifier(identifier, alias_map)
if aliased_symbol:
token.metadata.aliased_symbol = aliased_symbol
elif token.type == javascripttokens.JavaScriptTokenType.DOC_FLAG:
flag = token.attached_object
if flag and flag.HasType() and flag.jstype:
_SetTypeAlias(flag.jstype, alias_map)
token = token.next # Get next token
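The heart of the pass above is the per-block alias map: a goog.scope alias rewrites only the leading component of an identifier and leaves the rest intact. A self-contained sketch of that expansion (the helper name and sample data are hypothetical):
# Hedged sketch of the expansion performed by _GetAliasForIdentifier.
def expand(identifier, alias_map):
    head = identifier.split('.', 1)[0]
    full = alias_map.get(head)
    return full + identifier[len(head):] if full else identifier

aliases = {'Component': 'goog.ui.Component', 'Event': 'goog.events.Event'}
print(expand('Component.prototype.render', aliases))
# goog.ui.Component.prototype.render
print(expand('document.body', aliases))  # an unmatched head is left alone
# document.body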

191
tools/closure_linter/closure_linter/aliaspass_test.py

@@ -1,191 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the aliaspass module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('nnaze@google.com (Nathan Naze)')
import unittest as googletest
from closure_linter import aliaspass
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import testutil
from closure_linter.common import erroraccumulator
def _GetTokenByLineAndString(start_token, string, line_number):
for token in start_token:
if token.line_number == line_number and token.string == string:
return token
class AliasPassTest(googletest.TestCase):
def testInvalidGoogScopeCall(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCOPE_SCRIPT)
error_accumulator = erroraccumulator.ErrorAccumulator()
alias_pass = aliaspass.AliasPass(
error_handler=error_accumulator)
alias_pass.Process(start_token)
alias_errors = error_accumulator.GetErrors()
self.assertEquals(1, len(alias_errors))
alias_error = alias_errors[0]
self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, alias_error.code)
self.assertEquals('goog.scope', alias_error.token.string)
def testAliasedIdentifiers(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
alias_pass.Process(start_token)
alias_token = _GetTokenByLineAndString(start_token, 'Event', 4)
self.assertTrue(alias_token.metadata.is_alias_definition)
my_class_token = _GetTokenByLineAndString(start_token, 'myClass', 9)
self.assertIsNone(my_class_token.metadata.aliased_symbol)
component_token = _GetTokenByLineAndString(start_token, 'Component', 17)
self.assertEquals('goog.ui.Component',
component_token.metadata.aliased_symbol)
event_token = _GetTokenByLineAndString(start_token, 'Event.Something', 17)
self.assertEquals('goog.events.Event.Something',
event_token.metadata.aliased_symbol)
non_closurized_token = _GetTokenByLineAndString(
start_token, 'NonClosurizedClass', 18)
self.assertIsNone(non_closurized_token.metadata.aliased_symbol)
long_start_token = _GetTokenByLineAndString(start_token, 'Event', 24)
self.assertEquals('goog.events.Event.MultilineIdentifier.someMethod',
long_start_token.metadata.aliased_symbol)
def testAliasedDoctypes(self):
"""Tests that aliases are correctly expanded within type annotations."""
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_ALIAS_SCRIPT)
tracker = javascriptstatetracker.JavaScriptStateTracker()
tracker.DocFlagPass(start_token, error_handler=None)
alias_pass = aliaspass.AliasPass(set(['goog', 'myproject']))
alias_pass.Process(start_token)
flag_token = _GetTokenByLineAndString(start_token, '@type', 22)
self.assertEquals(
'goog.events.Event.<goog.ui.Component,Array<myproject.foo.MyClass>>',
repr(flag_token.attached_object.jstype))
def testModuleAlias(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass("""
goog.module('goog.test');
var Alias = goog.require('goog.Alias');
Alias.use();
""")
alias_pass = aliaspass.AliasPass(set(['goog']))
alias_pass.Process(start_token)
alias_token = _GetTokenByLineAndString(start_token, 'Alias', 3)
self.assertTrue(alias_token.metadata.is_alias_definition)
def testMultipleGoogScopeCalls(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(
_TEST_MULTIPLE_SCOPE_SCRIPT)
error_accumulator = erroraccumulator.ErrorAccumulator()
alias_pass = aliaspass.AliasPass(
set(['goog', 'myproject']),
error_handler=error_accumulator)
alias_pass.Process(start_token)
alias_errors = error_accumulator.GetErrors()
self.assertEquals(3, len(alias_errors))
error = alias_errors[0]
self.assertEquals(errors.INVALID_USE_OF_GOOG_SCOPE, error.code)
self.assertEquals(7, error.token.line_number)
error = alias_errors[1]
self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
self.assertEquals(7, error.token.line_number)
error = alias_errors[2]
self.assertEquals(errors.EXTRA_GOOG_SCOPE_USAGE, error.code)
self.assertEquals(11, error.token.line_number)
_TEST_ALIAS_SCRIPT = """
goog.scope(function() {
var events = goog.events; // scope alias
var Event = events.
Event; // nested multiline scope alias
// This should not be registered as an aliased identifier because
// it appears before the alias.
var myClass = new MyClass();
var Component = goog.ui.Component; // scope alias
var MyClass = myproject.foo.MyClass; // scope alias
// Scope alias of non-Closurized namespace.
var NonClosurizedClass = aaa.bbb.NonClosurizedClass;
var component = new Component(Event.Something);
var nonClosurized = NonClosurizedClass();
/**
* A created namespace with a really long identifier.
* @type {events.Event.<Component,Array<MyClass>}
*/
Event.
MultilineIdentifier.
someMethod = function() {};
});
"""
_TEST_SCOPE_SCRIPT = """
function foo () {
// This goog.scope call is invalid.
goog.scope(function() {
});
}
"""
_TEST_MULTIPLE_SCOPE_SCRIPT = """
goog.scope(function() {
// do nothing
});
function foo() {
var test = goog.scope; // We should not see goog.scope mentioned.
}
// This goog.scope is invalid. There can be only one.
goog.scope(function() {
});
"""
if __name__ == '__main__':
googletest.main()

108
tools/closure_linter/closure_linter/checker.py

@@ -1,108 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking JS files for common style guide violations."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import gflags as flags
from closure_linter import aliaspass
from closure_linter import checkerbase
from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptlintrules
flags.DEFINE_list('closurized_namespaces', '',
'Namespace prefixes, used for testing of '
'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
'Fully qualified namespaces that should not be reported '
'as extra by the linter.')
class JavaScriptStyleChecker(checkerbase.CheckerBase):
"""Checker that applies JavaScriptLintRules."""
def __init__(self, state_tracker, error_handler):
"""Initialize an JavaScriptStyleChecker object.
Args:
state_tracker: State tracker.
error_handler: Error handler to pass all errors to.
"""
self._namespaces_info = None
self._alias_pass = None
if flags.FLAGS.closurized_namespaces:
self._namespaces_info = (
closurizednamespacesinfo.ClosurizedNamespacesInfo(
flags.FLAGS.closurized_namespaces,
flags.FLAGS.ignored_extra_namespaces))
self._alias_pass = aliaspass.AliasPass(
flags.FLAGS.closurized_namespaces, error_handler)
checkerbase.CheckerBase.__init__(
self,
error_handler=error_handler,
lint_rules=javascriptlintrules.JavaScriptLintRules(
self._namespaces_info),
state_tracker=state_tracker)
def Check(self, start_token, limited_doc_checks=False, is_html=False,
stop_token=None):
"""Checks a token stream for lint warnings/errors.
Adds a separate pass for computing dependency information based on
goog.require and goog.provide statements prior to the main linting pass.
Args:
start_token: The first token in the token stream.
limited_doc_checks: Whether to perform limited checks.
is_html: Whether this token stream is HTML.
stop_token: If given, checks should stop at this token.
"""
self._lint_rules.Initialize(self, limited_doc_checks, is_html)
self._state_tracker.DocFlagPass(start_token, self._error_handler)
if self._alias_pass:
self._alias_pass.Process(start_token)
# To maximize the number of errors that get reported before a parse error
# is displayed, don't run the dependency pass if a parse error exists.
if self._namespaces_info:
self._namespaces_info.Reset()
self._ExecutePass(start_token, self._DependencyPass, stop_token)
self._ExecutePass(start_token, self._LintPass, stop_token)
# If we have a stop_token, we didn't end up reading the whole file and,
# thus, don't call Finalize to do end-of-file checks.
if not stop_token:
self._lint_rules.Finalize(self._state_tracker)
def _DependencyPass(self, token):
"""Processes an individual token for dependency information.
Used to encapsulate the logic needed to process an individual token so that
it can be passed to _ExecutePass.
Args:
token: The token to process.
"""
self._namespaces_info.ProcessToken(token, self._state_tracker)
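Check() above pins the pass order: doc-flag pass, alias pass, optional dependency pass, then the lint pass, with finalization skipped on a partial run. A compressed, hypothetical restatement of that sequencing (not the linter's API):
# Hedged sketch of the pass ordering established by Check().
def check(start_token, passes, finalize=None):
    for run_pass in passes:  # doc flags, aliases, dependencies, lint
        run_pass(start_token)
    if finalize:             # omitted when a stop_token cut the run short
        finalize()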

192
tools/closure_linter/closure_linter/checkerbase.py

@@ -1,192 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for writing checkers that operate on tokens."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
from closure_linter import errorrules
from closure_linter.common import error
class LintRulesBase(object):
"""Base class for all classes defining the lint rules for a language."""
def __init__(self):
self.__checker = None
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initializes to prepare to check a file.
Args:
checker: Class to report errors to.
limited_doc_checks: Whether doc checking is relaxed for this file.
is_html: Whether the file is an HTML file with extracted contents.
"""
self.__checker = checker
self._limited_doc_checks = limited_doc_checks
self._is_html = is_html
def _HandleError(self, code, message, token, position=None,
fix_data=None):
"""Call the HandleError function for the checker we are associated with."""
if errorrules.ShouldReportError(code):
self.__checker.HandleError(code, message, token, position, fix_data)
def _SetLimitedDocChecks(self, limited_doc_checks):
"""Sets whether doc checking is relaxed for this file.
Args:
limited_doc_checks: Whether doc checking is relaxed for this file.
"""
self._limited_doc_checks = limited_doc_checks
def CheckToken(self, token, parser_state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration.
parser_state: Object that indicates the parser state in the page.
Raises:
TypeError: If not overridden.
"""
raise TypeError('Abstract method CheckToken not implemented')
def Finalize(self, parser_state):
"""Perform all checks that need to occur after all lines are processed.
Args:
parser_state: State of the parser after parsing all tokens
Raises:
TypeError: If not overridden.
"""
raise TypeError('Abstract method Finalize not implemented')
class CheckerBase(object):
"""This class handles checking a LintRules object against a file."""
def __init__(self, error_handler, lint_rules, state_tracker):
"""Initialize a checker object.
Args:
error_handler: Object that handles errors.
lint_rules: LintRules object defining lint errors given a token
and state_tracker object.
state_tracker: Object that tracks the current state in the token stream.
"""
self._error_handler = error_handler
self._lint_rules = lint_rules
self._state_tracker = state_tracker
self._has_errors = False
def HandleError(self, code, message, token, position=None,
fix_data=None):
"""Prints out the given error message including a line number.
Args:
code: The error code.
message: The error to print.
token: The token where the error occurred, or None if it was a file-wide
issue.
position: The position of the error, defaults to None.
fix_data: Metadata used for fixing the error.
"""
self._has_errors = True
self._error_handler.HandleError(
error.Error(code, message, token, position, fix_data))
def HasErrors(self):
"""Returns true if the style checker has found any errors.
Returns:
True if the style checker has found any errors.
"""
return self._has_errors
def Check(self, start_token, limited_doc_checks=False, is_html=False,
stop_token=None):
"""Checks a token stream, reporting errors to the error reporter.
Args:
start_token: First token in token stream.
limited_doc_checks: Whether doc checking is relaxed for this file.
is_html: Whether the file being checked is an HTML file with extracted
contents.
stop_token: If given, check should stop at this token.
"""
self._lint_rules.Initialize(self, limited_doc_checks, is_html)
self._ExecutePass(start_token, self._LintPass, stop_token=stop_token)
self._lint_rules.Finalize(self._state_tracker)
def _LintPass(self, token):
"""Checks an individual token for lint warnings/errors.
Used to encapsulate the logic needed to check an individual token so that it
can be passed to _ExecutePass.
Args:
token: The token to check.
"""
self._lint_rules.CheckToken(token, self._state_tracker)
def _ExecutePass(self, token, pass_function, stop_token=None):
"""Calls the given function for every token in the given token stream.
As each token is passed to the given function, state is kept up to date and,
depending on the error_trace flag, errors are either caught and reported, or
allowed to bubble up so developers can see the full stack trace. If a parse
error is specified, the pass will proceed as normal until the token causing
the parse error is reached.
Args:
token: The first token in the token stream.
pass_function: The function to call for each token in the token stream.
stop_token: The last token to check (if given).
Raises:
Exception: If any error occurred while calling the given function.
"""
self._state_tracker.Reset()
while token:
# When we are looking at a token and decided to delete the whole line, we
# will delete all of them in the "HandleToken()" below. So the current
# token and subsequent ones may already be deleted here. The way we
# delete a token does not wipe out the previous and next pointers of the
# deleted token. So we need to check the token itself to make sure it is
# not deleted.
if not token.is_deleted:
# End the pass at the stop token
if stop_token and token is stop_token:
return
self._state_tracker.HandleToken(
token, self._state_tracker.GetLastNonSpaceToken())
pass_function(token)
self._state_tracker.HandleAfterToken(token)
token = token.next
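_ExecutePass above is the walk template every pass reuses: follow the token chain, skip deleted tokens, stop at stop_token, and hand each live token to the pass function. A stripped-down, runnable sketch with a hypothetical Token class (state tracking elided):
# Hedged sketch of the _ExecutePass token walk.
class Token(object):
    def __init__(self, string, next_token=None):
        self.string = string
        self.is_deleted = False
        self.next = next_token

def execute_pass(token, pass_function, stop_token=None):
    while token:
        if not token.is_deleted:
            if stop_token and token is stop_token:
                return
            pass_function(token)
        token = token.next

tail = Token('bar')
head = Token('foo', tail)
execute_pass(head, lambda t: print(t.string))  # prints foo, then bar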

578
tools/closure_linter/closure_linter/closurizednamespacesinfo.py

@@ -1,578 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for computing dependency information for closurized JavaScript files.
Closurized JavaScript files express dependencies using goog.require and
goog.provide statements. In order for the linter to detect when a statement is
missing or unnecessary, all identifiers in the JavaScript file must first be
processed to determine if they constitute the creation or usage of a dependency.
"""
import re
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType
DEFAULT_EXTRA_NAMESPACES = [
'goog.testing.asserts',
'goog.testing.jsunit',
]
class UsedNamespace(object):
"""A type for information about a used namespace."""
def __init__(self, namespace, identifier, token, alias_definition):
"""Initializes the instance.
Args:
namespace: the namespace of an identifier used in the file
identifier: the complete identifier
token: the token that uses the namespace
alias_definition: a boolean stating whether the namespace is only used
for an alias definition and should not be required.
"""
self.namespace = namespace
self.identifier = identifier
self.token = token
self.alias_definition = alias_definition
def GetLine(self):
return self.token.line_number
def __repr__(self):
return 'UsedNamespace(%s)' % ', '.join(
['%s=%s' % (k, repr(v)) for k, v in self.__dict__.iteritems()])
class ClosurizedNamespacesInfo(object):
"""Dependency information for closurized JavaScript files.
Processes token streams for dependency creation or usage and provides logic
for determining if a given require or provide statement is unnecessary or if
there are missing require or provide statements.
"""
def __init__(self, closurized_namespaces, ignored_extra_namespaces):
"""Initializes an instance the ClosurizedNamespacesInfo class.
Args:
closurized_namespaces: A list of namespace prefixes that should be
processed for dependency information. Non-matching namespaces are
ignored.
ignored_extra_namespaces: A list of namespaces that should not be reported
as extra regardless of whether they are actually used.
"""
self._closurized_namespaces = closurized_namespaces
self._ignored_extra_namespaces = (ignored_extra_namespaces +
DEFAULT_EXTRA_NAMESPACES)
self.Reset()
def Reset(self):
"""Resets the internal state to prepare for processing a new file."""
# A list of goog.provide tokens in the order they appeared in the file.
self._provide_tokens = []
# A list of goog.require tokens in the order they appeared in the file.
self._require_tokens = []
# Namespaces that are already goog.provided.
self._provided_namespaces = []
# Namespaces that are already goog.required.
self._required_namespaces = []
# Note that created_namespaces and used_namespaces contain both namespaces
# and identifiers because there are many existing cases where a method or
# constant is provided directly instead of its namespace. Ideally, these
# two lists would only have to contain namespaces.
# A list of tuples where the first element is the namespace of an identifier
# created in the file, the second is the identifier itself and the third is
# the line number where it's created.
self._created_namespaces = []
# A list of UsedNamespace instances.
self._used_namespaces = []
# A list of seemingly-unnecessary namespaces that are goog.required() and
# annotated with @suppress {extraRequire}.
self._suppressed_requires = []
# A list of goog.provide tokens which are duplicates.
self._duplicate_provide_tokens = []
# A list of goog.require tokens which are duplicates.
self._duplicate_require_tokens = []
# Whether this file is in a goog.scope. Someday, we may add support
# for checking scopified namespaces, but for now let's just fail
# in a more reasonable way.
self._scopified_file = False
# TODO(user): Handle the case where there are 2 different requires
# that can satisfy the same dependency, but only one is necessary.
def GetProvidedNamespaces(self):
"""Returns the namespaces which are already provided by this file.
Returns:
A set of strings where each string is a 'namespace' corresponding to an
existing goog.provide statement in the file being checked.
"""
return set(self._provided_namespaces)
def GetRequiredNamespaces(self):
"""Returns the namespaces which are already required by this file.
Returns:
A set of strings where each string is a 'namespace' corresponding to an
existing goog.require statement in the file being checked.
"""
return set(self._required_namespaces)
def IsExtraProvide(self, token):
"""Returns whether the given goog.provide token is unnecessary.
Args:
token: A goog.provide token.
Returns:
True if the given token corresponds to an unnecessary goog.provide
statement, otherwise False.
"""
namespace = tokenutil.GetStringAfterToken(token)
if self.GetClosurizedNamespace(namespace) is None:
return False
if token in self._duplicate_provide_tokens:
return True
# TODO(user): There's probably a faster way to compute this.
for created_namespace, created_identifier, _ in self._created_namespaces:
if namespace == created_namespace or namespace == created_identifier:
return False
return True
def IsExtraRequire(self, token):
"""Returns whether the given goog.require token is unnecessary.
Args:
token: A goog.require token.
Returns:
True if the given token corresponds to an unnecessary goog.require
statement, otherwise False.
"""
namespace = tokenutil.GetStringAfterToken(token)
if self.GetClosurizedNamespace(namespace) is None:
return False
if namespace in self._ignored_extra_namespaces:
return False
if token in self._duplicate_require_tokens:
return True
if namespace in self._suppressed_requires:
return False
# If the namespace contains a component that is initial caps, then that
# must be the last component of the namespace.
parts = namespace.split('.')
if len(parts) > 1 and parts[-2][0].isupper():
return True
# TODO(user): There's probably a faster way to compute this.
for ns in self._used_namespaces:
if (not ns.alias_definition and (
namespace == ns.namespace or namespace == ns.identifier)):
return False
return True
def GetMissingProvides(self):
"""Returns the dict of missing provided namespaces for the current file.
Returns:
A dict mapping each namespace (string) that should be provided by this
file but is not, to the line number (integer) where it is first defined.
"""
missing_provides = dict()
for namespace, identifier, line_number in self._created_namespaces:
if (not self._IsPrivateIdentifier(identifier) and
namespace not in self._provided_namespaces and
identifier not in self._provided_namespaces and
namespace not in self._required_namespaces and
namespace not in missing_provides):
missing_provides[namespace] = line_number
return missing_provides
def GetMissingRequires(self):
"""Returns the dict of missing required namespaces for the current file.
For each non-private identifier used in the file, find either a
goog.require, goog.provide or a created identifier that satisfies it.
goog.require statements can satisfy the identifier by requiring either the
namespace of the identifier or the identifier itself. goog.provide
statements can satisfy the identifier by providing the namespace of the
identifier. A created identifier can only satisfy the used identifier if
it matches it exactly (necessary since things can be defined on a
namespace in more than one file). Note that provided namespaces should be
a subset of created namespaces, but we check both because in some cases we
can't always detect the creation of the namespace.
Returns:
A tuple (missing_requires, illegal_alias_statements): missing_requires
maps each namespace (string) that should be required by this file but is
not to the line number (integer) where it is first used, and
illegal_alias_statements maps namespaces to alias tokens that use them
without a satisfying require.
"""
external_dependencies = set(self._required_namespaces)
# Assume goog namespace is always available.
external_dependencies.add('goog')
# goog.module is treated as a builtin, too (for goog.module.get).
external_dependencies.add('goog.module')
created_identifiers = set()
for unused_namespace, identifier, unused_line_number in (
self._created_namespaces):
created_identifiers.add(identifier)
missing_requires = dict()
illegal_alias_statements = dict()
def ShouldRequireNamespace(namespace, identifier):
"""Checks if a namespace would normally be required."""
return (
not self._IsPrivateIdentifier(identifier) and
namespace not in external_dependencies and
namespace not in self._provided_namespaces and
identifier not in external_dependencies and
identifier not in created_identifiers and
namespace not in missing_requires)
# First check all the used identifiers where we know that their namespace
# needs to be provided (unless they are optional).
for ns in self._used_namespaces:
namespace = ns.namespace
identifier = ns.identifier
if (not ns.alias_definition and
ShouldRequireNamespace(namespace, identifier)):
missing_requires[namespace] = ns.GetLine()
# Now that all required namespaces are known, we can check if the alias
# definitions (that are likely being used for typeannotations that don't
# need explicit goog.require statements) are already covered. If not
# the user shouldn't use the alias.
for ns in self._used_namespaces:
if (not ns.alias_definition or
not ShouldRequireNamespace(ns.namespace, ns.identifier)):
continue
if self._FindNamespace(ns.identifier, self._provided_namespaces,
created_identifiers, external_dependencies,
missing_requires):
continue
namespace = ns.identifier.rsplit('.', 1)[0]
illegal_alias_statements[namespace] = ns.token
return missing_requires, illegal_alias_statements
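# Editorial illustration (not part of the original source): a file that
# calls goog.dom.classes.add() but only goog.requires 'goog.events' would
# get 'goog.dom.classes' in missing_requires, keyed to the line of first
# use, since neither the namespace nor the full identifier is required,
# provided, or created locally.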
def _FindNamespace(self, identifier, *namespaces_list):
"""Finds the namespace of an identifier given a list of other namespaces.
Args:
identifier: An identifier whose parent needs to be defined.
e.g. for goog.bar.foo we search something that provides
goog.bar.
*namespaces_list: var args of iterables of namespace identifiers
Returns:
The namespace that the given identifier is part of or None.
"""
identifier = identifier.rsplit('.', 1)[0]
identifier_prefix = identifier + '.'
for namespaces in namespaces_list:
for namespace in namespaces:
if namespace == identifier or namespace.startswith(identifier_prefix):
return namespace
return None
def _IsPrivateIdentifier(self, identifier):
"""Returns whether the given identifier is private."""
pieces = identifier.split('.')
for piece in pieces:
if piece.endswith('_'):
return True
return False
def IsFirstProvide(self, token):
"""Returns whether token is the first provide token."""
return self._provide_tokens and token == self._provide_tokens[0]
def IsFirstRequire(self, token):
"""Returns whether token is the first require token."""
return self._require_tokens and token == self._require_tokens[0]
def IsLastProvide(self, token):
"""Returns whether token is the last provide token."""
return self._provide_tokens and token == self._provide_tokens[-1]
def IsLastRequire(self, token):
"""Returns whether token is the last require token."""
return self._require_tokens and token == self._require_tokens[-1]
def ProcessToken(self, token, state_tracker):
"""Processes the given token for dependency information.
Args:
token: The token to process.
state_tracker: The JavaScript state tracker.
"""
# Note that this method is in the critical path for the linter and has been
# optimized for performance in the following ways:
# - Tokens are checked by type first to minimize the number of function
# calls necessary to determine if action needs to be taken for the token.
# - The most common token types are checked for first.
# - The number of function calls has been minimized (thus the length of this
# function).
if token.type == TokenType.IDENTIFIER:
# TODO(user): Consider saving the whole identifier in metadata.
whole_identifier_string = tokenutil.GetIdentifierForToken(token)
if whole_identifier_string is None:
# We only want to process the identifier one time. If the whole string
# identifier is None, that means this token was part of a multi-token
# identifier, but it was not the first token of the identifier.
return
# In the odd case that a goog.require is encountered inside a function,
# just ignore it (e.g. dynamic loading in test runners).
if token.string == 'goog.require' and not state_tracker.InFunction():
self._require_tokens.append(token)
namespace = tokenutil.GetStringAfterToken(token)
if namespace in self._required_namespaces:
self._duplicate_require_tokens.append(token)
else:
self._required_namespaces.append(namespace)
# If there is a suppression for the require, add a usage for it so it
# gets treated as a regular goog.require (i.e. still gets sorted).
if self._HasSuppression(state_tracker, 'extraRequire'):
self._suppressed_requires.append(namespace)
self._AddUsedNamespace(state_tracker, namespace, token)
elif token.string == 'goog.provide':
self._provide_tokens.append(token)
namespace = tokenutil.GetStringAfterToken(token)
if namespace in self._provided_namespaces:
self._duplicate_provide_tokens.append(token)
else:
self._provided_namespaces.append(namespace)
# If there is a suppression for the provide, add a creation for it so it
# gets treated as a regular goog.provide (i.e. still gets sorted).
if self._HasSuppression(state_tracker, 'extraProvide'):
self._AddCreatedNamespace(state_tracker, namespace, token.line_number)
elif token.string == 'goog.scope':
self._scopified_file = True
elif token.string == 'goog.setTestOnly':
# Since the message is optional, we don't want to scan to later lines.
for t in tokenutil.GetAllTokensInSameLine(token):
if t.type == TokenType.STRING_TEXT:
message = t.string
if re.match(r'^\w+(\.\w+)+$', message):
# This looks like a namespace. If it's a Closurized namespace,
# consider it created.
base_namespace = message.split('.', 1)[0]
if base_namespace in self._closurized_namespaces:
self._AddCreatedNamespace(state_tracker, message,
token.line_number)
break
else:
jsdoc = state_tracker.GetDocComment()
if token.metadata and token.metadata.aliased_symbol:
whole_identifier_string = token.metadata.aliased_symbol
elif (token.string == 'goog.module.get' and
not self._HasSuppression(state_tracker, 'extraRequire')):
# Cannot use _AddUsedNamespace as this is not an identifier, but
# already the entire namespace that's required.
namespace = tokenutil.GetStringAfterToken(token)
namespace = UsedNamespace(namespace, namespace, token,
alias_definition=False)
self._used_namespaces.append(namespace)
if jsdoc and jsdoc.HasFlag('typedef'):
self._AddCreatedNamespace(state_tracker, whole_identifier_string,
token.line_number,
namespace=self.GetClosurizedNamespace(
whole_identifier_string))
else:
is_alias_definition = (token.metadata and
token.metadata.is_alias_definition)
self._AddUsedNamespace(state_tracker, whole_identifier_string,
token, is_alias_definition)
elif token.type == TokenType.SIMPLE_LVALUE:
identifier = token.values['identifier']
start_token = tokenutil.GetIdentifierStart(token)
if start_token and start_token != token:
# Multi-line identifier being assigned. Get the whole identifier.
identifier = tokenutil.GetIdentifierForToken(start_token)
else:
start_token = token
# If an alias is defined on the start_token, use it instead.
if (start_token and
start_token.metadata and
start_token.metadata.aliased_symbol and
not start_token.metadata.is_alias_definition):
identifier = start_token.metadata.aliased_symbol
if identifier:
namespace = self.GetClosurizedNamespace(identifier)
if state_tracker.InFunction():
self._AddUsedNamespace(state_tracker, identifier, token)
elif namespace and namespace != 'goog':
self._AddCreatedNamespace(state_tracker, identifier,
token.line_number, namespace=namespace)
elif token.type == TokenType.DOC_FLAG:
flag = token.attached_object
flag_type = flag.flag_type
if flag and flag.HasType() and flag.jstype:
is_interface = state_tracker.GetDocComment().HasFlag('interface')
if flag_type == 'implements' or (flag_type == 'extends'
and is_interface):
identifier = flag.jstype.alias or flag.jstype.identifier
self._AddUsedNamespace(state_tracker, identifier, token)
# Since we process doctypes only for implements and extends, the
# type is a simple one and we don't need any iteration for subtypes.
def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
namespace=None):
"""Adds the namespace of an identifier to the list of created namespaces.
If the identifier is annotated with a 'missingProvide' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: The identifier to add.
line_number: Line number where namespace is created.
namespace: The namespace of the identifier or None if the identifier is
also the namespace.
"""
if not namespace:
namespace = identifier
if self._HasSuppression(state_tracker, 'missingProvide'):
return
self._created_namespaces.append([namespace, identifier, line_number])
def _AddUsedNamespace(self, state_tracker, identifier, token,
is_alias_definition=False):
"""Adds the namespace of an identifier to the list of used namespaces.
If the identifier is annotated with a 'missingRequire' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: An identifier which has been used.
token: The token in which the namespace is used.
is_alias_definition: If the used namespace is part of an alias_definition.
Aliased symbols need their parent namespace to be available; if it is
not yet required through another symbol, an error will be thrown.
"""
if self._HasSuppression(state_tracker, 'missingRequire'):
return
namespace = self.GetClosurizedNamespace(identifier)
# b/5362203 If it's a variable in scope then it's not a required namespace.
if namespace and not state_tracker.IsVariableInScope(namespace):
namespace = UsedNamespace(namespace, identifier, token,
is_alias_definition)
self._used_namespaces.append(namespace)
def _HasSuppression(self, state_tracker, suppression):
jsdoc = state_tracker.GetDocComment()
return jsdoc and suppression in jsdoc.suppressions
def GetClosurizedNamespace(self, identifier):
"""Given an identifier, returns the namespace that identifier is from.
Args:
identifier: The identifier to extract a namespace from.
Returns:
The namespace the given identifier resides in, or None if one could not
be found.
"""
if identifier.startswith('goog.global'):
# Ignore goog.global, since it is, by definition, global.
return None
parts = identifier.split('.')
for namespace in self._closurized_namespaces:
if not identifier.startswith(namespace + '.'):
continue
# The namespace for a class is the shortest prefix ending in a class
# name, which starts with a capital letter but is not a capitalized word.
#
# We ultimately do not want to allow requiring or providing of inner
# classes/enums. Instead, a file should provide only the top-level class
# and users should require only that.
namespace = []
for part in parts:
if part == 'prototype' or part.isupper():
return '.'.join(namespace)
namespace.append(part)
if part[0].isupper():
return '.'.join(namespace)
# At this point, we know there's no class or enum, so the namespace is
# just the identifier with the last part removed. An apply, inherits, or
# call suffix is stripped first, since it is not part of the namespace.
if parts[-1] in ('apply', 'inherits', 'call'):
parts.pop()
parts.pop()
# If the last part ends with an underscore, it is a private variable,
# method, or enum. The namespace is whatever is before it.
if parts and parts[-1].endswith('_'):
parts.pop()
return '.'.join(parts)
return None
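# A minimal usage sketch, not part of the linter itself: how the namespace
# extraction above behaves for a few identifiers, assuming 'package' is the
# only closurized namespace. Expected values mirror the unit tests below.
if __name__ == '__main__':
  info = ClosurizedNamespacesInfo(closurized_namespaces=['package'],
                                  ignored_extra_namespaces=[])
  # Stops at the capitalized class name.
  assert info.GetClosurizedNamespace(
      'package.ClassName.prototype.methodName') == 'package.ClassName'
  # No class in the path, so the last part is stripped.
  assert info.GetClosurizedNamespace(
      'package.subpackage.methodName') == 'package.subpackage'
  # A trailing-underscore part is private and is dropped.
  assert info.GetClosurizedNamespace(
      'package.className.privateProperty_') == 'package.className'
  # goog.global is global by definition.
  assert info.GetClosurizedNamespace('goog.global.anything') is None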

873
tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py

@@ -1,873 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for ClosurizedNamespacesInfo."""
import unittest as googletest
from closure_linter import aliaspass
from closure_linter import closurizednamespacesinfo
from closure_linter import ecmametadatapass
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType
def _ToLineDict(illegal_alias_stmts):
"""Replaces tokens with the respective line number."""
return {k: v.line_number for k, v in illegal_alias_stmts.iteritems()}
class ClosurizedNamespacesInfoTest(googletest.TestCase):
"""Tests for ClosurizedNamespacesInfo."""
_test_cases = {
'goog.global.anything': None,
'package.CONSTANT': 'package',
'package.methodName': 'package',
'package.subpackage.methodName': 'package.subpackage',
'package.subpackage.methodName.apply': 'package.subpackage',
'package.ClassName.something': 'package.ClassName',
'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
'package.ClassName.CONSTANT': 'package.ClassName',
'package.namespace.CONSTANT.methodName': 'package.namespace',
'package.ClassName.inherits': 'package.ClassName',
'package.ClassName.apply': 'package.ClassName',
'package.ClassName.methodName.apply': 'package.ClassName',
'package.ClassName.methodName.call': 'package.ClassName',
'package.ClassName.prototype.methodName': 'package.ClassName',
'package.ClassName.privateMethod_': 'package.ClassName',
'package.className.privateProperty_': 'package.className',
'package.className.privateProperty_.methodName': 'package.className',
'package.ClassName.PrivateEnum_': 'package.ClassName',
'package.ClassName.prototype.methodName.apply': 'package.ClassName',
'package.ClassName.property.subProperty': 'package.ClassName',
'package.className.prototype.something.somethingElse': 'package.className'
}
def testGetClosurizedNamespace(self):
"""Tests that the correct namespace is returned for various identifiers."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'], ignored_extra_namespaces=[])
for identifier, expected_namespace in self._test_cases.items():
actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
self.assertEqual(
expected_namespace,
actual_namespace,
'expected namespace "' + str(expected_namespace) +
'" for identifier "' + str(identifier) + '" but was "' +
str(actual_namespace) + '"')
def testIgnoredExtraNamespaces(self):
"""Tests that ignored_extra_namespaces are ignored."""
token = self._GetRequireTokens('package.Something')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'],
ignored_extra_namespaces=['package.Something'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should be valid since it is in ignored namespaces.')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be invalid since it is not in ignored namespaces.')
def testIsExtraProvide_created(self):
"""Tests that provides for created namespaces are not extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_createdIdentifier(self):
"""Tests that provides for created identifiers are not extra."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_notCreated(self):
"""Tests that provides for non-created namespaces are extra."""
input_lines = ['goog.provide(\'package.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is not created.')
def testIsExtraProvide_notCreatedMultipartClosurizedNamespace(self):
"""Tests that provides for non-created namespaces are extra."""
input_lines = ['goog.provide(\'multi.part.namespace.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['multi.part'])
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is not created.')
def testIsExtraProvide_duplicate(self):
"""Tests that providing a namespace twice makes the second one extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
# Advance to the second goog.provide token.
token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is already provided.')
def testIsExtraProvide_notClosurized(self):
"""Tests that provides of non-closurized namespaces are not extra."""
input_lines = ['goog.provide(\'notclosurized.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_used(self):
"""Tests that requires for used namespaces are not extra."""
input_lines = [
'goog.require(\'package.Foo\');',
'var x = package.Foo.methodName();'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is used.')
def testIsExtraRequire_usedIdentifier(self):
"""Tests that requires for used methods on classes are extra."""
input_lines = [
'goog.require(\'package.Foo.methodName\');',
'var x = package.Foo.methodName();'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should require the package, not the method specifically.')
def testIsExtraRequire_notUsed(self):
"""Tests that requires for unused namespaces are extra."""
input_lines = ['goog.require(\'package.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be extra since it is not used.')
def testIsExtraRequire_notUsedMultiPartClosurizedNamespace(self):
"""Tests unused require with multi-part closurized namespaces."""
input_lines = ['goog.require(\'multi.part.namespace.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['multi.part'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be extra since it is not used.')
def testIsExtraRequire_notClosurized(self):
"""Tests that requires of non-closurized namespaces are not extra."""
input_lines = ['goog.require(\'notclosurized.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_objectOnClass(self):
"""Tests that requiring an object on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'The whole class, not the object, should be required.')
def testIsExtraRequire_constantOnClass(self):
"""Tests that requiring a constant on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.CONSTANT\');',
'var x = package.Foo.CONSTANT',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'The class, not the constant, should be required.')
def testIsExtraRequire_constantNotOnClass(self):
"""Tests that requiring a constant not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.CONSTANT\');',
'var x = package.subpackage.CONSTANT',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Constants can be required except on classes.')
def testIsExtraRequire_methodNotOnClass(self):
"""Tests that requiring a method not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.method\');',
'var x = package.subpackage.method()',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Methods can be required except on classes.')
def testIsExtraRequire_defaults(self):
"""Tests that there are no warnings about extra requires for test utils"""
input_lines = ['goog.require(\'goog.testing.jsunit\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['goog'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is for testing.')
def testGetMissingProvides_provided(self):
"""Tests that provided functions don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
namespaces_info = self._GetNamespacesInfoForScript(
input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedIdentifier(self):
"""Tests that provided identifiers don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedParentIdentifier(self):
"""Tests that provided identifiers on a class don't cause a missing provide
on objects attached to that class."""
input_lines = [
'goog.provide(\'package.foo.ClassName\');',
'package.foo.ClassName.methodName = function() {};',
'package.foo.ClassName.ObjectName = 1;',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_unprovided(self):
"""Tests that unprovided functions cause a missing provide."""
input_lines = ['package.Foo = function() {};']
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_provides = namespaces_info.GetMissingProvides()
self.assertEquals(1, len(missing_provides))
missing_provide = missing_provides.popitem()
self.assertEquals('package.Foo', missing_provide[0])
self.assertEquals(1, missing_provide[1])
def testGetMissingProvides_privatefunction(self):
"""Tests that unprovided private functions don't cause a missing provide."""
input_lines = ['package.Foo_ = function() {};']
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_required(self):
"""Tests that required namespaces don't cause a missing provide."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName = function() {};'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_required(self):
"""Tests that required namespaces don't cause a missing require."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_requiredIdentifier(self):
"""Tests that required namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_requiredNamespace(self):
"""Tests that required namespaces satisfy the namespace."""
input_lines = [
'goog.require(\'package.soy.fooTemplate\');',
'render(package.soy.fooTemplate);'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_requiredParentClass(self):
"""Tests that requiring a parent class of an object is sufficient to prevent
a missing require on that object."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();',
'package.Foo.methodName(package.Foo.ObjectName);'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_unrequired(self):
"""Tests that unrequired namespaces cause a missing require."""
input_lines = ['package.Foo();']
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(1, len(missing_requires))
missing_req = missing_requires.popitem()
self.assertEquals('package.Foo', missing_req[0])
self.assertEquals(1, missing_req[1])
def testGetMissingRequires_provided(self):
"""Tests that provided namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_created(self):
"""Tests that created namespaces do not satisfy usage of an identifier."""
input_lines = [
'package.Foo = function();',
'package.Foo.methodName();',
'package.Foo.anotherMethodName1();',
'package.Foo.anotherMethodName2();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(1, len(missing_requires))
missing_require = missing_requires.popitem()
self.assertEquals('package.Foo', missing_require[0])
# Make sure line number of first occurrence is reported
self.assertEquals(2, missing_require[1])
def testGetMissingRequires_createdIdentifier(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.methodName = function();',
'package.Foo.methodName();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(0, len(missing_requires))
def testGetMissingRequires_implements(self):
"""Tests that a parametrized type requires the correct identifier."""
input_lines = [
'/** @constructor @implements {package.Bar<T>} */',
'package.Foo = function();',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertItemsEqual({'package.Bar': 1}, missing_requires)
def testGetMissingRequires_objectOnClass(self):
"""Tests that we should require a class, not the object on the class."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(1, len(missing_requires),
'The whole class, not the object, should be required.')
def testGetMissingRequires_variableWithSameName(self):
"""Tests that we should not goog.require variables and parameters.
b/5362203 Variables in scope are not missing namespaces.
"""
input_lines = [
'goog.provide(\'Foo\');',
'Foo.A = function();',
'Foo.A.prototype.method = function(ab) {',
' if (ab) {',
' var docs;',
' var lvalue = new Obj();',
' // Variable in scope hence not goog.require here.',
' docs.foo.abc = 1;',
' lvalue.next();',
' }',
' // Since js is function scope this should also not goog.require.',
' docs.foo.func();',
' // It\'s not a variable in scope hence goog.require.',
' dummy.xyz.reset();',
' return this.method2();',
'};',
'Foo.A.prototype.method1 = function(docs, abcd, xyz) {',
' // Parameter hence not goog.require.',
' docs.nodes.length = 2;',
' lvalue.abc.reset();',
'};'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo',
'docs',
'lvalue',
'dummy'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals(2, len(missing_requires))
self.assertItemsEqual(
{'dummy.xyz': 14,
'lvalue.abc': 20}, missing_requires)
def testIsFirstProvide(self):
"""Tests operation of the isFirstProvide method."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsFirstProvide(token))
def testGetWholeIdentifierString(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.',
' veryLong.',
' identifier;'
]
token = testutil.TokenizeSource(input_lines)
self.assertEquals('package.Foo.veryLong.identifier',
tokenutil.GetIdentifierForToken(token))
self.assertEquals(None,
tokenutil.GetIdentifierForToken(token.next))
def testScopified(self):
"""Tests that a goog.scope call is noticed."""
input_lines = [
'goog.scope(function() {',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
self.assertTrue(namespaces_info._scopified_file)
def testScope_unusedAlias(self):
"""Tests that an unused alias symbol is illegal."""
input_lines = [
'goog.scope(function() {',
'var Event = goog.events.Event;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
def testScope_usedMultilevelAlias(self):
"""Tests that an used alias symbol in a deep namespace is ok."""
input_lines = [
'goog.require(\'goog.Events\');',
'goog.scope(function() {',
'var Event = goog.Events.DeepNamespace.Event;',
'Event();',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
def testScope_usedAlias(self):
"""Tests that aliased symbols result in correct requires."""
input_lines = [
'goog.scope(function() {',
'var Event = goog.events.Event;',
'var dom = goog.dom;',
'Event(dom.classes.get);',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, illegal_alias_stmts)
self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4},
missing_requires)
def testModule_alias(self):
"""Tests that goog.module style aliases are supported."""
input_lines = [
'goog.module(\'test.module\');',
'var Unused = goog.require(\'goog.Unused\');',
'var AliasedClass = goog.require(\'goog.AliasedClass\');',
'var x = new AliasedClass();',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
namespaceToken = self._GetRequireTokens('goog.AliasedClass')
self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
'AliasedClass should be marked as used')
unusedToken = self._GetRequireTokens('goog.Unused')
self.assertTrue(namespaces_info.IsExtraRequire(unusedToken),
'Unused should be marked as not used')
def testModule_aliasInScope(self):
"""Tests that goog.module style aliases are supported."""
input_lines = [
'goog.module(\'test.module\');',
'var AliasedClass = goog.require(\'goog.AliasedClass\');',
'goog.scope(function() {',
'var x = new AliasedClass();',
'});',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
namespaceToken = self._GetRequireTokens('goog.AliasedClass')
self.assertFalse(namespaces_info.IsExtraRequire(namespaceToken),
'AliasedClass should be marked as used')
def testModule_getAlwaysProvided(self):
"""Tests that goog.module.get is recognized as a built-in."""
input_lines = [
'goog.provide(\'test.MyClass\');',
'goog.require(\'goog.someModule\');',
'goog.scope(function() {',
'var someModule = goog.module.get(\'goog.someModule\');',
'test.MyClass = function() {};',
'});',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
self.assertEquals({}, namespaces_info.GetMissingRequires()[0])
def testModule_requireForGet(self):
"""Tests that goog.module.get needs a goog.require call."""
input_lines = [
'goog.provide(\'test.MyClass\');',
'function foo() {',
' var someModule = goog.module.get(\'goog.someModule\');',
' someModule.doSth();',
'}',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
self.assertEquals({'goog.someModule': 3},
namespaces_info.GetMissingRequires()[0])
def testScope_usedTypeAlias(self):
"""Tests aliased symbols in type annotations."""
input_lines = [
'goog.scope(function() {',
'var Event = goog.events.Event;',
'/** @type {Event} */;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
def testScope_partialAlias_typeOnly(self):
"""Tests a partial alias only used in type annotations.
In this example, some goog.events namespace would need to be required
so that evaluating goog.events.bar doesn't throw an error.
"""
input_lines = [
'goog.scope(function() {',
'var bar = goog.events.bar;',
'/** @type {bar.Foo} */;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
def testScope_partialAlias(self):
"""Tests a partial alias in conjunction with a type annotation.
In this example, the partial alias is already defined by another type,
therefore the doc-only type doesn't need to be required.
"""
input_lines = [
'goog.scope(function() {',
'var bar = goog.events.bar;',
'/** @type {bar.Event} */;',
'bar.EventType();',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({'goog.events.bar.EventType': 4}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
def testScope_partialAliasRequires(self):
"""Tests partial aliases with correct requires."""
input_lines = [
'goog.require(\'goog.events.bar.EventType\');',
'goog.scope(function() {',
'var bar = goog.events.bar;',
'/** @type {bar.Event} */;',
'bar.EventType();',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
def testScope_partialAliasRequiresBoth(self):
"""Tests partial aliases with correct requires."""
input_lines = [
'goog.require(\'goog.events.bar.Event\');',
'goog.require(\'goog.events.bar.EventType\');',
'goog.scope(function() {',
'var bar = goog.events.bar;',
'/** @type {bar.Event} */;',
'bar.EventType();',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
event_token = self._GetRequireTokens('goog.events.bar.Event')
self.assertTrue(namespaces_info.IsExtraRequire(event_token))
def testScope_partialAliasNoSubtypeRequires(self):
"""Tests that partial aliases don't yield subtype requires (regression)."""
input_lines = [
'goog.provide(\'goog.events.Foo\');',
'goog.scope(function() {',
'goog.events.Foo = {};',
'var Foo = goog.events.Foo;',
'Foo.CssName_ = {};',
'var CssName_ = Foo.CssName_;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, _ = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
def testScope_aliasNamespace(self):
"""Tests that an unused alias namespace is not required when available.
In the example goog.events.Bar is not required, because the namespace
goog.events is already defined because goog.events.Foo is required.
"""
input_lines = [
'goog.require(\'goog.events.Foo\');',
'goog.scope(function() {',
'var Bar = goog.events.Bar;',
'/** @type {Bar} */;',
'goog.events.Foo;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({}, illegal_alias_stmts)
def testScope_aliasNamespaceIllegal(self):
"""Tests that an unused alias namespace is not required when available."""
input_lines = [
'goog.scope(function() {',
'var Bar = goog.events.Bar;',
'/** @type {Bar} */;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
self.assertEquals({'goog.events': 2}, _ToLineDict(illegal_alias_stmts))
def testScope_provides(self):
"""Tests that aliased symbols result in correct provides."""
input_lines = [
'goog.scope(function() {',
'goog.bar = {};',
'var bar = goog.bar;',
'bar.Foo = {};',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_provides = namespaces_info.GetMissingProvides()
self.assertEquals({'goog.bar.Foo': 4}, missing_provides)
_, illegal_alias_stmts = namespaces_info.GetMissingRequires()
self.assertEquals({}, illegal_alias_stmts)
def testSetTestOnlyNamespaces(self):
"""Tests that a namespace in setTestOnly makes it a valid provide."""
namespaces_info = self._GetNamespacesInfoForScript([
'goog.setTestOnly(\'goog.foo.barTest\');'
], ['goog'])
token = self._GetProvideTokens('goog.foo.barTest')
self.assertFalse(namespaces_info.IsExtraProvide(token))
token = self._GetProvideTokens('goog.foo.bazTest')
self.assertTrue(namespaces_info.IsExtraProvide(token))
def testSetTestOnlyComment(self):
"""Ensure a comment in setTestOnly does not cause a created namespace."""
namespaces_info = self._GetNamespacesInfoForScript([
'goog.setTestOnly(\'this is a comment\');'
], ['goog'])
self.assertEquals(
[], namespaces_info._created_namespaces,
'A comment in setTestOnly should not modify created namespaces.')
def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None):
_, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
script, closurized_namespaces)
return namespaces_info
def _GetStartTokenAndNamespacesInfoForScript(
self, script, closurized_namespaces):
token = testutil.TokenizeSource(script)
return token, self._GetInitializedNamespacesInfo(
token, closurized_namespaces, [])
def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
ignored_extra_namespaces):
"""Returns a namespaces info initialized with the given token stream."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=closurized_namespaces,
ignored_extra_namespaces=ignored_extra_namespaces)
state_tracker = javascriptstatetracker.JavaScriptStateTracker()
ecma_pass = ecmametadatapass.EcmaMetaDataPass()
ecma_pass.Process(token)
state_tracker.DocFlagPass(token, error_handler=None)
alias_pass = aliaspass.AliasPass(closurized_namespaces)
alias_pass.Process(token)
while token:
state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
namespaces_info.ProcessToken(token, state_tracker)
state_tracker.HandleAfterToken(token)
token = token.next
return namespaces_info
def _GetProvideTokens(self, namespace):
"""Returns a list of tokens for a goog.provide of the given namespace."""
line_text = 'goog.provide(\'' + namespace + '\');\n'
return testutil.TokenizeSource([line_text])
def _GetRequireTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return testutil.TokenizeSource([line_text])
if __name__ == '__main__':
googletest.main()

16
tools/closure_linter/closure_linter/common/__init__.py

@@ -1,16 +0,0 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package indicator for gjslint.common."""

65
tools/closure_linter/closure_linter/common/error.py

@@ -1,65 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error object commonly used in linters."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class Error(object):
"""Object representing a style error."""
def __init__(self, code, message, token=None, position=None, fix_data=None):
"""Initialize the error object.
Args:
code: The numeric error code.
message: The error message string.
token: The tokens.Token where the error occurred.
position: The position of the error within the token.
fix_data: Data to be used in autofixing. Codes with fix_data are:
GOOG_REQUIRES_NOT_ALPHABETIZED - List of string value tokens that are
class names in goog.requires calls.
"""
self.code = code
self.message = message
self.token = token
self.position = position
if token:
self.start_index = token.start_index
else:
self.start_index = 0
self.fix_data = fix_data
if self.position:
self.start_index += self.position.start
def Compare(a, b):
"""Compare two error objects, by source code order.
Args:
a: First error object.
b: Second error object.
Returns:
A Negative/0/Positive number when a is before/the same as/after b.
"""
line_diff = a.token.line_number - b.token.line_number
if line_diff:
return line_diff
return a.start_index - b.start_index
Compare = staticmethod(Compare)
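# A minimal usage sketch, not part of the linter itself: errors are reported
# in source order by sorting with Error.Compare. The token stub is an
# assumption for the demo; real tokens come from the tokenizer and carry the
# same two attributes used here.
if __name__ == '__main__':
  class _StubToken(object):
    def __init__(self, line_number, start_index):
      self.line_number = line_number
      self.start_index = start_index

  errors = [Error(2, 'reported second', _StubToken(9, 4)),
            Error(1, 'reported first', _StubToken(3, 0))]
  errors.sort(Error.Compare)  # Python 2 list.sort accepts a cmp function.
  assert [e.message for e in errors] == ['reported first', 'reported second']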

46
tools/closure_linter/closure_linter/common/erroraccumulator.py

@@ -1,46 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error handler class that accumulates an array of errors."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import errorhandler
class ErrorAccumulator(errorhandler.ErrorHandler):
"""Error handler object that accumulates errors in a list."""
def __init__(self):
self._errors = []
def HandleError(self, error):
"""Append the error to the list.
Args:
error: The error object
"""
self._errors.append(error)
def GetErrors(self):
"""Returns the accumulated errors.
Returns:
A sequence of errors.
"""
return self._errors
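# A minimal usage sketch, not part of the linter itself: an accumulator is
# handed to a lint run as its error handler, and the collected errors are
# read back afterwards. Any object can be stored; real callers pass
# error.Error instances.
if __name__ == '__main__':
  accumulator = ErrorAccumulator()
  accumulator.HandleError('first error')
  accumulator.HandleError('second error')
  assert accumulator.GetErrors() == ['first error', 'second error']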

61
tools/closure_linter/closure_linter/common/errorhandler.py

@@ -1,61 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for a linter error handler.
Error handlers aggregate a set of errors from multiple files and can optionally
perform some action based on the reported errors, for example, logging the error
or automatically fixing it.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class ErrorHandler(object):
"""Error handler interface."""
def __init__(self):
if self.__class__ == ErrorHandler:
raise NotImplementedError('class ErrorHandler is abstract')
def HandleFile(self, filename, first_token):
"""Notifies this ErrorHandler that subsequent errors are in filename.
Args:
filename: The file being linted.
first_token: The first token of the file.
"""
def HandleError(self, error):
"""Append the error to the list.
Args:
error: The error object
"""
def FinishFile(self):
"""Finishes handling the current file.
Should be called after all errors in a file have been handled.
"""
def GetErrors(self):
"""Returns the accumulated errors.
Returns:
A sequence of errors.
"""

52
tools/closure_linter/closure_linter/common/erroroutput.py

@@ -1,52 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to format errors."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'nnaze@google.com (Nathan Naze)')
def GetUnixErrorOutput(filename, error, new_error=False):
"""Get a output line for an error in UNIX format."""
line = ''
if error.token:
line = '%d' % error.token.line_number
error_code = '%04d' % error.code
if new_error:
error_code = 'New Error ' + error_code
return '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
def GetErrorOutput(error, new_error=False):
"""Get a output line for an error in regular format."""
line = ''
if error.token:
line = 'Line %d, ' % error.token.line_number
code = 'E:%04d' % error.code
error_message = error.message
if new_error:
error_message = 'New Error ' + error_message
return '%s%s: %s' % (line, code, error_message)
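# A minimal usage sketch, not part of the linter itself: both formatters only
# read .token.line_number, .code and .message, so a small stand-in object is
# enough to show the two output shapes. The filename is a made-up demo value.
if __name__ == '__main__':
  class _Stub(object):
    pass

  error = _Stub()
  error.token = _Stub()
  error.token.line_number = 12
  error.code = 220
  error.message = 'No docs found.'
  print GetUnixErrorOutput('foo.js', error)  # foo.js:12:(0220) No docs found.
  print GetErrorOutput(error)                # Line 12, E:0220: No docs found.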

115
tools/closure_linter/closure_linter/common/filetestcase.py

@@ -1,115 +0,0 @@
#!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test case that runs a checker on a file, matching errors against annotations.
Runs the given checker on the given file, accumulating all errors. The list
of errors is then matched against those annotated in the file. Based heavily
on devtools/javascript/gpylint/full_test.py.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import re
import gflags as flags
import unittest as googletest
from closure_linter.common import erroraccumulator
class AnnotatedFileTestCase(googletest.TestCase):
"""Test case to run a linter against a single file."""
# Matches an error identifier made of all-caps letters and underscores.
_MESSAGE = {'msg': '[A-Z][A-Z_]+'}
# Matches a //, followed by an optional line number with a +/-, followed by a
# list of message IDs. Used to extract expected messages from testdata files.
# TODO(robbyw): Generalize to use different commenting patterns.
_EXPECTED_RE = re.compile(r'\s*//\s*(?:(?P<line>[+-]?[0-9]+):)?'
r'\s*(?P<msgs>%(msg)s(?:,\s*%(msg)s)*)' % _MESSAGE)
def __init__(self, filename, lint_callable, converter):
"""Create a single file lint test case.
Args:
filename: Filename to test.
lint_callable: Callable that lints a file. This is usually runner.Run().
converter: Function taking an error string and returning an error code.
"""
googletest.TestCase.__init__(self, 'runTest')
self._filename = filename
self._messages = []
self._lint_callable = lint_callable
self._converter = converter
def setUp(self):
flags.FLAGS.dot_on_next_line = True
def tearDown(self):
flags.FLAGS.dot_on_next_line = False
def shortDescription(self):
"""Provides a description for the test."""
return 'Run linter on %s' % self._filename
def runTest(self):
"""Runs the test."""
try:
filename = self._filename
stream = open(filename)
except IOError as ex:
raise IOError('Could not find testdata resource for %s: %s' %
(self._filename, ex))
expected = self._GetExpectedMessages(stream)
got = self._ProcessFileAndGetMessages(filename)
self.assertEqual(expected, got)
def _GetExpectedMessages(self, stream):
"""Parse a file and get a sorted list of expected messages."""
messages = []
for i, line in enumerate(stream):
match = self._EXPECTED_RE.search(line)
if match:
line = match.group('line')
msg_ids = match.group('msgs')
if line is None:
line = i + 1
elif line.startswith('+') or line.startswith('-'):
line = i + 1 + int(line)
else:
line = int(line)
for msg_id in msg_ids.split(','):
# Ignore a spurious message from the license preamble.
if msg_id != 'WITHOUT':
messages.append((line, self._converter(msg_id.strip())))
stream.seek(0)
messages.sort()
return messages
def _ProcessFileAndGetMessages(self, filename):
"""Trap gjslint's output parse it to get messages added."""
error_accumulator = erroraccumulator.ErrorAccumulator()
self._lint_callable(filename, error_accumulator)
errors = error_accumulator.GetErrors()
# Convert to expected tuple format.
error_msgs = [(error.token.line_number, error.code) for error in errors]
error_msgs.sort()
return error_msgs
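# A minimal usage sketch, not part of the linter itself: what the annotation
# regex above accepts in testdata files. The message IDs are made-up demo
# values; real ones are gjslint error names.
if __name__ == '__main__':
  samples = ['var x = 1  // +1: EXTRA_SPACE',
             'var y = 2  // 7: MISSING_SEMICOLON, EXTRA_SPACE',
             'var z = 3;  // an ordinary comment']
  for sample in samples:
    match = AnnotatedFileTestCase._EXPECTED_RE.search(sample)
    if match:
      print match.group('line'), match.group('msgs')
  # Prints '+1 EXTRA_SPACE' and '7 MISSING_SEMICOLON, EXTRA_SPACE'; the
  # ordinary comment yields no match.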

170
tools/closure_linter/closure_linter/common/htmlutil.py

@@ -1,170 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with HTML."""
__author__ = ('robbyw@google.com (Robert Walker)')
import cStringIO
import formatter
import htmllib
import HTMLParser
import re
class ScriptExtractor(htmllib.HTMLParser):
"""Subclass of HTMLParser that extracts script contents from an HTML file.
Also inserts appropriate blank lines so that line numbers in the extracted
code match the line numbers in the original HTML.
"""
def __init__(self):
"""Initialize a ScriptExtractor."""
htmllib.HTMLParser.__init__(self, formatter.NullFormatter())
self._in_script = False
self._text = ''
def start_script(self, attrs):
"""Internal handler for the start of a script tag.
Args:
attrs: The attributes of the script tag, as a list of tuples.
"""
for attribute in attrs:
if attribute[0].lower() == 'src':
# Skip script tags with a src specified.
return
self._in_script = True
def end_script(self):
"""Internal handler for the end of a script tag."""
self._in_script = False
def handle_data(self, data):
"""Internal handler for character data.
Args:
data: The character data from the HTML file.
"""
if self._in_script:
# If the last line contains whitespace only, i.e. is just there to
# properly align a </script> tag, strip the whitespace.
if data.rstrip(' \t') != data.rstrip(' \t\n\r\f'):
data = data.rstrip(' \t')
self._text += data
else:
self._AppendNewlines(data)
def handle_comment(self, data):
"""Internal handler for HTML comments.
Args:
data: The text of the comment.
"""
self._AppendNewlines(data)
def _AppendNewlines(self, data):
"""Count the number of newlines in the given string and append them.
This ensures line numbers are correct for reported errors.
Args:
data: The data to count newlines in.
"""
# We append 'x' to both sides of the string to ensure that splitlines
# gives us an accurate count.
for i in xrange(len(('x' + data + 'x').splitlines()) - 1):
self._text += '\n'
def GetScriptLines(self):
"""Return the extracted script lines.
Returns:
The extracted script lines as a list of strings.
"""
return self._text.splitlines()
def GetScriptLines(f):
"""Extract script tag contents from the given HTML file.
Args:
f: The HTML file.
Returns:
Lines in the HTML file that are from script tags.
"""
extractor = ScriptExtractor()
# The HTML parser chokes on text like Array.<!string>, so we work around
# that bug by replacing the < with &lt;. Escaping all text inside script
# tags would be better, but it's a bit of a catch-22.
contents = f.read()
contents = re.sub(r'<([^\s\w/])',
lambda x: '&lt;%s' % x.group(1),
contents)
extractor.feed(contents)
extractor.close()
return extractor.GetScriptLines()
def StripTags(str):
"""Returns the string with HTML tags stripped.
Args:
str: An html string.
Returns:
The html string with all tags stripped. If there was a parse error, returns
the text successfully parsed so far.
"""
# Brute force approach to stripping as much HTML as possible. If there is a
# parsing error, don't strip text before parse error position, and continue
# trying from there.
final_text = ''
finished = False
while not finished:
try:
strip = _HtmlStripper()
strip.feed(str)
strip.close()
str = strip.get_output()
final_text += str
finished = True
except HTMLParser.HTMLParseError, e:
final_text += str[:e.offset]
str = str[e.offset + 1:]
return final_text
class _HtmlStripper(HTMLParser.HTMLParser):
"""Simple class to strip tags from HTML.
Does so by doing nothing when encountering tags, and appending character data
to a buffer when that is encountered.
"""
def __init__(self):
self.reset()
self.__output = cStringIO.StringIO()
def handle_data(self, d):
self.__output.write(d)
def get_output(self):
return self.__output.getvalue()
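# A minimal usage sketch, not part of the linter itself: StripTags drops
# markup and keeps character data. Python 2 only, like the rest of this
# module, since it relies on the old htmllib/HTMLParser libraries.
if __name__ == '__main__':
  assert StripTags('<p>hello <b>world</b></p>') == 'hello world'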

39
tools/closure_linter/closure_linter/common/lintrunner.py

@@ -1,39 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for a lint running wrapper."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class LintRunner(object):
"""Interface for a lint running wrapper."""
def __init__(self):
if self.__class__ == LintRunner:
raise NotImplementedError('class LintRunner is abstract')
def Run(self, filenames, error_handler):
"""Run a linter on the given filenames.
Args:
filenames: The filenames to check
error_handler: An ErrorHandler object
Returns:
The error handler, which may have been used to collect error info.
"""

60
tools/closure_linter/closure_linter/common/matcher.py

@@ -1,60 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based JavaScript matcher classes."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import position
from closure_linter.common import tokens
# Shorthand
Token = tokens.Token
Position = position.Position
class Matcher(object):
"""A token matcher.
Specifies a pattern to match, the type of token it represents, what mode the
token changes to, and what mode the token applies to.
Modes allow more advanced grammars to be incorporated, and are also necessary
to tokenize line by line. We can have different patterns apply to different
modes - i.e. looking for documentation while in comment mode.
Attributes:
regex: The regular expression representing this matcher.
type: The type of token indicated by a successful match.
result_mode: The mode to move to after a successful match.
"""
def __init__(self, regex, token_type, result_mode=None, line_start=False):
"""Create a new matcher template.
Args:
regex: The regular expression to match.
token_type: The type of token a successful match indicates.
result_mode: What mode to change to after a successful match. Defaults to
None, which means to not change the current mode.
line_start: Whether this matcher should only match string at the start
of a line.
"""
self.regex = regex
self.type = token_type
self.result_mode = result_mode
self.line_start = line_start
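# A minimal usage sketch, not part of the linter itself: a matcher bundles a
# compiled pattern with the token type it produces; the tokenizer later in
# this diff consumes lists of them per mode. The 'COMMENT' string stands in
# for a real TokenType constant.
if __name__ == '__main__':
  import re
  line_comment = Matcher(re.compile(r'//[^\n]*'), 'COMMENT')
  match = line_comment.regex.search('var x = 1;  // trailing note')
  assert match.group() == '// trailing note'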

126
tools/closure_linter/closure_linter/common/position.py

@@ -1,126 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent positions within strings."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class Position(object):
"""Object representing a segment of a string.
Attributes:
start: The index in to the string where the segment starts.
length: The length of the string segment.
"""
def __init__(self, start, length):
"""Initialize the position object.
Args:
start: The start index.
length: The number of characters to include.
"""
self.start = start
self.length = length
def Get(self, string):
"""Returns this range of the given string.
Args:
string: The string to slice.
Returns:
The string within the range specified by this object.
"""
return string[self.start:self.start + self.length]
def Set(self, target, source):
"""Sets this range within the target string to the source string.
Args:
target: The target string.
source: The source string.
Returns:
The resulting string
"""
return target[:self.start] + source + target[self.start + self.length:]
def AtEnd(string):
"""Create a Position representing the end of the given string.
Args:
string: The string to represent the end of.
Returns:
The created Position object.
"""
return Position(len(string), 0)
AtEnd = staticmethod(AtEnd)
def IsAtEnd(self, string):
"""Returns whether this position is at the end of the given string.
Args:
string: The string to test for the end of.
Returns:
Whether this position is at the end of the given string.
"""
return self.start == len(string) and self.length == 0
def AtBeginning():
"""Create a Position representing the beginning of any string.
Returns:
The created Position object.
"""
return Position(0, 0)
AtBeginning = staticmethod(AtBeginning)
def IsAtBeginning(self):
"""Returns whether this position is at the beginning of any string.
Returns:
Whether this position is at the beginning of any string.
"""
return self.start == 0 and self.length == 0
def All(string):
"""Create a Position representing the entire string.
Args:
string: The string to represent the entirety of.
Returns:
The created Position object.
"""
return Position(0, len(string))
All = staticmethod(All)
def Index(index):
"""Returns a Position object for the specified index.
Args:
index: The index to select, inclusively.
Returns:
The created Position object.
"""
return Position(index, 1)
Index = staticmethod(Index)
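# A minimal usage sketch, not part of the linter itself: positions are plain
# (start, length) segments used to report and fix errors within a token's
# string.
if __name__ == '__main__':
  pos = Position(6, 5)
  assert pos.Get('hello world') == 'world'
  assert pos.Set('hello world', 'there') == 'hello there'
  assert Position.AtEnd('abc').IsAtEnd('abc')
  assert Position.All('abc').Get('abc') == 'abc'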

190
tools/closure_linter/closure_linter/common/simplefileflags.py

@@ -1,190 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Determines the list of files to be checked from command line arguments."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import glob
import os
import re
import gflags as flags
FLAGS = flags.FLAGS
flags.DEFINE_multistring(
'recurse',
None,
'Recurse into the subdirectories of the given path',
short_name='r')
flags.DEFINE_list(
'exclude_directories',
('_demos'),
'Exclude the specified directories (only applicable along with -r or '
'--presubmit)',
short_name='e')
flags.DEFINE_list(
'exclude_files',
('deps.js'),
'Exclude the specified files',
short_name='x')
def MatchesSuffixes(filename, suffixes):
"""Returns whether the given filename matches one of the given suffixes.
Args:
filename: Filename to check.
suffixes: Sequence of suffixes to check.
Returns:
Whether the given filename matches one of the given suffixes.
"""
suffix = filename[filename.rfind('.'):]
return suffix in suffixes
def _GetUserSpecifiedFiles(argv, suffixes):
"""Returns files to be linted, specified directly on the command line.
Can handle the '*' wildcard in filenames, but no other wildcards.
Args:
argv: Sequence of command line arguments. The second and following arguments
are assumed to be files that should be linted.
suffixes: Expected suffixes for the file type being checked.
Returns:
A sequence of files to be linted.
"""
files = argv[1:] or []
all_files = []
lint_files = []
# Perform any necessary globs.
for f in files:
if f.find('*') != -1:
for result in glob.glob(f):
all_files.append(result)
else:
all_files.append(f)
for f in all_files:
if MatchesSuffixes(f, suffixes):
lint_files.append(f)
return lint_files
def _GetRecursiveFiles(suffixes):
"""Returns files to be checked specified by the --recurse flag.
Args:
suffixes: Expected suffixes for the file type being checked.
Returns:
A list of files to be checked.
"""
lint_files = []
# Perform any requested recursion
if FLAGS.recurse:
for start in FLAGS.recurse:
for root, subdirs, files in os.walk(start):
for f in files:
if MatchesSuffixes(f, suffixes):
lint_files.append(os.path.join(root, f))
return lint_files
def GetAllSpecifiedFiles(argv, suffixes):
"""Returns all files specified by the user on the commandline.
Args:
argv: Sequence of command line arguments. The second and following arguments
are assumed to be files that should be linted.
suffixes: Expected suffixes for the file type
Returns:
A list of all files specified directly or indirectly (via flags) on the
command line by the user.
"""
files = _GetUserSpecifiedFiles(argv, suffixes)
if FLAGS.recurse:
files += _GetRecursiveFiles(suffixes)
return FilterFiles(files)
def FilterFiles(files):
"""Filters the list of files to be linted be removing any excluded files.
Filters out files excluded using --exclude_files and --exclude_directories.
Args:
files: Sequence of files that needs filtering.
Returns:
Filtered list of files to be linted.
"""
num_files = len(files)
ignore_dirs_regexs = []
for ignore in FLAGS.exclude_directories:
ignore_dirs_regexs.append(re.compile(r'(^|[\\/])%s[\\/]' % ignore))
result_files = []
for f in files:
add_file = True
for exclude in FLAGS.exclude_files:
if f.endswith('/' + exclude) or f == exclude:
add_file = False
break
for ignore in ignore_dirs_regexs:
if ignore.search(f):
# Break out of ignore loop so we don't add to
# filtered files.
add_file = False
break
if add_file:
# Convert everything to absolute paths so we can easily remove duplicates
# using a set.
result_files.append(os.path.abspath(f))
skipped = num_files - len(result_files)
if skipped:
print 'Skipping %d file(s).' % skipped
return set(result_files)
def GetFileList(argv, file_type, suffixes):
"""Parse the flags and return the list of files to check.
Args:
argv: Sequence of command line arguments.
file_type: The name of the type of file being checked (currently unused).
suffixes: Sequence of acceptable suffixes for the file type.
Returns:
The list of files to check.
"""
return sorted(GetAllSpecifiedFiles(argv, suffixes))
def IsEmptyArgumentList(argv):
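"""Returns whether the argument list specifies no files and no --recurse flag."""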
return not (len(argv[1:]) or FLAGS.recurse)
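Together, these helpers turn argv plus the --recurse and --exclude_* flags into a sorted, duplicate-free list of lint targets. A minimal sketch of the intended call pattern (the driver below is hypothetical; only GetFileList and IsEmptyArgumentList come from this module):

import sys
import gflags as flags

# Hypothetical driver: let gflags strip the flags, then resolve files.
argv = flags.FLAGS(sys.argv)
if IsEmptyArgumentList(argv):
  print 'No files specified; pass filenames or use --recurse.'
else:
  for path in GetFileList(argv, 'JavaScript', ['.js']):
    print 'Would lint:', path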

185
tools/closure_linter/closure_linter/common/tokenizer.py

@@ -1,185 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based lexer."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import tokens
# Shorthand
Type = tokens.TokenType
class Tokenizer(object):
"""General purpose tokenizer.
Attributes:
mode: The latest mode of the tokenizer. This allows patterns to distinguish
if they are mid-comment, mid-parameter list, etc.
matchers: Dictionary of modes to sequences of matchers that define the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
def __init__(self, starting_mode, matchers, default_types):
"""Initialize the tokenizer.
Args:
starting_mode: Mode to start in.
matchers: Dictionary of modes to sequences of matchers that defines the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
self.__starting_mode = starting_mode
self.matchers = matchers
self.default_types = default_types
def TokenizeFile(self, file):
"""Tokenizes the given file.
Args:
file: An iterable that yields one line of the file at a time.
Returns:
The first token in the file.
"""
# The current mode.
self.mode = self.__starting_mode
# The first token in the stream.
self.__first_token = None
# The last token added to the token stream.
self.__last_token = None
# The current line number.
self.__line_number = 0
for line in file:
self.__line_number += 1
self.__TokenizeLine(line)
return self.__first_token
def _CreateToken(self, string, token_type, line, line_number, values=None):
"""Creates a new Token object (or subclass).
Args:
string: The string of input the token represents.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
Returns:
The newly created Token object.
"""
return tokens.Token(string, token_type, line, line_number, values,
line_number)
def __TokenizeLine(self, line):
"""Tokenizes the given line.
Args:
line: The contents of the line.
"""
string = line.rstrip('\n\r\f')
line_number = self.__line_number
self.__start_index = 0
if not string:
self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
return
normal_token = ''
index = 0
while index < len(string):
for matcher in self.matchers[self.mode]:
if matcher.line_start and index > 0:
continue
match = matcher.regex.match(string, index)
if match:
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line,
line_number))
normal_token = ''
# Add the match.
self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
line_number, match.groupdict()))
# Change the mode to the correct one for after this match.
self.mode = matcher.result_mode or self.mode
# Shorten the string to be matched.
index = match.end()
break
else:
# If the for loop finishes naturally (i.e. no matches) we just add the
# first character to the string of consecutive non match characters.
# These will constitute a NORMAL token.
if string:
normal_token += string[index:index + 1]
index += 1
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line, line_number))
def __CreateNormalToken(self, mode, string, line, line_number):
"""Creates a normal token.
Args:
mode: The current mode.
string: The string to tokenize.
line: The line of text.
line_number: The line number within the file.
Returns:
A Token object, of the default type for the current mode.
"""
token_type = Type.NORMAL
if mode in self.default_types:
token_type = self.default_types[mode]
return self._CreateToken(string, token_type, line, line_number)
def __AddToken(self, token):
"""Add the given token to the token stream.
Args:
token: The token to add.
"""
# Store the first token, or point the previous token to this one.
if not self.__first_token:
self.__first_token = token
else:
self.__last_token.next = token
# Establish the doubly linked list
token.previous = self.__last_token
self.__last_token = token
# Compute the character indices
token.start_index = self.__start_index
self.__start_index += token.length
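The matchers consumed above come from common/matcher.py (also removed in this change); each carries a compiled regex, a token type, an optional result mode, and a line_start flag. A toy single-mode tokenizer, sketched assuming Matcher(regex, type) suffices for the simple case:

import re

from closure_linter.common import matcher
from closure_linter.common import tokenizer
from closure_linter.common import tokens

# One mode, one pattern: classify '#...' runs as a custom 'comment'
# type; unmatched text falls back to the mode's default (NORMAL).
MODE = 'default'
matchers = {MODE: [matcher.Matcher(re.compile(r'#.*'), 'comment')]}
t = tokenizer.Tokenizer(MODE, matchers, {MODE: tokens.TokenType.NORMAL})

token = t.TokenizeFile(['x = 1  # set x\n'])
while token:
  print token.type, repr(token.string)
  token = token.next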

145
tools/closure_linter/closure_linter/common/tokens.py

@@ -1,145 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent tokens and positions within them."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class TokenType(object):
"""Token types common to all languages."""
NORMAL = 'normal'
WHITESPACE = 'whitespace'
BLANK_LINE = 'blank line'
class Token(object):
"""Token class for intelligent text splitting.
The token class represents a string of characters and an identifying type.
Attributes:
type: The type of token.
string: The characters the token comprises.
length: The length of the token.
line: The text of the line the token is found in.
line_number: The number of the line the token is found in.
values: Dictionary of values returned from the tokens regex match.
previous: The token before this one.
next: The token after this one.
start_index: The character index in the line where this token starts.
attached_object: Object containing more information about this token.
metadata: Object containing metadata about this token. Must be added by
a separate metadata pass.
"""
def __init__(self, string, token_type, line, line_number, values=None,
orig_line_number=None):
"""Creates a new Token object.
Args:
string: The string of input the token contains.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
orig_line_number: The line number of the original file this token comes
from. This should only be set during the tokenization process. For newly
created error fix tokens after that, it should be None.
"""
self.type = token_type
self.string = string
self.length = len(string)
self.line = line
self.line_number = line_number
self.orig_line_number = orig_line_number
self.values = values
self.is_deleted = False
# These parts can only be computed when the file is fully tokenized
self.previous = None
self.next = None
self.start_index = None
# This part is set in statetracker.py
# TODO(robbyw): Wrap this in to metadata
self.attached_object = None
# This part is set in *metadatapass.py
self.metadata = None
def IsFirstInLine(self):
"""Tests if this token is the first token in its line.
Returns:
Whether the token is the first token in its line.
"""
return not self.previous or self.previous.line_number != self.line_number
def IsLastInLine(self):
"""Tests if this token is the last token in its line.
Returns:
Whether the token is the last token in its line.
"""
return not self.next or self.next.line_number != self.line_number
def IsType(self, token_type):
"""Tests if this token is of the given type.
Args:
token_type: The type to test for.
Returns:
True if the type of this token matches the type passed in.
"""
return self.type == token_type
def IsAnyType(self, *token_types):
"""Tests if this token is any of the given types.
Args:
token_types: The types to check. Also accepts a single array.
Returns:
True if the type of this token is any of the types passed in.
"""
if not isinstance(token_types[0], basestring):
return self.type in token_types[0]
else:
return self.type in token_types
def __repr__(self):
return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
self.values, self.line_number,
self.metadata)
def __iter__(self):
"""Returns a token iterator."""
node = self
while node:
yield node
node = node.next
def __reversed__(self):
"""Returns a reverse-direction token iterator."""
node = self
while node:
yield node
node = node.previous

113
tools/closure_linter/closure_linter/common/tokens_test.py

@@ -1,113 +0,0 @@
#!/usr/bin/env python
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'nnaze@google.com (Nathan Naze)'
import unittest as googletest
from closure_linter.common import tokens
def _CreateDummyToken():
return tokens.Token('foo', None, 1, 1)
def _CreateDummyTokens(count):
dummy_tokens = []
for _ in xrange(count):
dummy_tokens.append(_CreateDummyToken())
return dummy_tokens
def _SetTokensAsNeighbors(neighbor_tokens):
for i in xrange(len(neighbor_tokens)):
prev_index = i - 1
next_index = i + 1
if prev_index >= 0:
neighbor_tokens[i].previous = neighbor_tokens[prev_index]
if next_index < len(neighbor_tokens):
neighbor_tokens[i].next = neighbor_tokens[next_index]
class TokensTest(googletest.TestCase):
def testIsFirstInLine(self):
# First token in file (has no previous).
self.assertTrue(_CreateDummyToken().IsFirstInLine())
a, b = _CreateDummyTokens(2)
_SetTokensAsNeighbors([a, b])
# Tokens on same line
a.line_number = 30
b.line_number = 30
self.assertFalse(b.IsFirstInLine())
# Tokens on different lines
b.line_number = 31
self.assertTrue(b.IsFirstInLine())
def testIsLastInLine(self):
# Last token in file (has no next).
self.assertTrue(_CreateDummyToken().IsLastInLine())
a, b = _CreateDummyTokens(2)
_SetTokensAsNeighbors([a, b])
# Tokens on same line
a.line_number = 30
b.line_number = 30
self.assertFalse(a.IsLastInLine())
b.line_number = 31
self.assertTrue(a.IsLastInLine())
def testIsType(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertTrue(a.IsType('fakeType1'))
self.assertFalse(a.IsType('fakeType2'))
def testIsAnyType(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2']))
self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4']))
def testRepr(self):
a = tokens.Token('foo', 'fakeType1', 1, 1)
self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a))
def testIter(self):
dummy_tokens = _CreateDummyTokens(5)
_SetTokensAsNeighbors(dummy_tokens)
a, b, c, d, e = dummy_tokens
i = iter(a)
self.assertListEqual([a, b, c, d, e], list(i))
def testReverseIter(self):
dummy_tokens = _CreateDummyTokens(5)
_SetTokensAsNeighbors(dummy_tokens)
a, b, c, d, e = dummy_tokens
ri = reversed(e)
self.assertListEqual([e, d, c, b, a], list(ri))
if __name__ == '__main__':
googletest.main()

844
tools/closure_linter/closure_linter/ecmalintrules.py

@@ -1,844 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking EcmaScript files for common style guide violations.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
import gflags as flags
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errorrules
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
FLAGS = flags.FLAGS
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
# TODO(user): When flipping this to True, remove logic from unit tests
# that overrides this flag.
flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be '
'placed on the next line for wrapped expressions')
# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
# TODO(robbyw): Ensure inline jsDoc is in {}
# TODO(robbyw): Check for valid JS types in parameter docs
# Shorthand
Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
"""EmcaScript lint style checking rules.
Can be used to find common style errors in JavaScript, ActionScript and other
Ecma like scripting languages. Style checkers for Ecma scripting languages
should inherit from this style checker.
Please do not add any state to EcmaScriptLintRules or to any subclasses.
All state should be added to the StateTracker subclass used for a particular
language.
"""
# It will be initialized in constructor so the flags are initialized.
max_line_length = -1
# Static constants.
MISSING_PARAMETER_SPACE = re.compile(r',\S')
EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
ENDS_WITH_SPACE = re.compile(r'\s$')
ILLEGAL_TAB = re.compile(r'\t')
# Regex used to split up complex types to check for invalid use of ? and |.
TYPE_SPLIT = re.compile(r'[,<>()]')
# Regex for form of author lines after the @author tag.
AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
# Acceptable tokens to remove for line too long testing.
LONG_LINE_IGNORE = frozenset(
['*', '//', '@see'] +
['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
'@fileoverview', '@param', '@return', '@returns'])
def __init__(self):
"""Initialize this lint rule object."""
checkerbase.LintRulesBase.__init__(self)
if EcmaScriptLintRules.max_line_length == -1:
EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initialize this lint rule object before parsing a new file."""
checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
is_html)
self._indentation = indentation.IndentationRules()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a @param tag."""
raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
def _CheckLineLength(self, last_token, state):
"""Checks whether the line is too long.
Args:
last_token: The last token in the line.
state: parser_state object that indicates the current state in the page
"""
# Start from the last token so that we have the flag object attached to
# any DOC_FLAG tokens.
line_number = last_token.line_number
token = last_token
# Build a representation of the string where spaces indicate potential
# line-break locations.
line = []
while token and token.line_number == line_number:
if state.IsTypeToken(token):
line.insert(0, 'x' * len(token.string))
elif token.type in (Type.IDENTIFIER, Type.OPERATOR):
# Dots are acceptable places to wrap (may be tokenized as identifiers).
line.insert(0, token.string.replace('.', ' '))
else:
line.insert(0, token.string)
token = token.previous
line = ''.join(line)
line = line.rstrip('\n\r\f')
try:
length = len(unicode(line, 'utf-8'))
except (LookupError, UnicodeDecodeError):
# Unknown encoding. The line length may be wrong, as was originally the
# case for utf-8 (see bug 1735846). For now just accept the default
# length, but as we find problems we can either add tests for other
# possible encodings or return without an error to protect against
# false positives at the cost of more false negatives.
length = len(line)
if length > EcmaScriptLintRules.max_line_length:
# If the line matches one of the exceptions, then it's ok.
for long_line_regexp in self.GetLongLineExceptions():
if long_line_regexp.match(last_token.line):
return
# If the line consists of only one "word", or multiple words but all
# except one are ignorable, then it's ok.
parts = set(line.split())
# We allow two "words" (type and name) when the line contains @param
max_parts = 1
if '@param' in parts:
max_parts = 2
# Custom tags like @requires may have url like descriptions, so ignore
# the tag, similar to how we handle @see.
custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
> max_parts):
self._HandleError(
errors.LINE_TOO_LONG,
'Line too long (%d characters).' % len(line), last_token)
def _CheckJsDocType(self, token, js_type):
"""Checks the given type for style errors.
Args:
token: The DOC_FLAG token for the flag whose type to check.
js_type: The flag's typeannotation.TypeAnnotation instance.
"""
if not js_type: return
if js_type.type_group and len(js_type.sub_types) == 2:
identifiers = [t.identifier for t in js_type.sub_types]
if 'null' in identifiers:
# Don't warn if the identifier is a template type (e.g. {TYPE|null}).
if not identifiers[0].isupper() and not identifiers[1].isupper():
self._HandleError(
errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
'Prefer "?Type" to "Type|null": "%s"' % js_type, token)
# TODO(user): We should report an error for wrong usage of '?' and '|'
# e.g. {?number|string|null} etc.
for sub_type in js_type.IterTypes():
self._CheckJsDocType(token, sub_type)
def _CheckForMissingSpaceBeforeToken(self, token):
"""Checks for a missing space at the beginning of a token.
Reports a MISSING_SPACE error if the token does not begin with a space,
the previous token does not end with a space, and both tokens are on the
same line.
Args:
token: The token being checked
"""
# TODO(user): Check if too many spaces?
if (len(token.string) == len(token.string.lstrip()) and
token.previous and token.line_number == token.previous.line_number and
len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
self._HandleError(
errors.MISSING_SPACE,
'Missing space before "%s"' % token.string,
token,
position=Position.AtBeginning())
def _CheckOperator(self, token):
"""Checks an operator for spacing and line style.
Args:
token: The operator token.
"""
last_code = token.metadata.last_code
if not self._ExpectSpaceBeforeOperator(token):
if (token.previous and token.previous.type == Type.WHITESPACE and
last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
last_code.line_number == token.line_number):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
token.previous, position=Position.All(token.previous.string))
elif (token.previous and
not token.previous.IsComment() and
not tokenutil.IsDot(token) and
token.previous.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.MISSING_SPACE,
'Missing space before "%s"' % token.string, token,
position=Position.AtBeginning())
# Check wrapping of operators.
next_code = tokenutil.GetNextCodeToken(token)
is_dot = tokenutil.IsDot(token)
wrapped_before = last_code and last_code.line_number != token.line_number
wrapped_after = next_code and next_code.line_number != token.line_number
if FLAGS.dot_on_next_line and is_dot and wrapped_after:
self._HandleError(
errors.LINE_ENDS_WITH_DOT,
'"." must go on the following line',
token)
if (not is_dot and wrapped_before and
not token.metadata.IsUnaryOperator()):
self._HandleError(
errors.LINE_STARTS_WITH_OPERATOR,
'Binary operator must go on previous line "%s"' % token.string,
token)
def _IsLabel(self, token):
# A ':' token is considered part of a label if it occurs in a case
# statement, a plain label, or an object literal, i.e. is not part of a
# ternary.
return (token.string == ':' and
token.metadata.context.type in (Context.LITERAL_ELEMENT,
Context.CASE_BLOCK,
Context.STATEMENT))
def _ExpectSpaceBeforeOperator(self, token):
"""Returns whether a space should appear before the given operator token.
Args:
token: The operator token.
Returns:
Whether there should be a space before the token.
"""
if token.string == ',' or token.metadata.IsUnaryPostOperator():
return False
if tokenutil.IsDot(token):
return False
# Colons should appear in labels, object literals, the case of a switch
# statement, and ternary operator. Only want a space in the case of the
# ternary operator.
if self._IsLabel(token):
return False
if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
return False
return True
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
# Store some convenience variables
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
last_non_space_token = state.GetLastNonSpaceToken()
token_type = token.type
# Process the line change.
if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
self._HandleError(*indentation_error)
if last_in_line:
self._CheckLineLength(token, state)
if token_type == Type.PARAMETERS:
# Find missing spaces in parameter lists.
if self.MISSING_PARAMETER_SPACE.search(token.string):
fix_data = ', '.join([s.strip() for s in token.string.split(',')])
self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
token, position=None, fix_data=fix_data.strip())
# Find extra spaces at the beginning of parameter lists. Make sure
# we aren't at the beginning of a continuing multi-line list.
if not first_in_line:
space_count = len(token.string) - len(token.string.lstrip())
if space_count:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
token, position=Position(0, space_count))
elif (token_type == Type.START_BLOCK and
token.metadata.context.type == Context.BLOCK):
self._CheckForMissingSpaceBeforeToken(token)
elif token_type == Type.END_BLOCK:
last_code = token.metadata.last_code
if state.InFunction() and state.IsFunctionClose():
if state.InTopLevelFunction():
# A semicolon should not be included at the end of a function
# declaration.
if not state.InAssignedFunction():
if not last_in_line and token.next.type == Type.SEMICOLON:
self._HandleError(
errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
'Illegal semicolon after function declaration',
token.next, position=Position.All(token.next.string))
# A semicolon should be included at the end of a function expression
# that is not immediately called or used by a dot operator.
if (state.InAssignedFunction() and token.next
and token.next.type != Type.SEMICOLON):
next_token = tokenutil.GetNextCodeToken(token)
is_immediately_used = (next_token.type == Type.START_PAREN or
tokenutil.IsDot(next_token))
if not is_immediately_used:
self._HandleError(
errors.MISSING_SEMICOLON_AFTER_FUNCTION,
'Missing semicolon after function assigned to a variable',
token, position=Position.AtEnd(token.string))
if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
'Interface methods cannot contain code', last_code)
elif (state.IsBlockClose() and
token.next and token.next.type == Type.SEMICOLON):
if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
and last_code.metadata.context.type != Context.OBJECT_LITERAL):
self._HandleError(
errors.REDUNDANT_SEMICOLON,
'No semicolon is required to end a code block',
token.next, position=Position.All(token.next.string))
elif token_type == Type.SEMICOLON:
if token.previous and token.previous.type == Type.WHITESPACE:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before ";"',
token.previous, position=Position.All(token.previous.string))
if token.next and token.next.line_number == token.line_number:
if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
# TODO(robbyw): Error about no multi-statement lines.
pass
elif token.next.type not in (
Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
self._HandleError(
errors.MISSING_SPACE,
'Missing space after ";" in for statement',
token.next,
position=Position.AtBeginning())
last_code = token.metadata.last_code
if last_code and last_code.type == Type.SEMICOLON:
# Allow a single double semicolon in for loops for cases like:
# for (;;) { }.
# NOTE(user): This is not a perfect check, and will not throw an error
# for cases like: for (var i = 0;; i < n; i++) {}, but then your code
# probably won't work either.
for_token = tokenutil.CustomSearch(
last_code,
lambda token: token.type == Type.KEYWORD and token.string == 'for',
end_func=lambda token: token.type == Type.SEMICOLON,
distance=None,
reverse=True)
if not for_token:
self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
token, position=Position.All(token.string))
elif token_type == Type.START_PAREN:
# Ensure that opening parentheses have a space before any keyword
# that is not being invoked like a member function.
if (token.previous and token.previous.type == Type.KEYWORD and
(not token.previous.metadata or
not token.previous.metadata.last_code or
not token.previous.metadata.last_code.string or
token.previous.metadata.last_code.string[-1:] != '.')):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
token, position=Position.AtBeginning())
elif token.previous and token.previous.type == Type.WHITESPACE:
before_space = token.previous.previous
# Ensure that there is no extra space before a function invocation,
# even if the function being invoked happens to be a keyword.
if (before_space and before_space.line_number == token.line_number and
(before_space.type == Type.IDENTIFIER or
(before_space.type == Type.KEYWORD and before_space.metadata and
before_space.metadata.last_code and
before_space.metadata.last_code.string and
before_space.metadata.last_code.string[-1:] == '.'))):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "("',
token.previous, position=Position.All(token.previous.string))
elif token_type == Type.START_BRACKET:
self._HandleStartBracket(token, last_non_space_token)
elif token_type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
# beginning of a line.
if (token.previous and token.previous.type == Type.WHITESPACE and
not token.previous.IsFirstInLine() and
not (last_non_space_token and last_non_space_token.line_number ==
token.line_number and
last_non_space_token.type == Type.SEMICOLON)):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' %
token.string, token.previous,
position=Position.All(token.previous.string))
elif token_type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
if token.next:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string,
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace',
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
token, position=Position.All(token.string))
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if last_in_line:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
token, position=Position.All(token.string))
elif not first_in_line and not token.next.IsComment():
if token.length > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after "%s"' %
token.previous.string, token,
position=Position(1, len(token.string) - 1))
elif token_type == Type.OPERATOR:
self._CheckOperator(token)
elif token_type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'bug':
# TODO(robbyw): Check for exactly 1 space on the left.
string = token.next.string.lstrip()
string = string.split(' ', 1)[0]
if not string.isdigit():
self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
'@bug should be followed by a bug number', token)
elif flag.flag_type == 'suppress':
if flag.type is None:
# A syntactically invalid suppress tag will get tokenized as a normal
# flag, indicating an error.
self._HandleError(
errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
else:
for suppress_type in flag.jstype.IterIdentifiers():
if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(
errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % suppress_type, token)
elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
result = self.AUTHOR_SPEC.match(string)
if not result:
self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
'Author tag line should be of the form: '
'@author foo@somewhere.com (Your Name)',
token.next)
else:
# Check spacing between email address and name. Do this before
# checking earlier spacing so positions are easier to calculate for
# autofixing.
num_spaces = len(result.group(2))
if num_spaces < 1:
self._HandleError(errors.MISSING_SPACE,
'Missing space after email address',
token.next, position=Position(result.start(2), 0))
elif num_spaces > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after email address',
token.next,
position=Position(result.start(2) + 1, num_spaces - 1))
# Check for extra spaces before email address. Can't be too few, if
# not at least one we wouldn't match @author tag.
num_spaces = len(result.group(1))
if num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space before email address',
token.next, position=Position(1, num_spaces - 1))
elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
not self._limited_doc_checks):
if flag.flag_type == 'param':
if flag.name is None:
self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
'Missing name in @param tag', token)
if not flag.description:
flag_name = token.type
if 'name' in token.values:
flag_name = '@' + token.values['name']
if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
self._HandleError(
errors.MISSING_JSDOC_TAG_DESCRIPTION,
'Missing description in %s tag' % flag_name, token)
else:
self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
if flag.HasType():
if flag.type_start_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.type_start_token)
if flag.jstype and not flag.jstype.IsEmpty():
self._CheckJsDocType(token, flag.jstype)
if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(
errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
token.values['name'] not in FLAGS.custom_jsdoc_tags):
self._HandleError(
errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token)
if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
token.values['name'] == 'inheritDoc' and
token_type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc',
token)
elif token_type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
if ((not state.InFunction() or state.InConstructor()) and
state.InTopLevel() and not state.InObjectLiteralDescendant()):
jsdoc = state.GetDocComment()
if not state.HasDocComment(identifier):
# Only test for documentation on identifiers with .s in them to
# avoid checking things like simple variables. We don't require
# documenting assignments to .prototype itself (bug 1880803).
if (not state.InConstructor() and
identifier.find('.') != -1 and not
identifier.endswith('.prototype') and not
self._limited_doc_checks):
comment = state.GetLastComment()
if not (comment and comment.lower().count('jsdoc inherited')):
self._HandleError(
errors.MISSING_MEMBER_DOCUMENTATION,
"No docs found for member '%s'" % identifier,
token)
elif jsdoc and (not state.InConstructor() or
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
# Can have a private class which inherits documentation from a
# public superclass.
#
# @inheritDoc is deprecated in favor of using @override, and they
# are treated the same way by the checks below.
if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token)
if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
('underscore' not in jsdoc.suppressions) and not
((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
('accessControls' in jsdoc.suppressions))):
self._HandleError(
errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
self._HandleError(
errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
elif (jsdoc.HasFlag('private') and
not self.InExplicitlyTypedLanguage()):
# It is convention to hide public fields in some ECMA
# implementations from documentation using the @private tag.
self._HandleError(
errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
# These flags are only legal on localizable message definitions;
# such variables always begin with the prefix MSG_.
for f in ('desc', 'hidden', 'meaning'):
if (jsdoc.HasFlag(f)
and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1):
self._HandleError(
errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @%s JsDoc' % (identifier, f),
token)
# Check for illegally assigning live objects as prototype property values.
index = identifier.find('.prototype.')
# Ignore anything with additional .s after the prototype.
if index != -1 and identifier.find('.', index + 11) == -1:
equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
if next_code and (
next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
next_code.IsOperator('new')):
self._HandleError(
errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
'Member %s cannot have a non-primitive value' % identifier,
token)
elif token_type == Type.END_PARAMETERS:
# Find extra space at the end of parameter lists. We check the token
# prior to the current one when it is a closing paren.
if (token.previous and token.previous.type == Type.PARAMETERS
and self.ENDS_WITH_SPACE.search(token.previous.string)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
token.previous)
jsdoc = state.GetDocComment()
if state.GetFunction().is_interface:
if token.previous and token.previous.type == Type.PARAMETERS:
self._HandleError(
errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
'Interface constructor cannot have parameters',
token.previous)
elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
and not jsdoc.InheritsDocumentation()
and not state.InObjectLiteralDescendant() and not
jsdoc.IsInvalidated()):
distance, edit = jsdoc.CompareParameters(state.GetParams())
if distance:
params_iter = iter(state.GetParams())
docs_iter = iter(jsdoc.ordered_params)
for op in edit:
if op == 'I':
# Insertion.
# Parsing doc comments is the same for all languages
# but some languages care about parameters that don't have
# doc comments and some languages don't care.
# Languages that don't allow variables to be typed such as
# JavaScript care but languages such as ActionScript or Java
# that allow variables to be typed don't care.
if not self._limited_doc_checks:
self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
'Found docs for non-existing parameter: "%s"' %
docs_iter.next(), token)
elif op == 'S':
# Substitution
if not self._limited_doc_checks:
self._HandleError(
errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
params_iter.next()
docs_iter.next()
elif token_type == Type.STRING_TEXT:
# If this is the first token after the start of the string, but it's at
# the end of a line, we know we have a multi-line string.
if token.previous.type in (
Type.SINGLE_QUOTE_STRING_START,
Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
self._HandleError(errors.MULTI_LINE_STRING,
'Multi-line strings are not allowed', token)
# This check is orthogonal to the ones above, and repeats some types, so
# it is a plain if and not an elif.
if token.type in Type.COMMENT_TYPES:
if self.ILLEGAL_TAB.search(token.string):
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in comment "%s"' % token.string, token)
trimmed = token.string.rstrip()
if last_in_line and token.string != trimmed:
# Check for extra whitespace at the end of a line.
self._HandleError(
errors.EXTRA_SPACE, 'Extra space at end of line', token,
position=Position(len(trimmed), len(token.string) - len(trimmed)))
# This check is also orthogonal since it is based on metadata.
if token.metadata.is_implied_semicolon:
self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token)
def _HandleStartBracket(self, token, last_non_space_token):
"""Handles a token that is an open bracket.
Args:
token: The token to handle.
last_non_space_token: The last token that was not a space.
"""
if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, position=Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
# In the future, when rules for computing exactly how many spaces
# lines should be indented are added, then we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not token.IsFirstInLine() and token.previous and
token.previous.type not in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, position=Position.AtBeginning())
def Finalize(self, state):
"""Perform all checks that need to occur after all lines are processed.
Args:
state: State of the parser after parsing all tokens
Raises:
TypeError: If not overridden.
"""
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
if state.GetLastLine() and not (
state.GetLastLine().isspace() or
state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
self._HandleError(
errors.FILE_MISSING_NEWLINE,
'File does not end with new line. (%s)' % state.GetLastLine(),
last_non_space_token)
try:
self._indentation.Finalize()
except Exception, e:
self._HandleError(
errors.FILE_DOES_NOT_PARSE,
str(e),
last_non_space_token)
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit.
Returns:
A list of regexps, used as matches (rather than searches).
"""
return []
def InExplicitlyTypedLanguage(self):
"""Returns whether this ecma implementation is explicitly typed."""
return False
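Concrete checkers subclass EcmaScriptLintRules and fill in the language-specific hooks, most importantly HandleMissingParameterDoc (which otherwise raises TypeError). A hypothetical minimal subclass, assuming the MISSING_PARAMETER_DOCUMENTATION code from errors.py (the real subclass is javascriptlintrules.py, also removed in this change):

from closure_linter import ecmalintrules
from closure_linter import errors

class ToyLintRules(ecmalintrules.EcmaScriptLintRules):
  """Hypothetical minimal rule set for an Ecma-like language."""

  def HandleMissingParameterDoc(self, token, param_name):
    # Report the undocumented parameter instead of raising TypeError.
    self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
                      'Missing docs for parameter: "%s"' % param_name, token)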

574
tools/closure_linter/closure_linter/ecmametadatapass.py

@@ -1,574 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata pass for annotating tokens in EcmaScript files."""
__author__ = ('robbyw@google.com (Robert Walker)')
from closure_linter import javascripttokens
from closure_linter import tokenutil
TokenType = javascripttokens.JavaScriptTokenType
class ParseError(Exception):
"""Exception indicating a parse error at the given token.
Attributes:
token: The token where the parse error occurred.
"""
def __init__(self, token, message=None):
"""Initialize a parse error at the given token with an optional message.
Args:
token: The token where the parse error occurred.
message: A message describing the parse error.
"""
Exception.__init__(self, message)
self.token = token
class EcmaContext(object):
"""Context object for EcmaScript languages.
Attributes:
type: The context type.
start_token: The token where this context starts.
end_token: The token where this context ends.
parent: The parent context.
"""
# The root context.
ROOT = 'root'
# A block of code.
BLOCK = 'block'
# A pseudo-block of code for a given case or default section.
CASE_BLOCK = 'case_block'
# Block of statements in a for loop's parentheses.
FOR_GROUP_BLOCK = 'for_block'
# An implied block of code for one-line if, while, and for statements
IMPLIED_BLOCK = 'implied_block'
# An index into an array or object.
INDEX = 'index'
# An array literal in [].
ARRAY_LITERAL = 'array_literal'
# An object literal in {}.
OBJECT_LITERAL = 'object_literal'
# An individual element in an array or object literal.
LITERAL_ELEMENT = 'literal_element'
# The portion of a ternary statement between ? and :
TERNARY_TRUE = 'ternary_true'
# The portion of a ternary statement after :
TERNARY_FALSE = 'ternary_false'
# The entire switch statement. This will contain a GROUP with the variable
# and a BLOCK with the code.
# Since that BLOCK is not a normal block, it cannot contain statements except
# for case and default.
SWITCH = 'switch'
# A normal comment.
COMMENT = 'comment'
# A JsDoc comment.
DOC = 'doc'
# An individual statement.
STATEMENT = 'statement'
# Code within parentheses.
GROUP = 'group'
# Parameter names in a function declaration.
PARAMETERS = 'parameters'
# A set of variable declarations appearing after the 'var' keyword.
VAR = 'var'
# Context types that are blocks.
BLOCK_TYPES = frozenset([
ROOT, BLOCK, CASE_BLOCK, FOR_GROUP_BLOCK, IMPLIED_BLOCK])
def __init__(self, context_type, start_token, parent=None):
"""Initializes the context object.
Args:
context_type: The context type.
start_token: The token where this context starts.
parent: The parent context.
Attributes:
type: The context type.
start_token: The token where this context starts.
end_token: The token where this context ends.
parent: The parent context.
children: The child contexts of this context, in order.
"""
self.type = context_type
self.start_token = start_token
self.end_token = None
self.parent = None
self.children = []
if parent:
parent.AddChild(self)
def __repr__(self):
"""Returns a string representation of the context object."""
stack = []
context = self
while context:
stack.append(context.type)
context = context.parent
return 'Context(%s)' % ' > '.join(stack)
def AddChild(self, child):
"""Adds a child to this context and sets child's parent to this context.
Args:
child: A child EcmaContext. The child's parent will be set to this
context.
"""
child.parent = self
self.children.append(child)
self.children.sort(EcmaContext._CompareContexts)
def GetRoot(self):
"""Get the root context that contains this context, if any."""
context = self
while context:
if context.type == EcmaContext.ROOT:
return context
context = context.parent
@staticmethod
def _CompareContexts(context1, context2):
"""Sorts contexts 1 and 2 by start token document position."""
return tokenutil.Compare(context1.start_token, context2.start_token)
class EcmaMetaData(object):
"""Token metadata for EcmaScript languages.
Attributes:
last_code: The last code token to appear before this one.
context: The context this token appears in.
operator_type: The operator type, will be one of the *_OPERATOR constants
defined below.
aliased_symbol: The full symbol being identified, as a string (e.g. an
'XhrIo' alias for 'goog.net.XhrIo'). Only applicable to identifier
tokens. This is set in aliaspass.py and is a best guess.
is_alias_definition: True if the symbol is part of an alias definition.
If so, these symbols won't be counted towards goog.requires/provides.
"""
UNARY_OPERATOR = 'unary'
UNARY_POST_OPERATOR = 'unary_post'
BINARY_OPERATOR = 'binary'
TERNARY_OPERATOR = 'ternary'
def __init__(self):
"""Initializes a token metadata object."""
self.last_code = None
self.context = None
self.operator_type = None
self.is_implied_semicolon = False
self.is_implied_block = False
self.is_implied_block_close = False
self.aliased_symbol = None
self.is_alias_definition = False
def __repr__(self):
"""Returns a string representation of the context object."""
parts = ['%r' % self.context]
if self.operator_type:
parts.append('optype: %r' % self.operator_type)
if self.is_implied_semicolon:
parts.append('implied;')
if self.aliased_symbol:
parts.append('alias for: %s' % self.aliased_symbol)
return 'MetaData(%s)' % ', '.join(parts)
def IsUnaryOperator(self):
return self.operator_type in (EcmaMetaData.UNARY_OPERATOR,
EcmaMetaData.UNARY_POST_OPERATOR)
def IsUnaryPostOperator(self):
return self.operator_type == EcmaMetaData.UNARY_POST_OPERATOR
class EcmaMetaDataPass(object):
"""A pass that iterates over all tokens and builds metadata about them."""
def __init__(self):
"""Initialize the meta data pass object."""
self.Reset()
def Reset(self):
"""Resets the metadata pass to prepare for the next file."""
self._token = None
self._context = None
self._AddContext(EcmaContext.ROOT)
self._last_code = None
def _CreateContext(self, context_type):
"""Overridable by subclasses to create the appropriate context type."""
return EcmaContext(context_type, self._token, self._context)
def _CreateMetaData(self):
"""Overridable by subclasses to create the appropriate metadata type."""
return EcmaMetaData()
def _AddContext(self, context_type):
"""Adds a context of the given type to the context stack.
Args:
context_type: The type of context to create
"""
self._context = self._CreateContext(context_type)
def _PopContext(self):
"""Moves up one level in the context stack.
Returns:
The former context.
Raises:
ParseError: If the root context is popped.
"""
top_context = self._context
top_context.end_token = self._token
self._context = top_context.parent
if self._context:
return top_context
else:
raise ParseError(self._token)
def _PopContextType(self, *stop_types):
"""Pops the context stack until a context of the given type is popped.
Args:
*stop_types: The types of context to pop to - stops at the first match.
Returns:
The context object of the given type that was popped.
"""
last = None
while not last or last.type not in stop_types:
last = self._PopContext()
return last
def _EndStatement(self):
"""Process the end of a statement."""
self._PopContextType(EcmaContext.STATEMENT)
if self._context.type == EcmaContext.IMPLIED_BLOCK:
self._token.metadata.is_implied_block_close = True
self._PopContext()
def _ProcessContext(self):
"""Process the context at the current token.
Returns:
The context that should be assigned to the current token, or None if
the current context after this method should be used.
Raises:
ParseError: When the token appears in an invalid context.
"""
token = self._token
token_type = token.type
if self._context.type in EcmaContext.BLOCK_TYPES:
# Whenever we're in a block, we add a statement context. We make an
# exception for switch statements since they can only contain case: and
# default: and therefore don't directly contain statements.
# The block we add here may be immediately removed in some cases, but
# that causes no harm.
parent = self._context.parent
if not parent or parent.type != EcmaContext.SWITCH:
self._AddContext(EcmaContext.STATEMENT)
elif self._context.type == EcmaContext.ARRAY_LITERAL:
self._AddContext(EcmaContext.LITERAL_ELEMENT)
if token_type == TokenType.START_PAREN:
if self._last_code and self._last_code.IsKeyword('for'):
# for loops contain multiple statements in the group unlike while,
# switch, if, etc.
self._AddContext(EcmaContext.FOR_GROUP_BLOCK)
else:
self._AddContext(EcmaContext.GROUP)
elif token_type == TokenType.END_PAREN:
result = self._PopContextType(EcmaContext.GROUP,
EcmaContext.FOR_GROUP_BLOCK)
keyword_token = result.start_token.metadata.last_code
# keyword_token will not exist if the open paren is the first line of the
# file, for example if all code is wrapped in an immediately executed
# anonymous function.
if keyword_token and keyword_token.string in ('if', 'for', 'while'):
next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
if next_code.type != TokenType.START_BLOCK:
# Check for do-while.
is_do_while = False
pre_keyword_token = keyword_token.metadata.last_code
if (pre_keyword_token and
pre_keyword_token.type == TokenType.END_BLOCK):
start_block_token = pre_keyword_token.metadata.context.start_token
is_do_while = start_block_token.metadata.last_code.string == 'do'
# If it's not do-while, it's an implied block.
if not is_do_while:
self._AddContext(EcmaContext.IMPLIED_BLOCK)
token.metadata.is_implied_block = True
return result
# else (not else if) with no open brace after it should be considered the
# start of an implied block, similar to the case with if, for, and while
# above.
elif (token_type == TokenType.KEYWORD and
token.string == 'else'):
next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
if (next_code.type != TokenType.START_BLOCK and
(next_code.type != TokenType.KEYWORD or next_code.string != 'if')):
self._AddContext(EcmaContext.IMPLIED_BLOCK)
token.metadata.is_implied_block = True
elif token_type == TokenType.START_PARAMETERS:
self._AddContext(EcmaContext.PARAMETERS)
elif token_type == TokenType.END_PARAMETERS:
return self._PopContextType(EcmaContext.PARAMETERS)
elif token_type == TokenType.START_BRACKET:
if (self._last_code and
self._last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
self._AddContext(EcmaContext.INDEX)
else:
self._AddContext(EcmaContext.ARRAY_LITERAL)
elif token_type == TokenType.END_BRACKET:
return self._PopContextType(EcmaContext.INDEX, EcmaContext.ARRAY_LITERAL)
elif token_type == TokenType.START_BLOCK:
if (self._last_code.type in (TokenType.END_PAREN,
TokenType.END_PARAMETERS) or
self._last_code.IsKeyword('else') or
self._last_code.IsKeyword('do') or
self._last_code.IsKeyword('try') or
self._last_code.IsKeyword('finally') or
(self._last_code.IsOperator(':') and
self._last_code.metadata.context.type == EcmaContext.CASE_BLOCK)):
# else, do, try, and finally all might have no () before {.
# Also, handle the bizarre syntax case 10: {...}.
self._AddContext(EcmaContext.BLOCK)
else:
self._AddContext(EcmaContext.OBJECT_LITERAL)
elif token_type == TokenType.END_BLOCK:
context = self._PopContextType(EcmaContext.BLOCK,
EcmaContext.OBJECT_LITERAL)
if self._context.type == EcmaContext.SWITCH:
# The end of the block also means the end of the switch statement it
# applies to.
return self._PopContext()
return context
elif token.IsKeyword('switch'):
self._AddContext(EcmaContext.SWITCH)
elif (token_type == TokenType.KEYWORD and
token.string in ('case', 'default') and
self._context.type != EcmaContext.OBJECT_LITERAL):
# Pop up to but not including the switch block.
while self._context.parent.type != EcmaContext.SWITCH:
self._PopContext()
if self._context.parent is None:
raise ParseError(token, 'Encountered case/default statement '
'without switch statement')
elif token.IsOperator('?'):
self._AddContext(EcmaContext.TERNARY_TRUE)
elif token.IsOperator(':'):
if self._context.type == EcmaContext.OBJECT_LITERAL:
self._AddContext(EcmaContext.LITERAL_ELEMENT)
elif self._context.type == EcmaContext.TERNARY_TRUE:
self._PopContext()
self._AddContext(EcmaContext.TERNARY_FALSE)
# Handle nested ternary statements like:
# foo = bar ? baz ? 1 : 2 : 3
# When we encounter the second ":" the context is
# ternary_false > ternary_true > statement > root
elif (self._context.type == EcmaContext.TERNARY_FALSE and
self._context.parent.type == EcmaContext.TERNARY_TRUE):
self._PopContext() # Leave current ternary false context.
self._PopContext() # Leave current parent ternary true
self._AddContext(EcmaContext.TERNARY_FALSE)
elif self._context.parent.type == EcmaContext.SWITCH:
self._AddContext(EcmaContext.CASE_BLOCK)
elif token.IsKeyword('var'):
self._AddContext(EcmaContext.VAR)
elif token.IsOperator(','):
while self._context.type not in (EcmaContext.VAR,
EcmaContext.ARRAY_LITERAL,
EcmaContext.OBJECT_LITERAL,
EcmaContext.STATEMENT,
EcmaContext.PARAMETERS,
EcmaContext.GROUP):
self._PopContext()
elif token_type == TokenType.SEMICOLON:
self._EndStatement()
def Process(self, first_token):
"""Processes the token stream starting with the given token."""
self._token = first_token
while self._token:
self._ProcessToken()
if self._token.IsCode():
self._last_code = self._token
self._token = self._token.next
try:
self._PopContextType(EcmaContext.ROOT)
except ParseError:
# Ignore the "popped to root" error.
pass
def _ProcessToken(self):
"""Process the given token."""
token = self._token
token.metadata = self._CreateMetaData()
context = (self._ProcessContext() or self._context)
token.metadata.context = context
token.metadata.last_code = self._last_code
# Determine the operator type of the token, if applicable.
if token.type == TokenType.OPERATOR:
token.metadata.operator_type = self._GetOperatorType(token)
# Determine if there is an implied semicolon after the token.
if token.type != TokenType.SEMICOLON:
next_code = tokenutil.SearchExcept(token, TokenType.NON_CODE_TYPES)
# A statement like if (x) does not need a semicolon after it.
is_implied_block = self._context.type == EcmaContext.IMPLIED_BLOCK
is_last_code_in_line = token.IsCode() and (
not next_code or next_code.line_number != token.line_number)
is_continued_operator = (token.type == TokenType.OPERATOR and
not token.metadata.IsUnaryPostOperator())
is_continued_dot = token.string == '.'
next_code_is_operator = next_code and next_code.type == TokenType.OPERATOR
is_end_of_block = (
token.type == TokenType.END_BLOCK and
token.metadata.context.type != EcmaContext.OBJECT_LITERAL)
is_multiline_string = token.type == TokenType.STRING_TEXT
is_continued_var_decl = (token.IsKeyword('var') and
next_code and
(next_code.type in [TokenType.IDENTIFIER,
TokenType.SIMPLE_LVALUE]) and
token.line_number < next_code.line_number)
next_code_is_block = next_code and next_code.type == TokenType.START_BLOCK
if (is_last_code_in_line and
self._StatementCouldEndInContext() and
not is_multiline_string and
not is_end_of_block and
not is_continued_var_decl and
not is_continued_operator and
not is_continued_dot and
not next_code_is_operator and
not is_implied_block and
not next_code_is_block):
token.metadata.is_implied_semicolon = True
self._EndStatement()
def _StatementCouldEndInContext(self):
"""Returns if the current statement (if any) may end in this context."""
# In the basic statement or variable declaration context, statement can
# always end in this context.
if self._context.type in (EcmaContext.STATEMENT, EcmaContext.VAR):
return True
# End of a ternary false branch inside a statement can also be the
# end of the statement, for example:
# var x = foo ? foo.bar() : null
# In this case the statement ends after the null, when the context stack
# looks like ternary_false > var > statement > root.
if (self._context.type == EcmaContext.TERNARY_FALSE and
self._context.parent.type in (EcmaContext.STATEMENT, EcmaContext.VAR)):
return True
# In all other contexts like object and array literals, ternary true, etc.
# the statement can't yet end.
return False
def _GetOperatorType(self, token):
"""Returns the operator type of the given operator token.
Args:
token: The token to get arity for.
Returns:
The type of the operator. One of the *_OPERATOR constants defined in
EcmaMetaData.
"""
if token.string == '?':
return EcmaMetaData.TERNARY_OPERATOR
if token.string in TokenType.UNARY_OPERATORS:
return EcmaMetaData.UNARY_OPERATOR
last_code = token.metadata.last_code
if not last_code or last_code.type == TokenType.END_BLOCK:
return EcmaMetaData.UNARY_OPERATOR
if (token.string in TokenType.UNARY_POST_OPERATORS and
last_code.type in TokenType.EXPRESSION_ENDER_TYPES):
return EcmaMetaData.UNARY_POST_OPERATOR
if (token.string in TokenType.UNARY_OK_OPERATORS and
last_code.type not in TokenType.EXPRESSION_ENDER_TYPES and
last_code.string not in TokenType.UNARY_POST_OPERATORS):
return EcmaMetaData.UNARY_OPERATOR
return EcmaMetaData.BINARY_OPERATOR
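The _GetOperatorType() method above decides an operator's arity purely from
the preceding code token. A minimal standalone sketch of the same decision,
with hypothetical token sets standing in for the TokenType constants:

# Sketch only: names and token sets are simplified stand-ins.
UNARY = frozenset(['!', '~', 'new', 'typeof'])
UNARY_POST = frozenset(['++', '--'])
EXPRESSION_ENDERS = frozenset(['identifier', 'number', ')', ']'])

def operator_type(op, last_code):
  """Classifies op given the last significant token (None at file start)."""
  if op == '?':
    return 'ternary'
  if op in UNARY:
    return 'unary'
  if last_code is None or last_code == '}':
    # Nothing (or a closed block) precedes the operator, so it must
    # begin a new expression.
    return 'unary'
  if op in UNARY_POST and last_code in EXPRESSION_ENDERS:
    return 'unary_post'
  return 'binary'

assert operator_type('++', 'identifier') == 'unary_post'  # x++
assert operator_type('-', None) == 'unary'                # -x
assert operator_type('-', 'number') == 'binary'           # 1 - 2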

95
tools/closure_linter/closure_linter/error_check.py

@@ -1,95 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specific JSLint errors checker."""
import gflags as flags
FLAGS = flags.FLAGS
class Rule(object):
"""Different rules to check."""
# Documentation for specific rules goes in the flag definitions.
BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
INDENTATION = 'indentation'
WELL_FORMED_AUTHOR = 'well_formed_author'
NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
BRACES_AROUND_TYPE = 'braces_around_type'
OPTIONAL_TYPE_MARKER = 'optional_type_marker'
VARIABLE_ARG_MARKER = 'variable_arg_marker'
UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
UNUSED_LOCAL_VARIABLES = 'unused_local_variables'
# Rule to raise all known errors.
ALL = 'all'
# All rules that are checked when using the strict flag, i.e. the rules
# specific to the stricter Closure style.
CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL,
INDENTATION,
WELL_FORMED_AUTHOR,
NO_BRACES_AROUND_INHERIT_DOC,
BRACES_AROUND_TYPE,
OPTIONAL_TYPE_MARKER,
VARIABLE_ARG_MARKER])
flags.DEFINE_boolean('strict', False,
'Whether to validate against the stricter Closure style. '
'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')
flags.DEFINE_multistring('jslint_error', [],
'List of specific lint errors to check. Here is a list'
' of accepted values:\n'
' - ' + Rule.ALL + ': enables all following errors.\n'
' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates '
'number of blank lines between blocks at top level.\n'
' - ' + Rule.INDENTATION + ': checks correct '
'indentation of code.\n'
' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
'@author JsDoc tags.\n'
' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
'forbids braces around @inheritdoc JsDoc tags.\n'
' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
'around types in JsDoc tags.\n'
' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
'use of optional marker = in param types.\n'
' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
'unused private variables.\n'
' - ' + Rule.UNUSED_LOCAL_VARIABLES + ': checks for '
'unused local variables.\n')
def ShouldCheck(rule):
"""Returns whether the optional rule should be checked.
Combines the strict and jslint_error flags to find out whether
this specific rule should be checked.
Args:
rule: Name of the rule (see Rule).
Returns:
True if the rule should be checked according to the flags, otherwise False.
"""
if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error:
return True
# Checks strict rules.
return FLAGS.strict and rule in Rule.CLOSURE_RULES
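The flag plumbing above boils down to a small predicate: explicit opt-in via
--jslint_error wins, then --strict enables the Closure-specific rules. A
standalone sketch of ShouldCheck() with plain arguments standing in for the
gflags state (names hypothetical):

def should_check(rule, jslint_error=(), strict=False,
                 closure_rules=frozenset()):
  """Checks a rule if it is opted in explicitly, or strict mode covers it."""
  if rule in jslint_error or 'all' in jslint_error:
    return True
  return strict and rule in closure_rules

CLOSURE_RULES = frozenset(['indentation', 'well_formed_author'])
assert should_check('indentation', strict=True, closure_rules=CLOSURE_RULES)
assert not should_check('indentation')
assert should_check('unused_local_variables', jslint_error=['all'])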

618
tools/closure_linter/closure_linter/error_fixer.py

@@ -1,618 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main class responsible for automatically fixing simple style violations."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = 'robbyw@google.com (Robert Walker)'
import re
import gflags as flags
from closure_linter import errors
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil
from closure_linter.common import errorhandler
# Shorthand
Token = javascripttokens.JavaScriptToken
Type = javascripttokens.JavaScriptTokenType
END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')
# Regex to match the common mistake of inverting the author name and email,
# as in @author User Name (user@company)
INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
r'(?P<name>[^(]+)'
r'(?P<whitespace_after_name>\s+)'
r'\('
r'(?P<email>[^\s]+@[^)\s]+)'
r'\)'
r'(?P<trailing_characters>.*)')
FLAGS = flags.FLAGS
flags.DEFINE_boolean('disable_indentation_fixing', False,
'Whether to disable automatic fixing of indentation.')
flags.DEFINE_list('fix_error_codes', [], 'A list of specific error codes to '
'fix. Defaults to all supported error codes when empty. '
'See errors.py for a list of error codes.')
class ErrorFixer(errorhandler.ErrorHandler):
"""Object that fixes simple style errors."""
def __init__(self, external_file=None):
"""Initialize the error fixer.
Args:
external_file: If included, all output will be directed to this file
instead of overwriting the files the errors are found in.
"""
errorhandler.ErrorHandler.__init__(self)
self._file_name = None
self._file_token = None
self._external_file = external_file
try:
self._fix_error_codes = set([errors.ByName(error.upper()) for error in
FLAGS.fix_error_codes])
except KeyError as ke:
raise ValueError('Unknown error code ' + ke.args[0])
def HandleFile(self, filename, first_token):
"""Notifies this ErrorPrinter that subsequent errors are in filename.
Args:
filename: The name of the file about to be checked.
first_token: The first token in the file.
"""
self._file_name = filename
self._file_is_html = filename.endswith('.html') or filename.endswith('.htm')
self._file_token = first_token
self._file_fix_count = 0
self._file_changed_lines = set()
def _AddFix(self, tokens):
"""Adds the fix to the internal count.
Args:
tokens: The token or sequence of tokens changed to fix an error.
"""
self._file_fix_count += 1
if hasattr(tokens, 'line_number'):
self._file_changed_lines.add(tokens.line_number)
else:
for token in tokens:
self._file_changed_lines.add(token.line_number)
def _FixJsDocPipeNull(self, js_type):
"""Change number|null or null|number to ?number.
Args:
js_type: The typeannotation.TypeAnnotation instance to fix.
"""
# Recurse into all sub_types if the error was at a deeper level.
map(self._FixJsDocPipeNull, js_type.IterTypes())
if js_type.type_group and len(js_type.sub_types) == 2:
# Find and remove the null sub_type:
sub_type = None
for sub_type in js_type.sub_types:
if sub_type.identifier == 'null':
map(tokenutil.DeleteToken, sub_type.tokens)
self._AddFix(sub_type.tokens)
break
else:
return
first_token = js_type.FirstToken()
question_mark = Token('?', Type.DOC_TYPE_MODIFIER, first_token.line,
first_token.line_number)
tokenutil.InsertTokenBefore(question_mark, first_token)
js_type.tokens.insert(0, question_mark)
js_type.tokens.remove(sub_type)
js_type.or_null = True
# Now also remove the separator, which is in the parent's token list,
# either before or after the sub_type; there is exactly one. Scan for it.
for token in js_type.tokens:
if (token and isinstance(token, Token) and
token.type == Type.DOC_TYPE_MODIFIER and token.string == '|'):
tokenutil.DeleteToken(token)
self._AddFix(token)
break
def HandleError(self, error):
"""Attempts to fix the error.
Args:
error: The error object
"""
code = error.code
token = error.token
if self._fix_error_codes and code not in self._fix_error_codes:
return
if code == errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL:
self._FixJsDocPipeNull(token.attached_object.jstype)
elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
iterator = token.attached_object.type_end_token
if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
iterator = iterator.previous
ending_space = len(iterator.string) - len(iterator.string.rstrip())
iterator.string = '%s=%s' % (iterator.string.rstrip(),
' ' * ending_space)
# Create a new flag object with updated type info.
token.attached_object = javascriptstatetracker.JsDocFlag(token)
self._AddFix(token)
elif code == errors.JSDOC_MISSING_VAR_ARGS_TYPE:
iterator = token.attached_object.type_start_token
if iterator.type == Type.DOC_START_BRACE or iterator.string.isspace():
iterator = iterator.next
starting_space = len(iterator.string) - len(iterator.string.lstrip())
iterator.string = '%s...%s' % (' ' * starting_space,
iterator.string.lstrip())
# Create a new flag object with updated type info.
token.attached_object = javascriptstatetracker.JsDocFlag(token)
self._AddFix(token)
elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
errors.MISSING_SEMICOLON):
semicolon_token = Token(';', Type.SEMICOLON, token.line,
token.line_number)
tokenutil.InsertTokenAfter(semicolon_token, token)
token.metadata.is_implied_semicolon = False
semicolon_token.metadata.is_implied_semicolon = False
self._AddFix(token)
elif code in (errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
errors.REDUNDANT_SEMICOLON,
errors.COMMA_AT_END_OF_LITERAL):
self._DeleteToken(token)
self._AddFix(token)
elif code == errors.INVALID_JSDOC_TAG:
if token.string == '@returns':
token.string = '@return'
self._AddFix(token)
elif code == errors.FILE_MISSING_NEWLINE:
# This error is fixed implicitly by the way we restore the file
self._AddFix(token)
elif code == errors.MISSING_SPACE:
if error.fix_data:
token.string = error.fix_data
self._AddFix(token)
elif error.position:
if error.position.IsAtBeginning():
tokenutil.InsertSpaceTokenAfter(token.previous)
elif error.position.IsAtEnd(token.string):
tokenutil.InsertSpaceTokenAfter(token)
else:
token.string = error.position.Set(token.string, ' ')
self._AddFix(token)
elif code == errors.EXTRA_SPACE:
if error.position:
token.string = error.position.Set(token.string, '')
self._AddFix(token)
elif code == errors.MISSING_LINE:
if error.position.IsAtBeginning():
tokenutil.InsertBlankLineAfter(token.previous)
else:
tokenutil.InsertBlankLineAfter(token)
self._AddFix(token)
elif code == errors.EXTRA_LINE:
self._DeleteToken(token)
self._AddFix(token)
elif code == errors.WRONG_BLANK_LINE_COUNT:
if not token.previous:
# TODO(user): Add an insertBefore method to tokenutil.
return
num_lines = error.fix_data
should_delete = False
if num_lines < 0:
num_lines *= -1
should_delete = True
for unused_i in xrange(1, num_lines + 1):
if should_delete:
# TODO(user): DeleteToken should update line numbers.
self._DeleteToken(token.previous)
else:
tokenutil.InsertBlankLineAfter(token.previous)
self._AddFix(token)
elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
if end_quote:
single_quote_start = Token(
"'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
single_quote_end = Token(
"'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
token.line_number)
tokenutil.InsertTokenAfter(single_quote_start, token)
tokenutil.InsertTokenAfter(single_quote_end, end_quote)
self._DeleteToken(token)
self._DeleteToken(end_quote)
self._AddFix([token, end_quote])
elif code == errors.MISSING_BRACES_AROUND_TYPE:
fixed_tokens = []
start_token = token.attached_object.type_start_token
if start_token.type != Type.DOC_START_BRACE:
leading_space = (
len(start_token.string) - len(start_token.string.lstrip()))
if leading_space:
start_token = tokenutil.SplitToken(start_token, leading_space)
# Fix case where start and end token were the same.
if token.attached_object.type_end_token == start_token.previous:
token.attached_object.type_end_token = start_token
new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
start_token.line_number)
tokenutil.InsertTokenAfter(new_token, start_token.previous)
token.attached_object.type_start_token = new_token
fixed_tokens.append(new_token)
end_token = token.attached_object.type_end_token
if end_token.type != Type.DOC_END_BRACE:
# If the start token was a brace, the end token will be a
# FLAG_ENDING_TYPE token; if there wasn't a starting brace, then
# the end token is the last token of the actual type.
last_type = end_token
if not fixed_tokens:
last_type = end_token.previous
while last_type.string.isspace():
last_type = last_type.previous
# If there was no starting brace, then a lone end brace wouldn't have
# been the type end token. Now that we've added any missing start brace,
# see if the last effective type token was an end brace.
if last_type.type != Type.DOC_END_BRACE:
trailing_space = (len(last_type.string) -
len(last_type.string.rstrip()))
if trailing_space:
tokenutil.SplitToken(last_type,
len(last_type.string) - trailing_space)
new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
last_type.line_number)
tokenutil.InsertTokenAfter(new_token, last_type)
token.attached_object.type_end_token = new_token
fixed_tokens.append(new_token)
self._AddFix(fixed_tokens)
elif code == errors.LINE_STARTS_WITH_OPERATOR:
# Remove whitespace following the operator so the line starts clean.
self._StripSpace(token, before=False)
# Remove the operator.
tokenutil.DeleteToken(token)
self._AddFix(token)
insertion_point = tokenutil.GetPreviousCodeToken(token)
# Insert a space between the previous token and the new operator.
space = Token(' ', Type.WHITESPACE, insertion_point.line,
insertion_point.line_number)
tokenutil.InsertTokenAfter(space, insertion_point)
# Insert the operator on the end of the previous line.
new_token = Token(token.string, token.type, insertion_point.line,
insertion_point.line_number)
tokenutil.InsertTokenAfter(new_token, space)
self._AddFix(new_token)
elif code == errors.LINE_ENDS_WITH_DOT:
# Remove whitespace preceding the operator to remove trailing whitespace.
self._StripSpace(token, before=True)
# Remove the dot.
tokenutil.DeleteToken(token)
self._AddFix(token)
insertion_point = tokenutil.GetNextCodeToken(token)
# Insert the dot at the beginning of the next line of code.
new_token = Token(token.string, token.type, insertion_point.line,
insertion_point.line_number)
tokenutil.InsertTokenBefore(new_token, insertion_point)
self._AddFix(new_token)
elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
require_start_token = error.fix_data
sorter = requireprovidesorter.RequireProvideSorter()
sorter.FixRequires(require_start_token)
self._AddFix(require_start_token)
elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
provide_start_token = error.fix_data
sorter = requireprovidesorter.RequireProvideSorter()
sorter.FixProvides(provide_start_token)
self._AddFix(provide_start_token)
elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
if token.previous.string == '{' and token.next.string == '}':
self._DeleteToken(token.previous)
self._DeleteToken(token.next)
self._AddFix([token])
elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
match = INVERTED_AUTHOR_SPEC.match(token.string)
if match:
token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
match.group('email'),
match.group('whitespace_after_name'),
match.group('name'),
match.group('trailing_characters'))
self._AddFix(token)
elif (code == errors.WRONG_INDENTATION and
not FLAGS.disable_indentation_fixing):
token = tokenutil.GetFirstTokenInSameLine(token)
actual = error.position.start
expected = error.position.length
# Handle cases where the first token is a parameter but has leading spaces.
if (len(token.string.lstrip()) == len(token.string) - actual and
token.string.lstrip()):
token.string = token.string.lstrip()
actual = 0
if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
token.string = token.string.lstrip() + (' ' * expected)
self._AddFix([token])
else:
# We need to add indentation.
new_token = Token(' ' * expected, Type.WHITESPACE,
token.line, token.line_number)
# Note that we'll never need to add indentation at the first line,
# since it will never be indented. Therefore it's safe to assume
# token.previous exists.
tokenutil.InsertTokenAfter(new_token, token.previous)
self._AddFix([token])
elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
errors.MISSING_END_OF_SCOPE_COMMENT]:
# Only fix cases where }); is found with no trailing content on the line
# other than a comment. Value of 'token' is set to } for this error.
if (token.type == Type.END_BLOCK and
token.next.type == Type.END_PAREN and
token.next.next.type == Type.SEMICOLON):
current_token = token.next.next.next
removed_tokens = []
while current_token and current_token.line_number == token.line_number:
if current_token.IsAnyType(Type.WHITESPACE,
Type.START_SINGLE_LINE_COMMENT,
Type.COMMENT):
removed_tokens.append(current_token)
current_token = current_token.next
else:
return
if removed_tokens:
self._DeleteTokens(removed_tokens[0], len(removed_tokens))
whitespace_token = Token(' ', Type.WHITESPACE, token.line,
token.line_number)
start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
token.line, token.line_number)
comment_token = Token(' goog.scope', Type.COMMENT, token.line,
token.line_number)
insertion_tokens = [whitespace_token, start_comment_token,
comment_token]
tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
self._AddFix(removed_tokens + insertion_tokens)
elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
num_delete_tokens = len(tokens_in_line)
# If the line being deleted is both preceded and followed by blank lines,
# then also delete one of the blank lines.
if (tokens_in_line[0].previous and tokens_in_line[-1].next
and tokens_in_line[0].previous.type == Type.BLANK_LINE
and tokens_in_line[-1].next.type == Type.BLANK_LINE):
num_delete_tokens += 1
self._DeleteTokens(tokens_in_line[0], num_delete_tokens)
self._AddFix(tokens_in_line)
elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
missing_namespaces = error.fix_data[0]
need_blank_line = error.fix_data[1] or (not token.previous)
insert_location = Token('', Type.NORMAL, '', token.line_number - 1)
dummy_first_token = insert_location
tokenutil.InsertTokenBefore(insert_location, token)
# If inserting a blank line, check that a blank line does not already
# exist before the token, to avoid extra blank lines.
if (need_blank_line and insert_location.previous
and insert_location.previous.type != Type.BLANK_LINE):
tokenutil.InsertBlankLineAfter(insert_location)
insert_location = insert_location.next
for missing_namespace in missing_namespaces:
new_tokens = self._GetNewRequireOrProvideTokens(
code == errors.MISSING_GOOG_PROVIDE,
missing_namespace, insert_location.line_number + 1)
tokenutil.InsertLineAfter(insert_location, new_tokens)
insert_location = new_tokens[-1]
self._AddFix(new_tokens)
# If inserting a blank line, check that a blank line does not already
# exist after the token, to avoid extra blank lines.
if (need_blank_line and insert_location.next
and insert_location.next.type != Type.BLANK_LINE):
tokenutil.InsertBlankLineAfter(insert_location)
tokenutil.DeleteToken(dummy_first_token)
def _StripSpace(self, token, before):
"""Strip whitespace tokens either preceding or following the given token.
Args:
token: The token.
before: If true, strip space before the token, if false, after it.
"""
token = token.previous if before else token.next
while token and token.type == Type.WHITESPACE:
tokenutil.DeleteToken(token)
token = token.previous if before else token.next
def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
"""Returns a list of tokens to create a goog.require/provide statement.
Args:
is_provide: True if getting tokens for a provide, False for require.
namespace: The required or provided namespaces to get tokens for.
line_number: The line number the new require or provide statement will be
on.
Returns:
Tokens to create a new goog.require or goog.provide statement.
"""
string = 'goog.require'
if is_provide:
string = 'goog.provide'
line_text = string + '(\'' + namespace + '\');\n'
return [
Token(string, Type.IDENTIFIER, line_text, line_number),
Token('(', Type.START_PAREN, line_text, line_number),
Token('\'', Type.SINGLE_QUOTE_STRING_START, line_text, line_number),
Token(namespace, Type.STRING_TEXT, line_text, line_number),
Token('\'', Type.SINGLE_QUOTE_STRING_END, line_text, line_number),
Token(')', Type.END_PAREN, line_text, line_number),
Token(';', Type.SEMICOLON, line_text, line_number)
]
def _DeleteToken(self, token):
"""Deletes the specified token from the linked list of tokens.
Updates instance variables pointing to tokens such as _file_token if
they reference the deleted token.
Args:
token: The token to delete.
"""
if token == self._file_token:
self._file_token = token.next
tokenutil.DeleteToken(token)
def _DeleteTokens(self, token, token_count):
"""Deletes the given number of tokens starting with the given token.
Updates instance variables pointing to tokens such as _file_token if
they reference the deleted token.
Args:
token: The first token to delete.
token_count: The total number of tokens to delete.
"""
if token == self._file_token:
for unused_i in xrange(token_count):
self._file_token = self._file_token.next
tokenutil.DeleteTokens(token, token_count)
def FinishFile(self):
"""Called when the current file has finished style checking.
Used to go back and fix any errors in the file. It currently supports both
js and html files. For js files it does a simple dump of all tokens, but in
order to support html files we need to merge the original file back together
with the new token set. This works because the tokenized html file is the
original html file with all non-js lines kept but blanked out, with one
blank-line token per line of html.
"""
if self._file_fix_count:
# Get the original file content for html.
if self._file_is_html:
f = open(self._file_name, 'r')
original_lines = f.readlines()
f.close()
f = self._external_file
if not f:
error_noun = 'error' if self._file_fix_count == 1 else 'errors'
print 'Fixed %d %s in %s' % (
self._file_fix_count, error_noun, self._file_name)
f = open(self._file_name, 'w')
token = self._file_token
# Find the first token that has not been deleted.
while token.is_deleted:
token = token.next
# If something got inserted before first token (e.g. due to sorting)
# then move to start. Bug 8398202.
while token.previous:
token = token.previous
char_count = 0
line = ''
while token:
line += token.string
char_count += len(token.string)
if token.IsLastInLine():
# We distinguish whether a blank line in html came from the stripped
# original file or from a newly added error fix by looking at the
# "orig_line_number" field on the token. It is only set in the
# tokenizer, so for all error fixes the value should be None.
if (line or not self._file_is_html or
token.orig_line_number is None):
f.write(line)
f.write('\n')
else:
f.write(original_lines[token.orig_line_number - 1])
line = ''
if char_count > 80 and token.line_number in self._file_changed_lines:
print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
token.line_number, self._file_name)
char_count = 0
token = token.next
if not self._external_file:
# Close the file if we created it
f.close()
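Every fix above is ultimately an edit to a doubly-linked list of tokens. A
minimal sketch of the insert/delete operations the fixer relies on
(hypothetical, simplified from tokenutil):

class Tok(object):
  def __init__(self, string):
    self.string = string
    self.previous = None
    self.next = None

def insert_after(new, tok):
  """Splices new into the list immediately after tok."""
  new.previous, new.next = tok, tok.next
  if tok.next:
    tok.next.previous = new
  tok.next = new

def delete(tok):
  """Unlinks tok, keeping its neighbors connected."""
  if tok.previous:
    tok.previous.next = tok.next
  if tok.next:
    tok.next.previous = tok.previous

a, b = Tok('var'), Tok('x')
insert_after(b, a)         # var -> x
insert_after(Tok(' '), a)  # var -> ' ' -> x
delete(a.next)             # var -> x again
assert a.next is b and b.previous is a

This is also why _DeleteToken() above re-points _file_token before
unlinking: FinishFile() starts its traversal there, and deleting the head
token without updating the pointer would strand that walk.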

57
tools/closure_linter/closure_linter/error_fixer_test.py

@@ -1,57 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the error_fixer module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
import unittest as googletest
from closure_linter import error_fixer
from closure_linter import testutil
class ErrorFixerTest(googletest.TestCase):
"""Unit tests for error_fixer."""
def setUp(self):
self.error_fixer = error_fixer.ErrorFixer()
def testDeleteToken(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
second_token = start_token.next
self.error_fixer.HandleFile('test_file', start_token)
self.error_fixer._DeleteToken(start_token)
self.assertEqual(second_token, self.error_fixer._file_token)
def testDeleteTokens(self):
start_token = testutil.TokenizeSourceAndRunEcmaPass(_TEST_SCRIPT)
fourth_token = start_token.next.next.next
self.error_fixer.HandleFile('test_file', start_token)
self.error_fixer._DeleteTokens(start_token, 3)
self.assertEqual(fourth_token, self.error_fixer._file_token)
_TEST_SCRIPT = """\
var x = 3;
"""
if __name__ == '__main__':
googletest.main()

66
tools/closure_linter/closure_linter/errorrecord.py

@@ -1,66 +0,0 @@
#!/usr/bin/env python
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple, pickle-serializable class to represent a lint error."""
__author__ = 'nnaze@google.com (Nathan Naze)'
import gflags as flags
from closure_linter import errors
from closure_linter.common import erroroutput
FLAGS = flags.FLAGS
class ErrorRecord(object):
"""Record-keeping struct that can be serialized back from a process.
Attributes:
path: Path to the file.
error_string: Error string for the user.
new_error: Whether this is a "new error" (see errors.NEW_ERRORS).
"""
def __init__(self, path, error_string, new_error):
self.path = path
self.error_string = error_string
self.new_error = new_error
def MakeErrorRecord(path, error):
"""Make an error record with correctly formatted error string.
Errors cannot be serialized (pickled) across processes because of
their pointers to the complex token/context graph. We use an intermediary
serializable class to pass back just the relevant information.
Args:
path: Path of file the error was found in.
error: An error.Error instance.
Returns:
ErrorRecord instance.
"""
new_error = error.code in errors.NEW_ERRORS
if FLAGS.unix_mode:
error_string = erroroutput.GetUnixErrorOutput(
path, error, new_error=new_error)
else:
error_string = erroroutput.GetErrorOutput(error, new_error=new_error)
return ErrorRecord(path, error_string, new_error)
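The reason for the flat record shows up as soon as results cross a process
boundary: unlike an error holding the token/context graph, the record
survives a pickle round trip, which is what multiprocessing needs. A quick
sketch (standard library only, names hypothetical):

import pickle

class Record(object):  # stand-in for ErrorRecord above
  def __init__(self, path, error_string, new_error):
    self.path = path
    self.error_string = error_string
    self.new_error = new_error

rec = Record('foo.js', 'foo.js:12: E:0010: Missing semicolon', False)
clone = pickle.loads(pickle.dumps(rec))  # round trip, as across processes
assert (clone.path, clone.new_error) == ('foo.js', False)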

72
tools/closure_linter/closure_linter/errorrules.py

@@ -1,72 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error rules class for Closure Linter."""
__author__ = 'robbyw@google.com (Robert Walker)'
import gflags as flags
from closure_linter import errors
FLAGS = flags.FLAGS
flags.DEFINE_boolean('jsdoc', True,
'Whether to report errors for missing JsDoc.')
flags.DEFINE_list('disable', None,
'Disable specific error. Usage Ex.: gjslint --disable 1,'
'0011 foo.js.')
flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
'without warning.', lower_bound=1)
disabled_error_nums = None
def GetMaxLineLength():
"""Returns allowed maximum length of line.
Returns:
Length of line allowed without any warning.
"""
return FLAGS.max_line_length
def ShouldReportError(error):
"""Whether the given error should be reported.
Returns:
True for all errors except missing documentation errors and disabled
errors. For missing documentation, it returns the value of the
jsdoc flag.
"""
global disabled_error_nums
if disabled_error_nums is None:
disabled_error_nums = []
if FLAGS.disable:
for error_str in FLAGS.disable:
error_num = 0
try:
error_num = int(error_str)
except ValueError:
pass
disabled_error_nums.append(error_num)
return ((FLAGS.jsdoc or error not in (
errors.MISSING_PARAMETER_DOCUMENTATION,
errors.MISSING_RETURN_DOCUMENTATION,
errors.MISSING_MEMBER_DOCUMENTATION,
errors.MISSING_PRIVATE,
errors.MISSING_JSDOC_TAG_THIS)) and
(not FLAGS.disable or error not in disabled_error_nums))
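The two gates above (the --jsdoc flag and the --disable list) compose into
one predicate. A standalone sketch using the documentation error codes from
errors.py further down, with plain arguments instead of flags:

# 210/217/220/221/225 are the MISSING_*_DOCUMENTATION-style codes below.
DOC_ERRORS = frozenset([210, 217, 220, 221, 225])

def should_report_error(code, jsdoc=True, disabled=frozenset()):
  """Reports unless docs are off for a doc error, or the code is disabled."""
  if not jsdoc and code in DOC_ERRORS:
    return False
  return code not in disabled

assert should_report_error(10)                     # MISSING_SEMICOLON
assert not should_report_error(210, jsdoc=False)   # doc errors suppressed
assert not should_report_error(10, disabled={10})  # explicitly disabled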

117
tools/closure_linter/closure_linter/errorrules_test.py

@@ -1,117 +0,0 @@
#!/usr/bin/env python
# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Medium tests for the gjslint errorrules.
Currently it just verifies that warnings can't be disabled.
"""
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import erroraccumulator
flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
class ErrorRulesTest(googletest.TestCase):
"""Test case to for gjslint errorrules."""
def testNoMaxLineLengthFlagExists(self):
"""Tests that --max_line_length flag does not exists."""
self.assertTrue('max_line_length' not in flags.FLAGS.FlagDict())
def testGetMaxLineLength(self):
"""Tests warning are reported for line greater than 80.
"""
# One line > 100 and one line > 80 and < 100, so this should produce two
# line-too-long errors.
original = [
'goog.require(\'dummy.aa\');',
'',
'function a() {',
' dummy.aa.i = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
' + 14 + 15 + 16 + 17 + 18 + 19 + 20;',
' dummy.aa.j = 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13'
' + 14 + 15 + 16 + 17 + 18;',
'}',
''
]
# Expect line too long.
expected = [errors.LINE_TOO_LONG, errors.LINE_TOO_LONG]
self._AssertErrors(original, expected)
def testNoDisableFlagExists(self):
"""Tests that --disable flag does not exists."""
self.assertTrue('disable' not in flags.FLAGS.FlagDict())
def testWarningsNotDisabled(self):
"""Tests warnings are reported when nothing is disabled.
"""
original = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.Dd\');',
'',
'function a() {',
' dummy.aa.i = 1;',
' dummy.Cc.i = 1;',
' dummy.Dd.i = 1;',
'}',
]
expected = [errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
errors.FILE_MISSING_NEWLINE]
self._AssertErrors(original, expected)
def _AssertErrors(self, original, expected_errors, include_header=True):
"""Asserts that the error fixer corrects original to expected."""
if include_header:
original = self._GetHeader() + original
# Trap gjslint's output and parse it to get the messages added.
error_accumulator = erroraccumulator.ErrorAccumulator()
runner.Run('testing.js', error_accumulator, source=original)
error_nums = [e.code for e in error_accumulator.GetErrors()]
error_nums.sort()
expected_errors.sort()
self.assertListEqual(error_nums, expected_errors)
def _GetHeader(self):
"""Returns a fake header for a JavaScript file."""
return [
'// Copyright 2011 Google Inc. All Rights Reserved.',
'',
'/**',
' * @fileoverview Fake file overview.',
' * @author fake@google.com (Fake Person)',
' */',
''
]
if __name__ == '__main__':
googletest.main()
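The tests above never parse printed output; they funnel lint results through
an accumulating error handler. The collaborator is tiny; a sketch of the
pattern (mirroring erroraccumulator, names simplified):

class Accumulator(object):
  """Error handler that simply collects every reported error."""

  def __init__(self):
    self._errors = []

  def HandleError(self, error):
    self._errors.append(error)

  def GetErrors(self):
    return self._errors

acc = Accumulator()
for code in (140, 300):  # e.g. requires not alphabetized, missing newline
  acc.HandleError(code)
assert sorted(acc.GetErrors()) == [140, 300]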

154
tools/closure_linter/closure_linter/errors.py

@@ -1,154 +0,0 @@
#!/usr/bin/env python
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error codes for JavaScript style checker."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
def ByName(name):
"""Get the error code for the given error name.
Args:
name: The name of the error
Returns:
The error code
"""
return globals()[name]
# "File-fatal" errors - these errors stop further parsing of a single file
FILE_NOT_FOUND = -1
FILE_DOES_NOT_PARSE = -2
# Spacing
EXTRA_SPACE = 1
MISSING_SPACE = 2
EXTRA_LINE = 3
MISSING_LINE = 4
ILLEGAL_TAB = 5
WRONG_INDENTATION = 6
WRONG_BLANK_LINE_COUNT = 7
# Semicolons
MISSING_SEMICOLON = 10
MISSING_SEMICOLON_AFTER_FUNCTION = 11
ILLEGAL_SEMICOLON_AFTER_FUNCTION = 12
REDUNDANT_SEMICOLON = 13
# Miscellaneous
ILLEGAL_PROTOTYPE_MEMBER_VALUE = 100
LINE_TOO_LONG = 110
LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121
LINE_ENDS_WITH_DOT = 122
MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131
UNUSED_PRIVATE_MEMBER = 132
UNUSED_LOCAL_VARIABLE = 133
# Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140
GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144
EXTRA_GOOG_PROVIDE = 145
ALIAS_STMT_NEEDS_GOOG_REQUIRE = 146
# JsDoc
INVALID_JSDOC_TAG = 200
INVALID_USE_OF_DESC_TAG = 201
NO_BUG_NUMBER_AFTER_BUG_TAG = 202
MISSING_PARAMETER_DOCUMENTATION = 210
EXTRA_PARAMETER_DOCUMENTATION = 211
WRONG_PARAMETER_DOCUMENTATION = 212
MISSING_JSDOC_TAG_TYPE = 213
MISSING_JSDOC_TAG_DESCRIPTION = 214
MISSING_JSDOC_PARAM_NAME = 215
OUT_OF_ORDER_JSDOC_TAG_TYPE = 216
MISSING_RETURN_DOCUMENTATION = 217
UNNECESSARY_RETURN_DOCUMENTATION = 218
MISSING_BRACES_AROUND_TYPE = 219
MISSING_MEMBER_DOCUMENTATION = 220
MISSING_PRIVATE = 221
EXTRA_PRIVATE = 222
INVALID_OVERRIDE_PRIVATE = 223
INVALID_INHERIT_DOC_PRIVATE = 224
MISSING_JSDOC_TAG_THIS = 225
UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
JSDOC_MISSING_OPTIONAL_TYPE = 232
JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_MISSING_VAR_ARGS_TYPE = 234
JSDOC_MISSING_VAR_ARGS_NAME = 235
JSDOC_DOES_NOT_PARSE = 236
# TODO(robbyw): Split this into more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250
INVALID_SUPPRESS_TYPE = 251
UNNECESSARY_SUPPRESS = 252
# File ending
FILE_MISSING_NEWLINE = 300
FILE_IN_BLOCK = 301
# Interfaces
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
# Comments
MISSING_END_OF_SCOPE_COMMENT = 500
MALFORMED_END_OF_SCOPE_COMMENT = 501
# goog.scope - Namespace aliasing
# TODO(nnaze) Add additional errors here and in aliaspass.py
INVALID_USE_OF_GOOG_SCOPE = 600
EXTRA_GOOG_SCOPE_USAGE = 601
# ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well.
# All ActionScript specific errors should have error number at least 1000.
FUNCTION_MISSING_RETURN_TYPE = 1132
PARAMETER_MISSING_TYPE = 1133
VAR_MISSING_TYPE = 1134
PARAMETER_MISSING_DEFAULT_VALUE = 1135
IMPORTS_NOT_ALPHABETIZED = 1140
IMPORT_CONTAINS_WILDCARD = 1141
UNUSED_IMPORT = 1142
INVALID_TRACE_SEVERITY_LEVEL = 1250
MISSING_TRACE_SEVERITY_LEVEL = 1251
MISSING_TRACE_MESSAGE = 1252
REMOVE_TRACE_BEFORE_SUBMIT = 1253
REMOVE_COMMENT_BEFORE_SUBMIT = 1254
# End of list of ActionScript specific errors.
NEW_ERRORS = frozenset([
# Errors added after 2.0.2:
WRONG_INDENTATION,
MISSING_SEMICOLON,
# Errors added after 2.3.9:
JSDOC_MISSING_VAR_ARGS_TYPE,
JSDOC_MISSING_VAR_ARGS_NAME,
# Errors added after 2.3.15:
ALIAS_STMT_NEEDS_GOOG_REQUIRE,
JSDOC_DOES_NOT_PARSE,
LINE_ENDS_WITH_DOT,
# Errors added after 2.3.17:
])
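ByName() above is just a reverse lookup through module globals, so every
constant's name doubles as its user-facing identifier (e.g. in test
annotations). An equivalent standalone sketch:

MISSING_SEMICOLON = 10
EXTRA_SPACE = 1

def by_name(name):
  """Resolves an error name (e.g. from a flag or test file) to its code."""
  return globals()[name]

assert by_name('MISSING_SEMICOLON') == 10
assert by_name('EXTRA_SPACE') == 1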

66
tools/closure_linter/closure_linter/fixjsstyle.py

@@ -1,66 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Automatically fix simple style guide violations."""
__author__ = 'robbyw@google.com (Robert Walker)'
import StringIO
import sys
import gflags as flags
from closure_linter import error_fixer
from closure_linter import runner
from closure_linter.common import simplefileflags as fileflags
FLAGS = flags.FLAGS
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
flags.DEFINE_boolean('dry_run', False, 'Do not modify the file, only print it.')
def main(argv=None):
"""Main function.
Args:
argv: Sequence of command line arguments.
"""
if argv is None:
argv = flags.FLAGS(sys.argv)
suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
output_buffer = None
if FLAGS.dry_run:
output_buffer = StringIO.StringIO()
fixer = error_fixer.ErrorFixer(output_buffer)
# Check the list of files.
for filename in files:
runner.Run(filename, fixer)
if FLAGS.dry_run:
print output_buffer.getvalue()
if __name__ == '__main__':
main()
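The --dry_run path above swaps the output file for an in-memory buffer, so
the fixer writes exactly what it would have written to disk. A sketch of the
pattern; the toy fixer is a hypothetical stand-in for runner.Run plus
ErrorFixer, and the import works on both Python 2 and 3:

try:
  from StringIO import StringIO  # Python 2, as fixjsstyle.py uses
except ImportError:
  from io import StringIO        # Python 3

def fix_into(out, lines):
  """Toy stand-in for the real fixer: appends missing semicolons."""
  for line in lines:
    line = line.rstrip()
    if line and not line.endswith(';'):
      line += ';'
    out.write(line + '\n')

buf = StringIO()
fix_into(buf, ['var x = 1'])
print(buf.getvalue())  # dry run: show the fixed source, leave disk alone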

615
tools/closure_linter/closure_linter/fixjsstyle_test.py

@@ -1,615 +0,0 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Medium tests for the gpylint auto-fixer."""
__author__ = 'robbyw@google.com (Robby Walker)'
import StringIO
import gflags as flags
import unittest as googletest
from closure_linter import error_fixer
from closure_linter import runner
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
class FixJsStyleTest(googletest.TestCase):
"""Test case to for gjslint auto-fixing."""
def setUp(self):
flags.FLAGS.dot_on_next_line = True
def tearDown(self):
flags.FLAGS.dot_on_next_line = False
def testFixJsStyle(self):
test_cases = [
['fixjsstyle.in.js', 'fixjsstyle.out.js'],
['indentation.js', 'fixjsstyle.indentation.out.js'],
['fixjsstyle.html.in.html', 'fixjsstyle.html.out.html'],
['fixjsstyle.oplineend.in.js', 'fixjsstyle.oplineend.out.js']]
for [running_input_file, running_output_file] in test_cases:
print 'Checking %s vs %s' % (running_input_file, running_output_file)
input_filename = None
golden_filename = None
current_filename = None
try:
input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
current_filename = input_filename
golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
current_filename = golden_filename
except IOError as ex:
raise IOError('Could not find testdata resource for %s: %s' %
(current_filename, ex))
if running_input_file == 'fixjsstyle.in.js':
with open(input_filename) as f:
for line in f:
# Go to last line.
pass
self.assertTrue(line == line.rstrip(), '%s file should not end '
'with a new line.' % (input_filename))
# Autofix the file, sending output to a fake file.
actual = StringIO.StringIO()
runner.Run(input_filename, error_fixer.ErrorFixer(actual))
# Now compare the files.
actual.seek(0)
expected = open(golden_filename, 'r')
# Uncomment to generate new golden files and run
# open('/'.join(golden_filename.split('/')[4:]), 'w').write(actual.read())
# actual.seek(0)
self.assertEqual(actual.readlines(), expected.readlines())
def testAddProvideFirstLine(self):
"""Tests handling of case where goog.provide is added."""
original = [
'dummy.bb.cc = 1;',
]
expected = [
'goog.provide(\'dummy.bb\');',
'',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
def testAddRequireFirstLine(self):
"""Tests handling of case where goog.require is added."""
original = [
'a = dummy.bb.cc;',
]
expected = [
'goog.require(\'dummy.bb\');',
'',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
def testDeleteProvideAndAddProvideFirstLine(self):
"""Tests handling of case where goog.provide is deleted and added.
Bug 14832597.
"""
original = [
'goog.provide(\'dummy.aa\');',
'',
'dummy.bb.cc = 1;',
]
expected = [
'goog.provide(\'dummy.bb\');',
'',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'goog.provide(\'dummy.aa\');',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
def testDeleteProvideAndAddRequireFirstLine(self):
"""Tests handling where goog.provide is deleted and goog.require added.
Bug 14832597.
"""
original = [
'goog.provide(\'dummy.aa\');',
'',
'a = dummy.bb.cc;',
]
expected = [
'goog.require(\'dummy.bb\');',
'',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'goog.provide(\'dummy.aa\');',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
def testDeleteRequireAndAddRequireFirstLine(self):
"""Tests handling of case where goog.require is deleted and added.
Bug 14832597.
"""
original = [
'goog.require(\'dummy.aa\');',
'',
'a = dummy.bb.cc;',
]
expected = [
'goog.require(\'dummy.bb\');',
'',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'goog.require(\'dummy.aa\');',
'a = dummy.bb.cc;',
]
self._AssertFixes(original, expected, include_header=False)
def testDeleteRequireAndAddProvideFirstLine(self):
"""Tests handling where goog.require is deleted and goog.provide added.
Bug 14832597.
"""
original = [
'goog.require(\'dummy.aa\');',
'',
'dummy.bb.cc = 1;',
]
expected = [
'goog.provide(\'dummy.bb\');',
'',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
original = [
'goog.require(\'dummy.aa\');',
'dummy.bb.cc = 1;',
]
self._AssertFixes(original, expected, include_header=False)
def testMultipleProvideInsert(self):
original = [
'goog.provide(\'dummy.bb\');',
'goog.provide(\'dummy.dd\');',
'',
'dummy.aa.ff = 1;',
'dummy.bb.ff = 1;',
'dummy.cc.ff = 1;',
'dummy.dd.ff = 1;',
'dummy.ee.ff = 1;',
]
expected = [
'goog.provide(\'dummy.aa\');',
'goog.provide(\'dummy.bb\');',
'goog.provide(\'dummy.cc\');',
'goog.provide(\'dummy.dd\');',
'goog.provide(\'dummy.ee\');',
'',
'dummy.aa.ff = 1;',
'dummy.bb.ff = 1;',
'dummy.cc.ff = 1;',
'dummy.dd.ff = 1;',
'dummy.ee.ff = 1;',
]
self._AssertFixes(original, expected, include_header=False)
def testMultipleRequireInsert(self):
original = [
'goog.require(\'dummy.bb\');',
'goog.require(\'dummy.dd\');',
'',
'a = dummy.aa.ff;',
'b = dummy.bb.ff;',
'c = dummy.cc.ff;',
'd = dummy.dd.ff;',
'e = dummy.ee.ff;',
]
expected = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.bb\');',
'goog.require(\'dummy.cc\');',
'goog.require(\'dummy.dd\');',
'goog.require(\'dummy.ee\');',
'',
'a = dummy.aa.ff;',
'b = dummy.bb.ff;',
'c = dummy.cc.ff;',
'd = dummy.dd.ff;',
'e = dummy.ee.ff;',
]
self._AssertFixes(original, expected, include_header=False)
def testUnsortedRequires(self):
"""Tests handling of unsorted goog.require statements without header.
Bug 8398202.
"""
original = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.Dd\');',
'',
'function a() {',
' dummy.aa.i = 1;',
' dummy.Cc.i = 1;',
' dummy.Dd.i = 1;',
'}',
]
expected = [
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.Dd\');',
'goog.require(\'dummy.aa\');',
'',
'function a() {',
' dummy.aa.i = 1;',
' dummy.Cc.i = 1;',
' dummy.Dd.i = 1;',
'}',
]
self._AssertFixes(original, expected, include_header=False)
def testMissingExtraAndUnsortedRequires(self):
"""Tests handling of missing extra and unsorted goog.require statements."""
original = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.Dd\');',
'',
'var x = new dummy.Bb();',
'dummy.Cc.someMethod();',
'dummy.aa.someMethod();',
]
expected = [
'goog.require(\'dummy.Bb\');',
'goog.require(\'dummy.Cc\');',
'goog.require(\'dummy.aa\');',
'',
'var x = new dummy.Bb();',
'dummy.Cc.someMethod();',
'dummy.aa.someMethod();',
]
self._AssertFixes(original, expected)
def testExtraRequireOnFirstLine(self):
"""Tests handling of extra goog.require statement on the first line.
There was a bug where fixjsstyle quit with an exception. It happened if:
- the first line of the file is an extra goog.require() statement,
- goog.require() statements are not sorted.
"""
original = [
'goog.require(\'dummy.aa\');',
'goog.require(\'dummy.cc\');',
'goog.require(\'dummy.bb\');',
'',
'var x = new dummy.bb();',
'var y = new dummy.cc();',
]
expected = [
'goog.require(\'dummy.bb\');',
'goog.require(\'dummy.cc\');',
'',
'var x = new dummy.bb();',
'var y = new dummy.cc();',
]
self._AssertFixes(original, expected, include_header=False)
def testUnsortedProvides(self):
"""Tests handling of unsorted goog.provide statements without header.
Bug 8398202.
"""
original = [
'goog.provide(\'dummy.aa\');',
'goog.provide(\'dummy.Cc\');',
'goog.provide(\'dummy.Dd\');',
'',
'dummy.aa = function() {};'
'dummy.Cc = function() {};'
'dummy.Dd = function() {};'
]
expected = [
'goog.provide(\'dummy.Cc\');',
'goog.provide(\'dummy.Dd\');',
'goog.provide(\'dummy.aa\');',
'',
'dummy.aa = function() {};'
'dummy.Cc = function() {};'
'dummy.Dd = function() {};'
]
self._AssertFixes(original, expected, include_header=False)
def testMissingExtraAndUnsortedProvides(self):
"""Tests handling of missing extra and unsorted goog.provide statements."""
original = [
'goog.provide(\'dummy.aa\');',
'goog.provide(\'dummy.Cc\');',
'goog.provide(\'dummy.Dd\');',
'',
'dummy.Cc = function() {};',
'dummy.Bb = function() {};',
'dummy.aa.someMethod = function();',
]
expected = [
'goog.provide(\'dummy.Bb\');',
'goog.provide(\'dummy.Cc\');',
'goog.provide(\'dummy.aa\');',
'',
'dummy.Cc = function() {};',
'dummy.Bb = function() {};',
'dummy.aa.someMethod = function();',
]
self._AssertFixes(original, expected)
def testNoRequires(self):
"""Tests positioning of missing requires without existing requires."""
original = [
'goog.provide(\'dummy.Something\');',
'',
'dummy.Something = function() {};',
'',
'var x = new dummy.Bb();',
]
expected = [
'goog.provide(\'dummy.Something\');',
'',
'goog.require(\'dummy.Bb\');',
'',
'dummy.Something = function() {};',
'',
'var x = new dummy.Bb();',
]
self._AssertFixes(original, expected)
def testNoProvides(self):
"""Tests positioning of missing provides without existing provides."""
original = [
'goog.require(\'dummy.Bb\');',
'',
'dummy.Something = function() {};',
'',
'var x = new dummy.Bb();',
]
expected = [
'goog.provide(\'dummy.Something\');',
'',
'goog.require(\'dummy.Bb\');',
'',
'dummy.Something = function() {};',
'',
'var x = new dummy.Bb();',
]
self._AssertFixes(original, expected)
def testOutputOkayWhenFirstTokenIsDeleted(self):
"""Tests that autofix output is is correct when first token is deleted.
Regression test for bug 4581567
"""
original = ['"use strict";']
expected = ["'use strict';"]
self._AssertFixes(original, expected, include_header=False)
def testGoogScopeIndentation(self):
"""Tests Handling a typical end-of-scope indentation fix."""
original = [
'goog.scope(function() {',
' // TODO(brain): Take over the world.',
'}); // goog.scope',
]
expected = [
'goog.scope(function() {',
'// TODO(brain): Take over the world.',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testMissingEndOfScopeComment(self):
"""Tests Handling a missing comment at end of goog.scope."""
original = [
'goog.scope(function() {',
'});',
]
expected = [
'goog.scope(function() {',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testMissingEndOfScopeCommentWithOtherComment(self):
"""Tests handling an irrelevant comment at end of goog.scope."""
original = [
'goog.scope(function() {',
"}); // I don't belong here!",
]
expected = [
'goog.scope(function() {',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testMalformedEndOfScopeComment(self):
"""Tests Handling a malformed comment at end of goog.scope."""
original = [
'goog.scope(function() {',
'}); // goog.scope FTW',
]
expected = [
'goog.scope(function() {',
'}); // goog.scope',
]
self._AssertFixes(original, expected)
def testEndsWithIdentifier(self):
"""Tests Handling case where script ends with identifier. Bug 7643404."""
original = [
'goog.provide(\'xyz\');',
'',
'abc'
]
expected = [
'goog.provide(\'xyz\');',
'',
'abc;'
]
self._AssertFixes(original, expected)
def testFileStartsWithSemicolon(self):
"""Tests handling files starting with semicolon.
b/10062516
"""
original = [
';goog.provide(\'xyz\');',
'',
'abc;'
]
expected = [
'goog.provide(\'xyz\');',
'',
'abc;'
]
self._AssertFixes(original, expected, include_header=False)
def testCodeStartsWithSemicolon(self):
"""Tests handling code in starting with semicolon after comments.
b/10062516
"""
original = [
';goog.provide(\'xyz\');',
'',
'abc;'
]
expected = [
'goog.provide(\'xyz\');',
'',
'abc;'
]
self._AssertFixes(original, expected)
def _AssertFixes(self, original, expected, include_header=True):
"""Asserts that the error fixer corrects original to expected."""
if include_header:
original = self._GetHeader() + original
expected = self._GetHeader() + expected
actual = StringIO.StringIO()
runner.Run('testing.js', error_fixer.ErrorFixer(actual), original)
actual.seek(0)
expected = [x + '\n' for x in expected]
self.assertListEqual(actual.readlines(), expected)
def _GetHeader(self):
"""Returns a fake header for a JavaScript file."""
return [
'// Copyright 2011 Google Inc. All Rights Reserved.',
'',
'/**',
' * @fileoverview Fake file overview.',
' * @author fake@google.com (Fake Person)',
' */',
''
]
if __name__ == '__main__':
googletest.main()
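_AssertFixes() above is a golden-output pattern: run the fixer into a
buffer, then compare line lists against the expected source. A
self-contained sketch with a toy fixer standing in for runner.Run (all
names hypothetical):

try:
  from StringIO import StringIO  # Python 2, matching the tests above
except ImportError:
  from io import StringIO

def toy_fixer(lines, out):
  """Stand-in fixer: normalizes double quotes to single quotes."""
  for line in lines:
    out.write(line.replace('"', "'") + '\n')

def assert_fixes(original, expected):
  buf = StringIO()
  toy_fixer(original, buf)
  buf.seek(0)
  assert buf.readlines() == [line + '\n' for line in expected]

assert_fixes(['"use strict";'], ["'use strict';"])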

121
tools/closure_linter/closure_linter/full_test.py

@@ -1,121 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Full regression-type (Medium) tests for gjslint.
Tests every error that can be thrown by gjslint. Based heavily on
devtools/javascript/gpylint/full_test.py
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import error_check
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
flags.FLAGS.jslint_error = error_check.Rule.ALL
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
# TODO(user): Figure out how to list the directory.
_TEST_FILES = [
'all_js_wrapped.js',
'blank_lines.js',
'ends_with_block.js',
'empty_file.js',
'externs.js',
'externs_jsdoc.js',
'goog_scope.js',
'html_parse_error.html',
'indentation.js',
'interface.js',
'jsdoc.js',
'limited_doc_checks.js',
'minimal.js',
'other.js',
'provide_blank.js',
'provide_extra.js',
'provide_missing.js',
'require_alias.js',
'require_all_caps.js',
'require_blank.js',
'require_extra.js',
'require_function.js',
'require_function_missing.js',
'require_function_through_both.js',
'require_function_through_namespace.js',
'require_interface.js',
'require_interface_alias.js',
'require_interface_base.js',
'require_lower_case.js',
'require_missing.js',
'require_numeric.js',
'require_provide_blank.js',
'require_provide_missing.js',
'require_provide_ok.js',
'semicolon_missing.js',
'simple.html',
'spaces.js',
'tokenizer.js',
'unparseable.js',
'unused_local_variables.js',
'unused_private_members.js',
'utf8.html',
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(
filetestcase.AnnotatedFileTestCase(
resource_path,
runner.Run,
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
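GJsLintTestSuite above synthesizes one test case per data file at
construction time rather than declaring test methods by hand. A sketch of
the same dynamic-suite pattern using nothing but unittest (the per-file
check is a placeholder; the real suite lints the file and compares its
annotations against the reported errors):

import unittest

def make_case(path):
  class OneFileTest(unittest.TestCase):
    def runTest(self):
      # Placeholder assertion standing in for the lint-and-compare step.
      self.assertTrue(path.endswith(('.js', '.html')))
  return OneFileTest()

def build_suite(files):
  suite = unittest.TestSuite()
  for name in files:
    suite.addTest(make_case(name))
  return suite

assert build_suite(['minimal.js', 'simple.html']).countTestCases() == 2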

Some files were not shown because too many files changed in this diff
