Browse Source

tools: update closure_linter to the latest (2.3.5)

v0.9.1-release
Shigeki Ohtsu 13 years ago
committed by Ben Noordhuis
parent
commit
6d98524609
  1. 2
      tools/closure_linter/PKG-INFO
  2. 10
      tools/closure_linter/closure_linter.egg-info/PKG-INFO
  3. 41
      tools/closure_linter/closure_linter.egg-info/SOURCES.txt
  4. 1
      tools/closure_linter/closure_linter.egg-info/dependency_links.txt
  5. 4
      tools/closure_linter/closure_linter.egg-info/entry_points.txt
  6. 1
      tools/closure_linter/closure_linter.egg-info/requires.txt
  7. 1
      tools/closure_linter/closure_linter.egg-info/top_level.txt
  8. 15
      tools/closure_linter/closure_linter/__init__.py
  9. 84
      tools/closure_linter/closure_linter/checker.py
  10. 159
      tools/closure_linter/closure_linter/checkerbase.py
  11. 500
      tools/closure_linter/closure_linter/closurizednamespacesinfo.py
  12. 451
      tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py
  13. 15
      tools/closure_linter/closure_linter/common/__init__.py
  14. 2
      tools/closure_linter/closure_linter/common/erroraccumulator.py
  15. 52
      tools/closure_linter/closure_linter/common/erroroutput.py
  16. 203
      tools/closure_linter/closure_linter/common/errorprinter.py
  17. 7
      tools/closure_linter/closure_linter/common/filetestcase.py
  18. 14
      tools/closure_linter/closure_linter/common/tokens.py
  19. 101
      tools/closure_linter/closure_linter/common/tokens_test.py
  20. 122
      tools/closure_linter/closure_linter/ecmalintrules.py
  21. 90
      tools/closure_linter/closure_linter/error_check.py
  22. 209
      tools/closure_linter/closure_linter/error_fixer.py
  23. 65
      tools/closure_linter/closure_linter/errorrecord.py
  24. 17
      tools/closure_linter/closure_linter/errors.py
  25. 12
      tools/closure_linter/closure_linter/fixjsstyle.py
  26. 199
      tools/closure_linter/closure_linter/fixjsstyle_test.py
  27. 17
      tools/closure_linter/closure_linter/full_test.py
  28. 185
      tools/closure_linter/closure_linter/gjslint.py
  29. 48
      tools/closure_linter/closure_linter/indentation.py
  30. 409
      tools/closure_linter/closure_linter/javascriptlintrules.py
  31. 128
      tools/closure_linter/closure_linter/javascriptstatetracker.py
  32. 53
      tools/closure_linter/closure_linter/javascriptstatetracker_test.py
  33. 28
      tools/closure_linter/closure_linter/javascripttokenizer.py
  34. 74
      tools/closure_linter/closure_linter/not_strict_test.py
  35. 272
      tools/closure_linter/closure_linter/requireprovidesorter.py
  36. 74
      tools/closure_linter/closure_linter/requireprovidesorter_test.py
  37. 56
      tools/closure_linter/closure_linter/statetracker.py
  38. 135
      tools/closure_linter/closure_linter/tokenutil.py
  39. 2489
      tools/closure_linter/gflags.py
  40. 5
      tools/closure_linter/setup.cfg
  41. 2
      tools/closure_linter/setup.py

2
tools/closure_linter/PKG-INFO

@ -1,6 +1,6 @@
Metadata-Version: 1.0 Metadata-Version: 1.0
Name: closure_linter Name: closure_linter
Version: 2.2.6 Version: 2.3.5
Summary: Closure Linter Summary: Closure Linter
Home-page: http://code.google.com/p/closure-linter Home-page: http://code.google.com/p/closure-linter
Author: The Closure Linter Authors Author: The Closure Linter Authors

10
tools/closure_linter/closure_linter.egg-info/PKG-INFO

@ -1,10 +0,0 @@
Metadata-Version: 1.0
Name: closure-linter
Version: 2.2.6
Summary: Closure Linter
Home-page: http://code.google.com/p/closure-linter
Author: The Closure Linter Authors
Author-email: opensource@google.com
License: Apache
Description: UNKNOWN
Platform: UNKNOWN

41
tools/closure_linter/closure_linter.egg-info/SOURCES.txt

@ -1,41 +0,0 @@
README
setup.py
closure_linter/__init__.py
closure_linter/checker.py
closure_linter/checkerbase.py
closure_linter/ecmalintrules.py
closure_linter/ecmametadatapass.py
closure_linter/error_fixer.py
closure_linter/errorrules.py
closure_linter/errors.py
closure_linter/fixjsstyle.py
closure_linter/fixjsstyle_test.py
closure_linter/full_test.py
closure_linter/gjslint.py
closure_linter/indentation.py
closure_linter/javascriptlintrules.py
closure_linter/javascriptstatetracker.py
closure_linter/javascriptstatetracker_test.py
closure_linter/javascripttokenizer.py
closure_linter/javascripttokens.py
closure_linter/statetracker.py
closure_linter/tokenutil.py
closure_linter.egg-info/PKG-INFO
closure_linter.egg-info/SOURCES.txt
closure_linter.egg-info/dependency_links.txt
closure_linter.egg-info/entry_points.txt
closure_linter.egg-info/requires.txt
closure_linter.egg-info/top_level.txt
closure_linter/common/__init__.py
closure_linter/common/error.py
closure_linter/common/erroraccumulator.py
closure_linter/common/errorhandler.py
closure_linter/common/errorprinter.py
closure_linter/common/filetestcase.py
closure_linter/common/htmlutil.py
closure_linter/common/lintrunner.py
closure_linter/common/matcher.py
closure_linter/common/position.py
closure_linter/common/simplefileflags.py
closure_linter/common/tokenizer.py
closure_linter/common/tokens.py

1
tools/closure_linter/closure_linter.egg-info/dependency_links.txt

@ -1 +0,0 @@

4
tools/closure_linter/closure_linter.egg-info/entry_points.txt

@ -1,4 +0,0 @@
[console_scripts]
fixjsstyle = closure_linter.fixjsstyle:main
gjslint = closure_linter.gjslint:main

1
tools/closure_linter/closure_linter.egg-info/requires.txt

@ -1 +0,0 @@
python-gflags

1
tools/closure_linter/closure_linter.egg-info/top_level.txt

@ -1 +0,0 @@
closure_linter

15
tools/closure_linter/closure_linter/__init__.py

@ -1 +1,16 @@
#!/usr/bin/env python #!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package indicator for gjslint."""

84
tools/closure_linter/closure_linter/checker.py

@ -22,11 +22,10 @@ __author__ = ('robbyw@google.com (Robert Walker)',
import gflags as flags import gflags as flags
from closure_linter import checkerbase from closure_linter import checkerbase
from closure_linter import closurizednamespacesinfo
from closure_linter import ecmametadatapass from closure_linter import ecmametadatapass
from closure_linter import errors
from closure_linter import javascriptlintrules from closure_linter import javascriptlintrules
from closure_linter import javascriptstatetracker from closure_linter import javascriptstatetracker
from closure_linter.common import errorprinter
from closure_linter.common import lintrunner from closure_linter.common import lintrunner
flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'], flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
@ -34,6 +33,12 @@ flags.DEFINE_list('limited_doc_files', ['dummy.js', 'externs.js'],
'report errors for missing documentation, some missing ' 'report errors for missing documentation, some missing '
'descriptions, or methods whose @return tags don\'t have a ' 'descriptions, or methods whose @return tags don\'t have a '
'matching return statement.') 'matching return statement.')
flags.DEFINE_list('closurized_namespaces', '',
'Namespace prefixes, used for testing of'
'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
'Fully qualified namespaces that should be not be reported '
'as extra by the linter.')
class JavaScriptStyleChecker(checkerbase.CheckerBase): class JavaScriptStyleChecker(checkerbase.CheckerBase):
@ -43,40 +48,83 @@ class JavaScriptStyleChecker(checkerbase.CheckerBase):
"""Initialize an JavaScriptStyleChecker object. """Initialize an JavaScriptStyleChecker object.
Args: Args:
error_handler: Error handler to pass all errors to error_handler: Error handler to pass all errors to.
""" """
self._namespaces_info = None
if flags.FLAGS.closurized_namespaces:
self._namespaces_info = (
closurizednamespacesinfo.ClosurizedNamespacesInfo(
flags.FLAGS.closurized_namespaces,
flags.FLAGS.ignored_extra_namespaces))
checkerbase.CheckerBase.__init__( checkerbase.CheckerBase.__init__(
self, self,
error_handler=error_handler, error_handler=error_handler,
lint_rules=javascriptlintrules.JavaScriptLintRules(), lint_rules=javascriptlintrules.JavaScriptLintRules(
state_tracker=javascriptstatetracker.JavaScriptStateTracker( self._namespaces_info),
closurized_namespaces=flags.FLAGS.closurized_namespaces), state_tracker=javascriptstatetracker.JavaScriptStateTracker(),
metadata_pass=ecmametadatapass.EcmaMetaDataPass(), metadata_pass=ecmametadatapass.EcmaMetaDataPass(),
limited_doc_files=flags.FLAGS.limited_doc_files) limited_doc_files=flags.FLAGS.limited_doc_files)
def _CheckTokens(self, token, parse_error, debug_tokens):
"""Checks a token stream for lint warnings/errors.
Adds a separate pass for computing dependency information based on
goog.require and goog.provide statements prior to the main linting pass.
Args:
token: The first token in the token stream.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
"""
# To maximize the amount of errors that get reported before a parse error
# is displayed, don't run the dependency pass if a parse error exists.
if self._namespaces_info and not parse_error:
self._namespaces_info.Reset()
result = (self._ExecutePass(token, self._DependencyPass) and
self._ExecutePass(token, self._LintPass,
debug_tokens=debug_tokens))
else:
result = self._ExecutePass(token, self._LintPass, parse_error,
debug_tokens)
if not result:
return False
self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
self._error_handler.FinishFile()
return True
def _DependencyPass(self, token):
"""Processes an individual token for dependency information.
Used to encapsulate the logic needed to process an individual token so that
it can be passed to _ExecutePass.
Args:
token: The token to process.
"""
self._namespaces_info.ProcessToken(token, self._state_tracker)
class GJsLintRunner(lintrunner.LintRunner): class GJsLintRunner(lintrunner.LintRunner):
"""Wrapper class to run GJsLint.""" """Wrapper class to run GJsLint."""
def Run(self, filenames, error_handler=None): def Run(self, filenames, error_handler):
"""Run GJsLint on the given filenames. """Run GJsLint on the given filenames.
Args: Args:
filenames: The filenames to check filenames: The filenames to check
error_handler: An optional ErrorHandler object, an ErrorPrinter is used if error_handler: An ErrorHandler object.
none is specified.
Returns:
error_count, file_count: The number of errors and the number of files that
contain errors.
""" """
if not error_handler:
error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS)
checker = JavaScriptStyleChecker(error_handler) checker = JavaScriptStyleChecker(error_handler)
# Check the list of files. # Check the list of files.
for filename in filenames: for filename in filenames:
checker.Check(filename) checker.Check(filename)
return error_handler

159
tools/closure_linter/closure_linter/checkerbase.py

@ -20,6 +20,7 @@ __author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)', 'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)') 'jacobr@google.com (Jacob Richman)')
import StringIO
import traceback import traceback
import gflags as flags import gflags as flags
@ -37,6 +38,7 @@ flags.DEFINE_boolean('debug_tokens', False,
flags.DEFINE_boolean('error_trace', False, flags.DEFINE_boolean('error_trace', False,
'Whether to show error exceptions.') 'Whether to show error exceptions.')
class LintRulesBase(object): class LintRulesBase(object):
"""Base class for all classes defining the lint rules for a language.""" """Base class for all classes defining the lint rules for a language."""
@ -61,6 +63,14 @@ class LintRulesBase(object):
if errorrules.ShouldReportError(code): if errorrules.ShouldReportError(code):
self.__checker.HandleError(code, message, token, position, fix_data) self.__checker.HandleError(code, message, token, position, fix_data)
def _SetLimitedDocChecks(self, limited_doc_checks):
"""Sets whether doc checking is relaxed for this file.
Args:
limited_doc_checks: Whether doc checking is relaxed for this file.
"""
self._limited_doc_checks = limited_doc_checks
def CheckToken(self, token, parser_state): def CheckToken(self, token, parser_state):
"""Checks a token, given the current parser_state, for warnings and errors. """Checks a token, given the current parser_state, for warnings and errors.
@ -102,13 +112,17 @@ class CheckerBase(object):
documentation comments. documentation comments.
metadata_pass: Object that builds metadata about the token stream. metadata_pass: Object that builds metadata about the token stream.
""" """
self.__error_handler = error_handler self._error_handler = error_handler
self.__lint_rules = lint_rules self._lint_rules = lint_rules
self.__state_tracker = state_tracker self._state_tracker = state_tracker
self.__metadata_pass = metadata_pass self._metadata_pass = metadata_pass
self.__limited_doc_files = limited_doc_files self._limited_doc_files = limited_doc_files
self.__tokenizer = javascripttokenizer.JavaScriptTokenizer()
self.__has_errors = False # TODO(user): Factor out. A checker does not need to know about the
# tokenizer, only the token stream.
self._tokenizer = javascripttokenizer.JavaScriptTokenizer()
self._has_errors = False
def HandleError(self, code, message, token, position=None, def HandleError(self, code, message, token, position=None,
fix_data=None): fix_data=None):
@ -122,8 +136,8 @@ class CheckerBase(object):
position: The position of the error, defaults to None. position: The position of the error, defaults to None.
fix_data: Metadata used for fixing the error. fix_data: Metadata used for fixing the error.
""" """
self.__has_errors = True self._has_errors = True
self.__error_handler.HandleError( self._error_handler.HandleError(
error.Error(code, message, token, position, fix_data)) error.Error(code, message, token, position, fix_data))
def HasErrors(self): def HasErrors(self):
@ -132,21 +146,31 @@ class CheckerBase(object):
Returns: Returns:
True if the style checker has found any errors. True if the style checker has found any errors.
""" """
return self.__has_errors return self._has_errors
def Check(self, filename): def Check(self, filename, source=None):
"""Checks the file, printing warnings and errors as they are found. """Checks the file, printing warnings and errors as they are found.
Args: Args:
filename: The name of the file to check. filename: The name of the file to check.
source: Optional. The contents of the file. Can be either a string or
file-like object. If omitted, contents will be read from disk from
the given filename.
""" """
if source is None:
try: try:
f = open(filename) f = open(filename)
except IOError: except IOError:
self.__error_handler.HandleFile(filename, None) self._error_handler.HandleFile(filename, None)
self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None) self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
self.__error_handler.FinishFile() self._error_handler.FinishFile()
return return
else:
if type(source) in [str, unicode]:
f = StringIO.StringIO(source)
else:
f = source
try: try:
if filename.endswith('.html') or filename.endswith('.htm'): if filename.endswith('.html') or filename.endswith('.htm'):
@ -170,24 +194,22 @@ class CheckerBase(object):
failed prematurely. failed prematurely.
""" """
limited_doc_checks = False limited_doc_checks = False
if self.__limited_doc_files: if self._limited_doc_files:
for limited_doc_filename in self.__limited_doc_files: for limited_doc_filename in self._limited_doc_files:
if filename.endswith(limited_doc_filename): if filename.endswith(limited_doc_filename):
limited_doc_checks = True limited_doc_checks = True
break break
state_tracker = self.__state_tracker lint_rules = self._lint_rules
lint_rules = self.__lint_rules
state_tracker.Reset()
lint_rules.Initialize(self, limited_doc_checks, is_html) lint_rules.Initialize(self, limited_doc_checks, is_html)
token = self.__tokenizer.TokenizeFile(lines_iter) token = self._tokenizer.TokenizeFile(lines_iter)
parse_error = None parse_error = None
if self.__metadata_pass: if self._metadata_pass:
try: try:
self.__metadata_pass.Reset() self._metadata_pass.Reset()
self.__metadata_pass.Process(token) self._metadata_pass.Process(token)
except ecmametadatapass.ParseError, caught_parse_error: except ecmametadatapass.ParseError, caught_parse_error:
if FLAGS.error_trace: if FLAGS.error_trace:
traceback.print_exc() traceback.print_exc()
@ -197,41 +219,94 @@ class CheckerBase(object):
traceback.print_exc() traceback.print_exc()
return False return False
self.__error_handler.HandleFile(filename, token) self._error_handler.HandleFile(filename, token)
return self._CheckTokens(token, parse_error=parse_error,
debug_tokens=FLAGS.debug_tokens)
def _CheckTokens(self, token, parse_error, debug_tokens):
"""Checks a token stream for lint warnings/errors.
Args:
token: The first token in the token stream to check.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
"""
result = self._ExecutePass(token, self._LintPass, parse_error, debug_tokens)
if not result:
return False
self._lint_rules.Finalize(self._state_tracker, self._tokenizer.mode)
self._error_handler.FinishFile()
return True
def _LintPass(self, token):
"""Checks an individual token for lint warnings/errors.
Used to encapsulate the logic needed to check an individual token so that it
can be passed to _ExecutePass.
Args:
token: The token to check.
"""
self._lint_rules.CheckToken(token, self._state_tracker)
def _ExecutePass(self, token, pass_function, parse_error=None,
debug_tokens=False):
"""Calls the given function for every token in the given token stream.
As each token is passed to the given function, state is kept up to date and,
depending on the error_trace flag, errors are either caught and reported, or
allowed to bubble up so developers can see the full stack trace. If a parse
error is specified, the pass will proceed as normal until the token causing
the parse error is reached.
Args:
token: The first token in the token stream.
pass_function: The function to call for each token in the token stream.
parse_error: A ParseError if any errors occurred.
debug_tokens: Whether every token should be printed as it is encountered
during the pass.
Returns:
A boolean indicating whether the full token stream could be checked or if
checking failed prematurely.
Raises:
Exception: If any error occurred while calling the given function.
"""
self._state_tracker.Reset()
while token: while token:
if FLAGS.debug_tokens: if debug_tokens:
print token print token
if parse_error and parse_error.token == token: if parse_error and parse_error.token == token:
# Report any parse errors from above once we find the token.
message = ('Error parsing file at token "%s". Unable to ' message = ('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string) 'check the rest of file.' % token.string)
self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token) self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token)
self.__error_handler.FinishFile() self._error_handler.FinishFile()
return False return
if FLAGS.error_trace:
state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
else:
try: try:
state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken()) self._state_tracker.HandleToken(
token, self._state_tracker.GetLastNonSpaceToken())
pass_function(token)
self._state_tracker.HandleAfterToken(token)
except: except:
if FLAGS.error_trace:
raise
else:
self.HandleError(errors.FILE_DOES_NOT_PARSE, self.HandleError(errors.FILE_DOES_NOT_PARSE,
('Error parsing file at token "%s". Unable to ' ('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string), 'check the rest of file.' % token.string),
token) token)
self.__error_handler.FinishFile() self._error_handler.FinishFile()
return False return False
# Check the token for style guide violations.
lint_rules.CheckToken(token, state_tracker)
state_tracker.HandleAfterToken(token)
# Move to the next token.
token = token.next token = token.next
lint_rules.Finalize(state_tracker, self.__tokenizer.mode)
self.__error_handler.FinishFile()
return True return True

500
tools/closure_linter/closure_linter/closurizednamespacesinfo.py

@ -0,0 +1,500 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for computing dependency information for closurized JavaScript files.
Closurized JavaScript files express dependencies using goog.require and
goog.provide statements. In order for the linter to detect when a statement is
missing or unnecessary, all identifiers in the JavaScript file must first be
processed to determine if they constitute the creation or usage of a dependency.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable-msg=C6409
TokenType = javascripttokens.JavaScriptTokenType
DEFAULT_EXTRA_NAMESPACES = [
'goog.testing.asserts',
'goog.testing.jsunit',
]
class ClosurizedNamespacesInfo(object):
"""Dependency information for closurized JavaScript files.
Processes token streams for dependency creation or usage and provides logic
for determining if a given require or provide statement is unnecessary or if
there are missing require or provide statements.
"""
def __init__(self, closurized_namespaces, ignored_extra_namespaces):
  """Creates a new ClosurizedNamespacesInfo instance.

  Args:
    closurized_namespaces: Namespace prefixes that should be processed for
        dependency information; identifiers outside these prefixes are
        ignored.
    ignored_extra_namespaces: Namespaces that are never reported as extra,
        whether or not they are actually used.
  """
  self._closurized_namespaces = closurized_namespaces
  # The well-known test namespaces are always acceptable as extras.
  self._ignored_extra_namespaces = (
      ignored_extra_namespaces + DEFAULT_EXTRA_NAMESPACES)
  self.Reset()
def Reset(self):
"""Resets the internal state to prepare for processing a new file."""
# A list of goog.provide tokens in the order they appeared in the file.
self._provide_tokens = []
# A list of goog.require tokens in the order they appeared in the file.
self._require_tokens = []
# Namespaces that are already goog.provided.
self._provided_namespaces = []
# Namespaces that are already goog.required.
self._required_namespaces = []
# Note that created_namespaces and used_namespaces contain both namespaces
# and identifiers because there are many existing cases where a method or
# constant is provided directly instead of its namespace. Ideally, these
# two lists would only have to contain namespaces.
# A list of tuples where the first element is the namespace of an identifier
# created in the file and the second is the identifier itself.
self._created_namespaces = []
# A list of tuples where the first element is the namespace of an identifier
# used in the file and the second is the identifier itself.
self._used_namespaces = []
# A list of seemingly-unnecessary namespaces that are goog.required() and
# annotated with @suppress {extraRequire}.
self._suppressed_requires = []
# A list of goog.provide tokens which are duplicates.
self._duplicate_provide_tokens = []
# A list of goog.require tokens which are duplicates.
self._duplicate_require_tokens = []
# Whether this file is in a goog.scope. Someday, we may add support
# for checking scopified namespaces, but for now let's just fail
# in a more reasonable way.
self._scopified_file = False
# TODO(user): Handle the case where there are 2 different requires
# that can satisfy the same dependency, but only one is necessary.
def GetProvidedNamespaces(self):
"""Returns the namespaces which are already provided by this file.
Returns:
A list of strings where each string is a 'namespace' corresponding to an
existing goog.provide statement in the file being checked.
"""
return list(self._provided_namespaces)
def GetRequiredNamespaces(self):
"""Returns the namespaces which are already required by this file.
Returns:
A list of strings where each string is a 'namespace' corresponding to an
existing goog.require statement in the file being checked.
"""
return list(self._required_namespaces)
def IsExtraProvide(self, token):
  """Returns whether the given goog.provide token is unnecessary.

  Args:
    token: A goog.provide token.

  Returns:
    True if the given token corresponds to an unnecessary goog.provide
    statement, otherwise False.
  """
  # Scopified files are not checked.
  if self._scopified_file:
    return False

  namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string

  # Only namespaces under the configured prefixes are checked.
  if namespace.split('.', 1)[0] not in self._closurized_namespaces:
    return False

  # A duplicate provide is always extra.
  if token in self._duplicate_provide_tokens:
    return True

  # TODO(user): There's probably a faster way to compute this.
  # The provide is justified only if something created in this file
  # matches it (by namespace or by full identifier).
  return not any(namespace in (created_ns, created_id)
                 for created_ns, created_id in self._created_namespaces)
def IsExtraRequire(self, token):
  """Returns whether the given goog.require token is unnecessary.

  Args:
    token: A goog.require token.

  Returns:
    True if the given token corresponds to an unnecessary goog.require
    statement, otherwise False.
  """
  # Scopified files are not checked.
  if self._scopified_file:
    return False

  namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string

  # Only namespaces under the configured prefixes are checked.
  if namespace.split('.', 1)[0] not in self._closurized_namespaces:
    return False
  # Explicitly ignored namespaces are never extra.
  if namespace in self._ignored_extra_namespaces:
    return False
  # A duplicate require is always extra.
  if token in self._duplicate_require_tokens:
    return True
  # @suppress {extraRequire} silences the check for this namespace.
  if namespace in self._suppressed_requires:
    return False

  # An initial-caps component must be the last component of the namespace,
  # so requiring anything nested deeper is always reported as extra.
  parts = namespace.split('.')
  if len(parts) > 1 and parts[-2][0].isupper():
    return True

  # TODO(user): There's probably a faster way to compute this.
  # The require is justified only if something used in this file matches
  # it (by namespace or by full identifier).
  return not any(namespace in (used_ns, used_id)
                 for used_ns, used_id in self._used_namespaces)
def GetMissingProvides(self):
"""Returns the set of missing provided namespaces for the current file.
Returns:
Returns a set of strings where each string is a namespace that should be
provided by this file, but is not.
"""
if self._scopified_file:
return set()
missing_provides = set()
for namespace, identifier in self._created_namespaces:
if (not self._IsPrivateIdentifier(identifier) and
namespace not in self._provided_namespaces and
identifier not in self._provided_namespaces and
namespace not in self._required_namespaces):
missing_provides.add(namespace)
return missing_provides
def GetMissingRequires(self):
"""Returns the set of missing required namespaces for the current file.
For each non-private identifier used in the file, find either a
goog.require, goog.provide or a created identifier that satisfies it.
goog.require statements can satisfy the identifier by requiring either the
namespace of the identifier or the identifier itself. goog.provide
statements can satisfy the identifier by providing the namespace of the
identifier. A created identifier can only satisfy the used identifier if
it matches it exactly (necessary since things can be defined on a
namespace in more than one file). Note that provided namespaces should be
a subset of created namespaces, but we check both because in some cases we
can't always detect the creation of the namespace.
Returns:
Returns a set of strings where each string is a namespace that should be
required by this file, but is not.
"""
if self._scopified_file:
return set()
external_dependencies = set(self._required_namespaces)
# Assume goog namespace is always available.
external_dependencies.add('goog')
created_identifiers = set()
for namespace, identifier in self._created_namespaces:
created_identifiers.add(identifier)
missing_requires = set()
for namespace, identifier in self._used_namespaces:
if (not self._IsPrivateIdentifier(identifier) and
namespace not in external_dependencies and
namespace not in self._provided_namespaces and
identifier not in external_dependencies and
identifier not in created_identifiers):
missing_requires.add(namespace)
return missing_requires
def _IsPrivateIdentifier(self, identifier):
"""Returns whether the given identifer is private."""
pieces = identifier.split('.')
for piece in pieces:
if piece.endswith('_'):
return True
return False
def IsFirstProvide(self, token):
"""Returns whether token is the first provide token."""
return self._provide_tokens and token == self._provide_tokens[0]
def IsFirstRequire(self, token):
"""Returns whether token is the first require token."""
return self._require_tokens and token == self._require_tokens[0]
def IsLastProvide(self, token):
"""Returns whether token is the last provide token."""
return self._provide_tokens and token == self._provide_tokens[-1]
def IsLastRequire(self, token):
"""Returns whether token is the last require token."""
return self._require_tokens and token == self._require_tokens[-1]
def ProcessToken(self, token, state_tracker):
  """Processes the given token for dependency information.

  Dispatches on token type: identifiers feed the require/provide/usage
  bookkeeping, simple lvalues record namespace creation, and doc flags
  (@implements / @extends on interfaces) record interface usage.

  Args:
    token: The token to process.
    state_tracker: The JavaScript state tracker.
  """
  # Note that this method is in the critical path for the linter and has been
  # optimized for performance in the following ways:
  # - Tokens are checked by type first to minimize the number of function
  #   calls necessary to determine if action needs to be taken for the token.
  # - The most common tokens types are checked for first.
  # - The number of function calls has been minimized (thus the length of this
  #   function.)
  if token.type == TokenType.IDENTIFIER:
    # TODO(user): Consider saving the whole identifier in metadata.
    whole_identifier_string = self._GetWholeIdentifierString(token)
    if whole_identifier_string is None:
      # We only want to process the identifier one time. If the whole string
      # identifier is None, that means this token was part of a multi-token
      # identifier, but it was not the first token of the identifier.
      return
    # In the odd case that a goog.require is encountered inside a function,
    # just ignore it (e.g. dynamic loading in test runners).
    if token.string == 'goog.require' and not state_tracker.InFunction():
      self._require_tokens.append(token)
      # The required namespace is the string argument of the call.
      namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
      if namespace in self._required_namespaces:
        self._duplicate_require_tokens.append(token)
      else:
        self._required_namespaces.append(namespace)
      # If there is a suppression for the require, add a usage for it so it
      # gets treated as a regular goog.require (i.e. still gets sorted).
      jsdoc = state_tracker.GetDocComment()
      if jsdoc and ('extraRequire' in jsdoc.suppressions):
        self._suppressed_requires.append(namespace)
        self._AddUsedNamespace(state_tracker, namespace)
    elif token.string == 'goog.provide':
      self._provide_tokens.append(token)
      # The provided namespace is the string argument of the call.
      namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string
      if namespace in self._provided_namespaces:
        self._duplicate_provide_tokens.append(token)
      else:
        self._provided_namespaces.append(namespace)
      # If there is a suppression for the provide, add a creation for it so it
      # gets treated as a regular goog.provide (i.e. still gets sorted).
      jsdoc = state_tracker.GetDocComment()
      if jsdoc and ('extraProvide' in jsdoc.suppressions):
        self._AddCreatedNamespace(state_tracker, namespace)
    elif token.string == 'goog.scope':
      # Scopified files disable the extra/missing checks (see Reset()).
      self._scopified_file = True
    else:
      jsdoc = state_tracker.GetDocComment()
      if jsdoc and jsdoc.HasFlag('typedef'):
        # A @typedef creates its identifier rather than using it.
        self._AddCreatedNamespace(state_tracker, whole_identifier_string,
                                  self.GetClosurizedNamespace(
                                      whole_identifier_string))
      else:
        self._AddUsedNamespace(state_tracker, whole_identifier_string)
  elif token.type == TokenType.SIMPLE_LVALUE:
    identifier = token.values['identifier']
    namespace = self.GetClosurizedNamespace(identifier)
    if state_tracker.InFunction():
      # Assignments inside functions count as usage, not creation.
      self._AddUsedNamespace(state_tracker, identifier)
    elif namespace and namespace != 'goog':
      self._AddCreatedNamespace(state_tracker, identifier, namespace)
  elif token.type == TokenType.DOC_FLAG:
    flag_type = token.attached_object.flag_type
    is_interface = state_tracker.GetDocComment().HasFlag('interface')
    if flag_type == 'implements' or (flag_type == 'extends' and is_interface):
      # Interfaces should be goog.require'd.
      doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE)
      interface = tokenutil.Search(doc_start, TokenType.COMMENT)
      self._AddUsedNamespace(state_tracker, interface.string)
def _GetWholeIdentifierString(self, token):
"""Returns the whole identifier string for the given token.
Checks the tokens after the current one to see if the token is one in a
sequence of tokens which are actually just one identifier (i.e. a line was
wrapped in the middle of an identifier).
Args:
token: The token to check.
Returns:
The whole identifier string or None if this token is not the first token
in a multi-token identifier.
"""
result = ''
# Search backward to determine if this token is the first token of the
# identifier. If it is not the first token, return None to signal that this
# token should be ignored.
prev_token = token.previous
while prev_token:
if (prev_token.IsType(TokenType.IDENTIFIER) or
prev_token.IsType(TokenType.NORMAL) and prev_token.string == '.'):
return None
elif (not prev_token.IsType(TokenType.WHITESPACE) and
not prev_token.IsAnyType(TokenType.COMMENT_TYPES)):
break
prev_token = prev_token.previous
# Search forward to find other parts of this identifier separated by white
# space.
next_token = token
while next_token:
if (next_token.IsType(TokenType.IDENTIFIER) or
next_token.IsType(TokenType.NORMAL) and next_token.string == '.'):
result += next_token.string
elif (not next_token.IsType(TokenType.WHITESPACE) and
not next_token.IsAnyType(TokenType.COMMENT_TYPES)):
break
next_token = next_token.next
return result
def _AddCreatedNamespace(self, state_tracker, identifier, namespace=None):
"""Adds the namespace of an identifier to the list of created namespaces.
If the identifier is annotated with a 'missingProvide' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: The identifier to add.
namespace: The namespace of the identifier or None if the identifier is
also the namespace.
"""
if not namespace:
namespace = identifier
jsdoc = state_tracker.GetDocComment()
if jsdoc and 'missingProvide' in jsdoc.suppressions:
return
self._created_namespaces.append([namespace, identifier])
def _AddUsedNamespace(self, state_tracker, identifier):
"""Adds the namespace of an identifier to the list of used namespaces.
If the identifier is annotated with a 'missingRequire' suppression, it is
not added.
Args:
state_tracker: The JavaScriptStateTracker instance.
identifier: An identifier which has been used.
"""
jsdoc = state_tracker.GetDocComment()
if jsdoc and 'missingRequire' in jsdoc.suppressions:
return
namespace = self.GetClosurizedNamespace(identifier)
if namespace:
self._used_namespaces.append([namespace, identifier])
def GetClosurizedNamespace(self, identifier):
"""Given an identifier, returns the namespace that identifier is from.
Args:
identifier: The identifier to extract a namespace from.
Returns:
The namespace the given identifier resides in, or None if one could not
be found.
"""
if identifier.startswith('goog.global'):
# Ignore goog.global, since it is, by definition, global.
return None
parts = identifier.split('.')
for namespace in self._closurized_namespaces:
if not identifier.startswith(namespace + '.'):
continue
last_part = parts[-1]
if not last_part:
# TODO(robbyw): Handle this: it's a multi-line identifier.
return None
# The namespace for a class is the shortest prefix ending in a class
# name, which starts with a capital letter but is not a capitalized word.
#
# We ultimately do not want to allow requiring or providing of inner
# classes/enums. Instead, a file should provide only the top-level class
# and users should require only that.
namespace = []
for part in parts:
if part == 'prototype' or part.isupper():
return '.'.join(namespace)
namespace.append(part)
if part[0].isupper():
return '.'.join(namespace)
# At this point, we know there's no class or enum, so the namespace is
# just the identifier with the last part removed. With the exception of
# apply, inherits, and call, which should also be stripped.
if parts[-1] in ('apply', 'inherits', 'call'):
parts.pop()
parts.pop()
# If the last part ends with an underscore, it is a private variable,
# method, or enum. The namespace is whatever is before it.
if parts and parts[-1].endswith('_'):
parts.pop()
return '.'.join(parts)
return None

451
tools/closure_linter/closure_linter/closurizednamespacesinfo_test.py

@ -0,0 +1,451 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for ClosurizedNamespacesInfo."""
import unittest as googletest
from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable-msg=C6409
TokenType = javascripttokens.JavaScriptTokenType
class ClosurizedNamespacesInfoTest(googletest.TestCase):
"""Tests for ClosurizedNamespacesInfo."""
_test_cases = {
'goog.global.anything': None,
'package.CONSTANT': 'package',
'package.methodName': 'package',
'package.subpackage.methodName': 'package.subpackage',
'package.subpackage.methodName.apply': 'package.subpackage',
'package.ClassName.something': 'package.ClassName',
'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
'package.ClassName.CONSTANT': 'package.ClassName',
'package.namespace.CONSTANT.methodName': 'package.namespace',
'package.ClassName.inherits': 'package.ClassName',
'package.ClassName.apply': 'package.ClassName',
'package.ClassName.methodName.apply': 'package.ClassName',
'package.ClassName.methodName.call': 'package.ClassName',
'package.ClassName.prototype.methodName': 'package.ClassName',
'package.ClassName.privateMethod_': 'package.ClassName',
'package.className.privateProperty_': 'package.className',
'package.className.privateProperty_.methodName': 'package.className',
'package.ClassName.PrivateEnum_': 'package.ClassName',
'package.ClassName.prototype.methodName.apply': 'package.ClassName',
'package.ClassName.property.subProperty': 'package.ClassName',
'package.className.prototype.something.somethingElse': 'package.className'
}
_tokenizer = javascripttokenizer.JavaScriptTokenizer()
def testGetClosurizedNamespace(self):
"""Tests that the correct namespace is returned for various identifiers."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'], ignored_extra_namespaces=[])
for identifier, expected_namespace in self._test_cases.items():
actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
self.assertEqual(
expected_namespace,
actual_namespace,
'expected namespace "' + str(expected_namespace) +
'" for identifier "' + str(identifier) + '" but was "' +
str(actual_namespace) + '"')
def testIgnoredExtraNamespaces(self):
"""Tests that ignored_extra_namespaces are ignored."""
token = self._GetRequireTokens('package.Something')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'],
ignored_extra_namespaces=['package.Something'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should be valid since it is in ignored namespaces.')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be invalid since it is not in ignored namespaces.')
def testIsExtraProvide_created(self):
"""Tests that provides for created namespaces are not extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_createdIdentifier(self):
"""Tests that provides for created identifiers are not extra."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_notCreated(self):
"""Tests that provides for non-created namespaces are extra."""
input_lines = ['goog.provide(\'package.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is not created.')
def testIsExtraProvide_duplicate(self):
"""Tests that providing a namespace twice makes the second one extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
# Advance to the second goog.provide token.
token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is already provided.')
def testIsExtraProvide_notClosurized(self):
"""Tests that provides of non-closurized namespaces are not extra."""
input_lines = ['goog.provide(\'notclosurized.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_used(self):
"""Tests that requires for used namespaces are not extra."""
input_lines = [
'goog.require(\'package.Foo\');',
'var x = package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is used.')
def testIsExtraRequire_usedIdentifier(self):
"""Tests that requires for used methods on classes are extra."""
input_lines = [
'goog.require(\'package.Foo.methodName\');',
'var x = package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should require the package, not the method specifically.')
def testIsExtraRequire_notUsed(self):
"""Tests that requires for unused namespaces are extra."""
input_lines = ['goog.require(\'package.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be extra since it is not used.')
def testIsExtraRequire_notClosurized(self):
"""Tests that requires of non-closurized namespaces are not extra."""
input_lines = ['goog.require(\'notclosurized.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_objectOnClass(self):
"""Tests that requiring an object on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'The whole class, not the object, should be required.');
def testIsExtraRequire_constantOnClass(self):
"""Tests that requiring a constant on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.CONSTANT\');',
'var x = package.Foo.CONSTANT',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'The class, not the constant, should be required.');
def testIsExtraRequire_constantNotOnClass(self):
"""Tests that requiring a constant not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.CONSTANT\');',
'var x = package.subpackage.CONSTANT',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Constants can be required except on classes.');
def testIsExtraRequire_methodNotOnClass(self):
"""Tests that requiring a method not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.method\');',
'var x = package.subpackage.method()',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Methods can be required except on classes.');
def testIsExtraRequire_defaults(self):
"""Tests that there are no warnings about extra requires for test utils"""
input_lines = ['goog.require(\'goog.testing.jsunit\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['goog'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is for testing.')
def testGetMissingProvides_provided(self):
"""Tests that provided functions don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedIdentifier(self):
"""Tests that provided identifiers don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedParentIdentifier(self):
"""Tests that provided identifiers on a class don't cause a missing provide
on objects attached to that class."""
input_lines = [
'goog.provide(\'package.foo.ClassName\');',
'package.foo.ClassName.methodName = function() {};',
'package.foo.ClassName.ObjectName = 1;',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_unprovided(self):
"""Tests that unprovided functions cause a missing provide."""
input_lines = ['package.Foo = function() {};']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingProvides()))
self.assertTrue('package.Foo' in namespaces_info.GetMissingProvides())
def testGetMissingProvides_privatefunction(self):
"""Tests that unprovided private functions don't cause a missing provide."""
input_lines = ['package.Foo_ = function() {};']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_required(self):
"""Tests that required namespaces don't cause a missing provide."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_required(self):
"""Tests that required namespaces don't cause a missing require."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_requiredIdentifier(self):
"""Tests that required namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_requiredParentClass(self):
"""Tests that requiring a parent class of an object is sufficient to prevent
a missing require on that object."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();',
'package.Foo.methodName(package.Foo.ObjectName);'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_unrequired(self):
"""Tests that unrequired namespaces cause a missing require."""
input_lines = ['package.Foo();']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
def testGetMissingRequires_provided(self):
"""Tests that provided namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_created(self):
"""Tests that created namespaces do not satisfy usage of an identifier."""
input_lines = [
'package.Foo = function();',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
def testGetMissingRequires_createdIdentifier(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.methodName = function();',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_objectOnClass(self):
"""Tests that we should require a class, not the object on the class."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()),
'The whole class, not the object, should be required.');
def testIsFirstProvide(self):
"""Tests operation of the isFirstProvide method."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsFirstProvide(token))
def testGetWholeIdentifierString(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.',
' veryLong.',
' identifier;'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo([], [])
self.assertEquals('package.Foo.veryLong.identifier',
namespaces_info._GetWholeIdentifierString(token))
self.assertEquals(None,
namespaces_info._GetWholeIdentifierString(token.next))
def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
ignored_extra_namespaces):
"""Returns a namespaces info initialized with the given token stream."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=closurized_namespaces,
ignored_extra_namespaces=ignored_extra_namespaces)
state_tracker = javascriptstatetracker.JavaScriptStateTracker()
while token:
namespaces_info.ProcessToken(token, state_tracker)
token = token.next
return namespaces_info
def _GetProvideTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
def _GetRequireTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
if __name__ == '__main__':
googletest.main()

15
tools/closure_linter/closure_linter/common/__init__.py

@ -1 +1,16 @@
#!/usr/bin/env python #!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package indicator for gjslint.common."""

2
tools/closure_linter/closure_linter/common/erroraccumulator.py

@ -35,7 +35,7 @@ class ErrorAccumulator(errorhandler.ErrorHandler):
Args: Args:
error: The error object error: The error object
""" """
self._errors.append((error.token.line_number, error.code)) self._errors.append(error)
def GetErrors(self): def GetErrors(self):
"""Returns the accumulated errors. """Returns the accumulated errors.

52
tools/closure_linter/closure_linter/common/erroroutput.py

@ -0,0 +1,52 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to format errors."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'nnaze@google.com (Nathan Naze)')
def GetUnixErrorOutput(filename, error, new_error=False):
  """Get a output line for an error in UNIX format."""
  # An error with no token (e.g. a file-level error) has no line number.
  line = '%d' % error.token.line_number if error.token else ''
  code = '%04d' % error.code
  if new_error:
    code = 'New Error ' + code
  return '%s:%s:(%s) %s' % (filename, line, code, error.message)
def GetErrorOutput(error, new_error=False):
  """Get a output line for an error in regular format.

  Args:
    error: The error object to format. Must have 'code' and 'message'
        attributes; 'token' may be None for file-level errors.
    new_error: Whether this is a newly introduced error. If True, the
        message is prefixed with 'New Error '.

  Returns:
    A string of the form 'Line <n>, E:<code>: <message>'.
  """
  line = ''
  if error.token:
    line = 'Line %d, ' % error.token.line_number

  code = 'E:%04d' % error.code

  error_message = error.message
  if new_error:
    error_message = 'New Error ' + error_message

  # Bug fix: previously returned error.message directly, which silently
  # dropped the 'New Error ' prefix computed above.
  return '%s%s: %s' % (line, code, error_message)

203
tools/closure_linter/closure_linter/common/errorprinter.py

@ -1,203 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error handler class that prints errors to stdout."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
from closure_linter.common import error
from closure_linter.common import errorhandler
Error = error.Error
# The error message is of the format:
# Line <number>, E:<code>: message
DEFAULT_FORMAT = 1
# The error message is of the format:
# filename:[line number]:message
UNIX_FORMAT = 2
class ErrorPrinter(errorhandler.ErrorHandler):
"""ErrorHandler that prints errors to stdout."""
def __init__(self, new_errors=None):
"""Initializes this error printer.
Args:
new_errors: A sequence of error codes representing recently introduced
errors, defaults to None.
"""
# Number of errors
self._error_count = 0
# Number of new errors
self._new_error_count = 0
# Number of files checked
self._total_file_count = 0
# Number of files with errors
self._error_file_count = 0
# Dict of file name to number of errors
self._file_table = {}
# List of errors for each file
self._file_errors = None
# Current file
self._filename = None
self._format = DEFAULT_FORMAT
if new_errors:
self._new_errors = frozenset(new_errors)
else:
self._new_errors = frozenset(set())
def SetFormat(self, format):
"""Sets the print format of errors.
Args:
format: One of {DEFAULT_FORMAT, UNIX_FORMAT}.
"""
self._format = format
def HandleFile(self, filename, first_token):
"""Notifies this ErrorPrinter that subsequent errors are in filename.
Sets the current file name, and sets a flag stating the header for this file
has not been printed yet.
Should be called by a linter before a file is style checked.
Args:
filename: The name of the file about to be checked.
first_token: The first token in the file, or None if there was an error
opening the file
"""
if self._filename and self._file_table[self._filename]:
print
self._filename = filename
self._file_table[filename] = 0
self._total_file_count += 1
self._file_errors = []
def HandleError(self, error):
"""Prints a formatted error message about the specified error.
The error message is of the format:
Error #<code>, line #<number>: message
Args:
error: The error object
"""
self._file_errors.append(error)
self._file_table[self._filename] += 1
self._error_count += 1
if self._new_errors and error.code in self._new_errors:
self._new_error_count += 1
def _PrintError(self, error):
"""Prints a formatted error message about the specified error.
Args:
error: The error object
"""
new_error = self._new_errors and error.code in self._new_errors
if self._format == DEFAULT_FORMAT:
line = ''
if error.token:
line = 'Line %d, ' % error.token.line_number
code = 'E:%04d' % error.code
if new_error:
print '%s%s: (New error) %s' % (line, code, error.message)
else:
print '%s%s: %s' % (line, code, error.message)
else:
# UNIX format
filename = self._filename
line = ''
if error.token:
line = '%d' % error.token.line_number
error_code = '%04d' % error.code
if new_error:
error_code = 'New Error ' + error_code
print '%s:%s:(%s) %s' % (filename, line, error_code, error.message)
def FinishFile(self):
"""Finishes handling the current file."""
if self._file_errors:
self._error_file_count += 1
if self._format != UNIX_FORMAT:
print '----- FILE : %s -----' % (self._filename)
self._file_errors.sort(Error.Compare)
for error in self._file_errors:
self._PrintError(error)
def HasErrors(self):
"""Whether this error printer encountered any errors.
Returns:
True if the error printer encountered any errors.
"""
return self._error_count
def HasNewErrors(self):
"""Whether this error printer encountered any new errors.
Returns:
True if the error printer encountered any new errors.
"""
return self._new_error_count
def HasOldErrors(self):
"""Whether this error printer encountered any old errors.
Returns:
True if the error printer encountered any old errors.
"""
return self._error_count - self._new_error_count
def PrintSummary(self):
"""Print a summary of the number of errors and files."""
if self.HasErrors() or self.HasNewErrors():
print ('Found %d errors, including %d new errors, in %d files '
'(%d files OK).' % (
self._error_count,
self._new_error_count,
self._error_file_count,
self._total_file_count - self._error_file_count))
else:
print '%d files checked, no errors found.' % self._total_file_count
def PrintFileSummary(self):
"""Print a detailed summary of the number of errors in each file."""
keys = self._file_table.keys()
keys.sort()
for filename in keys:
print '%s: %d' % (filename, self._file_table[filename])

7
tools/closure_linter/closure_linter/common/filetestcase.py

@ -101,5 +101,8 @@ class AnnotatedFileTestCase(googletest.TestCase):
self._runner.Run([filename], errors) self._runner.Run([filename], errors)
errors = errors.GetErrors() errors = errors.GetErrors()
errors.sort()
return errors # Convert to expected tuple format.
error_msgs = [(error.token.line_number, error.code) for error in errors]
error_msgs.sort()
return error_msgs

14
tools/closure_linter/closure_linter/common/tokens.py

@ -123,3 +123,17 @@ class Token(object):
return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string, return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
self.values, self.line_number, self.values, self.line_number,
self.metadata) self.metadata)
def __iter__(self):
"""Returns a token iterator."""
node = self
while node:
yield node
node = node.next
def __reversed__(self):
"""Returns a reverse-direction token iterator."""
node = self
while node:
yield node
node = node.previous

101
tools/closure_linter/closure_linter/common/tokens_test.py

@ -0,0 +1,101 @@
#!/usr/bin/env python
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
import unittest as googletest
from closure_linter.common import tokens
def _CreateDummyToken():
  """Creates a single placeholder token for use in tests."""
  dummy = tokens.Token('foo', None, 1, 1)
  return dummy
def _CreateDummyTokens(count):
  """Creates a list of `count` unlinked dummy tokens."""
  return [_CreateDummyToken() for _ in xrange(count)]
def _SetTokensAsNeighbors(neighbor_tokens):
for i in xrange(len(neighbor_tokens)):
prev_index = i - 1
next_index = i + 1
if prev_index >= 0:
neighbor_tokens[i].previous = neighbor_tokens[prev_index]
if next_index < len(neighbor_tokens):
neighbor_tokens[i].next = neighbor_tokens[next_index]
class TokensTest(googletest.TestCase):
  """Tests for the Token linked-list and type-query helpers."""

  def testIsFirstInLine(self):
    # A token with no previous neighbor is the first in the file.
    self.assertTrue(_CreateDummyToken().IsFirstInLine())

    first, second = _CreateDummyTokens(2)
    _SetTokensAsNeighbors([first, second])

    # Same line: the second token is not first in its line.
    first.line_number = 30
    second.line_number = 30
    self.assertFalse(second.IsFirstInLine())

    # Different lines: the second token starts its line.
    second.line_number = 31
    self.assertTrue(second.IsFirstInLine())

  def testIsLastInLine(self):
    # A token with no next neighbor is the last in the file.
    self.assertTrue(_CreateDummyToken().IsLastInLine())

    first, second = _CreateDummyTokens(2)
    _SetTokensAsNeighbors([first, second])

    # Same line: the first token is not last in its line.
    first.line_number = 30
    second.line_number = 30
    self.assertFalse(first.IsLastInLine())

    # Different lines: the first token ends its line.
    second.line_number = 31
    self.assertTrue(first.IsLastInLine())

  def testIsType(self):
    token = tokens.Token('foo', 'fakeType1', 1, 1)
    self.assertTrue(token.IsType('fakeType1'))
    self.assertFalse(token.IsType('fakeType2'))

  def testIsAnyType(self):
    token = tokens.Token('foo', 'fakeType1', 1, 1)
    self.assertTrue(token.IsAnyType(['fakeType1', 'fakeType2']))
    self.assertFalse(token.IsAnyType(['fakeType3', 'fakeType4']))

  def testRepr(self):
    token = tokens.Token('foo', 'fakeType1', 1, 1)
    self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(token))

  def testIter(self):
    linked = _CreateDummyTokens(5)
    _SetTokensAsNeighbors(linked)
    a, b, c, d, e = linked
    self.assertListEqual([a, b, c, d, e], list(iter(a)))

  def testReverseIter(self):
    linked = _CreateDummyTokens(5)
    _SetTokensAsNeighbors(linked)
    a, b, c, d, e = linked
    self.assertListEqual([e, d, c, b, a], list(reversed(e)))
# Allows running this module's tests directly from the command line.
if __name__ == '__main__':
  googletest.main()

122
tools/closure_linter/closure_linter/ecmalintrules.py

@ -25,6 +25,7 @@ import re
from closure_linter import checkerbase from closure_linter import checkerbase
from closure_linter import ecmametadatapass from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errors from closure_linter import errors
from closure_linter import indentation from closure_linter import indentation
from closure_linter import javascripttokens from closure_linter import javascripttokens
@ -39,8 +40,6 @@ from closure_linter.common import tokens
import gflags as flags import gflags as flags
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_boolean('strict', False,
'Whether to validate against the stricter Closure style.')
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow') flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
# TODO(robbyw): Check for extra parens on return statements # TODO(robbyw): Check for extra parens on return statements
@ -53,6 +52,7 @@ Context = ecmametadatapass.EcmaContext
Error = error.Error Error = error.Error
Modes = javascripttokenizer.JavaScriptModes Modes = javascripttokenizer.JavaScriptModes
Position = position.Position Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase): class EcmaScriptLintRules(checkerbase.LintRulesBase):
@ -183,7 +183,8 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE, self._HandleError(errors.JSDOC_ILLEGAL_QUESTION_WITH_PIPE,
'JsDoc types cannot contain both "?" and "|": "%s"' % p, token) 'JsDoc types cannot contain both "?" and "|": "%s"' % p, token)
if FLAGS.strict and (flag.type_start_token.type != Type.DOC_START_BRACE or if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE): flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(errors.MISSING_BRACES_AROUND_TYPE, self._HandleError(errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token) 'Type must always be surrounded by curly braces.', token)
@ -249,7 +250,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
type = token.type type = token.type
# Process the line change. # Process the line change.
if not self._is_html and FLAGS.strict: if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files. # TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state) indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors: for indentation_error in indentation_errors:
@ -360,32 +361,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
token.previous, Position.All(token.previous.string)) token.previous, Position.All(token.previous.string))
elif type == Type.START_BRACKET: elif type == Type.START_BRACKET:
if (not first_in_line and token.previous.type == Type.WHITESPACE and self._HandleStartBracket(token, last_non_space_token)
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
# In the the future, when rules for computing exactly how many spaces
# lines should be indented are added, then we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not first_in_line and token.previous and
not token.previous.type in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, Position.AtBeginning())
elif type in (Type.END_PAREN, Type.END_BRACKET): elif type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when # Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the # it's in a for statement with an omitted section, or when it's at the
@ -408,9 +384,14 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
elif type == Type.WHITESPACE: elif type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string): if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine(): if token.IsFirstInLine():
if token.next:
self._HandleError(errors.ILLEGAL_TAB, self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string, 'Illegal tab in whitespace before "%s"' % token.next.string,
token, Position.All(token.string)) token, Position.All(token.string))
else:
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace',
token, Position.All(token.string))
else: else:
self._HandleError(errors.ILLEGAL_TAB, self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string, 'Illegal tab in whitespace after "%s"' % token.previous.string,
@ -471,12 +452,15 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX, self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. ' 'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token) 'Spaces matter.', token)
elif flag.type not in state.GetDocFlag().SUPPRESS_TYPES: else:
for suppress_type in flag.type.split('|'):
if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(errors.INVALID_SUPPRESS_TYPE, self._HandleError(errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % flag.type, 'Invalid suppression type: %s' % suppress_type,
token) token)
elif FLAGS.strict and flag.flag_type == 'author': elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as # TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required. # it exists, though the full form checked below isn't required.
string = token.next.string string = token.next.string
@ -570,7 +554,8 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.INVALID_JSDOC_TAG, self._HandleError(errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token) 'Invalid JsDoc tag: %s' % token.values['name'], token)
if (FLAGS.strict and token.values['name'] == 'inheritDoc' and if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
token.values['name'] == 'inheritDoc' and
type == Type.DOC_INLINE_FLAG): type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC, self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc', 'Unnecessary braces around @inheritDoc',
@ -599,18 +584,24 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
identifier.startswith('this.')): identifier.startswith('this.')):
# We are at the top level and the function/member is documented. # We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'): if identifier.endswith('_') and not identifier.endswith('__'):
if jsdoc.HasFlag('override'): # Can have a private class which inherits documentation from a
# public superclass.
#
# @inheritDoc is deprecated in favor of using @override, and they
if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
and not ('accessControls' in jsdoc.suppressions)):
self._HandleError(errors.INVALID_OVERRIDE_PRIVATE, self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier, '%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token) jsdoc.GetFlag('override').flag_token)
# Can have a private class which inherits documentation from a if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
# public superclass. and not ('accessControls' in jsdoc.suppressions)):
if jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor'):
self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE, self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier, '%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token) jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and if (not jsdoc.HasFlag('private') and
not ('underscore' in jsdoc.suppressions)): not ('underscore' in jsdoc.suppressions) and not
((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
('accessControls' in jsdoc.suppressions))):
self._HandleError(errors.MISSING_PRIVATE, self._HandleError(errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' % 'Member "%s" must have @private JsDoc.' %
identifier, token) identifier, token)
@ -618,18 +609,22 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.UNNECESSARY_SUPPRESS, self._HandleError(errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private', '@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore']) jsdoc.suppressions['underscore'])
elif jsdoc.HasFlag('private'): elif (jsdoc.HasFlag('private') and
not self.InExplicitlyTypedLanguage()):
# It is convention to hide public fields in some ECMA
# implementations from documentation using the @private tag.
self._HandleError(errors.EXTRA_PRIVATE, self._HandleError(errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' % 'Member "%s" must not have @private JsDoc' %
identifier, token) identifier, token)
if ((jsdoc.HasFlag('desc') or jsdoc.HasFlag('hidden')) # These flags are only legal on localizable message definitions;
# such variables always begin with the prefix MSG_.
for f in ('desc', 'hidden', 'meaning'):
if (jsdoc.HasFlag(f)
and not identifier.startswith('MSG_') and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1): and identifier.find('.MSG_') == -1):
# TODO(user): Update error message to show the actual invalid
# tag, either @desc or @hidden.
self._HandleError(errors.INVALID_USE_OF_DESC_TAG, self._HandleError(errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @desc JsDoc' % identifier, 'Member "%s" should not have @%s JsDoc' % (identifier, f),
token) token)
# Check for illegaly assigning live objects as prototype property values. # Check for illegaly assigning live objects as prototype property values.
@ -677,6 +672,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
# Languages that don't allow variables to by typed such as # Languages that don't allow variables to by typed such as
# JavaScript care but languages such as ActionScript or Java # JavaScript care but languages such as ActionScript or Java
# that allow variables to be typed don't care. # that allow variables to be typed don't care.
if not self._limited_doc_checks:
self.HandleMissingParameterDoc(token, params_iter.next()) self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D': elif op == 'D':
@ -686,6 +682,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
docs_iter.next(), token) docs_iter.next(), token)
elif op == 'S': elif op == 'S':
# Substitution # Substitution
if not self._limited_doc_checks:
self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION, self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' % 'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token) (params_iter.next(), docs_iter.next()), token)
@ -722,6 +719,39 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
self._HandleError(errors.MISSING_SEMICOLON, self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token) 'Missing semicolon at end of line', token)
def _HandleStartBracket(self, token, last_non_space_token):
  """Handles a token that is an open bracket.

  Args:
    token: The token to handle.
    last_non_space_token: The last token that was not a space.
  """
  if token.IsFirstInLine():
    # If the [ token is the first token in a line we shouldn't complain
    # about a missing space before [. This is because some Ecma script
    # languages allow syntax like:
    # [Annotation]
    # class MyClass {...}
    # So we don't want to blindly warn about missing spaces before [.
    # In the the future, when rules for computing exactly how many spaces
    # lines should be indented are added, then we can return errors for
    # [ tokens that are improperly indented.
    # For example:
    # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
    #     [a,b,c];
    # should trigger a proper indentation warning message as [ is not indented
    # by four spaces.
    return

  previous = token.previous
  if (previous.type == Type.WHITESPACE and last_non_space_token and
      last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
    # Whitespace between an expression-ending token and the bracket is extra.
    self._HandleError(errors.EXTRA_SPACE, 'Extra space before "["',
                      previous, Position.All(previous.string))
  elif (previous and
        not previous.type in (
            [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
            Type.EXPRESSION_ENDER_TYPES)):
    self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
                      token, Position.AtBeginning())
def Finalize(self, state, tokenizer_mode): def Finalize(self, state, tokenizer_mode):
last_non_space_token = state.GetLastNonSpaceToken() last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline. # Check last line for ending with newline.
@ -750,3 +780,7 @@ class EcmaScriptLintRules(checkerbase.LintRulesBase):
def GetLongLineExceptions(self): def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit.""" """Gets a list of regexps for lines which can be longer than the limit."""
return [] return []
def InExplicitlyTypedLanguage(self):
  """Returns whether this ecma implementation is explicitly typed."""
  # Base implementation: JavaScript is not explicitly typed. Subclasses for
  # typed ECMA dialects (e.g. ActionScript) presumably override this to
  # return True -- TODO confirm against the subclass implementations.
  return False

90
tools/closure_linter/closure_linter/error_check.py

@ -0,0 +1,90 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specific JSLint errors checker."""
import gflags as flags
FLAGS = flags.FLAGS
class Rule(object):
  """Different rules to check."""

  # Documentations for specific rules goes in flag definition.
  BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
  INDENTATION = 'indentation'
  WELL_FORMED_AUTHOR = 'well_formed_author'
  NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
  BRACES_AROUND_TYPE = 'braces_around_type'
  OPTIONAL_TYPE_MARKER = 'optional_type_marker'
  UNUSED_PRIVATE_MEMBERS = 'unused_private_members'

  # Rule to raise all known errors.
  ALL = 'all'

  # All rules that are to be checked when using the strict flag. E.g. the rules
  # that are specific to the stricter Closure style.
  # NOTE: UNUSED_PRIVATE_MEMBERS is deliberately absent from this set; it is
  # only checked when enabled explicitly via --jslint_error.
  CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL,
                             INDENTATION,
                             WELL_FORMED_AUTHOR,
                             NO_BRACES_AROUND_INHERIT_DOC,
                             BRACES_AROUND_TYPE,
                             OPTIONAL_TYPE_MARKER])
# --strict turns on every rule in Rule.CLOSURE_RULES at once; individual
# rules can also be enabled one at a time with repeated --jslint_error flags.
flags.DEFINE_boolean('strict', False,
                     'Whether to validate against the stricter Closure style. '
                     'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')
# Multistring flag: may be passed multiple times; values accumulate in a list.
flags.DEFINE_multistring('jslint_error', [],
                         'List of specific lint errors to check. Here is a list'
                         ' of accepted values:\n'
                         ' - ' + Rule.ALL + ': enables all following errors.\n'
                         ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates'
                         'number of blank lines between blocks at top level.\n'
                         ' - ' + Rule.INDENTATION + ': checks correct '
                         'indentation of code.\n'
                         ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
                         '@author JsDoc tags.\n'
                         ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
                         'forbids braces around @inheritdoc JsDoc tags.\n'
                         ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
                         'around types in JsDoc tags.\n'
                         ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
                         'use of optional marker = in param types.\n'
                         ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
                         'unused private variables.\n')
def ShouldCheck(rule):
  """Returns whether the optional rule should be checked.

  Computes different flags (strict, jslint_error, jslint_noerror) to find out
  if this specific rule should be checked.

  Args:
    rule: Name of the rule (see Rule).

  Returns:
    True if the rule should be checked according to the flags, otherwise False.
  """
  explicitly_enabled = FLAGS.jslint_error
  if Rule.ALL in explicitly_enabled or rule in explicitly_enabled:
    return True

  # Under --strict, every rule in the Closure rule set is also checked.
  return FLAGS.strict and rule in Rule.CLOSURE_RULES

209
tools/closure_linter/closure_linter/error_fixer.py

@ -24,6 +24,7 @@ import gflags as flags
from closure_linter import errors from closure_linter import errors
from closure_linter import javascriptstatetracker from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil from closure_linter import tokenutil
from closure_linter.common import errorhandler from closure_linter.common import errorhandler
@ -33,10 +34,21 @@ Type = javascripttokens.JavaScriptTokenType
END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$') END_OF_FLAG_TYPE = re.compile(r'(}?\s*)$')
# Regex to represent common mistake inverting author name and email as
# @author User Name (user@company)
# Raw strings are used for every fragment: the original non-raw strings only
# worked because '\s' and '\(' happen not to be recognized string escapes,
# which raises DeprecationWarning/SyntaxWarning on modern Python.
INVERTED_AUTHOR_SPEC = re.compile(r'(?P<leading_whitespace>\s*)'
                                  r'(?P<name>[^(]+)'
                                  r'(?P<whitespace_after_name>\s+)'
                                  r'\('
                                  r'(?P<email>[^\s]+@[^)\s]+)'
                                  r'\)'
                                  r'(?P<trailing_characters>.*)')
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_boolean('disable_indentation_fixing', False, flags.DEFINE_boolean('disable_indentation_fixing', False,
'Whether to disable automatic fixing of indentation.') 'Whether to disable automatic fixing of indentation.')
class ErrorFixer(errorhandler.ErrorHandler): class ErrorFixer(errorhandler.ErrorHandler):
"""Object that fixes simple style errors.""" """Object that fixes simple style errors."""
@ -47,6 +59,8 @@ class ErrorFixer(errorhandler.ErrorHandler):
external_file: If included, all output will be directed to this file external_file: If included, all output will be directed to this file
instead of overwriting the files the errors are found in. instead of overwriting the files the errors are found in.
""" """
errorhandler.ErrorHandler.__init__(self)
self._file_name = None self._file_name = None
self._file_token = None self._file_token = None
self._external_file = external_file self._external_file = external_file
@ -104,6 +118,19 @@ class ErrorFixer(errorhandler.ErrorHandler):
token.attached_object = javascriptstatetracker.JsDocFlag(token) token.attached_object = javascriptstatetracker.JsDocFlag(token)
self._AddFix(token) self._AddFix(token)
elif code == errors.JSDOC_MISSING_OPTIONAL_TYPE:
iterator = token.attached_object.type_end_token
if iterator.type == Type.DOC_END_BRACE or iterator.string.isspace():
iterator = iterator.previous
ending_space = len(iterator.string) - len(iterator.string.rstrip())
iterator.string = '%s=%s' % (iterator.string.rstrip(),
' ' * ending_space)
# Create a new flag object with updated type info.
token.attached_object = javascriptstatetracker.JsDocFlag(token)
self._AddFix(token)
elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION, elif code in (errors.MISSING_SEMICOLON_AFTER_FUNCTION,
errors.MISSING_SEMICOLON): errors.MISSING_SEMICOLON):
semicolon_token = Token(';', Type.SEMICOLON, token.line, semicolon_token = Token(';', Type.SEMICOLON, token.line,
@ -149,9 +176,9 @@ class ErrorFixer(errorhandler.ErrorHandler):
elif code == errors.MISSING_LINE: elif code == errors.MISSING_LINE:
if error.position.IsAtBeginning(): if error.position.IsAtBeginning():
tokenutil.InsertLineAfter(token.previous) tokenutil.InsertBlankLineAfter(token.previous)
else: else:
tokenutil.InsertLineAfter(token) tokenutil.InsertBlankLineAfter(token)
self._AddFix(token) self._AddFix(token)
elif code == errors.EXTRA_LINE: elif code == errors.EXTRA_LINE:
@ -167,7 +194,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
should_delete = False should_delete = False
if num_lines < 0: if num_lines < 0:
num_lines = num_lines * -1 num_lines *= -1
should_delete = True should_delete = True
for i in xrange(1, num_lines + 1): for i in xrange(1, num_lines + 1):
@ -175,16 +202,17 @@ class ErrorFixer(errorhandler.ErrorHandler):
# TODO(user): DeleteToken should update line numbers. # TODO(user): DeleteToken should update line numbers.
tokenutil.DeleteToken(token.previous) tokenutil.DeleteToken(token.previous)
else: else:
tokenutil.InsertLineAfter(token.previous) tokenutil.InsertBlankLineAfter(token.previous)
self._AddFix(token) self._AddFix(token)
elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING: elif code == errors.UNNECESSARY_DOUBLE_QUOTED_STRING:
end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END) end_quote = tokenutil.Search(token, Type.DOUBLE_QUOTE_STRING_END)
if end_quote: if end_quote:
single_quote_start = Token("'", Type.SINGLE_QUOTE_STRING_START, single_quote_start = Token(
token.line, token.line_number) "'", Type.SINGLE_QUOTE_STRING_START, token.line, token.line_number)
single_quote_end = Token("'", Type.SINGLE_QUOTE_STRING_START, single_quote_end = Token(
end_quote.line, token.line_number) "'", Type.SINGLE_QUOTE_STRING_START, end_quote.line,
token.line_number)
tokenutil.InsertTokenAfter(single_quote_start, token) tokenutil.InsertTokenAfter(single_quote_start, token)
tokenutil.InsertTokenAfter(single_quote_end, end_quote) tokenutil.InsertTokenAfter(single_quote_end, end_quote)
@ -197,15 +225,15 @@ class ErrorFixer(errorhandler.ErrorHandler):
start_token = token.attached_object.type_start_token start_token = token.attached_object.type_start_token
if start_token.type != Type.DOC_START_BRACE: if start_token.type != Type.DOC_START_BRACE:
leading_space = (len(start_token.string) - leading_space = (
len(start_token.string.lstrip())) len(start_token.string) - len(start_token.string.lstrip()))
if leading_space: if leading_space:
start_token = tokenutil.SplitToken(start_token, leading_space) start_token = tokenutil.SplitToken(start_token, leading_space)
# Fix case where start and end token were the same. # Fix case where start and end token were the same.
if token.attached_object.type_end_token == start_token.previous: if token.attached_object.type_end_token == start_token.previous:
token.attached_object.type_end_token = start_token token.attached_object.type_end_token = start_token
new_token = Token("{", Type.DOC_START_BRACE, start_token.line, new_token = Token('{', Type.DOC_START_BRACE, start_token.line,
start_token.line_number) start_token.line_number)
tokenutil.InsertTokenAfter(new_token, start_token.previous) tokenutil.InsertTokenAfter(new_token, start_token.previous)
token.attached_object.type_start_token = new_token token.attached_object.type_start_token = new_token
@ -217,7 +245,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
# FLAG_ENDING_TYPE token, if there wasn't a starting brace then # FLAG_ENDING_TYPE token, if there wasn't a starting brace then
# the end token is the last token of the actual type. # the end token is the last token of the actual type.
last_type = end_token last_type = end_token
if not len(fixed_tokens): if not fixed_tokens:
last_type = end_token.previous last_type = end_token.previous
while last_type.string.isspace(): while last_type.string.isspace():
@ -233,7 +261,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
tokenutil.SplitToken(last_type, tokenutil.SplitToken(last_type,
len(last_type.string) - trailing_space) len(last_type.string) - trailing_space)
new_token = Token("}", Type.DOC_END_BRACE, last_type.line, new_token = Token('}', Type.DOC_END_BRACE, last_type.line,
last_type.line_number) last_type.line_number)
tokenutil.InsertTokenAfter(new_token, last_type) tokenutil.InsertTokenAfter(new_token, last_type)
token.attached_object.type_end_token = new_token token.attached_object.type_end_token = new_token
@ -241,21 +269,19 @@ class ErrorFixer(errorhandler.ErrorHandler):
self._AddFix(fixed_tokens) self._AddFix(fixed_tokens)
elif code in (errors.GOOG_REQUIRES_NOT_ALPHABETIZED, elif code == errors.GOOG_REQUIRES_NOT_ALPHABETIZED:
errors.GOOG_PROVIDES_NOT_ALPHABETIZED): require_start_token = error.fix_data
tokens = error.fix_data sorter = requireprovidesorter.RequireProvideSorter()
strings = map(lambda x: x.string, tokens) sorter.FixRequires(require_start_token)
sorted_strings = sorted(strings)
index = 0 self._AddFix(require_start_token)
changed_tokens = []
for token in tokens: elif code == errors.GOOG_PROVIDES_NOT_ALPHABETIZED:
if token.string != sorted_strings[index]: provide_start_token = error.fix_data
token.string = sorted_strings[index] sorter = requireprovidesorter.RequireProvideSorter()
changed_tokens.append(token) sorter.FixProvides(provide_start_token)
index += 1
self._AddFix(changed_tokens) self._AddFix(provide_start_token)
elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC: elif code == errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC:
if token.previous.string == '{' and token.next.string == '}': if token.previous.string == '{' and token.next.string == '}':
@ -263,13 +289,23 @@ class ErrorFixer(errorhandler.ErrorHandler):
tokenutil.DeleteToken(token.next) tokenutil.DeleteToken(token.next)
self._AddFix([token]) self._AddFix([token])
elif code == errors.INVALID_AUTHOR_TAG_DESCRIPTION:
match = INVERTED_AUTHOR_SPEC.match(token.string)
if match:
token.string = '%s%s%s(%s)%s' % (match.group('leading_whitespace'),
match.group('email'),
match.group('whitespace_after_name'),
match.group('name'),
match.group('trailing_characters'))
self._AddFix(token)
elif (code == errors.WRONG_INDENTATION and elif (code == errors.WRONG_INDENTATION and
not FLAGS.disable_indentation_fixing): not FLAGS.disable_indentation_fixing):
token = tokenutil.GetFirstTokenInSameLine(token) token = tokenutil.GetFirstTokenInSameLine(token)
actual = error.position.start actual = error.position.start
expected = error.position.length expected = error.position.length
if token.type in (Type.WHITESPACE, Type.PARAMETERS): if token.type in (Type.WHITESPACE, Type.PARAMETERS) and actual != 0:
token.string = token.string.lstrip() + (' ' * expected) token.string = token.string.lstrip() + (' ' * expected)
self._AddFix([token]) self._AddFix([token])
else: else:
@ -282,26 +318,102 @@ class ErrorFixer(errorhandler.ErrorHandler):
tokenutil.InsertTokenAfter(new_token, token.previous) tokenutil.InsertTokenAfter(new_token, token.previous)
self._AddFix([token]) self._AddFix([token])
elif code == errors.EXTRA_GOOG_REQUIRE: elif code in [errors.MALFORMED_END_OF_SCOPE_COMMENT,
fixed_tokens = [] errors.MISSING_END_OF_SCOPE_COMMENT]:
while token: # Only fix cases where }); is found with no trailing content on the line
if token.type == Type.IDENTIFIER: # other than a comment. Value of 'token' is set to } for this error.
if token.string not in ['goog.require', 'goog.provide']: if (token.type == Type.END_BLOCK and
# Stop iterating over tokens once we're out of the requires and token.next.type == Type.END_PAREN and
# provides. token.next.next.type == Type.SEMICOLON):
break current_token = token.next.next.next
if token.string == 'goog.require': removed_tokens = []
# Text of form: goog.require('required'), skipping past open paren while current_token and current_token.line_number == token.line_number:
# and open quote to the string text. if current_token.IsAnyType(Type.WHITESPACE,
required = token.next.next.next.string Type.START_SINGLE_LINE_COMMENT,
if required in error.fix_data: Type.COMMENT):
fixed_tokens.append(token) removed_tokens.append(current_token)
# Want to delete: goog.require + open paren + open single-quote + current_token = current_token.next
# text + close single-quote + close paren + semi-colon = 7. else:
tokenutil.DeleteTokens(token, 7) return
token = token.next
self._AddFix(fixed_tokens) if removed_tokens:
tokenutil.DeleteTokens(removed_tokens[0], len(removed_tokens))
whitespace_token = Token(' ', Type.WHITESPACE, token.line,
token.line_number)
start_comment_token = Token('//', Type.START_SINGLE_LINE_COMMENT,
token.line, token.line_number)
comment_token = Token(' goog.scope', Type.COMMENT, token.line,
token.line_number)
insertion_tokens = [whitespace_token, start_comment_token,
comment_token]
tokenutil.InsertTokensAfter(insertion_tokens, token.next.next)
self._AddFix(removed_tokens + insertion_tokens)
elif code in [errors.EXTRA_GOOG_PROVIDE, errors.EXTRA_GOOG_REQUIRE]:
tokens_in_line = tokenutil.GetAllTokensInSameLine(token)
tokenutil.DeleteTokens(tokens_in_line[0], len(tokens_in_line))
self._AddFix(tokens_in_line)
elif code in [errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_REQUIRE]:
is_provide = code == errors.MISSING_GOOG_PROVIDE
is_require = code == errors.MISSING_GOOG_REQUIRE
missing_namespaces = error.fix_data[0]
need_blank_line = error.fix_data[1]
if need_blank_line is None:
# TODO(user): This happens when there are no existing
# goog.provide or goog.require statements to position new statements
# relative to. Consider handling this case with a heuristic.
return
insert_location = token.previous
# If inserting a missing require with no existing requires, insert a
# blank line first.
if need_blank_line and is_require:
tokenutil.InsertBlankLineAfter(insert_location)
insert_location = insert_location.next
for missing_namespace in missing_namespaces:
new_tokens = self._GetNewRequireOrProvideTokens(
is_provide, missing_namespace, insert_location.line_number + 1)
tokenutil.InsertLineAfter(insert_location, new_tokens)
insert_location = new_tokens[-1]
self._AddFix(new_tokens)
# If inserting a missing provide with no existing provides, insert a
# blank line after.
if need_blank_line and is_provide:
tokenutil.InsertBlankLineAfter(insert_location)
def _GetNewRequireOrProvideTokens(self, is_provide, namespace, line_number):
  """Returns a list of tokens to create a goog.require/provide statement.

  Args:
    is_provide: True if getting tokens for a provide, False for require.
    namespace: The required or provided namespaces to get tokens for.
    line_number: The line number the new require or provide statement will be
        on.

  Returns:
    Tokens to create a new goog.require or goog.provide statement.
  """
  call = 'goog.provide' if is_provide else 'goog.require'
  line_text = "%s('%s');\n" % (call, namespace)

  # The token list mirrors the statement text from left to right.
  def _MakeToken(string, token_type):
    return Token(string, token_type, line_text, line_number)

  return [
      _MakeToken(call, Type.IDENTIFIER),
      _MakeToken('(', Type.START_PAREN),
      _MakeToken('\'', Type.SINGLE_QUOTE_STRING_START),
      _MakeToken(namespace, Type.STRING_TEXT),
      _MakeToken('\'', Type.SINGLE_QUOTE_STRING_END),
      _MakeToken(')', Type.END_PAREN),
      _MakeToken(';', Type.SEMICOLON),
  ]
def FinishFile(self): def FinishFile(self):
"""Called when the current file has finished style checking. """Called when the current file has finished style checking.
@ -311,7 +423,7 @@ class ErrorFixer(errorhandler.ErrorHandler):
if self._file_fix_count: if self._file_fix_count:
f = self._external_file f = self._external_file
if not f: if not f:
print "Fixed %d errors in %s" % (self._file_fix_count, self._file_name) print 'Fixed %d errors in %s' % (self._file_fix_count, self._file_name)
f = open(self._file_name, 'w') f = open(self._file_name, 'w')
token = self._file_token token = self._file_token
@ -323,11 +435,10 @@ class ErrorFixer(errorhandler.ErrorHandler):
if token.IsLastInLine(): if token.IsLastInLine():
f.write('\n') f.write('\n')
if char_count > 80 and token.line_number in self._file_changed_lines: if char_count > 80 and token.line_number in self._file_changed_lines:
print "WARNING: Line %d of %s is now longer than 80 characters." % ( print 'WARNING: Line %d of %s is now longer than 80 characters.' % (
token.line_number, self._file_name) token.line_number, self._file_name)
char_count = 0 char_count = 0
self._file_changed_lines
token = token.next token = token.next

65
tools/closure_linter/closure_linter/errorrecord.py

@ -0,0 +1,65 @@
#!/usr/bin/env python
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple, pickle-serializable class to represent a lint error."""
import gflags as flags
from closure_linter import errors
from closure_linter.common import erroroutput
FLAGS = flags.FLAGS
class ErrorRecord(object):
  """Record-keeping struct that can be serialized back from a process.

  Attributes:
    path: Path to the file.
    error_string: Error string for the user.
    new_error: Whether this is a "new error" (see errors.NEW_ERRORS).
  """

  def __init__(self, path, error_string, new_error):
    # Deliberately a plain data holder so instances stay pickle-friendly
    # when shipped back from worker processes.
    self.new_error = new_error
    self.error_string = error_string
    self.path = path
def MakeErrorRecord(path, error):
  """Make an error record with correctly formatted error string.

  Errors are not able to be serialized (pickled) over processes because of
  their pointers to the complex token/context graph.  We use an intermediary
  serializable class to pass back just the relevant information.

  Args:
    path: Path of file the error was found in.
    error: An error.Error instance.

  Returns:
    _ErrorRecord instance.
  """
  new_error = error.code in errors.NEW_ERRORS

  # Unix mode wants terse, machine-parseable one-line output.
  if FLAGS.unix_mode:
    formatted = erroroutput.GetUnixErrorOutput(path, error, new_error)
  else:
    formatted = erroroutput.GetErrorOutput(error, new_error)

  return ErrorRecord(path, formatted, new_error)

17
tools/closure_linter/closure_linter/errors.py

@ -19,6 +19,7 @@
__author__ = ('robbyw@google.com (Robert Walker)', __author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)') 'ajp@google.com (Andy Perelson)')
def ByName(name): def ByName(name):
"""Get the error code for the given error name. """Get the error code for the given error name.
@ -57,6 +58,7 @@ LINE_STARTS_WITH_OPERATOR = 120
COMMA_AT_END_OF_LITERAL = 121 COMMA_AT_END_OF_LITERAL = 121
MULTI_LINE_STRING = 130 MULTI_LINE_STRING = 130
UNNECESSARY_DOUBLE_QUOTED_STRING = 131 UNNECESSARY_DOUBLE_QUOTED_STRING = 131
UNUSED_PRIVATE_MEMBER = 132
# Requires, provides # Requires, provides
GOOG_REQUIRES_NOT_ALPHABETIZED = 140 GOOG_REQUIRES_NOT_ALPHABETIZED = 140
@ -64,6 +66,7 @@ GOOG_PROVIDES_NOT_ALPHABETIZED = 141
MISSING_GOOG_REQUIRE = 142 MISSING_GOOG_REQUIRE = 142
MISSING_GOOG_PROVIDE = 143 MISSING_GOOG_PROVIDE = 143
EXTRA_GOOG_REQUIRE = 144 EXTRA_GOOG_REQUIRE = 144
EXTRA_GOOG_PROVIDE = 145
# JsDoc # JsDoc
INVALID_JSDOC_TAG = 200 INVALID_JSDOC_TAG = 200
@ -89,6 +92,8 @@ UNNECESSARY_BRACES_AROUND_INHERIT_DOC = 226
INVALID_AUTHOR_TAG_DESCRIPTION = 227 INVALID_AUTHOR_TAG_DESCRIPTION = 227
JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230 JSDOC_PREFER_QUESTION_TO_PIPE_NULL = 230
JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231 JSDOC_ILLEGAL_QUESTION_WITH_PIPE = 231
JSDOC_MISSING_OPTIONAL_TYPE = 232
JSDOC_MISSING_OPTIONAL_PREFIX = 233
JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER = 240 JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER = 240
# TODO(robbyw): Split this in to more specific syntax problems. # TODO(robbyw): Split this in to more specific syntax problems.
INCORRECT_SUPPRESS_SYNTAX = 250 INCORRECT_SUPPRESS_SYNTAX = 250
@ -103,6 +108,10 @@ FILE_IN_BLOCK = 301
INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400 INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS = 400
INTERFACE_METHOD_CANNOT_HAVE_CODE = 401 INTERFACE_METHOD_CANNOT_HAVE_CODE = 401
# Comments
MISSING_END_OF_SCOPE_COMMENT = 500
MALFORMED_END_OF_SCOPE_COMMENT = 501
# ActionScript specific errors: # ActionScript specific errors:
# TODO(user): move these errors to their own file and move all JavaScript # TODO(user): move these errors to their own file and move all JavaScript
# specific errors to their own file as well. # specific errors to their own file as well.
@ -125,7 +134,9 @@ NEW_ERRORS = frozenset([
# Errors added after 2.0.2: # Errors added after 2.0.2:
WRONG_INDENTATION, WRONG_INDENTATION,
MISSING_SEMICOLON, MISSING_SEMICOLON,
# Errors added after 2.2.5: # Errors added after 2.3.4:
WRONG_BLANK_LINE_COUNT, MISSING_END_OF_SCOPE_COMMENT,
EXTRA_GOOG_REQUIRE, MALFORMED_END_OF_SCOPE_COMMENT,
UNUSED_PRIVATE_MEMBER,
# Errors added after 2.3.5:
]) ])

12
tools/closure_linter/closure_linter/fixjsstyle.py

@ -1,4 +1,5 @@
#!/usr/bin/env python #!/usr/bin/env python
# python2.6 for command-line runs using p4lib. pylint: disable-msg=C6301
# #
# Copyright 2007 The Closure Linter Authors. All Rights Reserved. # Copyright 2007 The Closure Linter Authors. All Rights Reserved.
# #
@ -25,6 +26,11 @@ from closure_linter import checker
from closure_linter import error_fixer from closure_linter import error_fixer
from closure_linter.common import simplefileflags as fileflags from closure_linter.common import simplefileflags as fileflags
FLAGS = flags.FLAGS
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
def main(argv = None): def main(argv = None):
"""Main function. """Main function.
@ -35,7 +41,11 @@ def main(argv = None):
if argv is None: if argv is None:
argv = flags.FLAGS(sys.argv) argv = flags.FLAGS(sys.argv)
files = fileflags.GetFileList(argv, 'JavaScript', ['.js']) suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
files = fileflags.GetFileList(argv, 'JavaScript', suffixes)
style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer()) style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())

199
tools/closure_linter/closure_linter/fixjsstyle_test.py

@ -31,18 +31,34 @@ flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js') flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy') flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
class FixJsStyleTest(googletest.TestCase): class FixJsStyleTest(googletest.TestCase):
"""Test case to for gjslint auto-fixing.""" """Test case to for gjslint auto-fixing."""
def testFixJsStyle(self): def testFixJsStyle(self):
test_cases = [['fixjsstyle.in.js', 'fixjsstyle.out.js'],
['indentation.js', 'fixjsstyle.indentation.out.js']]
for [running_input_file, running_output_file] in test_cases:
input_filename = None input_filename = None
golden_filename = None
current_filename = None
try: try:
input_filename = '%s/fixjsstyle.in.js' % (_RESOURCE_PREFIX) input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
current_filename = input_filename
golden_filename = '%s/fixjsstyle.out.js' % (_RESOURCE_PREFIX) golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
current_filename = golden_filename
except IOError, ex: except IOError, ex:
raise IOError('Could not find testdata resource for %s: %s' % raise IOError('Could not find testdata resource for %s: %s' %
(self._filename, ex)) (current_filename, ex))
if running_input_file == 'fixjsstyle.in.js':
with open(input_filename) as f:
for line in f:
# Go to last line.
pass
self.assertTrue(line == line.rstrip(), '%s file should not end '
'with a new line.' % (input_filename))
# Autofix the file, sending output to a fake file. # Autofix the file, sending output to a fake file.
actual = StringIO.StringIO() actual = StringIO.StringIO()
@ -56,6 +72,183 @@ class FixJsStyleTest(googletest.TestCase):
self.assertEqual(actual.readlines(), expected.readlines()) self.assertEqual(actual.readlines(), expected.readlines())
def testMissingExtraAndUnsortedRequires(self):
  """Tests handling of missing, extra and unsorted goog.require statements."""
  before = [
      "goog.require('dummy.aa');",
      "goog.require('dummy.Cc');",
      "goog.require('dummy.Dd');",
      "",
      "var x = new dummy.Bb();",
      "dummy.Cc.someMethod();",
      "dummy.aa.someMethod();",
  ]
  # dummy.Dd is unused (dropped), dummy.Bb is missing (added), and the
  # result must be alphabetized.
  after = [
      "goog.require('dummy.Bb');",
      "goog.require('dummy.Cc');",
      "goog.require('dummy.aa');",
      "",
      "var x = new dummy.Bb();",
      "dummy.Cc.someMethod();",
      "dummy.aa.someMethod();",
  ]
  self._AssertFixes(before, after)
def testMissingExtraAndUnsortedProvides(self):
  """Tests handling of missing, extra and unsorted goog.provide statements."""
  before = [
      "goog.provide('dummy.aa');",
      "goog.provide('dummy.Cc');",
      "goog.provide('dummy.Dd');",
      "",
      "dummy.Cc = function() {};",
      "dummy.Bb = function() {};",
      "dummy.aa.someMethod = function();",
  ]
  # dummy.Dd is never defined (dropped), dummy.Bb is defined but not
  # provided (added), and the result must be alphabetized.
  after = [
      "goog.provide('dummy.Bb');",
      "goog.provide('dummy.Cc');",
      "goog.provide('dummy.aa');",
      "",
      "dummy.Cc = function() {};",
      "dummy.Bb = function() {};",
      "dummy.aa.someMethod = function();",
  ]
  self._AssertFixes(before, after)
def testNoRequires(self):
  """Tests positioning of missing requires without existing requires."""
  before = [
      "goog.provide('dummy.Something');",
      "",
      "dummy.Something = function() {};",
      "",
      "var x = new dummy.Bb();",
  ]
  # With no require block to extend, the new require goes after the
  # provides, separated by a blank line.
  after = [
      "goog.provide('dummy.Something');",
      "",
      "goog.require('dummy.Bb');",
      "",
      "dummy.Something = function() {};",
      "",
      "var x = new dummy.Bb();",
  ]
  self._AssertFixes(before, after)
def testNoProvides(self):
  """Tests positioning of missing provides without existing provides."""
  before = [
      "goog.require('dummy.Bb');",
      "",
      "dummy.Something = function() {};",
      "",
      "var x = new dummy.Bb();",
  ]
  # With no provide block to extend, the new provide goes before the
  # requires, separated by a blank line.
  after = [
      "goog.provide('dummy.Something');",
      "",
      "goog.require('dummy.Bb');",
      "",
      "dummy.Something = function() {};",
      "",
      "var x = new dummy.Bb();",
  ]
  self._AssertFixes(before, after)
def testGoogScopeIndentation(self):
  """Tests handling a typical end-of-scope indentation fix."""
  before = [
      'goog.scope(function() {',
      '  // TODO(brain): Take over the world.',
      '}); // goog.scope',
  ]
  # Code inside goog.scope must not be indented an extra level.
  after = [
      'goog.scope(function() {',
      '// TODO(brain): Take over the world.',
      '}); // goog.scope',
  ]
  self._AssertFixes(before, after)
def testMissingEndOfScopeComment(self):
  """Tests handling a missing comment at end of goog.scope."""
  before = [
      'goog.scope(function() {',
      '});',
  ]
  after = [
      'goog.scope(function() {',
      '}); // goog.scope',
  ]
  self._AssertFixes(before, after)
def testMissingEndOfScopeCommentWithOtherComment(self):
  """Tests handling an irrelevant comment at end of goog.scope."""
  before = [
      'goog.scope(function() {',
      "}); // I don't belong here!",
  ]
  # An unrelated trailing comment is replaced by the canonical one.
  after = [
      'goog.scope(function() {',
      '}); // goog.scope',
  ]
  self._AssertFixes(before, after)
def testMalformedEndOfScopeComment(self):
  """Tests handling a malformed comment at end of goog.scope."""
  before = [
      'goog.scope(function() {',
      '}); // goog.scope FTW',
  ]
  after = [
      'goog.scope(function() {',
      '}); // goog.scope',
  ]
  self._AssertFixes(before, after)
def _AssertFixes(self, original, expected):
  """Asserts that the error fixer corrects original to expected.

  Args:
    original: Lines of JavaScript source to run the fixer over.
    expected: Lines the fixer is expected to produce.
  """
  original = self._GetHeader() + original
  expected = self._GetHeader() + expected

  # Capture the fixer's output in an in-memory file.
  actual = StringIO.StringIO()
  style_checker = checker.JavaScriptStyleChecker(
      error_fixer.ErrorFixer(actual))
  style_checker.CheckLines('testing.js', original, False)
  actual.seek(0)

  # readlines() yields newline-terminated strings, so match that shape.
  expected = [line + '\n' for line in expected]

  self.assertListEqual(actual.readlines(), expected)
def _GetHeader(self):
  """Returns a fake header for a JavaScript file."""
  # A minimal copyright line plus @fileoverview block keeps the style
  # checker from reporting header errors unrelated to the test body.
  return [
      "// Copyright 2011 Google Inc. All Rights Reserved.",
      "",
      "/**",
      " * @fileoverview Fake file overview.",
      " * @author fake@google.com (Fake Person)",
      " */",
      ""
  ]
if __name__ == '__main__': if __name__ == '__main__':
googletest.main() googletest.main()

17
tools/closure_linter/closure_linter/full_test.py

@ -33,6 +33,7 @@ import unittest as googletest
from closure_linter import checker from closure_linter import checker
from closure_linter import errors from closure_linter import errors
from closure_linter import error_check
from closure_linter.common import filetestcase from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata' _RESOURCE_PREFIX = 'closure_linter/testdata'
@ -40,36 +41,50 @@ _RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = True flags.FLAGS.strict = True
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires') flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy') flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js') flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
flags.FLAGS.jslint_error = error_check.Rule.ALL
# List of files under testdata to test. # List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories. # We need to list files explicitly since pyglib can't list directories.
# TODO(user): Figure out how to list the directory.
_TEST_FILES = [ _TEST_FILES = [
'all_js_wrapped.js', 'all_js_wrapped.js',
'blank_lines.js', 'blank_lines.js',
'ends_with_block.js', 'ends_with_block.js',
'externs.js', 'externs.js',
'externs_jsdoc.js',
'goog_scope.js',
'html_parse_error.html', 'html_parse_error.html',
'indentation.js', 'indentation.js',
'interface.js', 'interface.js',
'jsdoc.js', 'jsdoc.js',
'limited_doc_checks.js',
'minimal.js', 'minimal.js',
'other.js', 'other.js',
'provide_blank.js',
'provide_extra.js',
'provide_missing.js',
'require_all_caps.js', 'require_all_caps.js',
'require_blank.js',
'require_extra.js', 'require_extra.js',
'require_function.js', 'require_function.js',
'require_function_missing.js', 'require_function_missing.js',
'require_function_through_both.js', 'require_function_through_both.js',
'require_function_through_namespace.js', 'require_function_through_namespace.js',
'require_interface.js', 'require_interface.js',
'require_interface_base.js',
'require_lower_case.js', 'require_lower_case.js',
'require_missing.js',
'require_numeric.js', 'require_numeric.js',
'require_provide_blank.js',
'require_provide_ok.js', 'require_provide_ok.js',
'require_provide_missing.js', 'require_provide_missing.js',
'simple.html', 'simple.html',
'spaces.js', 'spaces.js',
'tokenizer.js', 'tokenizer.js',
'unparseable.js', 'unparseable.js',
'unused_private_members.js',
'utf8.html' 'utf8.html'
] ]

185
tools/closure_linter/closure_linter/gjslint.py

@ -1,4 +1,5 @@
#!/usr/bin/env python #!/usr/bin/env python
# python2.6 for command-line runs using p4lib. pylint: disable-msg=C6301
# #
# Copyright 2007 The Closure Linter Authors. All Rights Reserved. # Copyright 2007 The Closure Linter Authors. All Rights Reserved.
# #
@ -35,15 +36,24 @@ is in tokenizer.py and checker.py.
__author__ = ('robbyw@google.com (Robert Walker)', __author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)') 'ajp@google.com (Andy Perelson)')
import functools
import itertools
import sys import sys
import time import time
import gflags as flags
from closure_linter import checker from closure_linter import checker
from closure_linter import errors from closure_linter import errorrecord
from closure_linter.common import errorprinter from closure_linter.common import erroraccumulator
from closure_linter.common import simplefileflags as fileflags from closure_linter.common import simplefileflags as fileflags
import gflags as flags
# Attempt import of multiprocessing (should be available in Python 2.6 and up).
try:
# pylint: disable-msg=C6204
import multiprocessing
except ImportError:
multiprocessing = None
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
flags.DEFINE_boolean('unix_mode', False, flags.DEFINE_boolean('unix_mode', False,
@ -54,12 +64,144 @@ flags.DEFINE_boolean('check_html', False,
'Whether to check javascript in html files.') 'Whether to check javascript in html files.')
flags.DEFINE_boolean('summary', False, flags.DEFINE_boolean('summary', False,
'Whether to show an error count summary.') 'Whether to show an error count summary.')
flags.DEFINE_list('additional_extensions', None, 'List of additional file '
'extensions (not js) that should be treated as '
'JavaScript files.')
flags.DEFINE_boolean('multiprocess', False,
'Whether to parallalize linting using the '
'multiprocessing module. Disabled by default.')
GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time', GJSLINT_ONLY_FLAGS = ['--unix_mode', '--beep', '--nobeep', '--time',
'--check_html', '--summary'] '--check_html', '--summary']
def FormatTime(t): def _MultiprocessCheckPaths(paths):
"""Run _CheckPath over mutltiple processes.
Tokenization, passes, and checks are expensive operations. Running in a
single process, they can only run on one CPU/core. Instead,
shard out linting over all CPUs with multiprocessing to parallelize.
Args:
paths: paths to check.
Yields:
errorrecord.ErrorRecords for any found errors.
"""
pool = multiprocessing.Pool()
for results in pool.imap(_CheckPath, paths):
for record in results:
yield record
pool.close()
pool.join()
def _CheckPaths(paths):
  """Run _CheckPath on all paths in one thread.

  Args:
    paths: paths to check.

  Yields:
    errorrecord.ErrorRecords for any found errors.
  """
  for path in paths:
    for record in _CheckPath(path):
      yield record
def _CheckPath(path):
  """Check a path and return any errors.

  Args:
    path: paths to check.

  Returns:
    A list of errorrecord.ErrorRecords for any found errors.
  """
  accumulator = erroraccumulator.ErrorAccumulator()
  style_checker = checker.JavaScriptStyleChecker(accumulator)
  style_checker.Check(path)

  # Raw errors hold token/context graph references and cannot be pickled,
  # so convert them into plain serializable records before returning.
  return [errorrecord.MakeErrorRecord(path, err)
          for err in accumulator.GetErrors()]
def _GetFilePaths(argv):
  """Expands command-line arguments into the list of files to lint.

  Args:
    argv: Command-line arguments remaining after flag parsing.

  Returns:
    The list of JavaScript (and, optionally, HTML) file paths to check.
  """
  suffixes = ['.js']
  if FLAGS.additional_extensions:
    for ext in FLAGS.additional_extensions:
      suffixes.append('.%s' % ext)
  if FLAGS.check_html:
    suffixes.extend(['.html', '.htm'])
  return fileflags.GetFileList(argv, 'JavaScript', suffixes)
# Error printing functions
def _PrintFileSummary(paths, records):
"""Print a detailed summary of the number of errors in each file."""
paths = list(paths)
paths.sort()
for path in paths:
path_errors = [e for e in records if e.path == path]
print '%s: %d' % (path, len(path_errors))
def _PrintFileSeparator(path):
  # Banner printed before a file's errors in non-unix output mode.
  print '----- FILE : %s -----' % path
def _PrintSummary(paths, error_records):
"""Print a summary of the number of errors and files."""
error_count = len(error_records)
all_paths = set(paths)
all_paths_count = len(all_paths)
if error_count is 0:
print '%d files checked, no errors found.' % all_paths_count
new_error_count = len([e for e in error_records if e.new_error])
error_paths = set([e.path for e in error_records])
error_paths_count = len(error_paths)
no_error_paths_count = all_paths_count - error_paths_count
if error_count or new_error_count:
print ('Found %d errors, including %d new errors, in %d files '
'(%d files OK).' % (
error_count,
new_error_count,
error_paths_count,
no_error_paths_count))
def _PrintErrorRecords(error_records):
"""Print error records strings in the expected format."""
current_path = None
for record in error_records:
if current_path != record.path:
current_path = record.path
if not FLAGS.unix_mode:
_PrintFileSeparator(current_path)
print record.error_string
def _FormatTime(t):
"""Formats a duration as a human-readable string. """Formats a duration as a human-readable string.
Args: Args:
@ -87,28 +229,36 @@ def main(argv = None):
start_time = time.time() start_time = time.time()
suffixes = ['.js'] suffixes = ['.js']
if FLAGS.additional_extensions:
suffixes += ['.%s' % ext for ext in FLAGS.additional_extensions]
if FLAGS.check_html: if FLAGS.check_html:
suffixes += ['.html', '.htm'] suffixes += ['.html', '.htm']
files = fileflags.GetFileList(argv, 'JavaScript', suffixes) paths = fileflags.GetFileList(argv, 'JavaScript', suffixes)
error_handler = None if FLAGS.multiprocess:
if FLAGS.unix_mode: records_iter = _MultiprocessCheckPaths(paths)
error_handler = errorprinter.ErrorPrinter(errors.NEW_ERRORS) else:
error_handler.SetFormat(errorprinter.UNIX_FORMAT) records_iter = _CheckPaths(paths)
runner = checker.GJsLintRunner() records_iter, records_iter_copy = itertools.tee(records_iter, 2)
result = runner.Run(files, error_handler) _PrintErrorRecords(records_iter_copy)
result.PrintSummary()
error_records = list(records_iter)
_PrintSummary(paths, error_records)
exit_code = 0 exit_code = 0
if result.HasOldErrors():
# If there are any errors
if error_records:
exit_code += 1 exit_code += 1
if result.HasNewErrors():
# If there are any new errors
if [r for r in error_records if r.new_error]:
exit_code += 2 exit_code += 2
if exit_code: if exit_code:
if FLAGS.summary: if FLAGS.summary:
result.PrintFileSummary() _PrintFileSummary(paths, error_records)
if FLAGS.beep: if FLAGS.beep:
# Make a beep noise. # Make a beep noise.
@ -129,11 +279,10 @@ Some of the errors reported by GJsLint may be auto-fixable using the script
fixjsstyle. Please double check any changes it makes and report any bugs. The fixjsstyle. Please double check any changes it makes and report any bugs. The
script can be run by executing: script can be run by executing:
fixjsstyle %s fixjsstyle %s """ % ' '.join(fix_args)
""" % ' '.join(fix_args)
if FLAGS.time: if FLAGS.time:
print 'Done in %s.' % FormatTime(time.time() - start_time) print 'Done in %s.' % _FormatTime(time.time() - start_time)
sys.exit(exit_code) sys.exit(exit_code)

48
tools/closure_linter/closure_linter/indentation.py

@ -152,7 +152,31 @@ class IndentationRules(object):
self._PopTo(Type.START_BRACKET) self._PopTo(Type.START_BRACKET)
elif token_type == Type.END_BLOCK: elif token_type == Type.END_BLOCK:
self._PopTo(Type.START_BLOCK) start_token = self._PopTo(Type.START_BLOCK)
# Check for required goog.scope comment.
if start_token:
goog_scope = self._GoogScopeOrNone(start_token.token)
if goog_scope is not None:
if not token.line.endswith('; // goog.scope\n'):
if (token.line.find('//') > -1 and
token.line.find('goog.scope') >
token.line.find('//')):
indentation_errors.append([
errors.MALFORMED_END_OF_SCOPE_COMMENT,
('Malformed end of goog.scope comment. Please use the '
'exact following syntax to close the scope:\n'
'}); // goog.scope'),
token,
Position(token.start_index, token.length)])
else:
indentation_errors.append([
errors.MISSING_END_OF_SCOPE_COMMENT,
('Missing comment for end of goog.scope which opened at line '
'%d. End the scope with:\n'
'}); // goog.scope' %
(start_token.line_number)),
token,
Position(token.start_index, token.length)])
elif token_type == Type.KEYWORD and token.string in ('case', 'default'): elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
self._Add(self._PopTo(Type.START_BLOCK)) self._Add(self._PopTo(Type.START_BLOCK))
@ -423,6 +447,27 @@ class IndentationRules(object):
if token.type not in Type.NON_CODE_TYPES: if token.type not in Type.NON_CODE_TYPES:
return False return False
def _GoogScopeOrNone(self, token):
  """Determines if the given START_BLOCK is part of a goog.scope statement.

  Args:
    token: A token of type START_BLOCK.

  Returns:
    The goog.scope function call token, or None if such call doesn't exist.
  """
  # A goog.scope statement places its call identifier exactly five tokens
  # before the opening block:
  #   goog.scope(function() {
  #   5     4    3       21 ^
  candidate = token
  for _ in xrange(5):
    if candidate is None:
      break
    candidate = candidate.previous
  if candidate and candidate.string == 'goog.scope':
    return candidate
def _Add(self, token_info): def _Add(self, token_info):
"""Adds the given token info to the stack. """Adds the given token info to the stack.
@ -434,6 +479,7 @@ class IndentationRules(object):
return return
if token_info.is_block or token_info.token.type == Type.START_PAREN: if token_info.is_block or token_info.token.type == Type.START_PAREN:
token_info.overridden_by = self._GoogScopeOrNone(token_info.token)
index = 1 index = 1
while index <= len(self._stack): while index <= len(self._stack):
stack_info = self._stack[-index] stack_info = self._stack[-index]

409
tools/closure_linter/closure_linter/javascriptlintrules.py

@ -1,6 +1,6 @@
#!/usr/bin/env python #!/usr/bin/env python
# #
# Copyright 2008 The Closure Linter Authors. All Rights Reserved. # Copyright 2011 The Closure Linter Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -24,32 +24,36 @@ __author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)', 'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)') 'jacobr@google.com (Jacob Richman)')
import gflags as flags import re
from sets import Set
from closure_linter import ecmalintrules from closure_linter import ecmalintrules
from closure_linter import error_check
from closure_linter import errors from closure_linter import errors
from closure_linter import javascripttokenizer from closure_linter import javascripttokenizer
from closure_linter import javascripttokens from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
from closure_linter import tokenutil from closure_linter import tokenutil
from closure_linter.common import error from closure_linter.common import error
from closure_linter.common import position from closure_linter.common import position
FLAGS = flags.FLAGS
flags.DEFINE_list('closurized_namespaces', '',
'Namespace prefixes, used for testing of'
'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
'Fully qualified namespaces that should be not be reported '
'as extra by the linter.')
# Shorthand # Shorthand
Error = error.Error Error = error.Error
Position = position.Position Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType Type = javascripttokens.JavaScriptTokenType
class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules): class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
"""JavaScript lint rules that catch JavaScript specific style errors.""" """JavaScript lint rules that catch JavaScript specific style errors."""
def __init__(self, namespaces_info):
  """Initializes a JavaScriptLintRules instance.

  Args:
    namespaces_info: Tracks provided/required namespaces for this file.
        NOTE(review): type inferred from usage in CheckToken; confirm.
  """
  ecmalintrules.EcmaScriptLintRules.__init__(self)
  self._namespaces_info = namespaces_info
  # State for the unused-private-member check: declarations of foo_ members
  # (and their tokens) versus observed usages.
  self._used_private_members = Set()
  self._declared_private_members = Set()
  self._declared_private_member_tokens = {}
def HandleMissingParameterDoc(self, token, param_name): def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a param tag.""" """Handle errors associated with a parameter missing a param tag."""
self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION, self._HandleError(errors.MISSING_PARAMETER_DOCUMENTATION,
@ -60,14 +64,17 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
Args: Args:
token: The token being checked token: The token being checked
Returns:
True if the token contains a record type, False otherwise.
""" """
# If we see more than one left-brace in the string of an annotation token, # If we see more than one left-brace in the string of an annotation token,
# then there's a record type in there. # then there's a record type in there.
return (token and token.type == Type.DOC_FLAG and return (
token and token.type == Type.DOC_FLAG and
token.attached_object.type is not None and token.attached_object.type is not None and
token.attached_object.type.find('{') != token.string.rfind('{')) token.attached_object.type.find('{') != token.string.rfind('{'))
def CheckToken(self, token, state): def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors. """Checks a token, given the current parser_state, for warnings and errors.
@ -85,23 +92,68 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
super(JavaScriptLintRules, self).CheckToken(token, state) super(JavaScriptLintRules, self).CheckToken(token, state)
# Store some convenience variables # Store some convenience variables
first_in_line = token.IsFirstInLine() namespaces_info = self._namespaces_info
last_in_line = token.IsLastInLine()
type = token.type
if type == Type.DOC_FLAG: if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Find all assignments to private members.
if token.type == Type.SIMPLE_LVALUE:
identifier = token.string
if identifier.endswith('_') and not identifier.endswith('__'):
doc_comment = state.GetDocComment()
suppressed = (doc_comment and doc_comment.HasFlag('suppress') and
doc_comment.GetFlag('suppress').type == 'underscore')
if not suppressed:
# Look for static members defined on a provided namespace.
namespace = namespaces_info.GetClosurizedNamespace(identifier)
provided_namespaces = namespaces_info.GetProvidedNamespaces()
# Skip cases of this.something_.somethingElse_.
regex = re.compile('^this\.[a-zA-Z_]+$')
if namespace in provided_namespaces or regex.match(identifier):
variable = identifier.split('.')[-1]
self._declared_private_member_tokens[variable] = token
self._declared_private_members.add(variable)
elif not identifier.endswith('__'):
# Consider setting public members of private members to be a usage.
for piece in identifier.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
# Find all usages of private members.
if token.type == Type.IDENTIFIER:
for piece in token.string.split('.'):
if piece.endswith('_'):
self._used_private_members.add(piece)
if token.type == Type.DOC_FLAG:
flag = token.attached_object flag = token.attached_object
if flag.flag_type == 'param' and flag.name_token is not None: if flag.flag_type == 'param' and flag.name_token is not None:
self._CheckForMissingSpaceBeforeToken( self._CheckForMissingSpaceBeforeToken(
token.attached_object.name_token) token.attached_object.name_token)
if (error_check.ShouldCheck(Rule.OPTIONAL_TYPE_MARKER) and
flag.type is not None and flag.name is not None):
# Check for optional marker in type.
if (flag.type.endswith('=') and
not flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_PREFIX,
'Optional parameter name %s must be prefixed '
'with opt_.' % flag.name,
token)
elif (not flag.type.endswith('=') and
flag.name.startswith('opt_')):
self._HandleError(errors.JSDOC_MISSING_OPTIONAL_TYPE,
'Optional parameter %s type must end with =.' %
flag.name,
token)
if flag.flag_type in state.GetDocFlag().HAS_TYPE: if flag.flag_type in state.GetDocFlag().HAS_TYPE:
# Check for both missing type token and empty type braces '{}' # Check for both missing type token and empty type braces '{}'
# Missing suppress types are reported separately and we allow enums # Missing suppress types are reported separately and we allow enums
# without types. # without types.
if (flag.flag_type not in ('suppress', 'enum') and if (flag.flag_type not in ('suppress', 'enum') and
(flag.type == None or flag.type == '' or flag.type.isspace())): (not flag.type or flag.type.isspace())):
self._HandleError(errors.MISSING_JSDOC_TAG_TYPE, self._HandleError(errors.MISSING_JSDOC_TAG_TYPE,
'Missing type in %s tag' % token.string, token) 'Missing type in %s tag' % token.string, token)
@ -112,13 +164,13 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
'Type should be immediately after %s tag' % token.string, 'Type should be immediately after %s tag' % token.string,
token) token)
elif type == Type.DOUBLE_QUOTE_STRING_START: elif token.type == Type.DOUBLE_QUOTE_STRING_START:
next = token.next next_token = token.next
while next.type == Type.STRING_TEXT: while next_token.type == Type.STRING_TEXT:
if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search( if javascripttokenizer.JavaScriptTokenizer.SINGLE_QUOTE.search(
next.string): next_token.string):
break break
next = next.next next_token = next_token.next
else: else:
self._HandleError( self._HandleError(
errors.UNNECESSARY_DOUBLE_QUOTED_STRING, errors.UNNECESSARY_DOUBLE_QUOTED_STRING,
@ -126,13 +178,20 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
token, token,
Position.All(token.string)) Position.All(token.string))
elif type == Type.END_DOC_COMMENT: elif token.type == Type.END_DOC_COMMENT:
if (FLAGS.strict and not self._is_html and state.InTopLevel() and doc_comment = state.GetDocComment()
not state.InBlock()):
# When @externs appears in a @fileoverview comment, it should trigger
# the same limited doc checks as a special filename like externs.js.
if doc_comment.HasFlag('fileoverview') and doc_comment.HasFlag('externs'):
self._SetLimitedDocChecks(True)
if (error_check.ShouldCheck(Rule.BLANK_LINES_AT_TOP_LEVEL) and
not self._is_html and state.InTopLevel() and not state.InBlock()):
# Check if we're in a fileoverview or constructor JsDoc. # Check if we're in a fileoverview or constructor JsDoc.
doc_comment = state.GetDocComment() is_constructor = (
is_constructor = (doc_comment.HasFlag('constructor') or doc_comment.HasFlag('constructor') or
doc_comment.HasFlag('interface')) doc_comment.HasFlag('interface'))
is_file_overview = doc_comment.HasFlag('fileoverview') is_file_overview = doc_comment.HasFlag('fileoverview')
@ -140,9 +199,16 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
# precede some code, skip it. # precede some code, skip it.
# NOTE: The tokenutil methods are not used here because of their # NOTE: The tokenutil methods are not used here because of their
# behavior at the top of a file. # behavior at the top of a file.
next = token.next next_token = token.next
if (not next or if (not next_token or
(not is_file_overview and next.type in Type.NON_CODE_TYPES)): (not is_file_overview and next_token.type in Type.NON_CODE_TYPES)):
return
# Don't require extra blank lines around suppression of extra
# goog.require errors.
if (doc_comment.SuppressionOnly() and
next_token.type == Type.IDENTIFIER and
next_token.string in ['goog.provide', 'goog.require']):
return return
# Find the start of this block (include comments above the block, unless # Find the start of this block (include comments above the block, unless
@ -174,19 +240,20 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
error_message = 'Should have a blank line before a file overview.' error_message = 'Should have a blank line before a file overview.'
expected_blank_lines = 1 expected_blank_lines = 1
elif is_constructor and blank_lines != 3: elif is_constructor and blank_lines != 3:
error_message = ('Should have 3 blank lines before a constructor/' error_message = (
'interface.') 'Should have 3 blank lines before a constructor/interface.')
expected_blank_lines = 3 expected_blank_lines = 3
elif not is_file_overview and not is_constructor and blank_lines != 2: elif not is_file_overview and not is_constructor and blank_lines != 2:
error_message = 'Should have 2 blank lines between top-level blocks.' error_message = 'Should have 2 blank lines between top-level blocks.'
expected_blank_lines = 2 expected_blank_lines = 2
if error_message: if error_message:
self._HandleError(errors.WRONG_BLANK_LINE_COUNT, error_message, self._HandleError(
errors.WRONG_BLANK_LINE_COUNT, error_message,
block_start, Position.AtBeginning(), block_start, Position.AtBeginning(),
expected_blank_lines - blank_lines) expected_blank_lines - blank_lines)
elif type == Type.END_BLOCK: elif token.type == Type.END_BLOCK:
if state.InFunction() and state.IsFunctionClose(): if state.InFunction() and state.IsFunctionClose():
is_immediately_called = (token.next and is_immediately_called = (token.next and
token.next.type == Type.START_PAREN) token.next.type == Type.START_PAREN)
@ -203,7 +270,9 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
errors.MISSING_RETURN_DOCUMENTATION, errors.MISSING_RETURN_DOCUMENTATION,
'Missing @return JsDoc in function with non-trivial return', 'Missing @return JsDoc in function with non-trivial return',
function.doc.end_token, Position.AtBeginning()) function.doc.end_token, Position.AtBeginning())
elif (not function.has_return and function.doc and elif (not function.has_return and
not function.has_throw and
function.doc and
function.doc.HasFlag('return') and function.doc.HasFlag('return') and
not state.InInterfaceMethod()): not state.InInterfaceMethod()):
return_flag = function.doc.GetFlag('return') return_flag = function.doc.GetFlag('return')
@ -232,7 +301,7 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
'constructor with @constructor)', 'constructor with @constructor)',
function.doc.end_token, Position.AtBeginning()) function.doc.end_token, Position.AtBeginning())
elif type == Type.IDENTIFIER: elif token.type == Type.IDENTIFIER:
if token.string == 'goog.inherits' and not state.InFunction(): if token.string == 'goog.inherits' and not state.InFunction():
if state.GetLastNonSpaceToken().line_number == token.line_number: if state.GetLastNonSpaceToken().line_number == token.line_number:
self._HandleError( self._HandleError(
@ -253,7 +322,71 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
# TODO(robbyw): Test the last function was a constructor. # TODO(robbyw): Test the last function was a constructor.
# TODO(robbyw): Test correct @extends and @implements documentation. # TODO(robbyw): Test correct @extends and @implements documentation.
elif type == Type.OPERATOR: elif (token.string == 'goog.provide' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.Search(token, Type.STRING_TEXT).string
# Report extra goog.provide statement.
if namespaces_info.IsExtraProvide(token):
self._HandleError(
errors.EXTRA_GOOG_PROVIDE,
'Unnecessary goog.provide: ' + namespace,
token, position=Position.AtBeginning())
if namespaces_info.IsLastProvide(token):
# Report missing provide statements after the last existing provide.
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetLastTokenInSameLine(token).next,
False)
# If there are no require statements, missing requires should be
# reported after the last provide.
if not namespaces_info.GetRequiredNamespaces():
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
True)
elif (token.string == 'goog.require' and
not state.InFunction() and
namespaces_info is not None):
namespace = tokenutil.Search(token, Type.STRING_TEXT).string
# If there are no provide statements, missing provides should be
# reported before the first require.
if (namespaces_info.IsFirstRequire(token) and
not namespaces_info.GetProvidedNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides,
tokenutil.GetFirstTokenInSameLine(token),
True)
# Report extra goog.require statement.
if namespaces_info.IsExtraRequire(token):
self._HandleError(
errors.EXTRA_GOOG_REQUIRE,
'Unnecessary goog.require: ' + namespace,
token, position=Position.AtBeginning())
# Report missing goog.require statements.
if namespaces_info.IsLastRequire(token):
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires,
tokenutil.GetLastTokenInSameLine(token).next,
False)
elif token.type == Type.OPERATOR:
last_in_line = token.IsLastInLine()
# If the token is unary and appears to be used in a unary context # If the token is unary and appears to be used in a unary context
# it's ok. Otherwise, if it's at the end of the line or immediately # it's ok. Otherwise, if it's at the end of the line or immediately
# before a comment, it's ok. # before a comment, it's ok.
@ -270,7 +403,9 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
'Missing space after "%s"' % token.string, 'Missing space after "%s"' % token.string,
token, token,
Position.AtEnd(token.string)) Position.AtEnd(token.string))
elif type == Type.WHITESPACE: elif token.type == Type.WHITESPACE:
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
# Check whitespace length if it's not the first token of the line and # Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment. # if it's not immediately before a comment.
if not last_in_line and not first_in_line and not token.next.IsComment(): if not last_in_line and not first_in_line and not token.next.IsComment():
@ -284,112 +419,120 @@ class JavaScriptLintRules(ecmalintrules.EcmaScriptLintRules):
token, token,
Position.All(token.string)) Position.All(token.string))
def Finalize(self, state, tokenizer_mode): def _ReportMissingProvides(self, missing_provides, token, need_blank_line):
"""Perform all checks that need to occur after all lines are processed.""" """Reports missing provide statements to the error handler.
# Call the base class's Finalize function.
super(JavaScriptLintRules, self).Finalize(state, tokenizer_mode)
# Check for sorted requires statements.
goog_require_tokens = state.GetGoogRequireTokens()
requires = [require_token.string for require_token in goog_require_tokens]
sorted_requires = sorted(requires)
index = 0
bad = False
for item in requires:
if item != sorted_requires[index]:
bad = True
break
index += 1
if bad: Args:
self._HandleError( missing_provides: A list of strings where each string is a namespace that
errors.GOOG_REQUIRES_NOT_ALPHABETIZED, should be provided, but is not.
'goog.require classes must be alphabetized. The correct code is:\n' + token: The token where the error was detected (also where the new provides
'\n'.join(map(lambda x: 'goog.require(\'%s\');' % x, will be inserted.
sorted_requires)), need_blank_line: Whether a blank line needs to be inserted after the new
goog_require_tokens[index], provides are inserted. May be True, False, or None, where None
position=Position.AtBeginning(), indicates that the insert location is unknown.
fix_data=goog_require_tokens) """
# Check for sorted provides statements.
goog_provide_tokens = state.GetGoogProvideTokens()
provides = [provide_token.string for provide_token in goog_provide_tokens]
sorted_provides = sorted(provides)
index = 0
bad = False
for item in provides:
if item != sorted_provides[index]:
bad = True
break
index += 1
if bad:
self._HandleError(
errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
'goog.provide classes must be alphabetized. The correct code is:\n' +
'\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
sorted_provides)),
goog_provide_tokens[index],
position=Position.AtBeginning(),
fix_data=goog_provide_tokens)
if FLAGS.closurized_namespaces:
# Check that we provide everything we need.
provided_namespaces = state.GetProvidedNamespaces()
missing_provides = provided_namespaces - set(provides)
if missing_provides:
self._HandleError( self._HandleError(
errors.MISSING_GOOG_PROVIDE, errors.MISSING_GOOG_PROVIDE,
'Missing the following goog.provide statements:\n' + 'Missing the following goog.provide statements:\n' +
'\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x, '\n'.join(map(lambda x: 'goog.provide(\'%s\');' % x,
sorted(missing_provides))), sorted(missing_provides))),
state.GetFirstToken(), position=Position.AtBeginning(), token, position=Position.AtBeginning(),
fix_data=missing_provides) fix_data=(missing_provides, need_blank_line))
# Compose a set of all available namespaces. Explicitly omit goog
# because if you can call goog.require, you already have goog.
available_namespaces = (set(requires) | set(provides) | set(['goog']) |
provided_namespaces)
# Check that we require everything we need.
missing_requires = set()
for namespace_variants in state.GetUsedNamespaces():
# Namespace variants is a list of potential things to require. If we
# find we're missing one, we are lazy and choose to require the first
# in the sequence - which should be the namespace.
if not set(namespace_variants) & available_namespaces:
missing_requires.add(namespace_variants[0])
if missing_requires: def _ReportMissingRequires(self, missing_requires, token, need_blank_line):
"""Reports missing require statements to the error handler.
Args:
missing_requires: A list of strings where each string is a namespace that
should be required, but is not.
token: The token where the error was detected (also where the new requires
will be inserted.
need_blank_line: Whether a blank line needs to be inserted before the new
requires are inserted. May be True, False, or None, where None
indicates that the insert location is unknown.
"""
self._HandleError( self._HandleError(
errors.MISSING_GOOG_REQUIRE, errors.MISSING_GOOG_REQUIRE,
'Missing the following goog.require statements:\n' + 'Missing the following goog.require statements:\n' +
'\n'.join(map(lambda x: 'goog.require(\'%s\');' % x, '\n'.join(map(lambda x: 'goog.require(\'%s\');' % x,
sorted(missing_requires))), sorted(missing_requires))),
state.GetFirstToken(), position=Position.AtBeginning(), token, position=Position.AtBeginning(),
fix_data=missing_requires) fix_data=(missing_requires, need_blank_line))
# Check that we don't require things we don't actually use. def Finalize(self, state, tokenizer_mode):
namespace_variants = state.GetUsedNamespaces() """Perform all checks that need to occur after all lines are processed."""
used_namespaces = set() # Call the base class's Finalize function.
for a, b in namespace_variants: super(JavaScriptLintRules, self).Finalize(state, tokenizer_mode)
used_namespaces.add(a)
used_namespaces.add(b) if error_check.ShouldCheck(Rule.UNUSED_PRIVATE_MEMBERS):
# Report an error for any declared private member that was never used.
extra_requires = set() unused_private_members = (self._declared_private_members -
for i in requires: self._used_private_members)
baseNamespace = i.split('.')[0]
if (i not in used_namespaces and for variable in unused_private_members:
baseNamespace in FLAGS.closurized_namespaces and token = self._declared_private_member_tokens[variable]
i not in FLAGS.ignored_extra_namespaces): self._HandleError(errors.UNUSED_PRIVATE_MEMBER,
extra_requires.add(i) 'Unused private member: %s.' % token.string,
token)
if extra_requires:
# Clear state to prepare for the next file.
self._declared_private_member_tokens = {}
self._declared_private_members = Set()
self._used_private_members = Set()
namespaces_info = self._namespaces_info
if namespaces_info is not None:
# If there are no provide or require statements, missing provides and
# requires should be reported on line 1.
if (not namespaces_info.GetProvidedNamespaces() and
not namespaces_info.GetRequiredNamespaces()):
missing_provides = namespaces_info.GetMissingProvides()
if missing_provides:
self._ReportMissingProvides(
missing_provides, state.GetFirstToken(), None)
missing_requires = namespaces_info.GetMissingRequires()
if missing_requires:
self._ReportMissingRequires(
missing_requires, state.GetFirstToken(), None)
self._CheckSortedRequiresProvides(state.GetFirstToken())
def _CheckSortedRequiresProvides(self, token):
"""Checks that all goog.require and goog.provide statements are sorted.
Note that this method needs to be run after missing statements are added to
preserve alphabetical order.
Args:
token: The first token in the token stream.
"""
sorter = requireprovidesorter.RequireProvideSorter()
provides_result = sorter.CheckProvides(token)
if provides_result:
self._HandleError( self._HandleError(
errors.EXTRA_GOOG_REQUIRE, errors.GOOG_PROVIDES_NOT_ALPHABETIZED,
'The following goog.require statements appear unnecessary:\n' + 'goog.provide classes must be alphabetized. The correct code is:\n' +
'\n'.join(map(lambda x: 'goog.require(\'%s\');' % x, '\n'.join(
sorted(extra_requires))), map(lambda x: 'goog.provide(\'%s\');' % x, provides_result[1])),
state.GetFirstToken(), position=Position.AtBeginning(), provides_result[0],
fix_data=extra_requires) position=Position.AtBeginning(),
fix_data=provides_result[0])
requires_result = sorter.CheckRequires(token)
if requires_result:
self._HandleError(
errors.GOOG_REQUIRES_NOT_ALPHABETIZED,
'goog.require classes must be alphabetized. The correct code is:\n' +
'\n'.join(
map(lambda x: 'goog.require(\'%s\');' % x, requires_result[1])),
requires_result[0],
position=Position.AtBeginning(),
fix_data=requires_result[0])
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit."""
return [
re.compile('goog\.require\(.+\);?\s*$'),
re.compile('goog\.provide\(.+\);?\s*$')
]

128
tools/closure_linter/closure_linter/javascriptstatetracker.py

@ -50,7 +50,7 @@ class JsDocFlag(statetracker.DocFlag):
# TODO(robbyw): determine which of these, if any, should be illegal. # TODO(robbyw): determine which of these, if any, should be illegal.
EXTENDED_DOC = frozenset([ EXTENDED_DOC = frozenset([
'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link', 'class', 'code', 'desc', 'final', 'hidden', 'inheritDoc', 'link',
'protected', 'notypecheck', 'throws']) 'meaning', 'protected', 'notypecheck', 'throws'])
LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC LEGAL_DOC = EXTENDED_DOC | statetracker.DocFlag.LEGAL_DOC
@ -70,24 +70,9 @@ class JavaScriptStateTracker(statetracker.StateTracker):
functionality needed for JavaScript. functionality needed for JavaScript.
""" """
def __init__(self, closurized_namespaces=''): def __init__(self):
"""Initializes a JavaScript token stream state tracker. """Initializes a JavaScript token stream state tracker."""
Args:
closurized_namespaces: An optional list of namespace prefixes used for
testing of goog.provide/require.
"""
statetracker.StateTracker.__init__(self, JsDocFlag) statetracker.StateTracker.__init__(self, JsDocFlag)
self.__closurized_namespaces = closurized_namespaces
def Reset(self):
"""Resets the state tracker to prepare for processing a new page."""
super(JavaScriptStateTracker, self).Reset()
self.__goog_require_tokens = []
self.__goog_provide_tokens = []
self.__provided_namespaces = set()
self.__used_namespaces = []
def InTopLevel(self): def InTopLevel(self):
"""Compute whether we are at the top level in the class. """Compute whether we are at the top level in the class.
@ -102,22 +87,6 @@ class JavaScriptStateTracker(statetracker.StateTracker):
""" """
return not self.InParentheses() return not self.InParentheses()
def GetGoogRequireTokens(self):
"""Returns list of require tokens."""
return self.__goog_require_tokens
def GetGoogProvideTokens(self):
"""Returns list of provide tokens."""
return self.__goog_provide_tokens
def GetProvidedNamespaces(self):
"""Returns list of provided namespaces."""
return self.__provided_namespaces
def GetUsedNamespaces(self):
"""Returns list of used namespaces, is a list of sequences."""
return self.__used_namespaces
def GetBlockType(self, token): def GetBlockType(self, token):
"""Determine the block type given a START_BLOCK token. """Determine the block type given a START_BLOCK token.
@ -145,94 +114,3 @@ class JavaScriptStateTracker(statetracker.StateTracker):
""" """
super(JavaScriptStateTracker, self).HandleToken(token, super(JavaScriptStateTracker, self).HandleToken(token,
last_non_space_token) last_non_space_token)
if token.IsType(Type.IDENTIFIER):
if token.string == 'goog.require':
class_token = tokenutil.Search(token, Type.STRING_TEXT)
self.__goog_require_tokens.append(class_token)
elif token.string == 'goog.provide':
class_token = tokenutil.Search(token, Type.STRING_TEXT)
self.__goog_provide_tokens.append(class_token)
elif self.__closurized_namespaces:
self.__AddUsedNamespace(token.string)
if token.IsType(Type.SIMPLE_LVALUE) and not self.InFunction():
identifier = token.values['identifier']
if self.__closurized_namespaces:
namespace = self.GetClosurizedNamespace(identifier)
if namespace and identifier == namespace:
self.__provided_namespaces.add(namespace)
if (self.__closurized_namespaces and
token.IsType(Type.DOC_FLAG) and
token.attached_object.flag_type == 'implements'):
# Interfaces should be goog.require'd.
doc_start = tokenutil.Search(token, Type.DOC_START_BRACE)
interface = tokenutil.Search(doc_start, Type.COMMENT)
self.__AddUsedNamespace(interface.string)
def __AddUsedNamespace(self, identifier):
"""Adds the namespace of an identifier to the list of used namespaces.
Args:
identifier: An identifier which has been used.
"""
namespace = self.GetClosurizedNamespace(identifier)
if namespace:
# We add token.string as a 'namespace' as it is something that could
# potentially be provided to satisfy this dependency.
self.__used_namespaces.append([namespace, identifier])
def GetClosurizedNamespace(self, identifier):
"""Given an identifier, returns the namespace that identifier is from.
Args:
identifier: The identifier to extract a namespace from.
Returns:
The namespace the given identifier resides in, or None if one could not
be found.
"""
parts = identifier.split('.')
for part in parts:
if part.endswith('_'):
# Ignore private variables / inner classes.
return None
if identifier.startswith('goog.global'):
# Ignore goog.global, since it is, by definition, global.
return None
for namespace in self.__closurized_namespaces:
if identifier.startswith(namespace + '.'):
last_part = parts[-1]
if not last_part:
# TODO(robbyw): Handle this: it's a multi-line identifier.
return None
if last_part in ('apply', 'inherits', 'call'):
# Calling one of Function's methods usually indicates use of a
# superclass.
parts.pop()
last_part = parts[-1]
for i in xrange(1, len(parts)):
part = parts[i]
if part.isupper():
# If an identifier is of the form foo.bar.BAZ.x or foo.bar.BAZ,
# the namespace is foo.bar.
return '.'.join(parts[:i])
if part == 'prototype':
# If an identifier is of the form foo.bar.prototype.x, the
# namespace is foo.bar.
return '.'.join(parts[:i])
if last_part.isupper() or not last_part[0].isupper():
# Strip off the last part of an enum or constant reference.
parts.pop()
return '.'.join(parts)
return None

53
tools/closure_linter/closure_linter/javascriptstatetracker_test.py

@ -1,53 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for JavaScriptStateTracker."""
import unittest as googletest
from closure_linter import javascriptstatetracker
class JavaScriptStateTrackerTest(googletest.TestCase):
__test_cases = {
'package.CONSTANT' : 'package',
'package.methodName' : 'package',
'package.subpackage.methodName' : 'package.subpackage',
'package.ClassName.something' : 'package.ClassName',
'package.ClassName.Enum.VALUE.methodName' : 'package.ClassName.Enum',
'package.ClassName.CONSTANT' : 'package.ClassName',
'package.ClassName.inherits' : 'package.ClassName',
'package.ClassName.apply' : 'package.ClassName',
'package.ClassName.methodName.apply' : 'package.ClassName',
'package.ClassName.methodName.call' : 'package.ClassName',
'package.ClassName.prototype.methodName' : 'package.ClassName',
'package.ClassName.privateMethod_' : None,
'package.ClassName.prototype.methodName.apply' : 'package.ClassName'
}
def testGetClosurizedNamespace(self):
stateTracker = javascriptstatetracker.JavaScriptStateTracker(['package'])
for identifier, expected_namespace in self.__test_cases.items():
actual_namespace = stateTracker.GetClosurizedNamespace(identifier)
self.assertEqual(expected_namespace, actual_namespace,
'expected namespace "' + str(expected_namespace) +
'" for identifier "' + str(identifier) + '" but was "' +
str(actual_namespace) + '"')
if __name__ == '__main__':
googletest.main()

28
tools/closure_linter/closure_linter/javascripttokenizer.py

@ -51,7 +51,7 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
""" """
# Useful patterns for JavaScript parsing. # Useful patterns for JavaScript parsing.
IDENTIFIER_CHAR = r'A-Za-z0-9_$.'; IDENTIFIER_CHAR = r'A-Za-z0-9_$.'
# Number patterns based on: # Number patterns based on:
# http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html # http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
@ -201,7 +201,9 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG), Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG, Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE), JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
Matcher(DOC_FLAG, Type.DOC_FLAG),
# Encountering a doc flag should leave lex spaces mode.
Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
# Tokenize braces so we can find types. # Tokenize braces so we can find types.
Matcher(START_BLOCK, Type.DOC_START_BRACE), Matcher(START_BLOCK, Type.DOC_START_BRACE),
@ -218,7 +220,8 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
JavaScriptModes.TEXT_MODE: [ JavaScriptModes.TEXT_MODE: [
# Check a big group - strings, starting comments, and regexes - all # Check a big group - strings, starting comments, and regexes - all
# of which could be intertwined. 'string with /regex/', # of which could be intertwined. 'string with /regex/',
# /regex with 'string'/, /* comment with /regex/ and string */ (and so on) # /regex with 'string'/, /* comment with /regex/ and string */ (and so
# on)
Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT, Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT,
JavaScriptModes.DOC_COMMENT_MODE), JavaScriptModes.DOC_COMMENT_MODE),
Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT, Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
@ -233,7 +236,8 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE), JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
Matcher(REGEX, Type.REGEX), Matcher(REGEX, Type.REGEX),
# Next we check for start blocks appearing outside any of the items above. # Next we check for start blocks appearing outside any of the items
# above.
Matcher(START_BLOCK, Type.START_BLOCK), Matcher(START_BLOCK, Type.START_BLOCK),
Matcher(END_BLOCK, Type.END_BLOCK), Matcher(END_BLOCK, Type.END_BLOCK),
@ -257,31 +261,28 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE), Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
Matcher(OPERATOR, Type.OPERATOR), Matcher(OPERATOR, Type.OPERATOR),
# Find key words and whitespace # Find key words and whitespace.
Matcher(KEYWORD, Type.KEYWORD), Matcher(KEYWORD, Type.KEYWORD),
Matcher(WHITESPACE, Type.WHITESPACE), Matcher(WHITESPACE, Type.WHITESPACE),
# Find identifiers # Find identifiers.
Matcher(IDENTIFIER, Type.IDENTIFIER), Matcher(IDENTIFIER, Type.IDENTIFIER),
# Finally, we convert semicolons to tokens. # Finally, we convert semicolons to tokens.
Matcher(SEMICOLON, Type.SEMICOLON)], Matcher(SEMICOLON, Type.SEMICOLON)],
# Matchers for single quote strings. # Matchers for single quote strings.
JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [ JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT), Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END, Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)], JavaScriptModes.TEXT_MODE)],
# Matchers for double quote strings. # Matchers for double quote strings.
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [ JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT), Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END, Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)], JavaScriptModes.TEXT_MODE)],
# Matchers for block comments. # Matchers for block comments.
JavaScriptModes.BLOCK_COMMENT_MODE: [ JavaScriptModes.BLOCK_COMMENT_MODE: [
# First we check for exiting a block comment. # First we check for exiting a block comment.
@ -291,7 +292,6 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
# Match non-comment-ending text.. # Match non-comment-ending text..
Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)], Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)],
# Matchers for doc comments. # Matchers for doc comments.
JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [ JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [
Matcher(DOC_COMMENT_TEXT, Type.COMMENT)], Matcher(DOC_COMMENT_TEXT, Type.COMMENT)],
@ -305,17 +305,16 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
# We greedy match until the end of the line in line comment mode. # We greedy match until the end of the line in line comment mode.
Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)], Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
# Matchers for code after the function keyword. # Matchers for code after the function keyword.
JavaScriptModes.FUNCTION_MODE: [ JavaScriptModes.FUNCTION_MODE: [
# Must match open paren before anything else and move into parameter mode, # Must match open paren before anything else and move into parameter
# otherwise everything inside the parameter list is parsed incorrectly. # mode, otherwise everything inside the parameter list is parsed
# incorrectly.
Matcher(OPENING_PAREN, Type.START_PARAMETERS, Matcher(OPENING_PAREN, Type.START_PARAMETERS,
JavaScriptModes.PARAMETER_MODE), JavaScriptModes.PARAMETER_MODE),
Matcher(WHITESPACE, Type.WHITESPACE), Matcher(WHITESPACE, Type.WHITESPACE),
Matcher(IDENTIFIER, Type.FUNCTION_NAME)], Matcher(IDENTIFIER, Type.FUNCTION_NAME)],
# Matchers for function parameters # Matchers for function parameters
JavaScriptModes.PARAMETER_MODE: [ JavaScriptModes.PARAMETER_MODE: [
# When in function parameter mode, a closing paren is treated specially. # When in function parameter mode, a closing paren is treated specially.
@ -324,7 +323,6 @@ class JavaScriptTokenizer(tokenizer.Tokenizer):
JavaScriptModes.TEXT_MODE), JavaScriptModes.TEXT_MODE),
Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]} Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]}
# When text is not matched, it is given this default type based on mode. # When text is not matched, it is given this default type based on mode.
# If unspecified in this map, the default default is Type.NORMAL. # If unspecified in this map, the default default is Type.NORMAL.
JAVASCRIPT_DEFAULT_TYPES = { JAVASCRIPT_DEFAULT_TYPES = {

74
tools/closure_linter/closure_linter/not_strict_test.py

@ -0,0 +1,74 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import checker
from closure_linter import errors
from closure_linter.common import filetestcase
# Root directory (relative to the package) containing the test .js files.
_RESOURCE_PREFIX = 'closure_linter/testdata'

# Configure the linter flags for non-strict mode before any checks run.
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
                                 'limited_doc_checks.js')

# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
    'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
  """Test suite to run a GJsLintTest for each of several files.

  If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
  testdata to test. Otherwise, _TEST_FILES is used.
  """

  def __init__(self, tests=()):
    unittest.TestSuite.__init__(self, tests)

    # Filenames given on the command line override the built-in list.
    test_files = sys.argv[1:] if sys.argv else []
    if not test_files:
      test_files = _TEST_FILES

    # Add one annotated-file test case per testdata file.
    for test_file in test_files:
      resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
      self.addTest(
          filetestcase.AnnotatedFileTestCase(
              resource_path, checker.GJsLintRunner(), errors.ByName))
if __name__ == '__main__':
  # Don't let main parse args; argument handling happens in the TestSuite.
  googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')

272
tools/closure_linter/closure_linter/requireprovidesorter.py

@ -0,0 +1,272 @@
#!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains logic for sorting goog.provide and goog.require statements.
Closurized JavaScript files use goog.provide and goog.require statements at the
top of the file to manage dependencies. These statements should be sorted
alphabetically, however, it is common for them to be accompanied by inline
comments or suppression annotations. In order to sort these statements without
disrupting their comments and annotations, the association between statements
and comments/annotations must be maintained while sorting.
RequireProvideSorter: Handles checking/fixing of provide/require statements.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand for the token-type enum used throughout this module.
Type = javascripttokens.JavaScriptTokenType
class RequireProvideSorter(object):
  """Checks for and fixes alphabetization of provide and require statements.

  When alphabetizing, comments on the same line or comments directly above a
  goog.provide or goog.require statement are associated with that statement and
  stay with the statement as it gets sorted.
  """

  def CheckProvides(self, token):
    """Checks alphabetization of goog.provide statements.

    Iterates over tokens in given token stream, identifies goog.provide tokens,
    and checks that they occur in alphabetical order by the object being
    provided.

    Args:
      token: A token in the token stream before any goog.provide tokens.

    Returns:
      A tuple containing the first provide token in the token stream and a list
      of provided objects sorted alphabetically. For example:

      (JavaScriptToken, ['object.a', 'object.b', ...])

      None is returned if all goog.provide statements are already sorted.
    """
    provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
    provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
    sorted_provide_strings = sorted(provide_strings)
    if provide_strings != sorted_provide_strings:
      # Return a tuple, matching the documented return type and the behavior
      # of CheckRequires (the original code returned a list here).
      return (provide_tokens[0], sorted_provide_strings)
    return None

  def CheckRequires(self, token):
    """Checks alphabetization of goog.require statements.

    Iterates over tokens in given token stream, identifies goog.require tokens,
    and checks that they occur in alphabetical order by the dependency being
    required.

    Args:
      token: A token in the token stream before any goog.require tokens.

    Returns:
      A tuple containing the first require token in the token stream and a list
      of required dependencies sorted alphabetically. For example:

      (JavaScriptToken, ['object.a', 'object.b', ...])

      None is returned if all goog.require statements are already sorted.
    """
    require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
    require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
    sorted_require_strings = sorted(require_strings)
    if require_strings != sorted_require_strings:
      return (require_tokens[0], sorted_require_strings)
    return None

  def FixProvides(self, token):
    """Sorts goog.provide statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.provide'))

  def FixRequires(self, token):
    """Sorts goog.require statements in the given token stream alphabetically.

    Args:
      token: The first token in the token stream.
    """
    self._FixProvidesOrRequires(
        self._GetRequireOrProvideTokens(token, 'goog.require'))

  def _FixProvidesOrRequires(self, tokens):
    """Sorts goog.provide or goog.require statements.

    Args:
      tokens: A list of goog.provide or goog.require tokens in the order they
              appear in the token stream. i.e. the first token in this list must
              be the first goog.provide or goog.require token.
    """
    if not tokens:
      # Nothing to sort; also avoids an IndexError on tokens[0] below.
      return

    strings = self._GetRequireOrProvideTokenStrings(tokens)
    sorted_strings = sorted(strings)

    # Make a separate pass to remove any blank lines between goog.require/
    # goog.provide tokens.
    first_token = tokens[0]
    last_token = tokens[-1]
    i = last_token
    while i != first_token:
      if i.type is Type.BLANK_LINE:
        tokenutil.DeleteToken(i)
      i = i.previous

    # A map from required/provided object name to tokens that make up the line
    # it was on, including any comments immediately before it or after it on the
    # same line.
    tokens_map = self._GetTokensMap(tokens)

    # Iterate over the map removing all tokens.
    for name in tokens_map:
      tokens_to_delete = tokens_map[name]
      for i in tokens_to_delete:
        tokenutil.DeleteToken(i)

    # Re-add all tokens in the map in alphabetical order.
    insert_after = tokens[0].previous
    for string in sorted_strings:
      for i in tokens_map[string]:
        tokenutil.InsertTokenAfter(i, insert_after)
        insert_after = i

  def _GetRequireOrProvideTokens(self, token, token_string):
    """Gets all goog.provide or goog.require tokens in the given token stream.

    Args:
      token: The first token in the token stream.
      token_string: One of 'goog.provide' or 'goog.require' to indicate which
                    tokens to find.

    Returns:
      A list of goog.provide or goog.require tokens in the order they appear in
      the token stream.
    """
    tokens = []
    while token:
      if token.type == Type.IDENTIFIER:
        if token.string == token_string:
          tokens.append(token)
        elif token.string not in ['goog.require', 'goog.provide']:
          # The goog.provide and goog.require identifiers are at the top of the
          # file. So if any other identifier is encountered, return.
          break
      token = token.next
    return tokens

  def _GetRequireOrProvideTokenStrings(self, tokens):
    """Gets a list of strings corresponding to the given list of tokens.

    The string will be the next string in the token stream after each token in
    tokens. This is used to find the object being provided/required by a given
    goog.provide or goog.require token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A list of object names that are being provided or required by the given
      list of tokens. For example:

      ['object.a', 'object.c', 'object.b']
    """
    token_strings = []
    for token in tokens:
      name = tokenutil.Search(token, Type.STRING_TEXT).string
      token_strings.append(name)
    return token_strings

  def _GetTokensMap(self, tokens):
    """Gets a map from object name to tokens associated with that object.

    Starting from the goog.provide/goog.require token, searches backwards in the
    token stream for any lines that start with a comment. These lines are
    associated with the goog.provide/goog.require token. Also associates any
    tokens on the same line as the goog.provide/goog.require token with that
    token.

    Args:
      tokens: A list of goog.provide or goog.require tokens.

    Returns:
      A dictionary that maps object names to the tokens associated with the
      goog.provide or goog.require of that object name. For example:

      {
        'object.a': [JavaScriptToken, JavaScriptToken, ...],
        'object.b': [...]
      }

      The list of tokens includes any comment lines above the goog.provide or
      goog.require statement and everything after the statement on the same
      line. For example, all of the following would be associated with
      'object.a':

      /** @suppress {extraRequire} */
      goog.require('object.a'); // Some comment.
    """
    tokens_map = {}
    for token in tokens:
      object_name = tokenutil.Search(token, Type.STRING_TEXT).string

      # If the previous line starts with a comment, presume that the comment
      # relates to the goog.require or goog.provide and keep them together when
      # sorting.
      first_token = token
      previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
      while previous_first_token.IsAnyType(Type.COMMENT_TYPES):
        first_token = previous_first_token
        previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
            first_token)

      # Find the last token on the line.
      last_token = tokenutil.GetLastTokenInSameLine(token)

      all_tokens = self._GetTokenList(first_token, last_token)
      tokens_map[object_name] = all_tokens

    return tokens_map

  def _GetTokenList(self, first_token, last_token):
    """Gets a list of all tokens from first_token to last_token, inclusive.

    Args:
      first_token: The first token to get.
      last_token: The last token to get.

    Returns:
      A list of all tokens between first_token and last_token, including both
      first_token and last_token.

    Raises:
      Exception: If the token stream ends before last_token is reached.
    """
    token_list = []
    token = first_token
    while token != last_token:
      if not token:
        raise Exception('ran out of tokens')
      token_list.append(token)
      token = token.next
    token_list.append(last_token)

    return token_list

74
tools/closure_linter/closure_linter/requireprovidesorter_test.py

@ -0,0 +1,74 @@
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for RequireProvideSorter."""
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
# pylint: disable-msg=C6409
# Shorthand for the token-type enum used by the tests below.
TokenType = javascripttokens.JavaScriptTokenType
class RequireProvideSorterTest(googletest.TestCase):
  """Tests for RequireProvideSorter."""

  # Shared tokenizer and metadata pass, reused across test methods.
  _tokenizer = javascripttokenizer.JavaScriptTokenizer()
  _metadata_pass = ecmametadatapass.EcmaMetaDataPass()

  def testFixRequires_removeBlankLines(self):
    """Tests that blank lines are omitted in sorted goog.require statements."""
    input_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassB\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');'
    ]
    expected_lines = [
        'goog.provide(\'package.subpackage.Whatever\');',
        '',
        'goog.require(\'package.subpackage.ClassA\');',
        'goog.require(\'package.subpackage.ClassB\');'
    ]
    token = self._tokenizer.TokenizeFile(input_lines)
    self._metadata_pass.Reset()
    self._metadata_pass.Process(token)

    sorter = requireprovidesorter.RequireProvideSorter()
    sorter.FixRequires(token)

    # assertEqual instead of the deprecated assertEquals alias.
    self.assertEqual(expected_lines, self._GetLines(token))

  def _GetLines(self, token):
    """Returns an array of lines based on the specified token stream.

    Args:
      token: The first token in the token stream.

    Returns:
      A list of strings, one per line, reassembled from the token stream.
    """
    lines = []
    line = ''
    while token:
      line += token.string
      if token.IsLastInLine():
        lines.append(line)
        line = ''
      token = token.next
    return lines
if __name__ == '__main__':
  # Run all tests in this module.
  googletest.main()

56
tools/closure_linter/closure_linter/statetracker.py

@ -65,6 +65,7 @@ class DocFlag(object):
'implements', 'implements',
'implicitCast', 'implicitCast',
'interface', 'interface',
'lends',
'license', 'license',
'noalias', 'noalias',
'nocompile', 'nocompile',
@ -89,18 +90,38 @@ class DocFlag(object):
# Includes all Closure Compiler @suppress types. # Includes all Closure Compiler @suppress types.
# Not all of these annotations are interpreted by Closure Linter. # Not all of these annotations are interpreted by Closure Linter.
#
# Specific cases:
# - accessControls is supported by the compiler at the expression
# and method level to suppress warnings about private/protected
# access (method level applies to all references in the method).
# The linter mimics the compiler behavior.
SUPPRESS_TYPES = frozenset([ SUPPRESS_TYPES = frozenset([
'accessControls', 'accessControls',
'ambiguousFunctionDecl',
'checkRegExp', 'checkRegExp',
'checkTypes', 'checkTypes',
'checkVars', 'checkVars',
'const',
'constantProperty',
'deprecated', 'deprecated',
'duplicate', 'duplicate',
'es5Strict',
'externsValidation',
'extraProvide',
'extraRequire',
'fileoverviewTags', 'fileoverviewTags',
'globalThis',
'internetExplorerChecks',
'invalidCasts', 'invalidCasts',
'missingProperties', 'missingProperties',
'missingProvide',
'missingRequire',
'nonStandardJsDocs', 'nonStandardJsDocs',
'strictModuleDepCheck', 'strictModuleDepCheck',
'tweakValidation',
'typeInvalidation',
'undefinedNames',
'undefinedVars', 'undefinedVars',
'underscore', 'underscore',
'unknownDefines', 'unknownDefines',
@ -249,7 +270,15 @@ class DocComment(object):
[Type.DOC_FLAG]) [Type.DOC_FLAG])
if brace: if brace:
end_token, contents = _GetMatchingEndBraceAndContents(brace) end_token, contents = _GetMatchingEndBraceAndContents(brace)
self.suppressions[contents] = token for suppression in contents.split('|'):
self.suppressions[suppression] = token
def SuppressionOnly(self):
"""Returns whether this comment contains only suppression flags."""
for flag_type in self.__flags.keys():
if flag_type != 'suppress':
return False
return True
def AddFlag(self, flag): def AddFlag(self, flag):
"""Add a new document flag. """Add a new document flag.
@ -265,10 +294,7 @@ class DocComment(object):
Returns: Returns:
True if documentation may be pulled off the superclass. True if documentation may be pulled off the superclass.
""" """
return (self.HasFlag('inheritDoc') or return self.HasFlag('inheritDoc') or self.HasFlag('override')
(self.HasFlag('override') and
not self.HasFlag('return') and
not self.HasFlag('param')))
def HasFlag(self, flag_type): def HasFlag(self, flag_type):
"""Test if the given flag has been set. """Test if the given flag has been set.
@ -455,7 +481,8 @@ def _GetEndTokenAndContents(start_token):
last_line = iterator.line_number last_line = iterator.line_number
last_token = None last_token = None
contents = '' contents = ''
while not iterator.type in Type.FLAG_ENDING_TYPES: doc_depth = 0
while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
if (iterator.IsFirstInLine() and if (iterator.IsFirstInLine() and
DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)): DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
# If we have a blank comment line, consider that an implicit # If we have a blank comment line, consider that an implicit
@ -470,6 +497,17 @@ def _GetEndTokenAndContents(start_token):
# only a doc comment prefix or whitespace. # only a doc comment prefix or whitespace.
break break
# b/2983692
# don't prematurely match against a @flag if inside a doc flag
# need to think about what is the correct behavior for unterminated
# inline doc flags
if (iterator.type == Type.DOC_START_BRACE and
iterator.next.type == Type.DOC_INLINE_FLAG):
doc_depth += 1
elif (iterator.type == Type.DOC_END_BRACE and
doc_depth > 0):
doc_depth -= 1
if iterator.type in Type.FLAG_DESCRIPTION_TYPES: if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
contents += iterator.string contents += iterator.string
last_token = iterator last_token = iterator
@ -509,6 +547,7 @@ class Function(object):
self.is_constructor = doc and doc.HasFlag('constructor') self.is_constructor = doc and doc.HasFlag('constructor')
self.is_interface = doc and doc.HasFlag('interface') self.is_interface = doc and doc.HasFlag('interface')
self.has_return = False self.has_return = False
self.has_throw = False
self.has_this = False self.has_this = False
self.name = name self.name = name
self.doc = doc self.doc = doc
@ -893,6 +932,11 @@ class StateTracker(object):
if function: if function:
function.has_return = True function.has_return = True
elif type == Type.KEYWORD and token.string == 'throw':
function = self.GetFunction()
if function:
function.has_throw = True
elif type == Type.SIMPLE_LVALUE: elif type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier'] identifier = token.values['identifier']
jsdoc = self.GetDocComment() jsdoc = self.GetDocComment()

135
tools/closure_linter/closure_linter/tokenutil.py

@ -19,15 +19,16 @@
__author__ = ('robbyw@google.com (Robert Walker)', __author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)') 'ajp@google.com (Andy Perelson)')
from closure_linter.common import tokens
from closure_linter import javascripttokens
import copy import copy
from closure_linter import javascripttokens
from closure_linter.common import tokens
# Shorthand # Shorthand
JavaScriptToken = javascripttokens.JavaScriptToken JavaScriptToken = javascripttokens.JavaScriptToken
Type = tokens.TokenType Type = tokens.TokenType
def GetFirstTokenInSameLine(token): def GetFirstTokenInSameLine(token):
"""Returns the first token in the same line as token. """Returns the first token in the same line as token.
@ -42,6 +43,58 @@ def GetFirstTokenInSameLine(token):
return token return token
def GetFirstTokenInPreviousLine(token):
"""Returns the first token in the previous line as token.
Args:
token: Any token in the line.
Returns:
The first token in the previous line as token, or None if token is on the
first line.
"""
first_in_line = GetFirstTokenInSameLine(token)
if first_in_line.previous:
return GetFirstTokenInSameLine(first_in_line.previous)
return None
def GetLastTokenInSameLine(token):
"""Returns the last token in the same line as token.
Args:
token: Any token in the line.
Returns:
The last token in the same line as token.
"""
while not token.IsLastInLine():
token = token.next
return token
def GetAllTokensInSameLine(token):
"""Returns all tokens in the same line as the given token.
Args:
token: Any token in the line.
Returns:
All tokens on the same line as the given token.
"""
first_token = GetFirstTokenInSameLine(token)
last_token = GetLastTokenInSameLine(token)
tokens_in_line = []
while first_token != last_token:
tokens_in_line.append(first_token)
first_token = first_token.next
tokens_in_line.append(last_token)
return tokens_in_line
def CustomSearch(start_token, func, end_func=None, distance=None, def CustomSearch(start_token, func, end_func=None, distance=None,
reverse=False): reverse=False):
"""Returns the first token where func is True within distance of this token. """Returns the first token where func is True within distance of this token.
@ -77,14 +130,14 @@ def CustomSearch(start_token, func, end_func=None, distance=None,
else: else:
while token and (distance is None or distance > 0): while token and (distance is None or distance > 0):
next = token.next next_token = token.next
if next: if next_token:
if func(next): if func(next_token):
return next return next_token
if end_func and end_func(next): if end_func and end_func(next_token):
return None return None
token = next token = next_token
if distance is not None: if distance is not None:
distance -= 1 distance -= 1
@ -123,7 +176,6 @@ def SearchExcept(start_token, token_types, distance=None, reverse=False):
reverse: When true, search the tokens before this one instead of the tokens reverse: When true, search the tokens before this one instead of the tokens
after it after it
Returns: Returns:
The first token of any type in token_types within distance of this token, or The first token of any type in token_types within distance of this token, or
None if no such token is found. None if no such token is found.
@ -173,19 +225,21 @@ def DeleteToken(token):
following_token.metadata.last_code = token.metadata.last_code following_token.metadata.last_code = token.metadata.last_code
following_token = following_token.next following_token = following_token.next
def DeleteTokens(token, tokenCount):
def DeleteTokens(token, token_count):
"""Deletes the given number of tokens starting with the given token. """Deletes the given number of tokens starting with the given token.
Args: Args:
token: The token to start deleting at. token: The token to start deleting at.
tokenCount: The total number of tokens to delete. token_count: The total number of tokens to delete.
""" """
for i in xrange(1, tokenCount): for i in xrange(1, token_count):
DeleteToken(token.next) DeleteToken(token.next)
DeleteToken(token) DeleteToken(token)
def InsertTokenAfter(new_token, token): def InsertTokenAfter(new_token, token):
"""Insert new_token after token """Insert new_token after token.
Args: Args:
new_token: A token to be added to the stream new_token: A token to be added to the stream
@ -221,6 +275,21 @@ def InsertTokenAfter(new_token, token):
iterator = iterator.next iterator = iterator.next
def InsertTokensAfter(new_tokens, token):
"""Insert multiple tokens after token.
Args:
new_tokens: An array of tokens to be added to the stream
token: A token already in the stream
"""
# TODO(user): It would be nicer to have InsertTokenAfter defer to here
# instead of vice-versa.
current_token = token
for new_token in new_tokens:
InsertTokenAfter(new_token, current_token)
current_token = new_token
def InsertSpaceTokenAfter(token): def InsertSpaceTokenAfter(token):
"""Inserts a space token after the given token. """Inserts a space token after the given token.
@ -228,28 +297,44 @@ def InsertSpaceTokenAfter(token):
token: The token to insert a space token after token: The token to insert a space token after
Returns: Returns:
A single space token""" A single space token
"""
space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line, space_token = JavaScriptToken(' ', Type.WHITESPACE, token.line,
token.line_number) token.line_number)
InsertTokenAfter(space_token, token) InsertTokenAfter(space_token, token)
def InsertLineAfter(token): def InsertBlankLineAfter(token):
"""Inserts a blank line after the given token. """Inserts a blank line after the given token.
Args: Args:
token: The token to insert a blank line after token: The token to insert a blank line after
Returns: Returns:
A single space token""" A single space token
"""
blank_token = JavaScriptToken('', Type.BLANK_LINE, '', blank_token = JavaScriptToken('', Type.BLANK_LINE, '',
token.line_number + 1) token.line_number + 1)
InsertTokenAfter(blank_token, token) InsertLineAfter(token, [blank_token])
# Update all subsequent line numbers.
blank_token = blank_token.next
while blank_token: def InsertLineAfter(token, new_tokens):
blank_token.line_number += 1 """Inserts a new line consisting of new_tokens after the given token.
blank_token = blank_token.next
Args:
token: The token to insert after.
new_tokens: The tokens that will make up the new line.
"""
insert_location = token
for new_token in new_tokens:
InsertTokenAfter(new_token, insert_location)
insert_location = new_token
# Update all subsequent line numbers.
next_token = new_tokens[-1].next
while next_token:
next_token.line_number += 1
next_token = next_token.next
def SplitToken(token, position): def SplitToken(token, position):
@ -275,6 +360,10 @@ def SplitToken(token, position):
def Compare(token1, token2): def Compare(token1, token2):
"""Compares two tokens and determines their relative order. """Compares two tokens and determines their relative order.
Args:
token1: The first token to compare.
token2: The second token to compare.
Returns: Returns:
A negative integer, zero, or a positive integer as the first token is A negative integer, zero, or a positive integer as the first token is
before, equal, or after the second in the token stream. before, equal, or after the second in the token stream.

2489
tools/closure_linter/gflags.py

File diff suppressed because it is too large

5
tools/closure_linter/setup.cfg

@ -1,5 +0,0 @@
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

2
tools/closure_linter/setup.py

@ -20,7 +20,7 @@ except ImportError:
from distutils.core import setup from distutils.core import setup
setup(name='closure_linter', setup(name='closure_linter',
version='2.2.6', version='2.3.5',
description='Closure Linter', description='Closure Linter',
license='Apache', license='Apache',
author='The Closure Linter Authors', author='The Closure Linter Authors',

Loading…
Cancel
Save