Mirror of https://github.com/lukechilds/node.git

Authored by Shigeki Ohtsu 13 years ago, committed by Ben Noordhuis.
41 changed files with 3196 additions and 3434 deletions.
@@ -1,10 +0,0 @@
Metadata-Version: 1.0
Name: closure-linter
Version: 2.2.6
Summary: Closure Linter
Home-page: http://code.google.com/p/closure-linter
Author: The Closure Linter Authors
Author-email: opensource@google.com
License: Apache
Description: UNKNOWN
Platform: UNKNOWN
@@ -1,41 +0,0 @@
README
setup.py
closure_linter/__init__.py
closure_linter/checker.py
closure_linter/checkerbase.py
closure_linter/ecmalintrules.py
closure_linter/ecmametadatapass.py
closure_linter/error_fixer.py
closure_linter/errorrules.py
closure_linter/errors.py
closure_linter/fixjsstyle.py
closure_linter/fixjsstyle_test.py
closure_linter/full_test.py
closure_linter/gjslint.py
closure_linter/indentation.py
closure_linter/javascriptlintrules.py
closure_linter/javascriptstatetracker.py
closure_linter/javascriptstatetracker_test.py
closure_linter/javascripttokenizer.py
closure_linter/javascripttokens.py
closure_linter/statetracker.py
closure_linter/tokenutil.py
closure_linter.egg-info/PKG-INFO
closure_linter.egg-info/SOURCES.txt
closure_linter.egg-info/dependency_links.txt
closure_linter.egg-info/entry_points.txt
closure_linter.egg-info/requires.txt
closure_linter.egg-info/top_level.txt
closure_linter/common/__init__.py
closure_linter/common/error.py
closure_linter/common/erroraccumulator.py
closure_linter/common/errorhandler.py
closure_linter/common/errorprinter.py
closure_linter/common/filetestcase.py
closure_linter/common/htmlutil.py
closure_linter/common/lintrunner.py
closure_linter/common/matcher.py
closure_linter/common/position.py
closure_linter/common/simplefileflags.py
closure_linter/common/tokenizer.py
closure_linter/common/tokens.py
@@ -1 +0,0 @@

@@ -1,4 +0,0 @@
[console_scripts]
fixjsstyle = closure_linter.fixjsstyle:main
gjslint = closure_linter.gjslint:main

@@ -1 +0,0 @@
python-gflags
@@ -1 +0,0 @@
closure_linter
@@ -1 +1,16 @@
#!/usr/bin/env python
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Package indicator for gjslint."""

@ -0,0 +1,500 @@ |
|||
#!/usr/bin/env python |
|||
# |
|||
# Copyright 2008 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
"""Logic for computing dependency information for closurized JavaScript files. |
|||
|
|||
Closurized JavaScript files express dependencies using goog.require and |
|||
goog.provide statements. In order for the linter to detect when a statement is |
|||
missing or unnecessary, all identifiers in the JavaScript file must first be |
|||
processed to determine if they constitute the creation or usage of a dependency. |
|||
""" |
|||
|
|||
|
|||
|
|||
from closure_linter import javascripttokens |
|||
from closure_linter import tokenutil |
|||
|
|||
# pylint: disable-msg=C6409 |
|||
TokenType = javascripttokens.JavaScriptTokenType |
|||
|
|||
DEFAULT_EXTRA_NAMESPACES = [ |
|||
'goog.testing.asserts', |
|||
'goog.testing.jsunit', |
|||
] |
|||
|
|||
class ClosurizedNamespacesInfo(object): |
|||
"""Dependency information for closurized JavaScript files. |
|||
|
|||
Processes token streams for dependency creation or usage and provides logic |
|||
for determining if a given require or provide statement is unnecessary or if |
|||
there are missing require or provide statements. |
|||
""" |
|||
|
|||
def __init__(self, closurized_namespaces, ignored_extra_namespaces): |
|||
"""Initializes an instance the ClosurizedNamespacesInfo class. |
|||
|
|||
Args: |
|||
closurized_namespaces: A list of namespace prefixes that should be |
|||
processed for dependency information. Non-matching namespaces are |
|||
ignored. |
|||
ignored_extra_namespaces: A list of namespaces that should not be reported |
|||
as extra regardless of whether they are actually used. |
|||
""" |
|||
self._closurized_namespaces = closurized_namespaces |
|||
self._ignored_extra_namespaces = (ignored_extra_namespaces + |
|||
DEFAULT_EXTRA_NAMESPACES) |
|||
self.Reset() |
|||
|
|||
def Reset(self): |
|||
"""Resets the internal state to prepare for processing a new file.""" |
|||
|
|||
# A list of goog.provide tokens in the order they appeared in the file. |
|||
self._provide_tokens = [] |
|||
|
|||
# A list of goog.require tokens in the order they appeared in the file. |
|||
self._require_tokens = [] |
|||
|
|||
# Namespaces that are already goog.provided. |
|||
self._provided_namespaces = [] |
|||
|
|||
# Namespaces that are already goog.required. |
|||
self._required_namespaces = [] |
|||
|
|||
# Note that created_namespaces and used_namespaces contain both namespaces |
|||
# and identifiers because there are many existing cases where a method or |
|||
# constant is provided directly instead of its namespace. Ideally, these |
|||
# two lists would only have to contain namespaces. |
|||
|
|||
# A list of tuples where the first element is the namespace of an identifier |
|||
# created in the file and the second is the identifier itself. |
|||
self._created_namespaces = [] |
|||
|
|||
# A list of tuples where the first element is the namespace of an identifier |
|||
# used in the file and the second is the identifier itself. |
|||
self._used_namespaces = [] |
|||
|
|||
# A list of seemingly-unnecessary namespaces that are goog.required() and |
|||
# annotated with @suppress {extraRequire}. |
|||
self._suppressed_requires = [] |
|||
|
|||
# A list of goog.provide tokens which are duplicates. |
|||
self._duplicate_provide_tokens = [] |
|||
|
|||
# A list of goog.require tokens which are duplicates. |
|||
self._duplicate_require_tokens = [] |
|||
|
|||
# Whether this file is in a goog.scope. Someday, we may add support |
|||
# for checking scopified namespaces, but for now let's just fail |
|||
# in a more reasonable way. |
|||
self._scopified_file = False |
|||
|
|||
# TODO(user): Handle the case where there are 2 different requires |
|||
# that can satisfy the same dependency, but only one is necessary. |
|||
|
|||
def GetProvidedNamespaces(self): |
|||
"""Returns the namespaces which are already provided by this file. |
|||
|
|||
Returns: |
|||
A list of strings where each string is a 'namespace' corresponding to an |
|||
existing goog.provide statement in the file being checked. |
|||
""" |
|||
return list(self._provided_namespaces) |
|||
|
|||
def GetRequiredNamespaces(self): |
|||
"""Returns the namespaces which are already required by this file. |
|||
|
|||
Returns: |
|||
A list of strings where each string is a 'namespace' corresponding to an |
|||
existing goog.require statement in the file being checked. |
|||
""" |
|||
return list(self._required_namespaces) |
|||
|
|||
def IsExtraProvide(self, token): |
|||
"""Returns whether the given goog.provide token is unnecessary. |
|||
|
|||
Args: |
|||
token: A goog.provide token. |
|||
|
|||
Returns: |
|||
True if the given token corresponds to an unnecessary goog.provide |
|||
statement, otherwise False. |
|||
""" |
|||
if self._scopified_file: |
|||
return False |
|||
|
|||
namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string |
|||
|
|||
base_namespace = namespace.split('.', 1)[0] |
|||
if base_namespace not in self._closurized_namespaces: |
|||
return False |
|||
|
|||
if token in self._duplicate_provide_tokens: |
|||
return True |
|||
|
|||
# TODO(user): There's probably a faster way to compute this. |
|||
for created_namespace, created_identifier in self._created_namespaces: |
|||
if namespace == created_namespace or namespace == created_identifier: |
|||
return False |
|||
|
|||
return True |
|||
|
|||
def IsExtraRequire(self, token): |
|||
"""Returns whether the given goog.require token is unnecessary. |
|||
|
|||
Args: |
|||
token: A goog.require token. |
|||
|
|||
Returns: |
|||
True if the given token corresponds to an unnecessary goog.require |
|||
statement, otherwise False. |
|||
""" |
|||
if self._scopified_file: |
|||
return False |
|||
|
|||
namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string |
|||
|
|||
base_namespace = namespace.split('.', 1)[0] |
|||
if base_namespace not in self._closurized_namespaces: |
|||
return False |
|||
|
|||
if namespace in self._ignored_extra_namespaces: |
|||
return False |
|||
|
|||
if token in self._duplicate_require_tokens: |
|||
return True |
|||
|
|||
if namespace in self._suppressed_requires: |
|||
return False |
|||
|
|||
# If the namespace contains a component that is initial caps, then that |
|||
# must be the last component of the namespace. |
|||
parts = namespace.split('.') |
|||
if len(parts) > 1 and parts[-2][0].isupper(): |
|||
return True |
|||
|
|||
# TODO(user): There's probably a faster way to compute this. |
|||
for used_namespace, used_identifier in self._used_namespaces: |
|||
if namespace == used_namespace or namespace == used_identifier: |
|||
return False |
|||
|
|||
return True |
|||
|
|||
def GetMissingProvides(self): |
|||
"""Returns the set of missing provided namespaces for the current file. |
|||
|
|||
Returns: |
|||
Returns a set of strings where each string is a namespace that should be |
|||
provided by this file, but is not. |
|||
""" |
|||
if self._scopified_file: |
|||
return set() |
|||
|
|||
missing_provides = set() |
|||
for namespace, identifier in self._created_namespaces: |
|||
if (not self._IsPrivateIdentifier(identifier) and |
|||
namespace not in self._provided_namespaces and |
|||
identifier not in self._provided_namespaces and |
|||
namespace not in self._required_namespaces): |
|||
missing_provides.add(namespace) |
|||
|
|||
return missing_provides |
|||
|
|||
def GetMissingRequires(self): |
|||
"""Returns the set of missing required namespaces for the current file. |
|||
|
|||
For each non-private identifier used in the file, find either a |
|||
goog.require, goog.provide or a created identifier that satisfies it. |
|||
goog.require statements can satisfy the identifier by requiring either the |
|||
namespace of the identifier or the identifier itself. goog.provide |
|||
statements can satisfy the identifier by providing the namespace of the |
|||
identifier. A created identifier can only satisfy the used identifier if |
|||
it matches it exactly (necessary since things can be defined on a |
|||
namespace in more than one file). Note that provided namespaces should be |
|||
a subset of created namespaces, but we check both because in some cases we |
|||
can't always detect the creation of the namespace. |
|||
|
|||
Returns: |
|||
Returns a set of strings where each string is a namespace that should be |
|||
required by this file, but is not. |
|||
""" |
|||
if self._scopified_file: |
|||
return set() |
|||
|
|||
external_dependencies = set(self._required_namespaces) |
|||
|
|||
# Assume goog namespace is always available. |
|||
external_dependencies.add('goog') |
|||
|
|||
created_identifiers = set() |
|||
for namespace, identifier in self._created_namespaces: |
|||
created_identifiers.add(identifier) |
|||
|
|||
missing_requires = set() |
|||
for namespace, identifier in self._used_namespaces: |
|||
if (not self._IsPrivateIdentifier(identifier) and |
|||
namespace not in external_dependencies and |
|||
namespace not in self._provided_namespaces and |
|||
identifier not in external_dependencies and |
|||
identifier not in created_identifiers): |
|||
missing_requires.add(namespace) |
|||
|
|||
return missing_requires |
|||
|
|||
def _IsPrivateIdentifier(self, identifier): |
|||
"""Returns whether the given identifer is private.""" |
|||
pieces = identifier.split('.') |
|||
for piece in pieces: |
|||
if piece.endswith('_'): |
|||
return True |
|||
return False |
|||
|
|||
def IsFirstProvide(self, token): |
|||
"""Returns whether token is the first provide token.""" |
|||
return self._provide_tokens and token == self._provide_tokens[0] |
|||
|
|||
def IsFirstRequire(self, token): |
|||
"""Returns whether token is the first require token.""" |
|||
return self._require_tokens and token == self._require_tokens[0] |
|||
|
|||
def IsLastProvide(self, token): |
|||
"""Returns whether token is the last provide token.""" |
|||
return self._provide_tokens and token == self._provide_tokens[-1] |
|||
|
|||
def IsLastRequire(self, token): |
|||
"""Returns whether token is the last require token.""" |
|||
return self._require_tokens and token == self._require_tokens[-1] |
|||
|
|||
def ProcessToken(self, token, state_tracker): |
|||
"""Processes the given token for dependency information. |
|||
|
|||
Args: |
|||
token: The token to process. |
|||
state_tracker: The JavaScript state tracker. |
|||
""" |
|||
|
|||
# Note that this method is in the critical path for the linter and has been |
|||
# optimized for performance in the following ways: |
|||
# - Tokens are checked by type first to minimize the number of function |
|||
# calls necessary to determine if action needs to be taken for the token. |
|||
# - The most common tokens types are checked for first. |
|||
# - The number of function calls has been minimized (thus the length of this
# function).
|||
|
|||
if token.type == TokenType.IDENTIFIER: |
|||
# TODO(user): Consider saving the whole identifier in metadata. |
|||
whole_identifier_string = self._GetWholeIdentifierString(token) |
|||
if whole_identifier_string is None: |
|||
# We only want to process the identifier one time. If the whole string |
|||
# identifier is None, that means this token was part of a multi-token |
|||
# identifier, but it was not the first token of the identifier. |
|||
return |
|||
|
|||
# In the odd case that a goog.require is encountered inside a function, |
|||
# just ignore it (e.g. dynamic loading in test runners). |
|||
if token.string == 'goog.require' and not state_tracker.InFunction(): |
|||
self._require_tokens.append(token) |
|||
namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string |
|||
if namespace in self._required_namespaces: |
|||
self._duplicate_require_tokens.append(token) |
|||
else: |
|||
self._required_namespaces.append(namespace) |
|||
|
|||
# If there is a suppression for the require, add a usage for it so it |
|||
# gets treated as a regular goog.require (i.e. still gets sorted). |
|||
jsdoc = state_tracker.GetDocComment() |
|||
if jsdoc and ('extraRequire' in jsdoc.suppressions): |
|||
self._suppressed_requires.append(namespace) |
|||
self._AddUsedNamespace(state_tracker, namespace) |
|||
|
|||
elif token.string == 'goog.provide': |
|||
self._provide_tokens.append(token) |
|||
namespace = tokenutil.Search(token, TokenType.STRING_TEXT).string |
|||
if namespace in self._provided_namespaces: |
|||
self._duplicate_provide_tokens.append(token) |
|||
else: |
|||
self._provided_namespaces.append(namespace) |
|||
|
|||
# If there is a suppression for the provide, add a creation for it so it |
|||
# gets treated as a regular goog.provide (i.e. still gets sorted). |
|||
jsdoc = state_tracker.GetDocComment() |
|||
if jsdoc and ('extraProvide' in jsdoc.suppressions): |
|||
self._AddCreatedNamespace(state_tracker, namespace) |
|||
|
|||
elif token.string == 'goog.scope': |
|||
self._scopified_file = True |
|||
|
|||
else: |
|||
jsdoc = state_tracker.GetDocComment() |
|||
if jsdoc and jsdoc.HasFlag('typedef'): |
|||
self._AddCreatedNamespace(state_tracker, whole_identifier_string, |
|||
self.GetClosurizedNamespace( |
|||
whole_identifier_string)) |
|||
else: |
|||
self._AddUsedNamespace(state_tracker, whole_identifier_string) |
|||
|
|||
elif token.type == TokenType.SIMPLE_LVALUE: |
|||
identifier = token.values['identifier'] |
|||
namespace = self.GetClosurizedNamespace(identifier) |
|||
if state_tracker.InFunction(): |
|||
self._AddUsedNamespace(state_tracker, identifier) |
|||
elif namespace and namespace != 'goog': |
|||
self._AddCreatedNamespace(state_tracker, identifier, namespace) |
|||
|
|||
elif token.type == TokenType.DOC_FLAG: |
|||
flag_type = token.attached_object.flag_type |
|||
is_interface = state_tracker.GetDocComment().HasFlag('interface') |
|||
if flag_type == 'implements' or (flag_type == 'extends' and is_interface): |
|||
# Interfaces should be goog.require'd. |
|||
doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE) |
|||
interface = tokenutil.Search(doc_start, TokenType.COMMENT) |
|||
self._AddUsedNamespace(state_tracker, interface.string) |
|||
|
|||
|
|||
def _GetWholeIdentifierString(self, token): |
|||
"""Returns the whole identifier string for the given token. |
|||
|
|||
Checks the tokens after the current one to see if the token is one in a |
|||
sequence of tokens which are actually just one identifier (i.e. a line was |
|||
wrapped in the middle of an identifier). |
|||
|
|||
Args: |
|||
token: The token to check. |
|||
|
|||
Returns: |
|||
The whole identifier string or None if this token is not the first token |
|||
in a multi-token identifier. |
|||
""" |
|||
result = '' |
|||
|
|||
# Search backward to determine if this token is the first token of the |
|||
# identifier. If it is not the first token, return None to signal that this |
|||
# token should be ignored. |
|||
prev_token = token.previous |
|||
while prev_token: |
|||
if (prev_token.IsType(TokenType.IDENTIFIER) or |
|||
prev_token.IsType(TokenType.NORMAL) and prev_token.string == '.'): |
|||
return None |
|||
elif (not prev_token.IsType(TokenType.WHITESPACE) and |
|||
not prev_token.IsAnyType(TokenType.COMMENT_TYPES)): |
|||
break |
|||
prev_token = prev_token.previous |
|||
|
|||
# Search forward to find other parts of this identifier separated by white |
|||
# space. |
|||
next_token = token |
|||
while next_token: |
|||
if (next_token.IsType(TokenType.IDENTIFIER) or |
|||
next_token.IsType(TokenType.NORMAL) and next_token.string == '.'): |
|||
result += next_token.string |
|||
elif (not next_token.IsType(TokenType.WHITESPACE) and |
|||
not next_token.IsAnyType(TokenType.COMMENT_TYPES)): |
|||
break |
|||
next_token = next_token.next |
|||
|
|||
return result |
|||
|
|||
def _AddCreatedNamespace(self, state_tracker, identifier, namespace=None): |
|||
"""Adds the namespace of an identifier to the list of created namespaces. |
|||
|
|||
If the identifier is annotated with a 'missingProvide' suppression, it is |
|||
not added. |
|||
|
|||
Args: |
|||
state_tracker: The JavaScriptStateTracker instance. |
|||
identifier: The identifier to add. |
|||
namespace: The namespace of the identifier or None if the identifier is |
|||
also the namespace. |
|||
""" |
|||
if not namespace: |
|||
namespace = identifier |
|||
|
|||
jsdoc = state_tracker.GetDocComment() |
|||
if jsdoc and 'missingProvide' in jsdoc.suppressions: |
|||
return |
|||
|
|||
self._created_namespaces.append([namespace, identifier]) |
|||
|
|||
def _AddUsedNamespace(self, state_tracker, identifier): |
|||
"""Adds the namespace of an identifier to the list of used namespaces. |
|||
|
|||
If the identifier is annotated with a 'missingRequire' suppression, it is |
|||
not added. |
|||
|
|||
Args: |
|||
state_tracker: The JavaScriptStateTracker instance. |
|||
identifier: An identifier which has been used. |
|||
""" |
|||
jsdoc = state_tracker.GetDocComment() |
|||
if jsdoc and 'missingRequire' in jsdoc.suppressions: |
|||
return |
|||
|
|||
namespace = self.GetClosurizedNamespace(identifier) |
|||
if namespace: |
|||
self._used_namespaces.append([namespace, identifier]) |
|||
|
|||
def GetClosurizedNamespace(self, identifier): |
|||
"""Given an identifier, returns the namespace that identifier is from. |
|||
|
|||
Args: |
|||
identifier: The identifier to extract a namespace from. |
|||
|
|||
Returns: |
|||
The namespace the given identifier resides in, or None if one could not |
|||
be found. |
|||
""" |
|||
if identifier.startswith('goog.global'): |
|||
# Ignore goog.global, since it is, by definition, global. |
|||
return None |
|||
|
|||
parts = identifier.split('.') |
|||
for namespace in self._closurized_namespaces: |
|||
if not identifier.startswith(namespace + '.'): |
|||
continue |
|||
|
|||
last_part = parts[-1] |
|||
if not last_part: |
|||
# TODO(robbyw): Handle this: it's a multi-line identifier. |
|||
return None |
|||
|
|||
# The namespace for a class is the shortest prefix ending in a class |
|||
# name, which starts with a capital letter but is not a capitalized word. |
|||
# |
|||
# We ultimately do not want to allow requiring or providing of inner |
|||
# classes/enums. Instead, a file should provide only the top-level class |
|||
# and users should require only that. |
|||
namespace = [] |
|||
for part in parts: |
|||
if part == 'prototype' or part.isupper(): |
|||
return '.'.join(namespace) |
|||
namespace.append(part) |
|||
if part[0].isupper(): |
|||
return '.'.join(namespace) |
|||
|
|||
# At this point, we know there's no class or enum, so the namespace is |
|||
# just the identifier with the last part removed. With the exception of |
|||
# apply, inherits, and call, which should also be stripped. |
|||
if parts[-1] in ('apply', 'inherits', 'call'): |
|||
parts.pop() |
|||
parts.pop() |
|||
|
|||
# If the last part ends with an underscore, it is a private variable, |
|||
# method, or enum. The namespace is whatever is before it. |
|||
if parts and parts[-1].endswith('_'): |
|||
parts.pop() |
|||
|
|||
return '.'.join(parts) |
|||
|
|||
return None |
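The sketch below (an editorial illustration, not part of this commit) shows how ClosurizedNamespacesInfo is typically driven over a token stream; it mirrors the helper used in the unit tests that follow and assumes 'app' is passed as the closurized namespace prefix.

from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer

# Tokenize a small closurized file and feed every token to the tracker.
source_lines = [
    "goog.provide('app.Foo');",
    "app.Foo = function() {};",
    "app.Bar.methodName();",
]
token = javascripttokenizer.JavaScriptTokenizer().TokenizeFile(source_lines)
info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
    closurized_namespaces=['app'], ignored_extra_namespaces=[])
tracker = javascriptstatetracker.JavaScriptStateTracker()
while token:
  info.ProcessToken(token, tracker)
  token = token.next

missing_requires = info.GetMissingRequires()  # -> set(['app.Bar'])
missing_provides = info.GetMissingProvides()  # -> empty set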
@ -0,0 +1,451 @@ |
|||
#!/usr/bin/env python |
|||
# |
|||
# Copyright 2010 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
"""Unit tests for ClosurizedNamespacesInfo.""" |
|||
|
|||
|
|||
|
|||
import unittest as googletest |
|||
from closure_linter import closurizednamespacesinfo |
|||
from closure_linter import javascriptstatetracker |
|||
from closure_linter import javascripttokenizer |
|||
from closure_linter import javascripttokens |
|||
from closure_linter import tokenutil |
|||
|
|||
# pylint: disable-msg=C6409 |
|||
TokenType = javascripttokens.JavaScriptTokenType |
|||
|
|||
|
|||
class ClosurizedNamespacesInfoTest(googletest.TestCase): |
|||
"""Tests for ClosurizedNamespacesInfo.""" |
|||
|
|||
_test_cases = { |
|||
'goog.global.anything': None, |
|||
'package.CONSTANT': 'package', |
|||
'package.methodName': 'package', |
|||
'package.subpackage.methodName': 'package.subpackage', |
|||
'package.subpackage.methodName.apply': 'package.subpackage', |
|||
'package.ClassName.something': 'package.ClassName', |
|||
'package.ClassName.Enum.VALUE.methodName': 'package.ClassName', |
|||
'package.ClassName.CONSTANT': 'package.ClassName', |
|||
'package.namespace.CONSTANT.methodName': 'package.namespace', |
|||
'package.ClassName.inherits': 'package.ClassName', |
|||
'package.ClassName.apply': 'package.ClassName', |
|||
'package.ClassName.methodName.apply': 'package.ClassName', |
|||
'package.ClassName.methodName.call': 'package.ClassName', |
|||
'package.ClassName.prototype.methodName': 'package.ClassName', |
|||
'package.ClassName.privateMethod_': 'package.ClassName', |
|||
'package.className.privateProperty_': 'package.className', |
|||
'package.className.privateProperty_.methodName': 'package.className', |
|||
'package.ClassName.PrivateEnum_': 'package.ClassName', |
|||
'package.ClassName.prototype.methodName.apply': 'package.ClassName', |
|||
'package.ClassName.property.subProperty': 'package.ClassName', |
|||
'package.className.prototype.something.somethingElse': 'package.className' |
|||
} |
|||
|
|||
_tokenizer = javascripttokenizer.JavaScriptTokenizer() |
|||
|
|||
def testGetClosurizedNamespace(self): |
|||
"""Tests that the correct namespace is returned for various identifiers.""" |
|||
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( |
|||
closurized_namespaces=['package'], ignored_extra_namespaces=[]) |
|||
for identifier, expected_namespace in self._test_cases.items(): |
|||
actual_namespace = namespaces_info.GetClosurizedNamespace(identifier) |
|||
self.assertEqual( |
|||
expected_namespace, |
|||
actual_namespace, |
|||
'expected namespace "' + str(expected_namespace) + |
|||
'" for identifier "' + str(identifier) + '" but was "' + |
|||
str(actual_namespace) + '"') |
|||
|
|||
def testIgnoredExtraNamespaces(self): |
|||
"""Tests that ignored_extra_namespaces are ignored.""" |
|||
token = self._GetRequireTokens('package.Something') |
|||
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( |
|||
closurized_namespaces=['package'], |
|||
ignored_extra_namespaces=['package.Something']) |
|||
|
|||
self.assertFalse(namespaces_info.IsExtraRequire(token), |
|||
'Should be valid since it is in ignored namespaces.') |
|||
|
|||
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( |
|||
['package'], []) |
|||
|
|||
self.assertTrue(namespaces_info.IsExtraRequire(token), |
|||
'Should be invalid since it is not in ignored namespaces.') |
|||
|
|||
def testIsExtraProvide_created(self): |
|||
"""Tests that provides for created namespaces are not extra.""" |
|||
input_lines = [ |
|||
'goog.provide(\'package.Foo\');', |
|||
'package.Foo = function() {};' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertFalse(namespaces_info.IsExtraProvide(token), |
|||
'Should not be extra since it is created.') |
|||
|
|||
def testIsExtraProvide_createdIdentifier(self): |
|||
"""Tests that provides for created identifiers are not extra.""" |
|||
input_lines = [ |
|||
'goog.provide(\'package.Foo.methodName\');', |
|||
'package.Foo.methodName = function() {};' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertFalse(namespaces_info.IsExtraProvide(token), |
|||
'Should not be extra since it is created.') |
|||
|
|||
def testIsExtraProvide_notCreated(self): |
|||
"""Tests that provides for non-created namespaces are extra.""" |
|||
input_lines = ['goog.provide(\'package.Foo\');'] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertTrue(namespaces_info.IsExtraProvide(token), |
|||
'Should be extra since it is not created.') |
|||
|
|||
def testIsExtraProvide_duplicate(self): |
|||
"""Tests that providing a namespace twice makes the second one extra.""" |
|||
input_lines = [ |
|||
'goog.provide(\'package.Foo\');', |
|||
'goog.provide(\'package.Foo\');', |
|||
'package.Foo = function() {};' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
# Advance to the second goog.provide token. |
|||
token = tokenutil.Search(token.next, TokenType.IDENTIFIER) |
|||
|
|||
self.assertTrue(namespaces_info.IsExtraProvide(token), |
|||
'Should be extra since it is already provided.') |
|||
|
|||
def testIsExtraProvide_notClosurized(self): |
|||
"""Tests that provides of non-closurized namespaces are not extra.""" |
|||
input_lines = ['goog.provide(\'notclosurized.Foo\');'] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertFalse(namespaces_info.IsExtraProvide(token), |
|||
'Should not be extra since it is not closurized.') |
|||
|
|||
def testIsExtraRequire_used(self): |
|||
"""Tests that requires for used namespaces are not extra.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.Foo\');', |
|||
'var x = package.Foo.methodName();' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertFalse(namespaces_info.IsExtraRequire(token), |
|||
'Should not be extra since it is used.') |
|||
|
|||
def testIsExtraRequire_usedIdentifier(self): |
|||
"""Tests that requires for used methods on classes are extra.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.Foo.methodName\');', |
|||
'var x = package.Foo.methodName();' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertTrue(namespaces_info.IsExtraRequire(token), |
|||
'Should require the package, not the method specifically.') |
|||
|
|||
def testIsExtraRequire_notUsed(self): |
|||
"""Tests that requires for unused namespaces are extra.""" |
|||
input_lines = ['goog.require(\'package.Foo\');'] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertTrue(namespaces_info.IsExtraRequire(token), |
|||
'Should be extra since it is not used.') |
|||
|
|||
def testIsExtraRequire_notClosurized(self): |
|||
"""Tests that requires of non-closurized namespaces are not extra.""" |
|||
input_lines = ['goog.require(\'notclosurized.Foo\');'] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertFalse(namespaces_info.IsExtraRequire(token), |
|||
'Should not be extra since it is not closurized.') |
|||
|
|||
def testIsExtraRequire_objectOnClass(self): |
|||
"""Tests that requiring an object on a class is extra.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.Foo.Enum\');', |
|||
'var x = package.Foo.Enum.VALUE1;', |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertTrue(namespaces_info.IsExtraRequire(token), |
|||
'The whole class, not the object, should be required.')
|||
|
|||
def testIsExtraRequire_constantOnClass(self): |
|||
"""Tests that requiring a constant on a class is extra.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.Foo.CONSTANT\');', |
|||
'var x = package.Foo.CONSTANT', |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertTrue(namespaces_info.IsExtraRequire(token), |
|||
'The class, not the constant, should be required.')
|||
|
|||
def testIsExtraRequire_constantNotOnClass(self): |
|||
"""Tests that requiring a constant not on a class is OK.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.subpackage.CONSTANT\');', |
|||
'var x = package.subpackage.CONSTANT', |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertFalse(namespaces_info.IsExtraRequire(token), |
|||
'Constants can be required except on classes.')
|||
|
|||
def testIsExtraRequire_methodNotOnClass(self): |
|||
"""Tests that requiring a method not on a class is OK.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.subpackage.method\');', |
|||
'var x = package.subpackage.method()', |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertFalse(namespaces_info.IsExtraRequire(token), |
|||
'Methods can be required except on classes.')
|||
|
|||
def testIsExtraRequire_defaults(self): |
|||
"""Tests that there are no warnings about extra requires for test utils""" |
|||
input_lines = ['goog.require(\'goog.testing.jsunit\');'] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['goog'], []) |
|||
|
|||
self.assertFalse(namespaces_info.IsExtraRequire(token), |
|||
'Should not be extra since it is for testing.') |
|||
|
|||
def testGetMissingProvides_provided(self): |
|||
"""Tests that provided functions don't cause a missing provide.""" |
|||
input_lines = [ |
|||
'goog.provide(\'package.Foo\');', |
|||
'package.Foo = function() {};' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingProvides())) |
|||
|
|||
def testGetMissingProvides_providedIdentifier(self): |
|||
"""Tests that provided identifiers don't cause a missing provide.""" |
|||
input_lines = [ |
|||
'goog.provide(\'package.Foo.methodName\');', |
|||
'package.Foo.methodName = function() {};' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingProvides())) |
|||
|
|||
def testGetMissingProvides_providedParentIdentifier(self): |
|||
"""Tests that provided identifiers on a class don't cause a missing provide |
|||
on objects attached to that class.""" |
|||
input_lines = [ |
|||
'goog.provide(\'package.foo.ClassName\');', |
|||
'package.foo.ClassName.methodName = function() {};', |
|||
'package.foo.ClassName.ObjectName = 1;', |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingProvides())) |
|||
|
|||
def testGetMissingProvides_unprovided(self): |
|||
"""Tests that unprovided functions cause a missing provide.""" |
|||
input_lines = ['package.Foo = function() {};'] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(1, len(namespaces_info.GetMissingProvides())) |
|||
self.assertTrue('package.Foo' in namespaces_info.GetMissingProvides()) |
|||
|
|||
def testGetMissingProvides_privatefunction(self): |
|||
"""Tests that unprovided private functions don't cause a missing provide.""" |
|||
input_lines = ['package.Foo_ = function() {};'] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingProvides())) |
|||
|
|||
def testGetMissingProvides_required(self): |
|||
"""Tests that required namespaces don't cause a missing provide.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.Foo\');', |
|||
'package.Foo.methodName = function() {};' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingProvides())) |
|||
|
|||
def testGetMissingRequires_required(self): |
|||
"""Tests that required namespaces don't cause a missing require.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.Foo\');', |
|||
'package.Foo();' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
|||
|
|||
def testGetMissingRequires_requiredIdentifier(self): |
|||
"""Tests that required namespaces satisfy identifiers on that namespace.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.Foo\');', |
|||
'package.Foo.methodName();' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
|||
|
|||
def testGetMissingRequires_requiredParentClass(self): |
|||
"""Tests that requiring a parent class of an object is sufficient to prevent |
|||
a missing require on that object.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.Foo\');', |
|||
'package.Foo.methodName();', |
|||
'package.Foo.methodName(package.Foo.ObjectName);' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingRequires())) |
|||
|
|||
def testGetMissingRequires_unrequired(self): |
|||
"""Tests that unrequired namespaces cause a missing require.""" |
|||
input_lines = ['package.Foo();'] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(1, len(namespaces_info.GetMissingRequires())) |
|||
self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires()) |
|||
|
|||
def testGetMissingRequires_provided(self): |
|||
"""Tests that provided namespaces satisfy identifiers on that namespace.""" |
|||
input_lines = [ |
|||
'goog.provide(\'package.Foo\');', |
|||
'package.Foo.methodName();' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingRequires())) |
|||
|
|||
def testGetMissingRequires_created(self): |
|||
"""Tests that created namespaces do not satisfy usage of an identifier.""" |
|||
input_lines = [ |
|||
'package.Foo = function();', |
|||
'package.Foo.methodName();' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(1, len(namespaces_info.GetMissingRequires())) |
|||
self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires()) |
|||
|
|||
def testGetMissingRequires_createdIdentifier(self): |
|||
"""Tests that created identifiers satisfy usage of the identifier.""" |
|||
input_lines = [ |
|||
'package.Foo.methodName = function();', |
|||
'package.Foo.methodName();' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(0, len(namespaces_info.GetMissingRequires())) |
|||
|
|||
def testGetMissingRequires_objectOnClass(self): |
|||
"""Tests that we should require a class, not the object on the class.""" |
|||
input_lines = [ |
|||
'goog.require(\'package.Foo.Enum\');', |
|||
'var x = package.Foo.Enum.VALUE1;', |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertEquals(1, len(namespaces_info.GetMissingRequires()), |
|||
'The whole class, not the object, should be required.')
|||
|
|||
def testIsFirstProvide(self): |
|||
"""Tests operation of the isFirstProvide method.""" |
|||
input_lines = [ |
|||
'goog.provide(\'package.Foo\');', |
|||
'package.Foo.methodName();' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], []) |
|||
|
|||
self.assertTrue(namespaces_info.IsFirstProvide(token)) |
|||
|
|||
def testGetWholeIdentifierString(self): |
|||
"""Tests that created identifiers satisfy usage of the identifier.""" |
|||
input_lines = [ |
|||
'package.Foo.', |
|||
' veryLong.', |
|||
' identifier;' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo([], []) |
|||
|
|||
self.assertEquals('package.Foo.veryLong.identifier', |
|||
namespaces_info._GetWholeIdentifierString(token)) |
|||
self.assertEquals(None, |
|||
namespaces_info._GetWholeIdentifierString(token.next)) |
|||
|
|||
def _GetInitializedNamespacesInfo(self, token, closurized_namespaces, |
|||
ignored_extra_namespaces): |
|||
"""Returns a namespaces info initialized with the given token stream.""" |
|||
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo( |
|||
closurized_namespaces=closurized_namespaces, |
|||
ignored_extra_namespaces=ignored_extra_namespaces) |
|||
state_tracker = javascriptstatetracker.JavaScriptStateTracker() |
|||
|
|||
while token: |
|||
namespaces_info.ProcessToken(token, state_tracker) |
|||
token = token.next |
|||
|
|||
return namespaces_info |
|||
|
|||
def _GetProvideTokens(self, namespace):
"""Returns a list of tokens for a goog.provide of the given namespace."""
line_text = 'goog.provide(\'' + namespace + '\');\n'
return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
|||
|
|||
def _GetRequireTokens(self, namespace): |
|||
"""Returns a list of tokens for a goog.require of the given namespace.""" |
|||
line_text = 'goog.require(\'' + namespace + '\');\n' |
|||
return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text]) |
|||
|
|||
if __name__ == '__main__': |
|||
googletest.main() |
@ -1 +1,16 @@ |
|||
#!/usr/bin/env python |
|||
# Copyright 2008 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
"""Package indicator for gjslint.common.""" |
|||
|
@ -0,0 +1,52 @@ |
|||
#!/usr/bin/env python |
|||
# |
|||
# Copyright 2012 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
"""Utility functions to format errors.""" |
|||
|
|||
|
|||
__author__ = ('robbyw@google.com (Robert Walker)', |
|||
'ajp@google.com (Andy Perelson)', |
|||
'nnaze@google.com (Nathan Naze)') |
|||
|
|||
|
|||
def GetUnixErrorOutput(filename, error, new_error=False): |
|||
"""Get a output line for an error in UNIX format.""" |
|||
|
|||
line = '' |
|||
|
|||
if error.token: |
|||
line = '%d' % error.token.line_number |
|||
|
|||
error_code = '%04d' % error.code |
|||
if new_error: |
|||
error_code = 'New Error ' + error_code |
|||
return '%s:%s:(%s) %s' % (filename, line, error_code, error.message) |
|||
|
|||
|
|||
def GetErrorOutput(error, new_error=False): |
|||
"""Get a output line for an error in regular format.""" |
|||
|
|||
line = '' |
|||
if error.token: |
|||
line = 'Line %d, ' % error.token.line_number |
|||
|
|||
code = 'E:%04d' % error.code |
|||
|
|||
error_message = error.message |
|||
if new_error: |
|||
error_message = 'New Error ' + error_message |
|||
|
|||
return '%s%s: %s' % (line, code, error_message)
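A minimal usage sketch (editorial, not part of this commit): the formatters above only read error.code, error.message and error.token.line_number, so simple stand-in objects are enough to exercise them. The namedtuple stand-ins, file name, code and message below are made up for illustration.

import collections

from closure_linter.common import erroroutput

FakeToken = collections.namedtuple('FakeToken', ['line_number'])
FakeError = collections.namedtuple('FakeError', ['token', 'code', 'message'])

error = FakeError(token=FakeToken(line_number=12), code=220,
                  message='No docs found for member')

# 'Line 12, E:0220: No docs found for member'
print(erroroutput.GetErrorOutput(error))

# 'foo.js:12:(0220) No docs found for member'
print(erroroutput.GetUnixErrorOutput('foo.js', error))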
@ -1,203 +0,0 @@ |
|||
#!/usr/bin/env python |
|||
# |
|||
# Copyright 2008 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
"""Linter error handler class that prints errors to stdout.""" |
|||
|
|||
__author__ = ('robbyw@google.com (Robert Walker)', |
|||
'ajp@google.com (Andy Perelson)') |
|||
|
|||
from closure_linter.common import error |
|||
from closure_linter.common import errorhandler |
|||
|
|||
Error = error.Error |
|||
|
|||
|
|||
# The error message is of the format: |
|||
# Line <number>, E:<code>: message |
|||
DEFAULT_FORMAT = 1 |
|||
|
|||
# The error message is of the format: |
|||
# filename:[line number]:message |
|||
UNIX_FORMAT = 2 |
|||
|
|||
|
|||
class ErrorPrinter(errorhandler.ErrorHandler): |
|||
"""ErrorHandler that prints errors to stdout.""" |
|||
|
|||
def __init__(self, new_errors=None): |
|||
"""Initializes this error printer. |
|||
|
|||
Args: |
|||
new_errors: A sequence of error codes representing recently introduced |
|||
errors, defaults to None. |
|||
""" |
|||
# Number of errors |
|||
self._error_count = 0 |
|||
|
|||
# Number of new errors |
|||
self._new_error_count = 0 |
|||
|
|||
# Number of files checked |
|||
self._total_file_count = 0 |
|||
|
|||
# Number of files with errors |
|||
self._error_file_count = 0 |
|||
|
|||
# Dict of file name to number of errors |
|||
self._file_table = {} |
|||
|
|||
# List of errors for each file |
|||
self._file_errors = None |
|||
|
|||
# Current file |
|||
self._filename = None |
|||
|
|||
self._format = DEFAULT_FORMAT |
|||
|
|||
if new_errors: |
|||
self._new_errors = frozenset(new_errors) |
|||
else: |
|||
self._new_errors = frozenset(set()) |
|||
|
|||
def SetFormat(self, format): |
|||
"""Sets the print format of errors. |
|||
|
|||
Args: |
|||
format: One of {DEFAULT_FORMAT, UNIX_FORMAT}. |
|||
""" |
|||
self._format = format |
|||
|
|||
def HandleFile(self, filename, first_token): |
|||
"""Notifies this ErrorPrinter that subsequent errors are in filename. |
|||
|
|||
Sets the current file name, and sets a flag stating the header for this file |
|||
has not been printed yet. |
|||
|
|||
Should be called by a linter before a file is style checked. |
|||
|
|||
Args: |
|||
filename: The name of the file about to be checked. |
|||
first_token: The first token in the file, or None if there was an error |
|||
opening the file.
|||
""" |
|||
if self._filename and self._file_table[self._filename]: |
|||
print |
|||
|
|||
self._filename = filename |
|||
self._file_table[filename] = 0 |
|||
self._total_file_count += 1 |
|||
self._file_errors = [] |
|||
|
|||
def HandleError(self, error): |
|||
"""Prints a formatted error message about the specified error. |
|||
|
|||
The error message is of the format: |
|||
Error #<code>, line #<number>: message |
|||
|
|||
Args: |
|||
error: The error object |
|||
""" |
|||
self._file_errors.append(error) |
|||
self._file_table[self._filename] += 1 |
|||
self._error_count += 1 |
|||
|
|||
if self._new_errors and error.code in self._new_errors: |
|||
self._new_error_count += 1 |
|||
|
|||
def _PrintError(self, error): |
|||
"""Prints a formatted error message about the specified error. |
|||
|
|||
Args: |
|||
error: The error object |
|||
""" |
|||
new_error = self._new_errors and error.code in self._new_errors |
|||
if self._format == DEFAULT_FORMAT: |
|||
line = '' |
|||
if error.token: |
|||
line = 'Line %d, ' % error.token.line_number |
|||
|
|||
code = 'E:%04d' % error.code |
|||
if new_error: |
|||
print '%s%s: (New error) %s' % (line, code, error.message) |
|||
else: |
|||
print '%s%s: %s' % (line, code, error.message) |
|||
else: |
|||
# UNIX format |
|||
filename = self._filename |
|||
line = '' |
|||
if error.token: |
|||
line = '%d' % error.token.line_number |
|||
|
|||
error_code = '%04d' % error.code |
|||
if new_error: |
|||
error_code = 'New Error ' + error_code |
|||
print '%s:%s:(%s) %s' % (filename, line, error_code, error.message) |
|||
|
|||
def FinishFile(self): |
|||
"""Finishes handling the current file.""" |
|||
if self._file_errors: |
|||
self._error_file_count += 1 |
|||
|
|||
if self._format != UNIX_FORMAT: |
|||
print '----- FILE : %s -----' % (self._filename) |
|||
|
|||
self._file_errors.sort(Error.Compare) |
|||
|
|||
for error in self._file_errors: |
|||
self._PrintError(error) |
|||
|
|||
def HasErrors(self): |
|||
"""Whether this error printer encountered any errors. |
|||
|
|||
Returns: |
|||
True if the error printer encountered any errors. |
|||
""" |
|||
return self._error_count |
|||
|
|||
def HasNewErrors(self): |
|||
"""Whether this error printer encountered any new errors. |
|||
|
|||
Returns: |
|||
True if the error printer encountered any new errors. |
|||
""" |
|||
return self._new_error_count |
|||
|
|||
def HasOldErrors(self): |
|||
"""Whether this error printer encountered any old errors. |
|||
|
|||
Returns: |
|||
True if the error printer encountered any old errors. |
|||
""" |
|||
return self._error_count - self._new_error_count |
|||
|
|||
def PrintSummary(self): |
|||
"""Print a summary of the number of errors and files.""" |
|||
if self.HasErrors() or self.HasNewErrors(): |
|||
print ('Found %d errors, including %d new errors, in %d files ' |
|||
'(%d files OK).' % ( |
|||
self._error_count, |
|||
self._new_error_count, |
|||
self._error_file_count, |
|||
self._total_file_count - self._error_file_count)) |
|||
else: |
|||
print '%d files checked, no errors found.' % self._total_file_count |
|||
|
|||
def PrintFileSummary(self): |
|||
"""Print a detailed summary of the number of errors in each file.""" |
|||
keys = self._file_table.keys() |
|||
keys.sort() |
|||
for filename in keys: |
|||
print '%s: %d' % (filename, self._file_table[filename]) |
@ -0,0 +1,101 @@ |
|||
#!/usr/bin/env python |
|||
# Copyright 2011 The Closure Linter Authors. All Rights Reserved. |
|||
|
|||
|
|||
|
|||
|
|||
import unittest as googletest |
|||
from closure_linter.common import tokens |
|||
|
|||
|
|||
def _CreateDummyToken(): |
|||
return tokens.Token('foo', None, 1, 1) |
|||
|
|||
|
|||
def _CreateDummyTokens(count): |
|||
dummy_tokens = [] |
|||
for _ in xrange(count): |
|||
dummy_tokens.append(_CreateDummyToken()) |
|||
return dummy_tokens |
|||
|
|||
|
|||
def _SetTokensAsNeighbors(neighbor_tokens): |
|||
for i in xrange(len(neighbor_tokens)): |
|||
prev_index = i - 1 |
|||
next_index = i + 1 |
|||
|
|||
if prev_index >= 0: |
|||
neighbor_tokens[i].previous = neighbor_tokens[prev_index] |
|||
|
|||
if next_index < len(neighbor_tokens): |
|||
neighbor_tokens[i].next = neighbor_tokens[next_index] |
|||
|
|||
|
|||
class TokensTest(googletest.TestCase): |
|||
|
|||
def testIsFirstInLine(self): |
|||
|
|||
# First token in file (has no previous). |
|||
self.assertTrue(_CreateDummyToken().IsFirstInLine()) |
|||
|
|||
a, b = _CreateDummyTokens(2) |
|||
_SetTokensAsNeighbors([a, b]) |
|||
|
|||
# Tokens on same line |
|||
a.line_number = 30 |
|||
b.line_number = 30 |
|||
|
|||
self.assertFalse(b.IsFirstInLine()) |
|||
|
|||
# Tokens on different lines |
|||
b.line_number = 31 |
|||
self.assertTrue(b.IsFirstInLine()) |
|||
|
|||
def testIsLastInLine(self): |
|||
# Last token in file (has no next). |
|||
self.assertTrue(_CreateDummyToken().IsLastInLine()) |
|||
|
|||
a, b = _CreateDummyTokens(2) |
|||
_SetTokensAsNeighbors([a, b]) |
|||
|
|||
# Tokens on same line |
|||
a.line_number = 30 |
|||
b.line_number = 30 |
|||
self.assertFalse(a.IsLastInLine()) |
|||
|
|||
b.line_number = 31 |
|||
self.assertTrue(a.IsLastInLine()) |
|||
|
|||
def testIsType(self): |
|||
a = tokens.Token('foo', 'fakeType1', 1, 1) |
|||
self.assertTrue(a.IsType('fakeType1')) |
|||
self.assertFalse(a.IsType('fakeType2')) |
|||
|
|||
def testIsAnyType(self): |
|||
a = tokens.Token('foo', 'fakeType1', 1, 1) |
|||
self.assertTrue(a.IsAnyType(['fakeType1', 'fakeType2'])) |
|||
self.assertFalse(a.IsAnyType(['fakeType3', 'fakeType4'])) |
|||
|
|||
def testRepr(self): |
|||
a = tokens.Token('foo', 'fakeType1', 1, 1) |
|||
self.assertEquals('<Token: fakeType1, "foo", None, 1, None>', str(a)) |
|||
|
|||
def testIter(self): |
|||
dummy_tokens = _CreateDummyTokens(5) |
|||
_SetTokensAsNeighbors(dummy_tokens) |
|||
a, b, c, d, e = dummy_tokens |
|||
|
|||
i = iter(a) |
|||
self.assertListEqual([a, b, c, d, e], list(i)) |
|||
|
|||
def testReverseIter(self): |
|||
dummy_tokens = _CreateDummyTokens(5) |
|||
_SetTokensAsNeighbors(dummy_tokens) |
|||
a, b, c, d, e = dummy_tokens |
|||
|
|||
ri = reversed(e) |
|||
self.assertListEqual([e, d, c, b, a], list(ri)) |
|||
|
|||
|
|||
if __name__ == '__main__': |
|||
googletest.main() |
@ -0,0 +1,90 @@ |
|||
#!/usr/bin/env python |
|||
# |
|||
# Copyright 2011 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
|
|||
"""Specific JSLint errors checker.""" |
|||
|
|||
|
|||
|
|||
import gflags as flags |
|||
|
|||
FLAGS = flags.FLAGS |
|||
|
|||
|
|||
class Rule(object): |
|||
"""Different rules to check.""" |
|||
|
|||
# Documentation for specific rules goes in the flag definition.
|||
BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level' |
|||
INDENTATION = 'indentation' |
|||
WELL_FORMED_AUTHOR = 'well_formed_author' |
|||
NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc' |
|||
BRACES_AROUND_TYPE = 'braces_around_type' |
|||
OPTIONAL_TYPE_MARKER = 'optional_type_marker' |
|||
UNUSED_PRIVATE_MEMBERS = 'unused_private_members' |
|||
|
|||
# Rule to raise all known errors. |
|||
ALL = 'all' |
|||
|
|||
# All rules that are to be checked when using the strict flag. E.g. the rules |
|||
# that are specific to the stricter Closure style. |
|||
CLOSURE_RULES = frozenset([BLANK_LINES_AT_TOP_LEVEL, |
|||
INDENTATION, |
|||
WELL_FORMED_AUTHOR, |
|||
NO_BRACES_AROUND_INHERIT_DOC, |
|||
BRACES_AROUND_TYPE, |
|||
OPTIONAL_TYPE_MARKER]) |
|||
|
|||
|
|||
flags.DEFINE_boolean('strict', False, |
|||
'Whether to validate against the stricter Closure style. ' |
|||
'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.') |
|||
flags.DEFINE_multistring('jslint_error', [], |
|||
'List of specific lint errors to check. Here is a list' |
|||
' of accepted values:\n' |
|||
' - ' + Rule.ALL + ': enables all following errors.\n' |
|||
' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates '
'number of blank lines between blocks at top level.\n'
|||
' - ' + Rule.INDENTATION + ': checks correct ' |
|||
'indentation of code.\n' |
|||
' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the ' |
|||
'@author JsDoc tags.\n' |
|||
' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': ' |
|||
'forbids braces around @inheritdoc JsDoc tags.\n' |
|||
' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces ' |
|||
'around types in JsDoc tags.\n' |
|||
' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct ' |
|||
'use of optional marker = in param types.\n' |
|||
' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for ' |
|||
'unused private variables.\n') |
|||
|
|||
|
|||
def ShouldCheck(rule): |
|||
"""Returns whether the optional rule should be checked. |
|||
|
|||
Consults the relevant flags (strict, jslint_error, jslint_noerror) to determine |
|||
whether this specific rule should be checked. |
|||
|
|||
Args: |
|||
rule: Name of the rule (see Rule). |
|||
|
|||
Returns: |
|||
True if the rule should be checked according to the flags, otherwise False. |
|||
""" |
|||
if rule in FLAGS.jslint_error or Rule.ALL in FLAGS.jslint_error: |
|||
return True |
|||
# Checks strict rules. |
|||
return FLAGS.strict and rule in Rule.CLOSURE_RULES |
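
ShouldCheck is the single gate the rest of the linter consults before running an optional rule. A minimal usage sketch follows; the module name error_check is an assumption (this hunk does not show the new file's name), and the argv list is supplied inline purely for illustration.

import gflags as flags

from closure_linter import error_check  # assumed module name for the file above

# Flags must be parsed before FLAGS.strict / FLAGS.jslint_error are read.
flags.FLAGS(['demo', '--strict'])

# --strict enables every rule in Rule.CLOSURE_RULES ...
print(error_check.ShouldCheck(error_check.Rule.INDENTATION))             # True
# ... but not rules outside that set, which need --jslint_error=<rule> or =all.
print(error_check.ShouldCheck(error_check.Rule.UNUSED_PRIVATE_MEMBERS))  # False
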
@ -0,0 +1,65 @@ |
|||
#!/usr/bin/env python |
|||
# Copyright 2012 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
|
|||
"""A simple, pickle-serializable class to represent a lint error.""" |
|||
|
|||
|
|||
|
|||
import gflags as flags |
|||
|
|||
from closure_linter import errors |
|||
from closure_linter.common import erroroutput |
|||
|
|||
FLAGS = flags.FLAGS |
|||
|
|||
|
|||
class ErrorRecord(object): |
|||
"""Record-keeping struct that can be serialized back from a process. |
|||
|
|||
Attributes: |
|||
path: Path to the file. |
|||
error_string: Error string for the user. |
|||
new_error: Whether this is a "new error" (see errors.NEW_ERRORS). |
|||
""" |
|||
|
|||
def __init__(self, path, error_string, new_error): |
|||
self.path = path |
|||
self.error_string = error_string |
|||
self.new_error = new_error |
|||
|
|||
|
|||
def MakeErrorRecord(path, error): |
|||
"""Make an error record with correctly formatted error string. |
|||
|
|||
Errors cannot be serialized (pickled) across processes because they hold |
|||
pointers into the complex token/context graph. We use an intermediate |
|||
serializable class to pass back just the relevant information. |
|||
|
|||
Args: |
|||
path: Path of file the error was found in. |
|||
error: An error.Error instance. |
|||
|
|||
Returns: |
|||
An ErrorRecord instance. |
|||
""" |
|||
new_error = error.code in errors.NEW_ERRORS |
|||
|
|||
if FLAGS.unix_mode: |
|||
error_string = erroroutput.GetUnixErrorOutput(path, error, new_error) |
|||
else: |
|||
error_string = erroroutput.GetErrorOutput(error, new_error) |
|||
|
|||
return ErrorRecord(path, error_string, new_error) |
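
Because an ErrorRecord holds only a path string, a formatted message, and a bool, it pickles cleanly across process boundaries, which is what a multiprocess lint run needs. A small illustrative sketch, assuming the file above is importable as closure_linter.errorrecord (the hunk does not name it) and using an arbitrary message string:

import pickle

from closure_linter import errorrecord  # assumed module name for the file above

record = errorrecord.ErrorRecord('foo.js', 'example error message', False)

# Plain attributes round-trip through pickle, unlike error.Error objects that
# point into the token/context graph.
restored = pickle.loads(pickle.dumps(record))
assert (restored.path, restored.error_string, restored.new_error) == \
    ('foo.js', 'example error message', False)
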
@ -1,53 +0,0 @@ |
|||
#!/usr/bin/env python |
|||
# |
|||
# Copyright 2010 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
"""Unit tests for JavaScriptStateTracker.""" |
|||
|
|||
|
|||
|
|||
import unittest as googletest |
|||
from closure_linter import javascriptstatetracker |
|||
|
|||
class JavaScriptStateTrackerTest(googletest.TestCase): |
|||
|
|||
__test_cases = { |
|||
'package.CONSTANT' : 'package', |
|||
'package.methodName' : 'package', |
|||
'package.subpackage.methodName' : 'package.subpackage', |
|||
'package.ClassName.something' : 'package.ClassName', |
|||
'package.ClassName.Enum.VALUE.methodName' : 'package.ClassName.Enum', |
|||
'package.ClassName.CONSTANT' : 'package.ClassName', |
|||
'package.ClassName.inherits' : 'package.ClassName', |
|||
'package.ClassName.apply' : 'package.ClassName', |
|||
'package.ClassName.methodName.apply' : 'package.ClassName', |
|||
'package.ClassName.methodName.call' : 'package.ClassName', |
|||
'package.ClassName.prototype.methodName' : 'package.ClassName', |
|||
'package.ClassName.privateMethod_' : None, |
|||
'package.ClassName.prototype.methodName.apply' : 'package.ClassName' |
|||
} |
|||
|
|||
def testGetClosurizedNamespace(self): |
|||
stateTracker = javascriptstatetracker.JavaScriptStateTracker(['package']) |
|||
for identifier, expected_namespace in self.__test_cases.items(): |
|||
actual_namespace = stateTracker.GetClosurizedNamespace(identifier) |
|||
self.assertEqual(expected_namespace, actual_namespace, |
|||
'expected namespace "' + str(expected_namespace) + |
|||
'" for identifier "' + str(identifier) + '" but was "' + |
|||
str(actual_namespace) + '"') |
|||
|
|||
if __name__ == '__main__': |
|||
googletest.main() |
|||
|
@ -0,0 +1,74 @@ |
|||
#!/usr/bin/env python |
|||
# |
|||
# Copyright 2011 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
"""Tests for gjslint --nostrict. |
|||
|
|||
Tests errors that can be thrown by gjslint when not in strict mode. |
|||
""" |
|||
|
|||
|
|||
|
|||
import os |
|||
import sys |
|||
import unittest |
|||
|
|||
import gflags as flags |
|||
import unittest as googletest |
|||
|
|||
from closure_linter import checker |
|||
from closure_linter import errors |
|||
from closure_linter.common import filetestcase |
|||
|
|||
_RESOURCE_PREFIX = 'closure_linter/testdata' |
|||
|
|||
flags.FLAGS.strict = False |
|||
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires') |
|||
flags.FLAGS.closurized_namespaces = ('goog', 'dummy') |
|||
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js', |
|||
'limited_doc_checks.js') |
|||
|
|||
|
|||
# List of files under testdata to test. |
|||
# We need to list files explicitly since pyglib can't list directories. |
|||
_TEST_FILES = [ |
|||
'not_strict.js' |
|||
] |
|||
|
|||
|
|||
class GJsLintTestSuite(unittest.TestSuite): |
|||
"""Test suite to run a GJsLintTest for each of several files. |
|||
|
|||
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in |
|||
testdata to test. Otherwise, _TEST_FILES is used. |
|||
""" |
|||
|
|||
def __init__(self, tests=()): |
|||
unittest.TestSuite.__init__(self, tests) |
|||
|
|||
argv = sys.argv and sys.argv[1:] or [] |
|||
if argv: |
|||
test_files = argv |
|||
else: |
|||
test_files = _TEST_FILES |
|||
for test_file in test_files: |
|||
resource_path = os.path.join(_RESOURCE_PREFIX, test_file) |
|||
self.addTest(filetestcase.AnnotatedFileTestCase(resource_path, |
|||
checker.GJsLintRunner(), |
|||
errors.ByName)) |
|||
|
|||
if __name__ == '__main__': |
|||
# Don't let main parse args; it happens in the TestSuite. |
|||
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite') |
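
Since GJsLintTestSuite reads its file list from sys.argv at construction time, the suite can also be built and run programmatically. A hedged sketch, assuming the file above is importable as closure_linter.not_strict_test (the hunk does not name it):

import unittest

from closure_linter import not_strict_test  # assumed module name for the file above

# With no extra command-line arguments the suite falls back to _TEST_FILES,
# i.e. testdata/not_strict.js.
suite = not_strict_test.GJsLintTestSuite()
unittest.TextTestRunner(verbosity=2).run(suite)
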
@ -0,0 +1,272 @@ |
|||
#!/usr/bin/env python |
|||
# |
|||
# Copyright 2011 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
"""Contains logic for sorting goog.provide and goog.require statements. |
|||
|
|||
Closurized JavaScript files use goog.provide and goog.require statements at the |
|||
top of the file to manage dependencies. These statements should be sorted |
|||
alphabetically; however, it is common for them to be accompanied by inline |
|||
comments or suppression annotations. In order to sort these statements without |
|||
disrupting their comments and annotations, the association between statements |
|||
and comments/annotations must be maintained while sorting. |
|||
|
|||
RequireProvideSorter: Handles checking/fixing of provide/require statements. |
|||
""" |
|||
|
|||
|
|||
|
|||
from closure_linter import javascripttokens |
|||
from closure_linter import tokenutil |
|||
|
|||
# Shorthand |
|||
Type = javascripttokens.JavaScriptTokenType |
|||
|
|||
|
|||
class RequireProvideSorter(object): |
|||
"""Checks for and fixes alphabetization of provide and require statements. |
|||
|
|||
When alphabetizing, comments on the same line or comments directly above a |
|||
goog.provide or goog.require statement are associated with that statement and |
|||
stay with the statement as it gets sorted. |
|||
""" |
|||
|
|||
def CheckProvides(self, token): |
|||
"""Checks alphabetization of goog.provide statements. |
|||
|
|||
Iterates over tokens in the given token stream, identifies goog.provide tokens, |
|||
and checks that they occur in alphabetical order by the object being |
|||
provided. |
|||
|
|||
Args: |
|||
token: A token in the token stream before any goog.provide tokens. |
|||
|
|||
Returns: |
|||
A tuple containing the first provide token in the token stream and a list |
|||
of provided objects sorted alphabetically. For example: |
|||
|
|||
(JavaScriptToken, ['object.a', 'object.b', ...]) |
|||
|
|||
None is returned if all goog.provide statements are already sorted. |
|||
""" |
|||
provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide') |
|||
provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens) |
|||
sorted_provide_strings = sorted(provide_strings) |
|||
if provide_strings != sorted_provide_strings: |
|||
return (provide_tokens[0], sorted_provide_strings) |
|||
return None |
|||
|
|||
def CheckRequires(self, token): |
|||
"""Checks alphabetization of goog.require statements. |
|||
|
|||
Iterates over tokens in the given token stream, identifies goog.require tokens, |
|||
and checks that they occur in alphabetical order by the dependency being |
|||
required. |
|||
|
|||
Args: |
|||
token: A token in the token stream before any goog.require tokens. |
|||
|
|||
Returns: |
|||
A tuple containing the first require token in the token stream and a list |
|||
of required dependencies sorted alphabetically. For example: |
|||
|
|||
(JavaScriptToken, ['object.a', 'object.b', ...]) |
|||
|
|||
None is returned if all goog.require statements are already sorted. |
|||
""" |
|||
require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require') |
|||
require_strings = self._GetRequireOrProvideTokenStrings(require_tokens) |
|||
sorted_require_strings = sorted(require_strings) |
|||
if require_strings != sorted_require_strings: |
|||
return (require_tokens[0], sorted_require_strings) |
|||
return None |
|||
|
|||
def FixProvides(self, token): |
|||
"""Sorts goog.provide statements in the given token stream alphabetically. |
|||
|
|||
Args: |
|||
token: The first token in the token stream. |
|||
""" |
|||
self._FixProvidesOrRequires( |
|||
self._GetRequireOrProvideTokens(token, 'goog.provide')) |
|||
|
|||
def FixRequires(self, token): |
|||
"""Sorts goog.require statements in the given token stream alphabetically. |
|||
|
|||
Args: |
|||
token: The first token in the token stream. |
|||
""" |
|||
self._FixProvidesOrRequires( |
|||
self._GetRequireOrProvideTokens(token, 'goog.require')) |
|||
|
|||
def _FixProvidesOrRequires(self, tokens): |
|||
"""Sorts goog.provide or goog.require statements. |
|||
|
|||
Args: |
|||
tokens: A list of goog.provide or goog.require tokens in the order they |
|||
appear in the token stream, i.e. the first token in this list must |
|||
be the first goog.provide or goog.require token. |
|||
""" |
|||
strings = self._GetRequireOrProvideTokenStrings(tokens) |
|||
sorted_strings = sorted(strings) |
|||
|
|||
# Make a separate pass to remove any blank lines between goog.require/ |
|||
# goog.provide tokens. |
|||
first_token = tokens[0] |
|||
last_token = tokens[-1] |
|||
i = last_token |
|||
while i != first_token: |
|||
if i.type is Type.BLANK_LINE: |
|||
tokenutil.DeleteToken(i) |
|||
i = i.previous |
|||
|
|||
# A map from required/provided object name to the tokens that make up the |
|||
# line it was on, including any comment lines immediately above it and any |
|||
# tokens after it on the same line. |
|||
tokens_map = self._GetTokensMap(tokens) |
|||
|
|||
# Iterate over the map removing all tokens. |
|||
for name in tokens_map: |
|||
tokens_to_delete = tokens_map[name] |
|||
for i in tokens_to_delete: |
|||
tokenutil.DeleteToken(i) |
|||
|
|||
# Re-add all tokens in the map in alphabetical order. |
|||
insert_after = tokens[0].previous |
|||
for string in sorted_strings: |
|||
for i in tokens_map[string]: |
|||
tokenutil.InsertTokenAfter(i, insert_after) |
|||
insert_after = i |
|||
|
|||
def _GetRequireOrProvideTokens(self, token, token_string): |
|||
"""Gets all goog.provide or goog.require tokens in the given token stream. |
|||
|
|||
Args: |
|||
token: The first token in the token stream. |
|||
token_string: One of 'goog.provide' or 'goog.require' to indicate which |
|||
tokens to find. |
|||
|
|||
Returns: |
|||
A list of goog.provide or goog.require tokens in the order they appear in |
|||
the token stream. |
|||
""" |
|||
tokens = [] |
|||
while token: |
|||
if token.type == Type.IDENTIFIER: |
|||
if token.string == token_string: |
|||
tokens.append(token) |
|||
elif token.string not in ['goog.require', 'goog.provide']: |
|||
# The goog.provide and goog.require identifiers are at the top of the |
|||
# file. So if any other identifier is encountered, stop searching. |
|||
break |
|||
token = token.next |
|||
|
|||
return tokens |
|||
|
|||
def _GetRequireOrProvideTokenStrings(self, tokens): |
|||
"""Gets a list of strings corresponding to the given list of tokens. |
|||
|
|||
The string will be the next string in the token stream after each token in |
|||
tokens. This is used to find the object being provided/required by a given |
|||
goog.provide or goog.require token. |
|||
|
|||
Args: |
|||
tokens: A list of goog.provide or goog.require tokens. |
|||
|
|||
Returns: |
|||
A list of object names that are being provided or required by the given |
|||
list of tokens. For example: |
|||
|
|||
['object.a', 'object.c', 'object.b'] |
|||
""" |
|||
token_strings = [] |
|||
for token in tokens: |
|||
name = tokenutil.Search(token, Type.STRING_TEXT).string |
|||
token_strings.append(name) |
|||
return token_strings |
|||
|
|||
def _GetTokensMap(self, tokens): |
|||
"""Gets a map from object name to tokens associated with that object. |
|||
|
|||
Starting from the goog.provide/goog.require token, searches backwards in the |
|||
token stream for any lines that start with a comment. These lines are |
|||
associated with the goog.provide/goog.require token. Also associates any |
|||
tokens on the same line as the goog.provide/goog.require token with that |
|||
token. |
|||
|
|||
Args: |
|||
tokens: A list of goog.provide or goog.require tokens. |
|||
|
|||
Returns: |
|||
A dictionary that maps object names to the tokens associated with the |
|||
goog.provide or goog.require of that object name. For example: |
|||
|
|||
{ |
|||
'object.a': [JavaScriptToken, JavaScriptToken, ...], |
|||
'object.b': [...] |
|||
} |
|||
|
|||
The list of tokens includes any comment lines above the goog.provide or |
|||
goog.require statement and everything after the statement on the same |
|||
line. For example, all of the following would be associated with |
|||
'object.a': |
|||
|
|||
/** @suppress {extraRequire} */ |
|||
goog.require('object.a'); // Some comment. |
|||
""" |
|||
tokens_map = {} |
|||
for token in tokens: |
|||
object_name = tokenutil.Search(token, Type.STRING_TEXT).string |
|||
# If the previous line starts with a comment, presume that the comment |
|||
# relates to the goog.require or goog.provide and keep them together when |
|||
# sorting. |
|||
first_token = token |
|||
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token) |
|||
while previous_first_token.IsAnyType(Type.COMMENT_TYPES): |
|||
first_token = previous_first_token |
|||
previous_first_token = tokenutil.GetFirstTokenInPreviousLine( |
|||
first_token) |
|||
|
|||
# Find the last token on the line. |
|||
last_token = tokenutil.GetLastTokenInSameLine(token) |
|||
|
|||
all_tokens = self._GetTokenList(first_token, last_token) |
|||
tokens_map[object_name] = all_tokens |
|||
return tokens_map |
|||
|
|||
def _GetTokenList(self, first_token, last_token): |
|||
"""Gets a list of all tokens from first_token to last_token, inclusive. |
|||
|
|||
Args: |
|||
first_token: The first token to get. |
|||
last_token: The last token to get. |
|||
|
|||
Returns: |
|||
A list of all tokens between first_token and last_token, including both |
|||
first_token and last_token. |
|||
|
|||
Raises: |
|||
Exception: If the token stream ends before last_token is reached. |
|||
""" |
|||
token_list = [] |
|||
token = first_token |
|||
while token != last_token: |
|||
if not token: |
|||
raise Exception('ran out of tokens') |
|||
token_list.append(token) |
|||
token = token.next |
|||
token_list.append(last_token) |
|||
|
|||
return token_list |
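
A short driver sketch tying the pieces above together: tokenize a buffer, run the metadata pass, then ask the sorter whether the goog.require block is alphabetized. This is illustrative only and mirrors the pattern used by the unit test in the next hunk; it is not code from the linter itself.

from closure_linter import ecmametadatapass
from closure_linter import javascripttokenizer
from closure_linter import requireprovidesorter

lines = [
    "goog.provide('package.Whatever');",
    "",
    "goog.require('package.ClassB');",
    "goog.require('package.ClassA');",
]

token = javascripttokenizer.JavaScriptTokenizer().TokenizeFile(lines)
metadata_pass = ecmametadatapass.EcmaMetaDataPass()
metadata_pass.Reset()
metadata_pass.Process(token)

sorter = requireprovidesorter.RequireProvideSorter()
result = sorter.CheckRequires(token)
if result:
  first_require_token, sorted_names = result
  # sorted_names == ['package.ClassA', 'package.ClassB']; first_require_token
  # marks where the linter would report the out-of-order requires.
  print(sorted_names)

# sorter.FixRequires(token) would reorder the statements in place; the test in
# the next hunk exercises that path, including blank-line removal.
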
@ -0,0 +1,74 @@ |
|||
#!/usr/bin/env python |
|||
# |
|||
# Copyright 2012 The Closure Linter Authors. All Rights Reserved. |
|||
# |
|||
# Licensed under the Apache License, Version 2.0 (the "License"); |
|||
# you may not use this file except in compliance with the License. |
|||
# You may obtain a copy of the License at |
|||
# |
|||
# http://www.apache.org/licenses/LICENSE-2.0 |
|||
# |
|||
# Unless required by applicable law or agreed to in writing, software |
|||
# distributed under the License is distributed on an "AS-IS" BASIS, |
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|||
# See the License for the specific language governing permissions and |
|||
# limitations under the License. |
|||
|
|||
"""Unit tests for RequireProvideSorter.""" |
|||
|
|||
|
|||
|
|||
import unittest as googletest |
|||
from closure_linter import ecmametadatapass |
|||
from closure_linter import javascripttokenizer |
|||
from closure_linter import javascripttokens |
|||
from closure_linter import requireprovidesorter |
|||
|
|||
# pylint: disable-msg=C6409 |
|||
TokenType = javascripttokens.JavaScriptTokenType |
|||
|
|||
|
|||
class RequireProvideSorterTest(googletest.TestCase): |
|||
"""Tests for RequireProvideSorter.""" |
|||
|
|||
_tokenizer = javascripttokenizer.JavaScriptTokenizer() |
|||
_metadata_pass = ecmametadatapass.EcmaMetaDataPass() |
|||
|
|||
def testFixRequires_removeBlankLines(self): |
|||
"""Tests that blank lines are omitted in sorted goog.require statements.""" |
|||
input_lines = [ |
|||
'goog.provide(\'package.subpackage.Whatever\');', |
|||
'', |
|||
'goog.require(\'package.subpackage.ClassB\');', |
|||
'', |
|||
'goog.require(\'package.subpackage.ClassA\');' |
|||
] |
|||
expected_lines = [ |
|||
'goog.provide(\'package.subpackage.Whatever\');', |
|||
'', |
|||
'goog.require(\'package.subpackage.ClassA\');', |
|||
'goog.require(\'package.subpackage.ClassB\');' |
|||
] |
|||
token = self._tokenizer.TokenizeFile(input_lines) |
|||
self._metadata_pass.Reset() |
|||
self._metadata_pass.Process(token) |
|||
|
|||
sorter = requireprovidesorter.RequireProvideSorter() |
|||
sorter.FixRequires(token) |
|||
|
|||
self.assertEquals(expected_lines, self._GetLines(token)) |
|||
|
|||
def _GetLines(self, token): |
|||
"""Returns an array of lines based on the specified token stream.""" |
|||
lines = [] |
|||
line = '' |
|||
while token: |
|||
line += token.string |
|||
if token.IsLastInLine(): |
|||
lines.append(line) |
|||
line = '' |
|||
token = token.next |
|||
return lines |
|||
|
|||
if __name__ == '__main__': |
|||
googletest.main() |
File diff suppressed because it is too large
@ -1,5 +0,0 @@ |
|||
[egg_info] |
|||
tag_build = |
|||
tag_date = 0 |
|||
tag_svn_revision = 0 |
|||
|