mirror of https://github.com/lukechilds/node.git
Ryan Dahl
13 years ago
21 changed files with 1467 additions and 563 deletions
@ -0,0 +1,21 @@ |
|||||
|
setup.py |
||||
|
gyp |
||||
|
LICENSE |
||||
|
AUTHORS |
||||
|
pylib/gyp/MSVSNew.py |
||||
|
pylib/gyp/MSVSProject.py |
||||
|
pylib/gyp/MSVSToolFile.py |
||||
|
pylib/gyp/MSVSUserFile.py |
||||
|
pylib/gyp/MSVSVersion.py |
||||
|
pylib/gyp/SCons.py |
||||
|
pylib/gyp/__init__.py |
||||
|
pylib/gyp/common.py |
||||
|
pylib/gyp/input.py |
||||
|
pylib/gyp/xcodeproj_file.py |
||||
|
pylib/gyp/generator/__init__.py |
||||
|
pylib/gyp/generator/gypd.py |
||||
|
pylib/gyp/generator/gypsh.py |
||||
|
pylib/gyp/generator/make.py |
||||
|
pylib/gyp/generator/msvs.py |
||||
|
pylib/gyp/generator/scons.py |
||||
|
pylib/gyp/generator/xcode.py |
@ -0,0 +1 @@ |
|||||
|
* |
@ -0,0 +1,41 @@ |
|||||
|
# Copyright (c) 2011 Google Inc. All rights reserved. |
||||
|
# Use of this source code is governed by a BSD-style license that can be |
||||
|
# found in the LICENSE file. |
||||
|
|
||||
|
|
||||
|
"""Top-level presubmit script for GYP. |
||||
|
|
||||
|
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts |
||||
|
for more details about the presubmit API built into gcl. |
||||
|
""" |
||||
|
|
||||
|
|
||||
|
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run on upload: apply the canned pan-project checks."""
  results = []
  results.extend(
      input_api.canned_checks.PanProjectChecks(input_api, output_api))
  return results
|
|
||||
|
|
||||
|
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook run on commit.

  Runs the canned pan-project checks with the expected BSD license header
  (year taken from input_api.time), then verifies the tree is open.
  """
  license = (
      r'.*? Copyright \(c\) %(year)s Google Inc\. All rights reserved\.\n'
      r'.*? Use of this source code is governed by a BSD-style license that '
      r'can be\n'
      r'.*? found in the LICENSE file\.\n'
  ) % {'year': input_api.time.strftime('%Y')}

  results = []
  results.extend(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, license_header=license))
  results.extend(input_api.canned_checks.CheckTreeIsOpen(
      input_api, output_api,
      'http://gyp-status.appspot.com/status',
      'http://gyp-status.appspot.com/current'))
  return results
|
|
||||
|
|
||||
|
def GetPreferredTrySlaves():
  """Return the default try-bot names for GYP changes."""
  platforms = ('win32', 'win64', 'linux', 'mac')
  return ['gyp-%s' % platform for platform in platforms]
@ -0,0 +1,79 @@ |
|||||
|
#!/usr/bin/python |
||||
|
# Copyright (c) 2011 Google Inc. All rights reserved. |
||||
|
# Use of this source code is governed by a BSD-style license that can be |
||||
|
# found in the LICENSE file. |
||||
|
|
||||
|
|
||||
|
"""Argument-less script to select what to run on the buildbots.""" |
||||
|
|
||||
|
|
||||
|
import os |
||||
|
import shutil |
||||
|
import subprocess |
||||
|
import sys |
||||
|
|
||||
|
|
||||
|
def GypTestFormat(title, format, msvs_version=None):
  """Run the gyp tests for a given format, emitting annotator tags.

  See annotator docs at:
    https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations

  Args:
    title: annotator build-step title to emit.
    format: gyp format to test.
    msvs_version: if set, exported to the child as GYP_MSVS_VERSION.
  Returns:
    0 for success, 1 for failure.
  """
  print('@@@BUILD_STEP ' + title + '@@@')
  # Flush so the step tag appears before the child process's output.
  sys.stdout.flush()
  buildbot_dir = os.path.dirname(os.path.abspath(__file__))
  trunk_dir = os.path.dirname(buildbot_dir)
  root_dir = os.path.dirname(trunk_dir)
  env = os.environ.copy()
  if msvs_version:
    env['GYP_MSVS_VERSION'] = msvs_version
  # NOTE(review): shell=True with a joined command string is kept as-is for
  # buildbot compatibility; all arguments are trusted constants.
  retcode = subprocess.call(' '.join(
      [sys.executable, 'trunk/gyptest.py',
       '--all',
       '--passed',
       '--format', format,
       '--chdir', 'trunk',
       '--path', '../scons']),
      cwd=root_dir, env=env, shell=True)
  if retcode:
    # Emit failure tag, and keep going.
    print('@@@STEP_FAILURE@@@')
    return 1
  return 0
|
|
||||
|
|
||||
|
def GypBuild():
  """Clean out/, run the gyp test formats for this platform, and exit
  nonzero if any format failed."""
  # Dump out/ directory.
  print('@@@BUILD_STEP cleanup@@@')
  print('Removing out/ ...')
  shutil.rmtree('out', ignore_errors=True)
  print('Done.')

  retcode = 0
  if sys.platform.startswith('linux'):
    retcode += GypTestFormat('scons', format='scons')
    retcode += GypTestFormat('make', format='make')
  elif sys.platform == 'darwin':
    retcode += GypTestFormat('xcode', format='xcode')
    retcode += GypTestFormat('make', format='make')
  elif sys.platform == 'win32':
    retcode += GypTestFormat('msvs-2008', format='msvs', msvs_version='2008')
    # .get(): a missing BUILDBOT_BUILDERNAME must not crash with KeyError.
    if os.environ.get('BUILDBOT_BUILDERNAME') == 'gyp-win64':
      retcode += GypTestFormat('msvs-2010', format='msvs', msvs_version='2010')
  else:
    raise Exception('Unknown platform')
  if retcode:
    # TODO(bradnelson): once the annotator supports a postscript (section for
    # after the build proper that could be used for cumulative failures),
    # use that instead of this. This isolates the final return value so
    # that it isn't misattributed to the last stage.
    print('@@@BUILD_STEP failures@@@')
    sys.exit(retcode)


if __name__ == '__main__':
  GypBuild()
@ -0,0 +1,18 @@ |
|||||
|
#!/usr/bin/env python |
||||
|
|
||||
|
# Copyright (c) 2009 Google Inc. All rights reserved. |
||||
|
# Use of this source code is governed by a BSD-style license that can be |
||||
|
# found in the LICENSE file. |
||||
|
|
||||
|
import sys

# TODO(mark): sys.path manipulation is some temporary testing stuff.
try:
  import gyp
except ImportError:
  # gyp is not installed; fall back to the pylib/ copy next to this script.
  # ("except ImportError, e" was Python-2-only syntax, and e was unused.)
  import os.path
  sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
  import gyp

if __name__ == '__main__':
  sys.exit(gyp.main(sys.argv[1:]))
@ -0,0 +1,5 @@ |
|||||
|
@rem Copyright (c) 2009 Google Inc. All rights reserved.
@rem Use of this source code is governed by a BSD-style license that can be
@rem found in the LICENSE file.

@rem Forward all arguments to the Python "gyp" script located next to this
@rem batch file (%~dp0 expands to this file's directory).
@python "%~dp0/gyp" %*
@ -0,0 +1,81 @@ |
|||||
|
#!/usr/bin/python |
||||
|
|
||||
|
# Copyright (c) 2009 Google Inc. All rights reserved. |
||||
|
# Use of this source code is governed by a BSD-style license that can be |
||||
|
# found in the LICENSE file. |
||||
|
|
||||
|
import os.path |
||||
|
import shutil |
||||
|
import sys |
||||
|
|
||||
|
|
||||
|
# Gyp build files (and gyp-invoked helper scripts) mirrored between this
# directory and a Chrome checkout by Main() below; each entry is a path
# relative to the Chrome source root.
gyps = [
    'app/app.gyp',
    'base/base.gyp',
    'build/temp_gyp/googleurl.gyp',
    'build/all.gyp',
    'build/common.gypi',
    'build/external_code.gypi',
    'chrome/test/security_tests/security_tests.gyp',
    'chrome/third_party/hunspell/hunspell.gyp',
    'chrome/chrome.gyp',
    'media/media.gyp',
    'net/net.gyp',
    'printing/printing.gyp',
    'sdch/sdch.gyp',
    'skia/skia.gyp',
    'testing/gmock.gyp',
    'testing/gtest.gyp',
    'third_party/bzip2/bzip2.gyp',
    'third_party/icu38/icu38.gyp',
    'third_party/libevent/libevent.gyp',
    'third_party/libjpeg/libjpeg.gyp',
    'third_party/libpng/libpng.gyp',
    'third_party/libxml/libxml.gyp',
    'third_party/libxslt/libxslt.gyp',
    'third_party/lzma_sdk/lzma_sdk.gyp',
    'third_party/modp_b64/modp_b64.gyp',
    'third_party/npapi/npapi.gyp',
    'third_party/sqlite/sqlite.gyp',
    'third_party/zlib/zlib.gyp',
    'v8/tools/gyp/v8.gyp',
    'webkit/activex_shim/activex_shim.gyp',
    'webkit/activex_shim_dll/activex_shim_dll.gyp',
    'webkit/build/action_csspropertynames.py',
    'webkit/build/action_cssvaluekeywords.py',
    'webkit/build/action_jsconfig.py',
    'webkit/build/action_makenames.py',
    'webkit/build/action_maketokenizer.py',
    'webkit/build/action_useragentstylesheets.py',
    'webkit/build/rule_binding.py',
    'webkit/build/rule_bison.py',
    'webkit/build/rule_gperf.py',
    'webkit/tools/test_shell/test_shell.gyp',
    'webkit/webkit.gyp',
]
||||
|
|
||||
|
|
||||
|
def Main(argv):
  """Copy the files listed in |gyps| between here and a Chrome checkout.

  Args:
    argv: [prog, 'push'|'pull', path_to_chrome].  'push' copies the local
        copies into the Chrome tree; 'pull' copies them back out.
  Returns:
    0 on success, 1 on usage error.
  """
  if len(argv) != 3 or argv[1] not in ['push', 'pull']:
    print('Usage: %s push/pull PATH_TO_CHROME' % argv[0])
    return 1

  path_to_chrome = argv[2]

  for g in gyps:
    chrome_file = os.path.join(path_to_chrome, g)
    # Local copies are flattened: only the basename lives next to this script.
    local_file = os.path.join(os.path.dirname(argv[0]), os.path.split(g)[1])
    if argv[1] == 'push':
      print('Copying %s to %s' % (local_file, chrome_file))
      shutil.copyfile(local_file, chrome_file)
    elif argv[1] == 'pull':
      print('Copying %s to %s' % (chrome_file, local_file))
      shutil.copyfile(chrome_file, local_file)
    else:
      assert False  # Unreachable: argv[1] validated above.

  return 0


if __name__ == '__main__':
  sys.exit(Main(sys.argv))
@ -0,0 +1,5 @@ |
|||||
|
@rem Copyright (c) 2009 Google Inc. All rights reserved.
@rem Use of this source code is governed by a BSD-style license that can be
@rem found in the LICENSE file.

@rem Forward all arguments to the Python "samples" script next to this file.
@rem NOTE(review): unlike gyp.bat, the path is unquoted and will break when
@rem the install directory contains spaces -- confirm and quote if so.
@python %~dp0/samples %*
@ -0,0 +1,15 @@ |
|||||
|
pretty_vcproj: |
||||
|
Usage: pretty_vcproj.py "c:\path\to\vcproj.vcproj" [key1=value1] [key2=value2] |
||||
|
|
||||
|
The key/value pairs are used to resolve vsprops names.
||||
|
|
||||
|
For example, if I want to diff the base.vcproj project: |
||||
|
|
||||
|
pretty_vcproj.py z:\dev\src-chrome\src\base\build\base.vcproj "$(SolutionDir)=z:\dev\src-chrome\src\chrome\\" "$(CHROMIUM_BUILD)=" "$(CHROME_BUILD_TYPE)=" > original.txt
||||
|
pretty_vcproj.py z:\dev\src-chrome\src\base\base_gyp.vcproj "$(SolutionDir)=z:\dev\src-chrome\src\chrome\\" "$(CHROMIUM_BUILD)=" "$(CHROME_BUILD_TYPE)=" > gyp.txt |
||||
|
|
||||
|
And you can use your favorite diff tool to see the changes. |
||||
|
|
||||
|
Note: In the case of base.vcproj, the original vcproj is one level up from the generated one.
||||
|
I suggest you do a search and replace for '"..\' and replace it with '"' in original.txt |
||||
|
before you perform the diff. |
@ -0,0 +1,95 @@ |
|||||
|
#!/usr/bin/python |
||||
|
|
||||
|
# Copyright (c) 2011 Google Inc. All rights reserved. |
||||
|
# Use of this source code is governed by a BSD-style license that can be |
||||
|
# found in the LICENSE file. |
||||
|
|
||||
|
"""Using the JSON dumped by the dump-dependency-json generator, |
||||
|
generate input suitable for graphviz to render a dependency graph of |
||||
|
targets.""" |
||||
|
|
||||
|
import collections |
||||
|
import json |
||||
|
import sys |
||||
|
|
||||
|
|
||||
|
def ParseTarget(target):
  """Split a 'file.gyp:target#suffix' spec into (filename, target, suffix).

  Missing pieces come back as empty strings.
  """
  rest, _, suffix = target.partition('#')
  filename, _, target_name = rest.partition(':')
  return filename, target_name, suffix
|
|
||||
|
|
||||
|
def LoadEdges(filename, targets):
  """Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents.

  Args:
    filename: path to the JSON dependency dump.
    targets: list of target names to seed the reachability walk.
  Returns:
    dict mapping each reachable target to its list of dependencies.
  """
  # Bug fix: the original ignored |filename| and always opened 'dump.json';
  # also use a context manager instead of shadowing the builtin 'file'.
  with open(filename) as dump_file:
    edges = json.load(dump_file)

  # Copy out only the edges we're interested in from the full edge list.
  target_edges = {}
  to_visit = targets[:]
  while to_visit:
    src = to_visit.pop()
    if src in target_edges:
      continue
    target_edges[src] = edges[src]
    to_visit.extend(edges[src])

  return target_edges
|
|
||||
|
|
||||
|
def WriteGraph(edges):
  """Print a graphviz graph to stdout.

  |edges| is a map of target to a list of other targets it depends on.
  (Python-2-only print statements converted to the cross-compatible
  single-argument print() form.)
  """
  # Bucket targets by file.
  files = collections.defaultdict(list)
  for src in edges:
    build_file, _, _ = ParseTarget(src)
    files[build_file].append(src)

  print('digraph D {')
  print('  fontsize=8')  # Used by subgraphs.
  print('  node [fontsize=8]')

  # Output nodes by file.  We must first write out each node within
  # its file grouping before writing out any edges that may refer
  # to those nodes.
  for filename, targets in files.items():
    if len(targets) == 1:
      # If there's only one node for this file, simplify
      # the display by making it a box without an internal node.
      target = targets[0]
      build_file, target_name, toolset = ParseTarget(target)
      print('  "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
                                                     target_name))
    else:
      # Group multiple nodes together in a subgraph.
      print('  subgraph "cluster_%s" {' % filename)
      print('    label = "%s"' % filename)
      for target in targets:
        build_file, target_name, toolset = ParseTarget(target)
        print('    "%s" [label="%s"]' % (target, target_name))
      print('  }')

  # Now that we've placed all the nodes within subgraphs, output all
  # the edges between nodes.
  for src, dsts in edges.items():
    for dst in dsts:
      print('  "%s" -> "%s"' % (src, dst))

  print('}')
||||
|
|
||||
|
|
||||
|
if __name__ == '__main__':
  if len(sys.argv) < 2:
    # "print >>sys.stderr" was Python-2-only; sys.stderr.write works on both.
    sys.stderr.write((__doc__ or '') + '\n')
    sys.stderr.write('\n')
    sys.stderr.write('usage: %s target1 target2...\n' % (sys.argv[0]))
    sys.exit(1)

  edges = LoadEdges('dump.json', sys.argv[1:])

  WriteGraph(edges)
@ -0,0 +1,142 @@ |
|||||
|
#!/usr/bin/env python |
||||
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved. |
||||
|
# Use of this source code is governed by a BSD-style license that can be |
||||
|
# found in the LICENSE file. |
||||
|
|
||||
|
# This file pretty-prints the contents of a GYP file. |
||||
|
|
||||
|
import sys
import re


# Lines to pretty-print: read from the file named on the command line when
# given, otherwise from stdin.
# NOTE(review): 'input' shadows the builtin of the same name.
input = []
if len(sys.argv) > 1:
  input_file = open(sys.argv[1])
  input = input_file.read().splitlines()
  input_file.close()
else:
  input = sys.stdin.read().splitlines()


# This is used to remove comments when we're counting braces.
comment_re = re.compile(r'\s*#.*')

# This is used to remove quoted strings when we're counting braces.
# It takes into account quoted quotes, and makes sure that the quotes
# match.
# NOTE: It does not handle quotes that span more than one line, or
# cases where an escaped quote is preceded by an escaped backslash.
quote_re_str = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
quote_re = re.compile(quote_re_str)
||||
|
|
||||
|
def comment_replace(matchobj):
  """re.sub callback: keep groups 1-2, pad out group 3 with '#' characters
  of equal length so column positions are preserved."""
  prefix = matchobj.group(1) + matchobj.group(2)
  return prefix + '#' * len(matchobj.group(3))
|
|
||||
|
def mask_comments(input):
  """Blank out comment text with '#' padding on every line so that braces
  inside comments are ignored when counting braces."""
  comment_tail_re = re.compile(r'(.*?)(#)(.*)')
  return [comment_tail_re.sub(comment_replace, line) for line in input]
|
|
||||
|
def quote_replace(matchobj):
  """re.sub callback: replace a quoted string's contents with 'x' padding
  of the same length, keeping the surrounding quote characters."""
  quote = matchobj.group(2)
  body = 'x' * len(matchobj.group(3))
  return matchobj.group(1) + quote + body + quote
|
|
||||
|
def mask_quotes(input):
  """Blank out quoted-string contents with 'x' padding on every line so
  that braces inside string literals are ignored when counting braces."""
  quoted_re = re.compile(r'(.*?)' + quote_re_str)
  return [quoted_re.sub(quote_replace, line) for line in input]
|
|
||||
|
def do_split(input, masked_input, search_re):
  """Split each line wherever |search_re| matches its masked twin.

  The real and masked streams are kept in lockstep so later passes see
  consistent line boundaries.  Returns (lines, masked_lines).
  """
  out_lines = []
  out_masked = []
  for (line, masked_line) in zip(input, masked_input):
    match = search_re.match(masked_line)
    while match:
      cut = len(match.group(1))
      # r'\n' (a literal backslash-n) marks the split point; it is removed
      # again by the split() below.
      line = line[:cut] + r'\n' + line[cut:]
      masked_line = masked_line[:cut] + r'\n' + masked_line[cut:]
      match = search_re.match(masked_line)
    out_lines.extend(line.split(r'\n'))
    out_masked.extend(masked_line.split(r'\n'))
  return (out_lines, out_masked)
|
|
||||
|
def split_double_braces(input):
  """Mask quotes and comments, then split lines carrying multiple braces.

  Splitting makes the later indentation pass lay the braces out prettily
  (e.g. closing braces form a nice diagonal line).
  """
  # Split after an opener that follows another opener/comma, and after a
  # closer that follows another closer.
  opener_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
  closer_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')

  masked = mask_comments(mask_quotes(input))

  output, masked = do_split(input, masked, opener_re)
  output, masked = do_split(output, masked, closer_re)

  return output
||||
|
|
||||
|
def count_braces(line):
  """Return (delta, defer) for |line|.

  |delta| is the net brace count (openers add, closers subtract), ignoring
  braces inside comments and quoted strings.  |defer| is True when the
  indent change should only take effect after this line is printed.
  """
  closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
  stripped = comment_re.sub(r'', line)
  stripped = quote_re.sub(r"''", stripped)
  opens = sum(stripped.count(ch) for ch in '[({')
  closes = sum(stripped.count(ch) for ch in '])}')
  delta = opens - closes

  # A net opener indents the *following* lines.  A net closer preceded by
  # non-whitespace content keeps this line at the previous indentation
  # level, so the unindent is also deferred.
  defer = delta > 0 or (delta < 0 and bool(closing_prefix_re.match(stripped)))
  return (delta, defer)
||||
|
|
||||
|
def prettyprint_input(lines):
  """Indent |lines| based on brace counts and print the result to stdout.

  Fixes: Python-2-only print statements converted to the cross-compatible
  single-argument print() form; removed the unused 'last_line' variable.
  """
  indent = 0
  basic_offset = 2
  for line in lines:
    if comment_re.match(line):
      # Whole-line comments are emitted verbatim.
      print(line)
    else:
      line = line.strip('\r\n\t ')  # Otherwise doesn't strip \r on Unix.
      if len(line) > 0:
        (brace_diff, after) = count_braces(line)
        if brace_diff != 0:
          if after:
            # Print first, then adjust indent (net opener / trailing closer).
            print(" " * (basic_offset * indent) + line)
            indent += brace_diff
          else:
            indent += brace_diff
            print(" " * (basic_offset * indent) + line)
        else:
          print(" " * (basic_offset * indent) + line)
      else:
        print("")
||||
|
|
||||
|
# Split up the double braces so each brace lands on its own line.
lines = split_double_braces(input)

# Indent and print the output to stdout.
prettyprint_input(lines)
@ -0,0 +1,167 @@ |
|||||
|
#!/usr/bin/python2.5 |
||||
|
|
||||
|
# Copyright (c) 2009 Google Inc. All rights reserved. |
||||
|
# Use of this source code is governed by a BSD-style license that can be |
||||
|
# found in the LICENSE file. |
||||
|
|
||||
|
"""Prints the information in a sln file in a diffable way. |
||||
|
|
||||
|
It first outputs each projects in alphabetical order with their |
||||
|
dependencies. |
||||
|
|
||||
|
Then it outputs a possible build order. |
||||
|
""" |
||||
|
|
||||
|
__author__ = 'nsylvain (Nicolas Sylvain)' |
||||
|
|
||||
|
import os |
||||
|
import re |
||||
|
import sys |
||||
|
import pretty_vcproj |
||||
|
|
||||
|
def BuildProject(project, built, projects, deps):
  """Print |project| after all its dependencies, depth-first.

  Appends each printed project to |built|.  If all dependencies are done we
  can build it, otherwise we recurse into the dependency first.
  This is not infinite-recursion proof (a dependency cycle will recurse
  forever).

  Fix: Python-2-only print statement converted to print().
  """
  for dep in deps[project]:
    if dep not in built:
      BuildProject(dep, built, projects, deps)
  print(project)
  built.append(project)
||||
|
|
||||
|
def ParseSolution(solution_file):
  """Parse a Visual Studio .sln file.

  Returns (projects, dependencies) where projects maps a project name to
  [path-with-_gyp-stripped, clsid, raw-path] and dependencies maps a project
  name to a sorted list of the names of the projects it depends on.
  """
  # All projects, their clsid and paths.
  projects = dict()

  # A list of dependencies associated with a project.
  dependencies = dict()

  # Regular expressions that matches the SLN format.
  # The first line of a project definition.
  begin_project = re.compile(('^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                              '}"\) = "(.*)", "(.*)", "(.*)"$'))
  # The last line of a project definition.
  end_project = re.compile('^EndProject$')
  # The first line of a dependency list.
  begin_dep = re.compile('ProjectSection\(ProjectDependencies\) = postProject$')
  # The last line of a dependency list.
  end_dep = re.compile('EndProjectSection$')
  # A line describing a dependency.
  dep_line = re.compile(' *({.*}) = ({.*})$')

  in_deps = False
  # NOTE(review): solution is never closed, and current_project is only
  # bound once a project header has been seen -- a dependency line before
  # any project would raise NameError.  TODO confirm against real .sln input.
  solution = open(solution_file)
  for line in solution:
    results = begin_project.search(line)
    if results:
      # Hack to remove icu because the diff is too different.
      if results.group(1).find('icu') != -1:
        continue
      # We remove "_gyp" from the names because it helps to diff them.
      current_project = results.group(1).replace('_gyp', '')
      projects[current_project] = [results.group(2).replace('_gyp', ''),
                                   results.group(3),
                                   results.group(2)]
      dependencies[current_project] = []
      continue

    results = end_project.search(line)
    if results:
      current_project = None
      continue

    results = begin_dep.search(line)
    if results:
      in_deps = True
      continue

    results = end_dep.search(line)
    if results:
      in_deps = False
      continue

    results = dep_line.search(line)
    if results and in_deps and current_project:
      dependencies[current_project].append(results.group(1))
      continue

  # Change all dependencies clsid to name instead.
  for project in dependencies:
    # For each dependency in this project...
    new_dep_array = []
    for dep in dependencies[project]:
      # ...look for the project name matching this clsid.
      for project_info in projects:
        if projects[project_info][1] == dep:
          new_dep_array.append(project_info)
    dependencies[project] = sorted(new_dep_array)

  return (projects, dependencies)
||||
|
|
||||
|
def PrintDependencies(projects, deps):
  """Print every project with its path and dependency list, sorted by name.

  Fix: Python-2-only print statements converted to print().
  """
  print("---------------------------------------")
  print("Dependencies for all projects")
  print("---------------------------------------")
  print("-- --")

  for (project, dep_list) in sorted(deps.items()):
    print("Project : %s" % project)
    print("Path : %s" % projects[project][0])
    if dep_list:
      for dep in dep_list:
        print(" - %s" % dep)
    print("")

  print("-- --")
|
|
||||
|
def PrintBuildOrder(projects, deps):
  """Print one possible dependency-respecting build order for all projects.

  Fix: Python-2-only print statements converted to print().
  """
  print("---------------------------------------")
  print("Build order ")
  print("---------------------------------------")
  print("-- --")

  built = []
  for (project, dep_list) in sorted(deps.items()):
    if project not in built:
      BuildProject(project, built, projects, deps)

  print("-- --")
|
|
||||
|
def PrintVCProj(projects):
  """Pretty-print each project's vcproj via the pretty_vcproj module.

  Paths are resolved relative to the directory of the .sln in sys.argv[1];
  extra command-line args (sys.argv[3:]) are forwarded to pretty_vcproj.

  Fix: Python-2-only print statements converted to print().
  """
  for project in projects:
    print("-------------------------------------")
    print("-------------------------------------")
    print(project)
    print(project)
    print(project)
    print("-------------------------------------")
    print("-------------------------------------")

    project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                projects[project][2]))

    pretty = pretty_vcproj
    argv = [ '',
             project_path,
             '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
           ]
    argv.extend(sys.argv[3:])
    pretty.main(argv)
||||
|
|
||||
|
def main():
  """Entry point: parse the .sln given in argv[1] and print its info.

  With --recursive anywhere in argv, also pretty-print each vcproj.

  Fix: Python-2-only print statement converted to print().
  """
  # Check that we have at least the one required parameter.
  if len(sys.argv) < 2:
    print('Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0])
    return

  (projects, deps) = ParseSolution(sys.argv[1])
  PrintDependencies(projects, deps)
  PrintBuildOrder(projects, deps)

  if '--recursive' in sys.argv:
    PrintVCProj(projects)


if __name__ == '__main__':
  main()
@ -0,0 +1,316 @@ |
|||||
|
#!/usr/bin/python2.5 |
||||
|
|
||||
|
# Copyright (c) 2009 Google Inc. All rights reserved. |
||||
|
# Use of this source code is governed by a BSD-style license that can be |
||||
|
# found in the LICENSE file. |
||||
|
|
||||
|
"""Make the format of a vcproj really pretty. |
||||
|
|
||||
|
This script normalize and sort an xml. It also fetches all the properties |
||||
|
inside linked vsprops and include them explicitly in the vcproj. |
||||
|
|
||||
|
It outputs the resulting xml to stdout. |
||||
|
""" |
||||
|
|
||||
|
__author__ = 'nsylvain (Nicolas Sylvain)' |
||||
|
|
||||
|
import os |
||||
|
import sys |
||||
|
|
||||
|
from xml.dom.minidom import parse |
||||
|
from xml.dom.minidom import Node |
||||
|
|
||||
|
# Path-fragment substitutions applied by FixFilenames (e.g. "$(Macro)=value"
# pairs from the command line -- TODO confirm: populated outside this view).
REPLACEMENTS = dict()
# Raw argv for the run; presumably set by main() before any node processing
# (main is outside this view -- verify).
ARGUMENTS = None
||||
|
|
||||
|
class CmpTuple:
  """cmp-style comparator for (key, value) tuples, ordering by key only.

  Fix: the py2-only cmp() builtin is replaced by an equivalent expression,
  so this also runs on Python 3.  Kept as a callable returning -1/0/1
  because existing call sites use it as a comparison function.
  """
  def __call__(self, x, y):
    (key1, value1) = x
    (key2, value2) = y
    # Equivalent to cmp(key1, key2).
    return (key1 > key2) - (key1 < key2)
|
|
||||
|
class CmpNode:
  """cmp-style comparator between 2 xml nodes.

  Fixes: the py2-only cmp() builtin and py2-only list.sort(comparator)
  call are replaced with Python-3-compatible equivalents that produce the
  same ordering.
  """

  def get_string(self, node):
    """Build the canonical sort-key string for |node|."""
    node_string = "node"
    node_string += node.nodeName
    if node.nodeValue:
      node_string += node.nodeValue

    if node.attributes:
      # We first sort by name, if present.
      node_string += node.getAttribute("Name")

      all_nodes = []
      for (name, value) in node.attributes.items():
        all_nodes.append((name, value))

      # key= sort by attribute name; same stable ordering the old
      # cmp-based CmpTuple() sort produced.
      all_nodes.sort(key=lambda attr: attr[0])
      for (name, value) in all_nodes:
        node_string += name
        node_string += value

    return node_string

  def __call__(self, x, y):
    s1 = self.get_string(x)
    s2 = self.get_string(y)
    # Equivalent to cmp(s1, s2).
    return (s1 > s2) - (s1 < s2)
||||
|
|
||||
|
def PrettyPrintNode(node, indent=0):
  """Recursively pretty-print an XML |node| to stdout at |indent| spaces.

  Attributes are emitted one per line, sorted by name.  Fixes: Python-2-only
  print statements converted to print(); the py2-only sort(CmpTuple())
  call replaced with an equivalent key= sort.
  """
  if node.nodeType == Node.TEXT_NODE:
    if node.data.strip():
      print('%s%s' % (' '*indent, node.data.strip()))
    return

  if node.childNodes:
    node.normalize()
  # Get the number of attributes
  attr_count = 0
  if node.attributes:
    attr_count = node.attributes.length

  # Print the main tag
  if attr_count == 0:
    print('%s<%s>' % (' '*indent, node.nodeName))
  else:
    print('%s<%s' % (' '*indent, node.nodeName))

    all_attributes = []
    for (name, value) in node.attributes.items():
      all_attributes.append((name, value))
    # Sort by attribute name (same ordering as the old CmpTuple() sort).
    all_attributes.sort(key=lambda attr: attr[0])
    for (name, value) in all_attributes:
      print('%s  %s="%s"' % (' '*indent, name, value))
    print('%s>' % (' '*indent))
  if node.nodeValue:
    print('%s  %s' % (' '*indent, node.nodeValue))

  for sub_node in node.childNodes:
    PrettyPrintNode(sub_node, indent=indent+2)
  print('%s</%s>' % (' '*indent, node.nodeName))
||||
|
|
||||
|
def FlattenFilter(node):
  """Return |node|'s children as a flat list, expanding nested Filter nodes.

  The "_excluded_files" filter (and everything under it) is dropped.
  """
  if (node.attributes and
      node.getAttribute('Name') == '_excluded_files'):
    # We don't add the "_excluded_files" filter.
    return []

  flattened = []
  for child in node.childNodes:
    if child.nodeName == 'Filter':
      flattened.extend(FlattenFilter(child))
    else:
      flattened.append(child)

  return flattened
||||
|
|
||||
|
def FixFilenames(filenames, current_directory):
  """Normalize each entry of |filenames|: apply REPLACEMENTS, strip quotes,
  and make non-$(macro) paths absolute relative to |current_directory|.

  Empty entries are dropped.  Note: chdir is how abspath() is anchored to
  |current_directory| (process-wide side effect, kept as-is).
  """
  fixed = []
  for name in filenames:
    if not name:
      continue
    for key in REPLACEMENTS:
      name = name.replace(key, REPLACEMENTS[key])
    os.chdir(current_directory)
    name = name.strip('"\' ')
    if name.startswith('$'):
      # Leave $(Macro)-style paths symbolic.
      fixed.append(name)
    else:
      fixed.append(os.path.abspath(name))
  return fixed
||||
|
|
||||
|
def AbsoluteNode(node):
  """Make all the path-carrying attributes we know about on |node| absolute,
  and drop attributes whose value was empty."""
  if not node.attributes:
    return

  path_attributes = ('InheritedPropertySheets', 'RelativePath',
                     'AdditionalIncludeDirectories',
                     'IntermediateDirectory', 'OutputDirectory',
                     'AdditionalLibraryDirectories')
  for (name, value) in node.attributes.items():
    if name in path_attributes:
      # These are semicolon-separated path lists; fix up each element
      # relative to the vcproj's directory (ARGUMENTS[1]).
      fixed_paths = FixFilenames(value.split(';'),
                                 os.path.dirname(ARGUMENTS[1]))
      node.setAttribute(name, ';'.join(fixed_paths))
    if not value:
      node.removeAttribute(name)
||||
|
|
||||
|
def CleanupVcproj(node):
  """Recursively normalize |node|: absolutize paths, strip whitespace,
  sort and dedup attribute lists, flatten Filter children, and re-insert
  the children in canonical (CmpNode) order."""
  # For each sub node, we call recursively this function.
  for sub_node in node.childNodes:
    AbsoluteNode(sub_node)
    CleanupVcproj(sub_node)

  # Normalize the node, and remove all extraneous whitespace.
  for sub_node in node.childNodes:
    if sub_node.nodeType == Node.TEXT_NODE:
      sub_node.data = sub_node.data.replace("\r", "")
      sub_node.data = sub_node.data.replace("\n", "")
      sub_node.data = sub_node.data.rstrip()

  # Fix all the semicolon separated attributes to be sorted, and we also
  # remove the dups.
  if node.attributes:
    for (name, value) in node.attributes.items():
      sorted_list = sorted(value.split(';'))
      unique_list = []
      # Dedup while preserving the sorted order.
      [unique_list.append(i) for i in sorted_list if not unique_list.count(i)]
      node.setAttribute(name, ';'.join(unique_list))
      if not value:
        node.removeAttribute(name)

  if node.childNodes:
    node.normalize()

  # For each node, take a copy, and remove it from the list.
  node_array = []
  while node.childNodes and node.childNodes[0]:
    # Take a copy of the node and remove it from the list.
    current = node.childNodes[0]
    node.removeChild(current)

    # If the child is a filter, we want to append all its children
    # to this same list.
    if current.nodeName == 'Filter':
      node_array.extend(FlattenFilter(current))
    else:
      node_array.append(current)

  # Sort the list.
  # NOTE(review): list.sort(comparator) is Python-2-only syntax; a py3 port
  # would need key= or functools.cmp_to_key(CmpNode()).
  node_array.sort(CmpNode())

  # Insert the nodes in the correct order.
  for new_node in node_array:
    # But don't append empty tool node.
    if new_node.nodeName == 'Tool':
      if new_node.attributes and new_node.attributes.length == 1:
        # This one was empty.
        continue
    if new_node.nodeName == 'UserMacro':
      continue
    node.appendChild(new_node)
||||
|
|
||||
|
def GetConfiguationNodes(vcproj):
  """Return the <Configuration> elements of a vcproj DOM root.

  Looks under the document element for the "Configurations" container and
  collects its "Configuration" children, in document order.  (The function
  name keeps its historical typo so existing callers still work.)
  """
  #TODO(nsylvain): Find a better way to navigate the xml.
  return [config
          for container in vcproj.childNodes
          if container.nodeName == "Configurations"
          for config in container.childNodes
          if config.nodeName == "Configuration"]
|
||||
|
def GetChildrenVsprops(filename):
  """Return the property sheets inherited by the vsprops file |filename|.

  Parses the file and reads the root element's 'InheritedPropertySheets'
  attribute; the semicolon-separated entries are resolved to absolute
  paths (relative to the vsprops' own directory) via FixFilenames.
  Returns an empty list when the root element carries no attributes.
  """
  root = parse(filename).documentElement
  if not root.attributes:
    return []
  inherited = root.getAttribute('InheritedPropertySheets')
  return FixFilenames(inherited.split(';'), os.path.dirname(filename))
|
||||
|
def SeekToNode(node1, child2):
  """Find the child of |node1| that corresponds to |child2|.

  Two nodes correspond when they share both nodeName and the "Name"
  attribute.  Returns the matching child element, or None when |child2|
  is a text node, has no "Name" attribute, or no match exists.
  """
  # Text nodes carry no attributes, so there is nothing to match on.
  if child2.nodeType == Node.TEXT_NODE:
    return None

  wanted_name = child2.getAttribute("Name")
  if not wanted_name:
    # Without a Name we have no merge key; give up.
    return None

  # Scan node1's children for a (nodeName, Name) match.
  for candidate in node1.childNodes:
    if (candidate.nodeName == child2.nodeName and
        candidate.getAttribute("Name") == wanted_name):
      return candidate

  # No match found.
  return None
|
||||
|
def MergeAttributes(node1, node2):
  """Fold node2's attributes into node1.

  The 'Name' attribute is never merged.  For every other attribute: a
  value absent from node1 is copied over; differing values are joined
  with ';'; equal values are left untouched.  'InheritedPropertySheets'
  is removed from node1 afterwards, since the sheets themselves have
  already been merged in.
  """
  # Nothing to do when node2 carries no attributes.
  if not node2.attributes:
    return

  for (attr_name, incoming) in node2.attributes.items():
    # 'Name' identifies the node; never touch it.
    if attr_name == 'Name':
      continue

    existing = node1.getAttribute(attr_name)
    if not existing:
      # The attribute does not exist in the main node; take node2's value.
      node1.setAttribute(attr_name, incoming)
    elif existing != incoming:
      # Both sides define it differently: concatenate the two values.
      node1.setAttribute(attr_name, ';'.join([existing, incoming]))

    # Property sheet references are useless once merged; drop them.
    if attr_name == 'InheritedPropertySheets':
      node1.removeAttribute(attr_name)
|
||||
|
def MergeProperties(node1, node2):
  """Recursively merge node2's attributes and children into node1.

  Children of node2 that have a counterpart in node1 (same tag and Name,
  as determined by SeekToNode) are merged in place; all others are
  deep-copied and appended to node1.
  """
  MergeAttributes(node1, node2)
  for incoming in node2.childNodes:
    counterpart = SeekToNode(node1, incoming)
    if counterpart is None:
      # No matching child: graft a deep copy of the incoming subtree.
      node1.appendChild(incoming.cloneNode(True))
    else:
      MergeProperties(counterpart, incoming)
|
||||
|
def main(argv):
  """Main function of this vcproj prettifier.

  Usage: prettifier <path\\to\\vcproj.vcproj> [key1=value1] [key2=value2]...

  Merges every inherited property sheet into the vcproj's configuration
  nodes, cleans up the resulting DOM and pretty-prints it to stdout.
  The key=value pairs populate the global REPLACEMENTS table.
  """
  # NOTE: the docstring above used to sit below these statements, where it
  # was a dead string expression rather than a real docstring.
  global REPLACEMENTS
  global ARGUMENTS
  ARGUMENTS = argv

  # We need at least the vcproj file path.
  if len(argv) < 2:
    print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
           '[key2=value2]' % argv[0])
    return

  # Parse the key=value replacement arguments.
  for i in range(2, len(argv)):
    (key, value) = argv[i].split('=')
    REPLACEMENTS[key] = value

  # Open the vcproj and parse the xml.
  dom = parse(argv[1])

  # First thing we need to do is find the Configuration Nodes and merge them
  # with the vsprops they include.
  for configuration_node in GetConfiguationNodes(dom.documentElement):
    # Get the property sheets associated with this configuration.
    vsprops = configuration_node.getAttribute('InheritedPropertySheets')

    # Fix the filenames to be absolute.
    vsprops_list = FixFilenames(vsprops.strip().split(';'),
                                os.path.dirname(argv[1]))

    # Extend the list of vsprops with all vsprops contained in the current
    # vsprops.  Appending while iterating is deliberate: the newly appended
    # sheets are visited too, so the whole inheritance chain is collected.
    for current_vsprops in vsprops_list:
      vsprops_list.extend(GetChildrenVsprops(current_vsprops))

    # Now that we have all the vsprops, we need to merge them.
    for current_vsprops in vsprops_list:
      MergeProperties(configuration_node,
                      parse(current_vsprops).documentElement)

  # Now that everything is merged, we need to cleanup the xml.
  CleanupVcproj(dom.documentElement)

  # Finally, we use the pretty xml function to print the vcproj back to the
  # user.
  #print dom.toprettyxml(newl="\n")
  PrettyPrintNode(dom.documentElement)
|
||||
|
# Script entry point: forward the raw command line to main().
if __name__ == '__main__':
  main(sys.argv)
Loading…
Reference in new issue