
Upgrade V8 to 2.4.7

v0.7.4-release
Ryan Dahl, 14 years ago
commit c9627e0a0d
100 changed files (lines changed):

  1. deps/v8/ChangeLog (176)
  2. deps/v8/benchmarks/README.txt (4)
  3. deps/v8/benchmarks/regexp.js (562)
  4. deps/v8/benchmarks/revisions.html (4)
  5. deps/v8/benchmarks/run.html (2)
  6. deps/v8/include/v8-debug.h (5)
  7. deps/v8/src/SConscript (1)
  8. deps/v8/src/api.cc (87)
  9. deps/v8/src/arm/builtins-arm.cc (65)
 10. deps/v8/src/arm/codegen-arm.cc (60)
 11. deps/v8/src/arm/frames-arm.cc (13)
 12. deps/v8/src/arm/full-codegen-arm.cc (869)
 13. deps/v8/src/arm/ic-arm.cc (20)
 14. deps/v8/src/arm/simulator-arm.cc (12)
 15. deps/v8/src/arm/stub-cache-arm.cc (150)
 16. deps/v8/src/assembler.cc (4)
 17. deps/v8/src/assembler.h (51)
 18. deps/v8/src/ast.cc (10)
 19. deps/v8/src/ast.h (204)
 20. deps/v8/src/bootstrapper.cc (57)
 21. deps/v8/src/builtins.h (2)
 22. deps/v8/src/codegen.cc (4)
 23. deps/v8/src/compilation-cache.cc (49)
 24. deps/v8/src/compiler.cc (47)
 25. deps/v8/src/compiler.h (155)
 26. deps/v8/src/contexts.cc (5)
 27. deps/v8/src/conversions.cc (62)
 28. deps/v8/src/cpu-profiler-inl.h (5)
 29. deps/v8/src/cpu-profiler.cc (64)
 30. deps/v8/src/cpu-profiler.h (18)
 31. deps/v8/src/data-flow.cc (4)
 32. deps/v8/src/debug-debugger.js (34)
 33. deps/v8/src/debug.cc (17)
 34. deps/v8/src/debug.h (1)
 35. deps/v8/src/disassembler.cc (5)
 36. deps/v8/src/dtoa.cc (7)
 37. deps/v8/src/fast-dtoa.cc (323)
 38. deps/v8/src/fast-dtoa.h (43)
 39. deps/v8/src/flag-definitions.h (6)
 40. deps/v8/src/frames.cc (52)
 41. deps/v8/src/frames.h (47)
 42. deps/v8/src/full-codegen.cc (241)
 43. deps/v8/src/full-codegen.h (272)
 44. deps/v8/src/global-handles.cc (8)
 45. deps/v8/src/globals.h (10)
 46. deps/v8/src/handles.cc (37)
 47. deps/v8/src/handles.h (8)
 48. deps/v8/src/heap-inl.h (7)
 49. deps/v8/src/heap.cc (197)
 50. deps/v8/src/heap.h (75)
 51. deps/v8/src/ia32/assembler-ia32.cc (133)
 52. deps/v8/src/ia32/assembler-ia32.h (19)
 53. deps/v8/src/ia32/builtins-ia32.cc (40)
 54. deps/v8/src/ia32/code-stubs-ia32.cc (102)
 55. deps/v8/src/ia32/codegen-ia32.cc (142)
 56. deps/v8/src/ia32/disasm-ia32.cc (57)
 57. deps/v8/src/ia32/frames-ia32.cc (12)
 58. deps/v8/src/ia32/full-codegen-ia32.cc (986)
 59. deps/v8/src/ia32/ic-ia32.cc (43)
 60. deps/v8/src/ia32/macro-assembler-ia32.cc (18)
 61. deps/v8/src/ia32/macro-assembler-ia32.h (5)
 62. deps/v8/src/ia32/stub-cache-ia32.cc (266)
 63. deps/v8/src/ia32/virtual-frame-ia32.cc (2)
 64. deps/v8/src/ic.cc (72)
 65. deps/v8/src/ic.h (4)
 66. deps/v8/src/liveedit.cc (10)
 67. deps/v8/src/log.cc (43)
 68. deps/v8/src/log.h (6)
 69. deps/v8/src/mark-compact.cc (40)
 70. deps/v8/src/messages.js (26)
 71. deps/v8/src/mips/assembler-mips.h (1)
 72. deps/v8/src/mips/frames-mips.cc (10)
 73. deps/v8/src/objects-debug.cc (23)
 74. deps/v8/src/objects-inl.h (71)
 75. deps/v8/src/objects.cc (257)
 76. deps/v8/src/objects.h (193)
 77. deps/v8/src/parser.cc (157)
 78. deps/v8/src/parser.h (67)
 79. deps/v8/src/profile-generator-inl.h (6)
 80. deps/v8/src/profile-generator.cc (28)
 81. deps/v8/src/profile-generator.h (20)
 82. deps/v8/src/regexp-macro-assembler-tracer.cc (57)
 83. deps/v8/src/regexp.js (8)
 84. deps/v8/src/rewriter.cc (4)
 85. deps/v8/src/runtime.cc (293)
 86. deps/v8/src/runtime.h (2)
 87. deps/v8/src/scanner.cc (9)
 88. deps/v8/src/scanner.h (6)
 89. deps/v8/src/scopeinfo.cc (16)
 90. deps/v8/src/scopes.cc (32)
 91. deps/v8/src/scopes.h (58)
 92. deps/v8/src/spaces.cc (44)
 93. deps/v8/src/spaces.h (102)
 94. deps/v8/src/string-search.cc (40)
 95. deps/v8/src/string-search.h (699)
 96. deps/v8/src/stub-cache.cc (29)
 97. deps/v8/src/stub-cache.h (45)
 98. deps/v8/src/utils.h (20)
 99. deps/v8/src/utils.h.orig (962)
100. deps/v8/src/v8-counters.h (2)

deps/v8/ChangeLog (176)

@@ -1,12 +1,53 @@
+2010-09-30: Version 2.4.7
+
+Changed the command-line flag --max-new-space-size to be in kB and the
+flag --max-old-space-size to be in MB (previously they were in bytes).
+
+Added Debug::CancelDebugBreak to the debugger API.
+
+Fixed a bug in getters for negative numeric property names
+(https://bugs.webkit.org/show_bug.cgi?id=46689).
+
+Performance improvements on all platforms.
+
+2010-09-27: Version 2.4.6
+
+Fixed assertion failure related to copy-on-write arrays (issue 876).
+
+Fixed build failure of 64-bit V8 on Windows.
+
+Fixed a bug in RegExp (issue http://crbug.com/52801).
+
+Improved the profiler's coverage to cover more functions (issue 858).
+
+Fixed error in shift operators on 64-bit V8
+(issue http://crbug.com/54521).
+
+2010-09-22: Version 2.4.5
+
+Changed the RegExp benchmark to exercise the regexp engine on different
+inputs by scrambling the input strings.
+
+Fixed a bug in keyed loads on strings.
+
+Fixed a bug with loading global function prototypes.
+
+Fixed a bug with profiling RegExp calls (issue http://crbug.com/55999).
+
+Performance improvements on all platforms.
+
 2010-09-15: Version 2.4.4
 
-Fix bug with hangs on very large sparse arrays.
+Fixed bug with hangs on very large sparse arrays.
 
-Try harder to free up memory when running out of space.
+Now tries harder to free up memory when running out of space.
 
-Add heap snapshots to JSON format to API.
+Added heap snapshots to JSON format to API.
 
-Recalibrate benchmarks.
+Recalibrated benchmarks.
 
 2010-09-13: Version 2.4.3
@@ -42,33 +83,33 @@
 2010-09-01: Version 2.4.0
 
-Fix bug in Object.freeze and Object.seal when Array.prototype or
-Object.prototype is changed (issue 842).
+Fixed bug in Object.freeze and Object.seal when Array.prototype or
+Object.prototype are changed (issue 842).
 
-Update Array.splice to follow Safari and Firefox when called
-with zero arguments.
+Updated Array.splice to follow Safari and Firefox when called
+with zero arguments.
 
-Fix a missing live register when breaking at keyed loads on ARM.
+Fixed a missing live register when breaking at keyed loads on ARM.
 
 Performance improvements on all platforms.
 
 2010-08-25: Version 2.3.11
 
-Fix bug in RegExp related to copy-on-write arrays.
+Fixed bug in RegExp related to copy-on-write arrays.
 
-Refactoring of tools/test.py script, including the introduction of
+Refactored tools/test.py script, including the introduction of
 VARIANT_FLAGS that allows specification of sets of flags with which
 all tests should be run.
 
-Fix a bug in the handling of debug breaks in CallIC.
+Fixed a bug in the handling of debug breaks in CallIC.
 
 Performance improvements on all platforms.
 
 2010-08-23: Version 2.3.10
 
-Fix bug in bitops on ARM.
+Fixed bug in bitops on ARM.
 
 Build fixes for unusual compilers.
@@ -79,7 +120,7 @@
 2010-08-18: Version 2.3.9
 
-Fix compilation for ARMv4 on OpenBSD/FreeBSD.
+Fixed compilation for ARMv4 on OpenBSD/FreeBSD.
 
 Removed specialized handling of GCC 4.4 (issue 830).
@@ -120,7 +161,7 @@
 Fixed handling of JSObject::elements in CalculateNetworkSize
 (issue 822).
 
-Allow compiling with strict aliasing enabled on GCC 4.4 (issue 463).
+Allowed compiling with strict aliasing enabled on GCC 4.4 (issue 463).
 
 2010-08-09: Version 2.3.6
@@ -130,7 +171,7 @@
 Object.seal and Object.freeze return the modified object (issue 809).
 
-Fix building using GCC 4.4.4.
+Fixed building using GCC 4.4.4.
 
 2010-08-04: Version 2.3.5
@@ -139,7 +180,7 @@
 dot-notation property access now allows keywords. Also allowed
 non-identifiers after "get" or "set" in an object initialiser.
 
-Randomize the addresses of allocated executable memory on Windows.
+Randomized the addresses of allocated executable memory on Windows.
 
 2010-08-02: Version 2.3.4
@@ -251,15 +292,15 @@
 2010-06-30: Version 2.2.21
 
-Fix bug in externalizing some ASCII strings (Chromium issue 47824).
+Fixed bug in externalizing some ASCII strings (Chromium issue 47824).
 
-Update JSON.stringify to floor the space parameter (issue 753).
+Updated JSON.stringify to floor the space parameter (issue 753).
 
-Update the Mozilla test expectations to the newest version.
+Updated the Mozilla test expectations to the newest version.
 
-Update the ES5 Conformance Test expectations to the latest version.
+Updated the ES5 Conformance Test expectations to the latest version.
 
-Update the V8 benchmark suite.
+Updated the V8 benchmark suite.
 
 Provide actual breakpoints locations in response to setBreakpoint
 and listBreakpoints requests.
@@ -267,13 +308,13 @@
 2010-06-28: Version 2.2.20
 
-Fix bug with for-in on x64 platform (issue 748).
+Fixed bug with for-in on x64 platform (issue 748).
 
-Fix crash bug on x64 platform (issue 756).
+Fixed crash bug on x64 platform (issue 756).
 
-Fix bug in Object.getOwnPropertyNames. (chromium issue 41243).
+Fixed bug in Object.getOwnPropertyNames. (chromium issue 41243).
 
-Fix a bug on ARM that caused the result of 1 << x to be
-miscalculated for some inputs.
+Fixed a bug on ARM that caused the result of 1 << x to be
+miscalculated for some inputs.
 
 Performance improvements on all platforms.
@@ -281,7 +322,7 @@
 2010-06-23: Version 2.2.19
 
-Fix bug that causes the build to break when profillingsupport=off
-(issue 738).
+Fixed bug that causes the build to break when profillingsupport=off
+(issue 738).
 
 Added expose-externalize-string flag for testing extensions.
@@ -289,7 +330,7 @@
 Resolve linker issues with using V8 as a DLL causing a number of
 problems with unresolved symbols.
 
-Fix build failure for cctests when ENABLE_DEBUGGER_SUPPORT is not
-defined.
+Fixed build failure for cctests when ENABLE_DEBUGGER_SUPPORT is not
+defined.
 
 Performance improvements on all platforms.
@@ -300,11 +341,11 @@
 Added API functions to retrieve information on indexed properties
 managed by the embedding layer. Fixes bug 737.
 
-Make ES5 Object.defineProperty support array elements. Fixes bug 619.
+Made ES5 Object.defineProperty support array elements. Fixes bug 619.
 
-Add heap profiling to the API.
+Added heap profiling to the API.
 
-Remove old named property query from the API.
+Removed old named property query from the API.
 
 Incremental performance improvements.
@@ -330,12 +371,12 @@
 2010-06-07: Version 2.2.15
 
-Add an API to control the disposal of external string resources.
+Added an API to control the disposal of external string resources.
 
-Add missing initialization of a couple of variables which makes
-some compilers complaint when compiling with -Werror.
+Added missing initialization of a couple of variables which makes
+some compilers complaint when compiling with -Werror.
 
-Improve performance on all platforms.
+Improved performance on all platforms.
 
 2010-06-02: Version 2.2.14
@@ -349,12 +390,12 @@
 2010-05-31: Version 2.2.13
 
-Implement Object.getOwnPropertyDescriptor for element indices and
-strings (issue 599).
+Implemented Object.getOwnPropertyDescriptor for element indices and
+strings (issue 599).
 
-Fix bug for windows 64 bit C calls from generated code.
+Fixed bug for windows 64 bit C calls from generated code.
 
-Add new scons flag unalignedaccesses for arm builds.
+Added new scons flag unalignedaccesses for arm builds.
 
 Performance improvements on all platforms.
@@ -369,7 +410,7 @@
 2010-05-21: Version 2.2.11
 
-Fix crash bug in liveedit on 64 bit.
+Fixed crash bug in liveedit on 64 bit.
 
 Use 'full compiler' when debugging is active. This should increase
 the density of possible break points, making single step more fine
@@ -379,11 +420,11 @@
 Misc. fixes to the Solaris build.
 
-Add new flags --print-cumulative-gc-stat and --trace-gc-nvp.
+Added new flags --print-cumulative-gc-stat and --trace-gc-nvp.
 
-Add filtering of CPU profiles by security context.
+Added filtering of CPU profiles by security context.
 
-Fix crash bug on ARM when running without VFP2 or VFP3.
+Fixed crash bug on ARM when running without VFP2 or VFP3.
 
 Incremental performance improvements in all backends.
@@ -395,12 +436,12 @@
 2010-05-10: Version 2.2.9
 
-Allow Object.create to be called with a function (issue 697).
+Allowed Object.create to be called with a function (issue 697).
 
 Fixed bug with Date.parse returning a non-NaN value when called on a
 non date string (issue 696).
 
-Allow unaligned memory accesses on ARM targets that support it (by
-Subrato K De of CodeAurora <subratokde@codeaurora.org>).
+Allowed unaligned memory accesses on ARM targets that support it (by
+Subrato K De of CodeAurora <subratokde@codeaurora.org>).
 
 C++ API for retrieving JavaScript stack trace information.
@@ -554,9 +595,9 @@
 2010-02-23: Version 2.1.2
 
-Fix a crash bug caused by wrong assert.
+Fixed a crash bug caused by wrong assert.
 
-Fix a bug with register names on 64-bit V8 (issue 615).
+Fixed a bug with register names on 64-bit V8 (issue 615).
 
 Performance improvements on all platforms.
@@ -592,13 +633,13 @@
 Solaris support by Erich Ocean <erich.ocean@me.com> and Ryan Dahl
 <ry@tinyclouds.org>.
 
-Fix a bug that Math.round() returns incorrect results for huge
-integers.
+Fixed a bug that Math.round() returns incorrect results for huge
+integers.
 
-Fix enumeration order for objects created from some constructor
-functions (isue http://crbug.com/3867).
+Fixed enumeration order for objects created from some constructor
+functions (isue http://crbug.com/3867).
 
-Fix arithmetic on some integer constants (issue 580).
+Fixed arithmetic on some integer constants (issue 580).
 
 Numerous performance improvements including porting of previous IA-32
 optimizations to x64 and ARM architectures.
@@ -737,11 +778,11 @@
 X64: Convert smis to holding 32 bits of payload.
 
-Introduce v8::Integer::NewFromUnsigned method.
+Introduced v8::Integer::NewFromUnsigned method.
 
-Add missing null check in Context::GetCurrent.
+Added missing null check in Context::GetCurrent.
 
-Add trim, trimLeft and trimRight methods to String
+Added trim, trimLeft and trimRight methods to String
 Patch by Jan de Mooij <jandemooij@gmail.com>
 
 Implement ES5 Array.isArray
@@ -749,14 +790,15 @@
 Skip access checks for hidden properties.
 
-Add String::Concat(Handle<String> left, Handle<String> right) to the V8 API.
+Added String::Concat(Handle<String> left, Handle<String> right) to the
+V8 API.
 
-Fix GYP-based builds of V8.
+Fixed GYP-based builds of V8.
 
 2009-10-07: Version 1.3.15
 
-Expand the maximum size of the code space to 512MB for 64-bit mode.
+Expanded the maximum size of the code space to 512MB for 64-bit mode.
 
 Fixed a crash bug happening when starting profiling (issue
 http://crbug.com/23768).
@@ -768,10 +810,10 @@
 located on the object or in the prototype chain skipping any
 interceptors.
 
-Fix the stack limits setting API to work correctly with threads. The
-stack limit now needs to be set to each thread thich is used with V8.
+Fixed the stack limits setting API to work correctly with threads. The
+stack limit now needs to be set to each thread thich is used with V8.
 
-Remove the high-priority flag from IdleNotification()
+Removed the high-priority flag from IdleNotification()
 
 Ensure V8 is initialized before locking and unlocking threads.
@@ -839,7 +881,7 @@
 Implemented missing pieces of debugger infrastructure on ARM. The
 debugger is now fully functional on ARM.
 
-Make 'hidden' the default visibility for gcc.
+Made 'hidden' the default visibility for gcc.
 
 2009-09-09: Version 1.3.10
@@ -894,9 +936,9 @@
 2009-08-21: Version 1.3.6
 
-Add support for forceful termination of JavaScript execution.
+Added support for forceful termination of JavaScript execution.
 
-Add low memory notification to the API. The embedding host can signal
-a low memory situation to V8.
+Added low memory notification to the API. The embedding host can signal
+a low memory situation to V8.
 
 Changed the handling of global handles (persistent handles in the API
@@ -910,9 +952,9 @@
 2009-08-19: Version 1.3.5
 
-Optimize initialization of some arrays in the builtins.
+Optimized initialization of some arrays in the builtins.
 
-Fix mac-nm script to support filenames with spaces.
+Fixed mac-nm script to support filenames with spaces.
 
 Support for using the V8 profiler when V8 is embedded in a Windows DLL.
@@ -925,7 +967,7 @@
 Added API for getting object mirrors.
 
-Make sure that SSE3 instructions are used whenever possible even when
-running off a snapshot generated without using SSE3 instructions.
+Made sure that SSE3 instructions are used whenever possible even when
+running off a snapshot generated without using SSE3 instructions.
 
 Tweaked the handling of the initial size and growth policy of the heap.
@@ -947,20 +989,20 @@
 2009-08-12: Version 1.3.3
 
-Fix issue 417: incorrect %t placeholder expansion.
+Fixed issue 417: incorrect %t placeholder expansion.
 
-Add .gitignore file similar to Chromium's one.
+Added .gitignore file similar to Chromium's one.
 
-Fix SConstruct file to build with new logging code for Android.
+Fixed SConstruct file to build with new logging code for Android.
 
 API: added function to find instance of template in prototype
 chain. Inlined Object::IsInstanceOf.
 
 Land change to notify valgrind when we modify code on x86.
 
-Add api call to determine whether a string can be externalized.
+Added api call to determine whether a string can be externalized.
 
-Add a write() command to d8.
+Added a write() command to d8.
 
 2009-08-05: Version 1.3.2
@@ -1243,7 +1285,7 @@
 Added EcmaScript 5 JSON object.
 
-Fix bug in preemption support on ARM.
+Fixed bug in preemption support on ARM.
 
 2009-04-23: Version 1.2.0
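
A note on the ChangeLog above: the 2.4.7 getter fix concerns property names
such as "-1", which look numeric but are ordinary string-keyed properties. A
minimal sketch of that kind of lookup (not the project's actual regression
test; print() is the d8 shell's output function):

// Hypothetical repro sketch for the class of bug the 2.4.7 entry
// describes: a getter registered under a negative numeric property name.
// "-1" is not a valid array index, so it must behave as a named property.
var obj = {};
obj.__defineGetter__("-1", function() { return "via getter"; });

print(obj["-1"]);  // expected: "via getter"
print(obj[-1]);    // same lookup: the key -1 coerces to the string "-1"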

deps/v8/benchmarks/README.txt (4)

@@ -70,7 +70,9 @@ Removed dead code from the RayTrace benchmark and fixed a couple of
 typos in the DeltaBlue implementation. Changed the Splay benchmark to
 avoid converting the same numeric key to a string over and over again
 and to avoid inserting and removing the same element repeatedly thus
-increasing pressure on the memory subsystem.
+increasing pressure on the memory subsystem. Changed the RegExp
+benchmark to exercise the regular expression engine on different
+input strings.
 
 Furthermore, the benchmark runner was changed to run the benchmarks
 for at least a few times to stabilize the reported numbers on slower
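
The input scrambling the README describes is implemented by the new
computeInputVariants() in the regexp.js diff below. As a simplified,
self-contained sketch of the idea (makeVariants is a hypothetical name; the
sample string and regexp are taken from the benchmark data):

// Derive n variants of a string by replacing one randomly chosen
// character, so repeated calls exercise the regexp engine on different
// (but similar) inputs instead of re-matching one literal string.
function makeVariants(str, n) {
  var variants = [str];
  for (var i = 1; i < n; i++) {
    var pos = Math.floor(Math.random() * str.length);
    var chr = String.fromCharCode(
        (str.charCodeAt(pos) + Math.floor(Math.random() * 128)) % 128);
    variants[i] = str.substring(0, pos) + chr + str.substring(pos + 1);
  }
  return variants;
}

// Each iteration now sees a fresh input.
var inputs = makeVariants('uggc://jjj.snprobbx.pbz/ybtva.cuc', 1844);
for (var i = 0; i < inputs.length; i++) {
  /(((\w+):\/\/)([^\/:]*)(:(\d+))?)?([^#?]*)(\?([^#]*))?(#(.*))?/.exec(inputs[i]);
}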

deps/v8/benchmarks/regexp.js (562)

@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,21 +25,51 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Automatically generated on 2009-01-30.
+// Automatically generated on 2009-01-30. Manually updated on 2010-09-17.
 
 // This benchmark is generated by loading 50 of the most popular pages
 // on the web and logging all regexp operations performed. Each
 // operation is given a weight that is calculated from an estimate of
 // the popularity of the pages where it occurs and the number of times
-// it is executed while loading each page. Finally the literal
+// it is executed while loading each page. Furthermore the literal
 // letters in the data are encoded using ROT13 in a way that does not
-// affect how the regexps match their input.
+// affect how the regexps match their input. Finally the strings are
+// scrambled to exercise the regexp engine on different input strings.
 
-var RegRxp = new BenchmarkSuite('RegExp', 910985, [
-  new Benchmark("RegExp", runRegExpBenchmark)
+var RegExp = new BenchmarkSuite('RegExp', 910985, [
+  new Benchmark("RegExp", RegExpRun, RegExpSetup, RegExpTearDown)
 ]);
 
-function runRegExpBenchmark() {
+var regExpBenchmark = null;
+
+function RegExpSetup() {
+  regExpBenchmark = new RegExpBenchmark();
+  RegExpRun(); // run once to get system initialized
+}
+
+function RegExpRun() {
+  regExpBenchmark.run();
+}
+
+function RegExpTearDown() {
+  regExpBenchmark = null;
+}
+
+// Returns an array of n different variants of the input string str.
+// The variants are computed by randomly rotating one random
+// character.
+function computeInputVariants(str, n) {
+  var variants = [ str ];
+  for (var i = 1; i < n; i++) {
+    var pos = Math.floor(Math.random() * str.length);
+    var chr = String.fromCharCode((str.charCodeAt(pos) + Math.floor(Math.random() * 128)) % 128);
+    variants[i] = str.substring(0, pos) + chr + str.substring(pos + 1, str.length);
+  }
+  return variants;
+}
+
+function RegExpBenchmark() {
   var re0 = /^ba/;
   var re1 = /(((\w+):\/\/)([^\/:]*)(:(\d+))?)?([^#?]*)(\?([^#]*))?(#(.*))?/;
   var re2 = /^\s*|\s*$/g;
@@ -59,77 +89,105 @@ function runRegExpBenchmark() {
   var re14 = /\s+/g;
   var re15 = /^\s*(\S*(\s+\S+)*)\s*$/;
   var re16 = /(-[a-z])/i;
+  var s0 = computeInputVariants('pyvpx', 6511);
+  var s1 = computeInputVariants('uggc://jjj.snprobbx.pbz/ybtva.cuc', 1844);
+  var s2 = computeInputVariants('QBZPbageby_cynprubyqre', 739);
+  var s3 = computeInputVariants('uggc://jjj.snprobbx.pbz/', 598);
+  var s4 = computeInputVariants('uggc://jjj.snprobbx.pbz/fepu.cuc', 454);
+  var s5 = computeInputVariants('qqqq, ZZZ q, llll', 352);
+  var s6 = computeInputVariants('vachggrkg QBZPbageby_cynprubyqre', 312);
+  var s7 = computeInputVariants('/ZlFcnprUbzrcntr/Vaqrk-FvgrUbzr,10000000', 282);
+  var s8 = computeInputVariants('vachggrkg', 177);
+  var s9 = computeInputVariants('528.9', 170);
+  var s10 = computeInputVariants('528', 170);
+  var s11 = computeInputVariants('VCPhygher=ra-HF', 156);
+  var s12 = computeInputVariants('CersreerqPhygher=ra-HF', 156);
+  var s13 = computeInputVariants('xrlcerff', 144);
+  var s14 = computeInputVariants('521', 139);
+  var s15 = computeInputVariants(str0, 139);
+  var s16 = computeInputVariants('qvi .so_zrah', 137);
+  var s17 = computeInputVariants('qvi.so_zrah', 137);
+  var s18 = computeInputVariants('uvqqra_ryrz', 117);
+  var s19 = computeInputVariants('sevraqfgre_naba=nvq%3Qn6ss9p85n868ro9s059pn854735956o3%26ers%3Q%26df%3Q%26vpgl%3QHF', 95);
+  var s20 = computeInputVariants('uggc://ubzr.zlfcnpr.pbz/vaqrk.psz', 93);
+  var s21 = computeInputVariants(str1, 92);
+  var s22 = computeInputVariants('svefg', 85);
+  var s23 = computeInputVariants('uggc://cebsvyr.zlfcnpr.pbz/vaqrk.psz', 85);
+  var s24 = computeInputVariants('ynfg', 85);
+  var s25 = computeInputVariants('qvfcynl', 85);
 
   function runBlock0() {
     for (var i = 0; i < 6511; i++) {
-      re0.exec('pyvpx');
+      re0.exec(s0[i]);
     }
     for (var i = 0; i < 1844; i++) {
-      re1.exec('uggc://jjj.snprobbx.pbz/ybtva.cuc');
+      re1.exec(s1[i]);
     }
     for (var i = 0; i < 739; i++) {
-      'QBZPbageby_cynprubyqre'.replace(re2, '');
+      s2[i].replace(re2, '');
     }
     for (var i = 0; i < 598; i++) {
-      re1.exec('uggc://jjj.snprobbx.pbz/');
+      re1.exec(s3[i]);
     }
     for (var i = 0; i < 454; i++) {
-      re1.exec('uggc://jjj.snprobbx.pbz/fepu.cuc');
+      re1.exec(s4[i]);
     }
     for (var i = 0; i < 352; i++) {
-      /qqqq|qqq|qq|q|ZZZZ|ZZZ|ZZ|Z|llll|ll|l|uu|u|UU|U|zz|z|ff|f|gg|g|sss|ss|s|mmm|mm|m/g.exec('qqqq, ZZZ q, llll');
+      /qqqq|qqq|qq|q|ZZZZ|ZZZ|ZZ|Z|llll|ll|l|uu|u|UU|U|zz|z|ff|f|gg|g|sss|ss|s|mmm|mm|m/g.exec(s5[i]);
     }
     for (var i = 0; i < 312; i++) {
-      re3.exec('vachggrkg QBZPbageby_cynprubyqre');
+      re3.exec(s6[i]);
     }
     for (var i = 0; i < 282; i++) {
-      re4.exec('/ZlFcnprUbzrcntr/Vaqrk-FvgrUbzr,10000000');
+      re4.exec(s7[i]);
     }
     for (var i = 0; i < 177; i++) {
-      'vachggrkg'.replace(re5, '');
+      s8[i].replace(re5, '');
     }
     for (var i = 0; i < 170; i++) {
-      '528.9'.replace(re6, '');
-      re7.exec('528');
+      s9[i].replace(re6, '');
+      re7.exec(s10[i]);
     }
     for (var i = 0; i < 156; i++) {
-      re8.exec('VCPhygher=ra-HF');
-      re8.exec('CersreerqPhygher=ra-HF');
+      re8.exec(s11[i]);
+      re8.exec(s12[i]);
     }
     for (var i = 0; i < 144; i++) {
-      re0.exec('xrlcerff');
+      re0.exec(s13[i]);
     }
     for (var i = 0; i < 139; i++) {
-      '521'.replace(re6, '');
-      re7.exec('521');
+      s14[i].replace(re6, '');
+      re7.exec(s14[i]);
       re9.exec('');
-      /JroXvg\/(\S+)/.exec(str0);
+      /JroXvg\/(\S+)/.exec(s15[i]);
     }
     for (var i = 0; i < 137; i++) {
-      'qvi .so_zrah'.replace(re10, '');
-      'qvi .so_zrah'.replace(/\[/g, '');
-      'qvi.so_zrah'.replace(re11, '');
+      s16[i].replace(re10, '');
+      s16[i].replace(/\[/g, '');
+      s17[i].replace(re11, '');
     }
     for (var i = 0; i < 117; i++) {
-      'uvqqra_ryrz'.replace(re2, '');
+      s18[i].replace(re2, '');
     }
     for (var i = 0; i < 95; i++) {
-      /(?:^|;)\s*sevraqfgre_ynat=([^;]*)/.exec('sevraqfgre_naba=nvq%3Qn6ss9p85n868ro9s059pn854735956o3%26ers%3Q%26df%3Q%26vpgl%3QHF');
+      /(?:^|;)\s*sevraqfgre_ynat=([^;]*)/.exec(s19[i]);
     }
     for (var i = 0; i < 93; i++) {
-      'uggc://ubzr.zlfcnpr.pbz/vaqrk.psz'.replace(re12, '');
-      re13.exec('uggc://ubzr.zlfcnpr.pbz/vaqrk.psz');
+      s20[i].replace(re12, '');
+      re13.exec(s20[i]);
     }
     for (var i = 0; i < 92; i++) {
-      str1.replace(/([a-zA-Z]|\s)+/, '');
+      s21[i].replace(/([a-zA-Z]|\s)+/, '');
     }
     for (var i = 0; i < 85; i++) {
-      'svefg'.replace(re14, '');
-      'svefg'.replace(re15, '');
-      'uggc://cebsvyr.zlfcnpr.pbz/vaqrk.psz'.replace(re12, '');
-      'ynfg'.replace(re14, '');
-      'ynfg'.replace(re15, '');
-      re16.exec('qvfcynl');
-      re13.exec('uggc://cebsvyr.zlfcnpr.pbz/vaqrk.psz');
+      s22[i].replace(re14, '');
+      s22[i].replace(re15, '');
+      s23[i].replace(re12, '');
+      s24[i].replace(re14, '');
+      s24[i].replace(re15, '');
+      re16.exec(s25[i]);
+      re13.exec(s23[i]);
     }
   }
   var re17 = /(^|[^\\])\"\\\/Qngr\((-?[0-9]+)\)\\\/\"/g;
@@ -145,64 +203,98 @@ function runRegExpBenchmark() {
   var str7 = ';;jvaqbj.IjPurpxZbhfrCbfvgvbaNQ_VQ=shapgvba(r){vs(!r)ine r=jvaqbj.rirag;ine c=-1;vs(d1)c=d1.EbyybssCnary;ine bo=IjTrgBow("IjCnayNQ_VQ_"+c);vs(bo&&bo.fglyr.ivfvovyvgl=="ivfvoyr"){ine fns=IjFns?8:0;ine pheK=r.pyvragK+IjBOFpe("U")+fns,pheL=r.pyvragL+IjBOFpe("I")+fns;ine y=IjBOEC(NQ_VQ,bo,"Y"),g=IjBOEC(NQ_VQ,bo,"G");ine e=y+d1.Cnaryf[c].Jvqgu,o=g+d1.Cnaryf[c].Urvtug;vs((pheK<y)||(pheK>e)||(pheL<g)||(pheL>o)){vs(jvaqbj.IjBaEbyybssNQ_VQ)IjBaEbyybssNQ_VQ(c);ryfr IjPybfrNq(NQ_VQ,c,gehr,"");}ryfr erghea;}IjPnapryZbhfrYvfgrareNQ_VQ();};;jvaqbj.IjFrgEbyybssCnaryNQ_VQ=shapgvba(c){ine z="zbhfrzbir",q=qbphzrag,s=IjPurpxZbhfrCbfvgvbaNQ_VQ;c=IjTc(NQ_VQ,c);vs(d1&&d1.EbyybssCnary>-1)IjPnapryZbhfrYvfgrareNQ_VQ();vs(d1)d1.EbyybssCnary=c;gel{vs(q.nqqRiragYvfgrare)q.nqqRiragYvfgrare(z,s,snyfr);ryfr vs(q.nggnpuRirag)q.nggnpuRirag("ba"+z,s);}pngpu(r){}};;jvaqbj.IjPnapryZbhfrYvfgrareNQ_VQ=shapgvba(){ine z="zbhfrzbir",q=qbphzrag,s=IjPurpxZbhfrCbfvgvbaNQ_VQ;vs(d1)d1.EbyybssCnary=-1;gel{vs(q.erzbirRiragYvfgrare)q.erzbirRiragYvfgrare(z,s,snyfr);ryfr vs(q.qrgnpuRirag)q.qrgnpuRirag("ba"+z,s);}pngpu(r){}};;d1.IjTc=d2(n,c){ine nq=d1;vs(vfAnA(c)){sbe(ine v=0;v<nq.Cnaryf.yratgu;v++)vs(nq.Cnaryf[v].Anzr==c)erghea v;erghea 0;}erghea c;};;d1.IjTpy=d2(n,c,p){ine cn=d1.Cnaryf[IjTc(n,c)];vs(!cn)erghea 0;vs(vfAnA(p)){sbe(ine v=0;v<cn.Pyvpxguehf.yratgu;v++)vs(cn.Pyvpxguehf[v].Anzr==p)erghea v;erghea 0;}erghea p;};;d1.IjGenpr=d2(n,f){gel{vs(jvaqbj["Ij"+"QtQ"])jvaqbj["Ij"+"QtQ"](n,1,f);}pngpu(r){}};;d1.IjYvzvg1=d2(n,f){ine nq=d1,vh=f.fcyvg("/");sbe(ine v=0,p=0;v<vh.yratgu;v++){vs(vh[v].yratgu>0){vs(nq.FzV.yratgu>0)nq.FzV+="/";nq.FzV+=vh[v];nq.FtZ[nq.FtZ.yratgu]=snyfr;}}};;d1.IjYvzvg0=d2(n,f){ine nq=d1,vh=f.fcyvg("/");sbe(ine v=0;v<vh.yratgu;v++){vs(vh[v].yratgu>0){vs(nq.OvC.yratgu>0)nq.OvC+="/";nq.OvC+=vh[v];}}};;d1.IjRVST=d2(n,c){jvaqbj["IjCnayNQ_VQ_"+c+"_Bow"]=IjTrgBow("IjCnayNQ_VQ_"+c+"_Bow");vs(jvaqbj["IjCnayNQ_VQ_"+c+"_Bow"]==ahyy)frgGvzrbhg("IjRVST(NQ_VQ,"+c+")",d1.rvsg);};;d1.IjNavzSHC=d2(n,c){ine nq=d1;vs(c>nq.Cnaryf.yratgu)erghea;ine cna=nq.Cnaryf[c],nn=gehr,on=gehr,yn=gehr,en=gehr,cn=nq.Cnaryf[0],sf=nq.ShF,j=cn.Jvqgu,u=cn.Urvtug;vs(j=="100%"){j=sf;en=snyfr;yn=snyfr;}vs(u=="100%"){u=sf;nn=snyfr;on=snyfr;}vs(cn.YnY=="Y")yn=snyfr;vs(cn.YnY=="E")en=snyfr;vs(cn.GnY=="G")nn=snyfr;vs(cn.GnY=="O")on=snyfr;ine k=0,l=0;fjvgpu(nq.NshP%8){pnfr 0:oernx;pnfr 1:vs(nn)l=-sf;oernx;pnfr 2:k=j-sf;oernx;pnfr 3:vs(en)k=j;oernx;pnfr 4:k=j-sf;l=u-sf;oernx;pnfr 5:k=j-sf;vs(on)l=u;oernx;pnfr 6:l=u-sf;oernx;pnfr 7:vs(yn)k=-sf;l=u-sf;oernx;}vs(nq.NshP++ <nq.NshG)frgGvzrbhg(("IjNavzSHC(NQ_VQ,"+c+")"),nq.NshC);ryfr{k=-1000;l=k;}cna.YrsgBssfrg=k;cna.GbcBssfrg=l;IjNhErcb(n,c);};;d1.IjTrgErnyCbfvgvba=d2(n,b,j){erghea IjBOEC.nccyl(guvf,nethzragf);};;d1.IjPnapryGvzrbhg=d2(n,c){c=IjTc(n,c);ine cay=d1.Cnaryf[c];vs(cay&&cay.UgU!=""){pyrneGvzrbhg(cay.UgU);}};;d1.IjPnapryNyyGvzrbhgf=d2(n){vs(d1.YbpxGvzrbhgPunatrf)erghea;sbe(ine c=0;c<d1.bac;c++)IjPnapryGvzrbhg(n,c);};;d1.IjFgnegGvzrbhg=d2(n,c,bG){c=IjTc(n,c);ine cay=d1.Cnaryf[c];vs(cay&&((cay.UvqrGvzrbhgInyhr>0)||(nethzragf.yratgu==3&&bG>0))){pyrneGvzrbhg(cay.UgU);cay.UgU=frgGvzrbhg(cay.UvqrNpgvba,(nethzragf.yratgu==3?bG:cay.UvqrGvzrbhgInyhr));}};;d1.IjErfrgGvzrbhg=d2(n,c,bG){c=IjTc(n,c);IjPnapryGvzrbhg(n,c);riny("IjFgnegGvzrbhg(NQ_VQ,c"+(nethzragf.yratgu==3?",bG":"")+")");};;d1.IjErfrgNyyGvzrbhgf=d2(n){sbe(ine c=0;c<d1.bac;c++)IjErfrgGvzrbhg(n,c);};;d1.IjQrgnpure=d2(n,rig,sap){gel{vs(IjQVR5)riny("jvaqbj.qrgnpuRirag(\'ba"+rig+"\',"+sap+"NQ_VQ)");ryfr vs(!IjQVRZnp)riny("jvaqbj.erzbirRiragYvfgrare(\'"+rig+"\',"+sap+"NQ_VQ,snyfr)");}pngpu(r){}};;d1.IjPyrnaHc=d2(n){IjCvat(n,"G");ine nq=d1;sbe(ine v=0;v<nq.Cnaryf.yratgu;v++){IjUvqrCnary(n,v,gehr);}gel{IjTrgBow(nq.gya).vaareUGZY="";}pngpu(r){}vs(nq.gya!=nq.gya2)gel{IjTrgBow(nq.gya2).vaareUGZY="";}pngpu(r){}gel{d1=ahyy;}pngpu(r){}gel{IjQrgnpure(n,"haybnq","IjHayNQ_VQ");}pngpu(r){}gel{jvaqbj.IjHayNQ_VQ=ahyy;}pngpu(r){}gel{IjQrgnpure(n,"fpebyy","IjFeNQ_VQ");}pngpu(r){}gel{jvaqbj.IjFeNQ_VQ=ahyy;}pngpu(r){}gel{IjQrgnpure(n,"erfvmr","IjEmNQ_VQ");}pngpu(r){}gel{jvaqbj.IjEmNQ_VQ=ahyy;}pngpu(r){}gel{IjQrgnpure(n';
   var str8 = ';;jvaqbj.IjPurpxZbhfrCbfvgvbaNQ_VQ=shapgvba(r){vs(!r)ine r=jvaqbj.rirag;ine c=-1;vs(jvaqbj.IjNqNQ_VQ)c=jvaqbj.IjNqNQ_VQ.EbyybssCnary;ine bo=IjTrgBow("IjCnayNQ_VQ_"+c);vs(bo&&bo.fglyr.ivfvovyvgl=="ivfvoyr"){ine fns=IjFns?8:0;ine pheK=r.pyvragK+IjBOFpe("U")+fns,pheL=r.pyvragL+IjBOFpe("I")+fns;ine y=IjBOEC(NQ_VQ,bo,"Y"),g=IjBOEC(NQ_VQ,bo,"G");ine e=y+jvaqbj.IjNqNQ_VQ.Cnaryf[c].Jvqgu,o=g+jvaqbj.IjNqNQ_VQ.Cnaryf[c].Urvtug;vs((pheK<y)||(pheK>e)||(pheL<g)||(pheL>o)){vs(jvaqbj.IjBaEbyybssNQ_VQ)IjBaEbyybssNQ_VQ(c);ryfr IjPybfrNq(NQ_VQ,c,gehr,"");}ryfr erghea;}IjPnapryZbhfrYvfgrareNQ_VQ();};;jvaqbj.IjFrgEbyybssCnaryNQ_VQ=shapgvba(c){ine z="zbhfrzbir",q=qbphzrag,s=IjPurpxZbhfrCbfvgvbaNQ_VQ;c=IjTc(NQ_VQ,c);vs(jvaqbj.IjNqNQ_VQ&&jvaqbj.IjNqNQ_VQ.EbyybssCnary>-1)IjPnapryZbhfrYvfgrareNQ_VQ();vs(jvaqbj.IjNqNQ_VQ)jvaqbj.IjNqNQ_VQ.EbyybssCnary=c;gel{vs(q.nqqRiragYvfgrare)q.nqqRiragYvfgrare(z,s,snyfr);ryfr vs(q.nggnpuRirag)q.nggnpuRirag("ba"+z,s);}pngpu(r){}};;jvaqbj.IjPnapryZbhfrYvfgrareNQ_VQ=shapgvba(){ine z="zbhfrzbir",q=qbphzrag,s=IjPurpxZbhfrCbfvgvbaNQ_VQ;vs(jvaqbj.IjNqNQ_VQ)jvaqbj.IjNqNQ_VQ.EbyybssCnary=-1;gel{vs(q.erzbirRiragYvfgrare)q.erzbirRiragYvfgrare(z,s,snyfr);ryfr vs(q.qrgnpuRirag)q.qrgnpuRirag("ba"+z,s);}pngpu(r){}};;jvaqbj.IjNqNQ_VQ.IjTc=shapgvba(n,c){ine nq=jvaqbj.IjNqNQ_VQ;vs(vfAnA(c)){sbe(ine v=0;v<nq.Cnaryf.yratgu;v++)vs(nq.Cnaryf[v].Anzr==c)erghea v;erghea 0;}erghea c;};;jvaqbj.IjNqNQ_VQ.IjTpy=shapgvba(n,c,p){ine cn=jvaqbj.IjNqNQ_VQ.Cnaryf[IjTc(n,c)];vs(!cn)erghea 0;vs(vfAnA(p)){sbe(ine v=0;v<cn.Pyvpxguehf.yratgu;v++)vs(cn.Pyvpxguehf[v].Anzr==p)erghea v;erghea 0;}erghea p;};;jvaqbj.IjNqNQ_VQ.IjGenpr=shapgvba(n,f){gel{vs(jvaqbj["Ij"+"QtQ"])jvaqbj["Ij"+"QtQ"](n,1,f);}pngpu(r){}};;jvaqbj.IjNqNQ_VQ.IjYvzvg1=shapgvba(n,f){ine nq=jvaqbj.IjNqNQ_VQ,vh=f.fcyvg("/");sbe(ine v=0,p=0;v<vh.yratgu;v++){vs(vh[v].yratgu>0){vs(nq.FzV.yratgu>0)nq.FzV+="/";nq.FzV+=vh[v];nq.FtZ[nq.FtZ.yratgu]=snyfr;}}};;jvaqbj.IjNqNQ_VQ.IjYvzvg0=shapgvba(n,f){ine nq=jvaqbj.IjNqNQ_VQ,vh=f.fcyvg("/");sbe(ine v=0;v<vh.yratgu;v++){vs(vh[v].yratgu>0){vs(nq.OvC.yratgu>0)nq.OvC+="/";nq.OvC+=vh[v];}}};;jvaqbj.IjNqNQ_VQ.IjRVST=shapgvba(n,c){jvaqbj["IjCnayNQ_VQ_"+c+"_Bow"]=IjTrgBow("IjCnayNQ_VQ_"+c+"_Bow");vs(jvaqbj["IjCnayNQ_VQ_"+c+"_Bow"]==ahyy)frgGvzrbhg("IjRVST(NQ_VQ,"+c+")",jvaqbj.IjNqNQ_VQ.rvsg);};;jvaqbj.IjNqNQ_VQ.IjNavzSHC=shapgvba(n,c){ine nq=jvaqbj.IjNqNQ_VQ;vs(c>nq.Cnaryf.yratgu)erghea;ine cna=nq.Cnaryf[c],nn=gehr,on=gehr,yn=gehr,en=gehr,cn=nq.Cnaryf[0],sf=nq.ShF,j=cn.Jvqgu,u=cn.Urvtug;vs(j=="100%"){j=sf;en=snyfr;yn=snyfr;}vs(u=="100%"){u=sf;nn=snyfr;on=snyfr;}vs(cn.YnY=="Y")yn=snyfr;vs(cn.YnY=="E")en=snyfr;vs(cn.GnY=="G")nn=snyfr;vs(cn.GnY=="O")on=snyfr;ine k=0,l=0;fjvgpu(nq.NshP%8){pnfr 0:oernx;pnfr 1:vs(nn)l=-sf;oernx;pnfr 2:k=j-sf;oernx;pnfr 3:vs(en)k=j;oernx;pnfr 4:k=j-sf;l=u-sf;oernx;pnfr 5:k=j-sf;vs(on)l=u;oernx;pnfr 6:l=u-sf;oernx;pnfr 7:vs(yn)k=-sf;l=u-sf;oernx;}vs(nq.NshP++ <nq.NshG)frgGvzrbhg(("IjNavzSHC(NQ_VQ,"+c+")"),nq.NshC);ryfr{k=-1000;l=k;}cna.YrsgBssfrg=k;cna.GbcBssfrg=l;IjNhErcb(n,c);};;jvaqbj.IjNqNQ_VQ.IjTrgErnyCbfvgvba=shapgvba(n,b,j){erghea IjBOEC.nccyl(guvf,nethzragf);};;jvaqbj.IjNqNQ_VQ.IjPnapryGvzrbhg=shapgvba(n,c){c=IjTc(n,c);ine cay=jvaqbj.IjNqNQ_VQ.Cnaryf[c];vs(cay&&cay.UgU!=""){pyrneGvzrbhg(cay.UgU);}};;jvaqbj.IjNqNQ_VQ.IjPnapryNyyGvzrbhgf=shapgvba(n){vs(jvaqbj.IjNqNQ_VQ.YbpxGvzrbhgPunatrf)erghea;sbe(ine c=0;c<jvaqbj.IjNqNQ_VQ.bac;c++)IjPnapryGvzrbhg(n,c);};;jvaqbj.IjNqNQ_VQ.IjFgnegGvzrbhg=shapgvba(n,c,bG){c=IjTc(n,c);ine cay=jvaqbj.IjNqNQ_VQ.Cnaryf[c];vs(cay&&((cay.UvqrGvzrbhgInyhr>0)||(nethzragf.yratgu==3&&bG>0))){pyrneGvzrbhg(cay.UgU);cay.UgU=frgGvzrbhg(cay.UvqrNpgvba,(nethzragf.yratgu==3?bG:cay.UvqrGvzrbhgInyhr));}};;jvaqbj.IjNqNQ_VQ.IjErfrgGvzrbhg=shapgvba(n,c,bG){c=IjTc(n,c);IjPnapryGvzrbhg(n,c);riny("IjFgnegGvzrbhg(NQ_VQ,c"+(nethzragf.yratgu==3?",bG":"")+")");};;jvaqbj.IjNqNQ_VQ.IjErfrgNyyGvzrbhgf=shapgvba(n){sbe(ine c=0;c<jvaqbj.IjNqNQ_VQ.bac;c++)IjErfrgGvzrbhg(n,c);};;jvaqbj.IjNqNQ_VQ.IjQrgnpure=shapgvba(n,rig,sap){gel{vs(IjQVR5)riny("jvaqbj.qrgnpuRirag(\'ba"+rig+"\',"+sap+"NQ_VQ)");ryfr vs(!IjQVRZnp)riny("jvaqbj.erzbir';
   var str9 = ';;jvaqbj.IjPurpxZbhfrCbfvgvbaNQ_VQ=shapgvba(r){vs(!r)ine r=jvaqbj.rirag;ine c=-1;vs(jvaqbj.IjNqNQ_VQ)c=jvaqbj.IjNqNQ_VQ.EbyybssCnary;ine bo=IjTrgBow("IjCnayNQ_VQ_"+c);vs(bo&&bo.fglyr.ivfvovyvgl=="ivfvoyr"){ine fns=IjFns?8:0;ine pheK=r.pyvragK+IjBOFpe("U")+fns,pheL=r.pyvragL+IjBOFpe("I")+fns;ine y=IjBOEC(NQ_VQ,bo,"Y"),g=IjBOEC(NQ_VQ,bo,"G");ine e=y+jvaqbj.IjNqNQ_VQ.Cnaryf[c].Jvqgu,o=g+jvaqbj.IjNqNQ_VQ.Cnaryf[c].Urvtug;vs((pheK<y)||(pheK>e)||(pheL<g)||(pheL>o)){vs(jvaqbj.IjBaEbyybssNQ_VQ)IjBaEbyybssNQ_VQ(c);ryfr IjPybfrNq(NQ_VQ,c,gehr,"");}ryfr erghea;}IjPnapryZbhfrYvfgrareNQ_VQ();};;jvaqbj.IjFrgEbyybssCnaryNQ_VQ=shapgvba(c){ine z="zbhfrzbir",q=qbphzrag,s=IjPurpxZbhfrCbfvgvbaNQ_VQ;c=IjTc(NQ_VQ,c);vs(jvaqbj.IjNqNQ_VQ&&jvaqbj.IjNqNQ_VQ.EbyybssCnary>-1)IjPnapryZbhfrYvfgrareNQ_VQ();vs(jvaqbj.IjNqNQ_VQ)jvaqbj.IjNqNQ_VQ.EbyybssCnary=c;gel{vs(q.nqqRiragYvfgrare)q.nqqRiragYvfgrare(z,s,snyfr);ryfr vs(q.nggnpuRirag)q.nggnpuRirag("ba"+z,s);}pngpu(r){}};;jvaqbj.IjPnapryZbhfrYvfgrareNQ_VQ=shapgvba(){ine z="zbhfrzbir",q=qbphzrag,s=IjPurpxZbhfrCbfvgvbaNQ_VQ;vs(jvaqbj.IjNqNQ_VQ)jvaqbj.IjNqNQ_VQ.EbyybssCnary=-1;gel{vs(q.erzbirRiragYvfgrare)q.erzbirRiragYvfgrare(z,s,snyfr);ryfr vs(q.qrgnpuRirag)q.qrgnpuRirag("ba"+z,s);}pngpu(r){}};;jvaqbj.IjNqNQ_VQ.IjTc=d2(n,c){ine nq=jvaqbj.IjNqNQ_VQ;vs(vfAnA(c)){sbe(ine v=0;v<nq.Cnaryf.yratgu;v++)vs(nq.Cnaryf[v].Anzr==c)erghea v;erghea 0;}erghea c;};;jvaqbj.IjNqNQ_VQ.IjTpy=d2(n,c,p){ine cn=jvaqbj.IjNqNQ_VQ.Cnaryf[IjTc(n,c)];vs(!cn)erghea 0;vs(vfAnA(p)){sbe(ine v=0;v<cn.Pyvpxguehf.yratgu;v++)vs(cn.Pyvpxguehf[v].Anzr==p)erghea v;erghea 0;}erghea p;};;jvaqbj.IjNqNQ_VQ.IjGenpr=d2(n,f){gel{vs(jvaqbj["Ij"+"QtQ"])jvaqbj["Ij"+"QtQ"](n,1,f);}pngpu(r){}};;jvaqbj.IjNqNQ_VQ.IjYvzvg1=d2(n,f){ine nq=jvaqbj.IjNqNQ_VQ,vh=f.fcyvg("/");sbe(ine v=0,p=0;v<vh.yratgu;v++){vs(vh[v].yratgu>0){vs(nq.FzV.yratgu>0)nq.FzV+="/";nq.FzV+=vh[v];nq.FtZ[nq.FtZ.yratgu]=snyfr;}}};;jvaqbj.IjNqNQ_VQ.IjYvzvg0=d2(n,f){ine nq=jvaqbj.IjNqNQ_VQ,vh=f.fcyvg("/");sbe(ine v=0;v<vh.yratgu;v++){vs(vh[v].yratgu>0){vs(nq.OvC.yratgu>0)nq.OvC+="/";nq.OvC+=vh[v];}}};;jvaqbj.IjNqNQ_VQ.IjRVST=d2(n,c){jvaqbj["IjCnayNQ_VQ_"+c+"_Bow"]=IjTrgBow("IjCnayNQ_VQ_"+c+"_Bow");vs(jvaqbj["IjCnayNQ_VQ_"+c+"_Bow"]==ahyy)frgGvzrbhg("IjRVST(NQ_VQ,"+c+")",jvaqbj.IjNqNQ_VQ.rvsg);};;jvaqbj.IjNqNQ_VQ.IjNavzSHC=d2(n,c){ine nq=jvaqbj.IjNqNQ_VQ;vs(c>nq.Cnaryf.yratgu)erghea;ine cna=nq.Cnaryf[c],nn=gehr,on=gehr,yn=gehr,en=gehr,cn=nq.Cnaryf[0],sf=nq.ShF,j=cn.Jvqgu,u=cn.Urvtug;vs(j=="100%"){j=sf;en=snyfr;yn=snyfr;}vs(u=="100%"){u=sf;nn=snyfr;on=snyfr;}vs(cn.YnY=="Y")yn=snyfr;vs(cn.YnY=="E")en=snyfr;vs(cn.GnY=="G")nn=snyfr;vs(cn.GnY=="O")on=snyfr;ine k=0,l=0;fjvgpu(nq.NshP%8){pnfr 0:oernx;pnfr 1:vs(nn)l=-sf;oernx;pnfr 2:k=j-sf;oernx;pnfr 3:vs(en)k=j;oernx;pnfr 4:k=j-sf;l=u-sf;oernx;pnfr 5:k=j-sf;vs(on)l=u;oernx;pnfr 6:l=u-sf;oernx;pnfr 7:vs(yn)k=-sf;l=u-sf;oernx;}vs(nq.NshP++ <nq.NshG)frgGvzrbhg(("IjNavzSHC(NQ_VQ,"+c+")"),nq.NshC);ryfr{k=-1000;l=k;}cna.YrsgBssfrg=k;cna.GbcBssfrg=l;IjNhErcb(n,c);};;jvaqbj.IjNqNQ_VQ.IjTrgErnyCbfvgvba=d2(n,b,j){erghea IjBOEC.nccyl(guvf,nethzragf);};;jvaqbj.IjNqNQ_VQ.IjPnapryGvzrbhg=d2(n,c){c=IjTc(n,c);ine cay=jvaqbj.IjNqNQ_VQ.Cnaryf[c];vs(cay&&cay.UgU!=""){pyrneGvzrbhg(cay.UgU);}};;jvaqbj.IjNqNQ_VQ.IjPnapryNyyGvzrbhgf=d2(n){vs(jvaqbj.IjNqNQ_VQ.YbpxGvzrbhgPunatrf)erghea;sbe(ine c=0;c<jvaqbj.IjNqNQ_VQ.bac;c++)IjPnapryGvzrbhg(n,c);};;jvaqbj.IjNqNQ_VQ.IjFgnegGvzrbhg=d2(n,c,bG){c=IjTc(n,c);ine cay=jvaqbj.IjNqNQ_VQ.Cnaryf[c];vs(cay&&((cay.UvqrGvzrbhgInyhr>0)||(nethzragf.yratgu==3&&bG>0))){pyrneGvzrbhg(cay.UgU);cay.UgU=frgGvzrbhg(cay.UvqrNpgvba,(nethzragf.yratgu==3?bG:cay.UvqrGvzrbhgInyhr));}};;jvaqbj.IjNqNQ_VQ.IjErfrgGvzrbhg=d2(n,c,bG){c=IjTc(n,c);IjPnapryGvzrbhg(n,c);riny("IjFgnegGvzrbhg(NQ_VQ,c"+(nethzragf.yratgu==3?",bG":"")+")");};;jvaqbj.IjNqNQ_VQ.IjErfrgNyyGvzrbhgf=d2(n){sbe(ine c=0;c<jvaqbj.IjNqNQ_VQ.bac;c++)IjErfrgGvzrbhg(n,c);};;jvaqbj.IjNqNQ_VQ.IjQrgnpure=d2(n,rig,sap){gel{vs(IjQVR5)riny("jvaqbj.qrgnpuRirag(\'ba"+rig+"\',"+sap+"NQ_VQ)");ryfr vs(!IjQVRZnp)riny("jvaqbj.erzbirRiragYvfgrare(\'"+rig+"\',"+sap+"NQ_VQ,snyfr)");}pngpu(r){}};;jvaqbj.IjNqNQ_VQ.IjPyrna';
+  var s26 = computeInputVariants('VC=74.125.75.1', 81);
+  var s27 = computeInputVariants('9.0 e115', 78);
+  var s28 = computeInputVariants('k',78);
+  var s29 = computeInputVariants(str2, 81);
+  var s30 = computeInputVariants(str3, 81);
+  var s31 = computeInputVariants('144631658', 78);
+  var s32 = computeInputVariants('Pbhagel=IIZ%3Q', 78);
+  var s33 = computeInputVariants('Pbhagel=IIZ=', 78);
+  var s34 = computeInputVariants('CersreerqPhygherCraqvat=', 78);
+  var s35 = computeInputVariants(str4, 78);
+  var s36 = computeInputVariants(str5, 78);
+  var s37 = computeInputVariants('__hgzp=144631658', 78);
+  var s38 = computeInputVariants('gvzrMbar=-8', 78);
+  var s39 = computeInputVariants('gvzrMbar=0', 78);
+  // var s40 = computeInputVariants(s15[i], 78);
+  var s41 = computeInputVariants('vachggrkg QBZPbageby_cynprubyqre', 78);
+  var s42 = computeInputVariants('xrlqbja', 78);
+  var s43 = computeInputVariants('xrlhc', 78);
+  var s44 = computeInputVariants('uggc://zrffntvat.zlfcnpr.pbz/vaqrk.psz', 77);
+  var s45 = computeInputVariants('FrffvbaFgbentr=%7O%22GnoThvq%22%3N%7O%22thvq%22%3N1231367125017%7Q%7Q', 73);
+  var s46 = computeInputVariants(str6, 72);
+  var s47 = computeInputVariants('3.5.0.0', 70);
+  var s48 = computeInputVariants(str7, 70);
+  var s49 = computeInputVariants(str8, 70);
+  var s50 = computeInputVariants(str9, 70);
+  var s51 = computeInputVariants('NI%3Q1_CI%3Q1_PI%3Q1_EI%3Q1_HI%3Q1_HP%3Q1_IC%3Q0.0.0.0_IH%3Q0', 70);
+  var s52 = computeInputVariants('svz_zlfcnpr_ubzrcntr_abgybttrqva,svz_zlfcnpr_aba_HTP,svz_zlfcnpr_havgrq-fgngrf', 70);
+  var s53 = computeInputVariants('ybnqvat', 70);
+  var s54 = computeInputVariants('#', 68);
+  var s55 = computeInputVariants('ybnqrq', 68);
+  var s56 = computeInputVariants('pbybe', 49);
+  var s57 = computeInputVariants('uggc://sevraqf.zlfcnpr.pbz/vaqrk.psz', 44);
   function runBlock1() {
     for (var i = 0; i < 81; i++) {
-      re8.exec('VC=74.125.75.1');
+      re8.exec(s26[i]);
     }
     for (var i = 0; i < 78; i++) {
-      '9.0 e115'.replace(/(\s)+e/, '');
-      'k'.replace(/./, '');
-      str2.replace(re17, '');
-      str3.replace(re17, '');
-      re8.exec('144631658');
-      re8.exec('Pbhagel=IIZ%3Q');
-      re8.exec('Pbhagel=IIZ=');
-      re8.exec('CersreerqPhygherCraqvat=');
-      re8.exec(str4);
-      re8.exec(str5);
-      re8.exec('__hgzp=144631658');
-      re8.exec('gvzrMbar=-8');
-      re8.exec('gvzrMbar=0');
-      /Fnsnev\/(\d+\.\d+)/.exec(str0);
-      re3.exec('vachggrkg QBZPbageby_cynprubyqre');
-      re0.exec('xrlqbja');
-      re0.exec('xrlhc');
+      s27[i].replace(/(\s)+e/, '');
+      s28[i].replace(/./, '');
+      s29[i].replace(re17, '');
+      s30[i].replace(re17, '');
+      re8.exec(s31[i]);
+      re8.exec(s32[i]);
+      re8.exec(s33[i]);
+      re8.exec(s34[i]);
+      re8.exec(s35[i]);
+      re8.exec(s36[i]);
+      re8.exec(s37[i]);
+      re8.exec(s38[i]);
+      re8.exec(s39[i]);
+      /Fnsnev\/(\d+\.\d+)/.exec(s15[i]);
+      re3.exec(s41[i]);
+      re0.exec(s42[i]);
+      re0.exec(s43[i]);
     }
     for (var i = 0; i < 77; i++) {
-      'uggc://zrffntvat.zlfcnpr.pbz/vaqrk.psz'.replace(re12, '');
-      re13.exec('uggc://zrffntvat.zlfcnpr.pbz/vaqrk.psz');
+      s44[i].replace(re12, '');
+      re13.exec(s44[i]);
     }
     for (var i = 0; i < 73; i++) {
-      'FrffvbaFgbentr=%7O%22GnoThvq%22%3N%7O%22thvq%22%3N1231367125017%7Q%7Q'.replace(re18, '');
+      s45[i].replace(re18, '');
     }
     for (var i = 0; i < 72; i++) {
-      re1.exec(str6);
+      re1.exec(s46[i]);
     }
     for (var i = 0; i < 71; i++) {
       re19.exec('');
     }
     for (var i = 0; i < 70; i++) {
-      '3.5.0.0'.replace(re11, '');
-      str7.replace(/d1/g, '');
-      str8.replace(/NQ_VQ/g, '');
-      str9.replace(/d2/g, '');
-      'NI%3Q1_CI%3Q1_PI%3Q1_EI%3Q1_HI%3Q1_HP%3Q1_IC%3Q0.0.0.0_IH%3Q0'.replace(/_/g, '');
-      'svz_zlfcnpr_ubzrcntr_abgybttrqva,svz_zlfcnpr_aba_HTP,svz_zlfcnpr_havgrq-fgngrf'.split(re20);
-      re21.exec('ybnqvat');
+      s47[i].replace(re11, '');
+      s48[i].replace(/d1/g, '');
+      s49[i].replace(/NQ_VQ/g, '');
+      s50[i].replace(/d2/g, '');
+      s51[i].replace(/_/g, '');
+      s52[i].split(re20);
+      re21.exec(s53[i]);
     }
     for (var i = 0; i < 68; i++) {
-      re1.exec('#');
-      /(?:ZFVR.(\d+\.\d+))|(?:(?:Sversbk|TenaCnenqvfb|Vprjrnfry).(\d+\.\d+))|(?:Bcren.(\d+\.\d+))|(?:NccyrJroXvg.(\d+(?:\.\d+)?))/.exec(str0);
-      /(Znp BF K)|(Jvaqbjf;)/.exec(str0);
-      /Trpxb\/([0-9]+)/.exec(str0);
-      re21.exec('ybnqrq');
+      re1.exec(s54[i]);
+      /(?:ZFVR.(\d+\.\d+))|(?:(?:Sversbk|TenaCnenqvfb|Vprjrnfry).(\d+\.\d+))|(?:Bcren.(\d+\.\d+))|(?:NccyrJroXvg.(\d+(?:\.\d+)?))/.exec(s15[i]);
+      /(Znp BF K)|(Jvaqbjf;)/.exec(s15[i]);
+      /Trpxb\/([0-9]+)/.exec(s15[i]);
+      re21.exec(s55[i]);
     }
     for (var i = 0; i < 49; i++) {
-      re16.exec('pbybe');
+      re16.exec(s56[i]);
     }
     for (var i = 0; i < 44; i++) {
-      'uggc://sevraqf.zlfcnpr.pbz/vaqrk.psz'.replace(re12, '');
-      re13.exec('uggc://sevraqf.zlfcnpr.pbz/vaqrk.psz');
+      s57[i].replace(re12, '');
+      re13.exec(s57[i]);
     }
   }
var re22 = /\bso_zrah\b/; var re22 = /\bso_zrah\b/;
@@ -210,15 +302,26 @@ function runRegExpBenchmark() {
var re24 = /uggcf?:\/\/([^\/]+\.)?snprobbx\.pbz\//; var re24 = /uggcf?:\/\/([^\/]+\.)?snprobbx\.pbz\//;
var re25 = /"/g; var re25 = /"/g;
var re26 = /^([^?#]+)(?:\?([^#]*))?(#.*)?/; var re26 = /^([^?#]+)(?:\?([^#]*))?(#.*)?/;
var s57a = computeInputVariants('fryrpgrq', 40);
var s58 = computeInputVariants('vachggrkg uvqqra_ryrz', 40);
var s59 = computeInputVariants('vachggrkg ', 40);
var s60 = computeInputVariants('vachggrkg', 40);
var s61 = computeInputVariants('uggc://jjj.snprobbx.pbz/', 40);
var s62 = computeInputVariants('uggc://jjj.snprobbx.pbz/ybtva.cuc', 40);
var s63 = computeInputVariants('Funer guvf tnqtrg', 40);
var s64 = computeInputVariants('uggc://jjj.tbbtyr.pbz/vt/qverpgbel', 40);
var s65 = computeInputVariants('419', 40);
var s66 = computeInputVariants('gvzrfgnzc', 40);
function runBlock2() { function runBlock2() {
for (var i = 0; i < 40; i++) { for (var i = 0; i < 40; i++) {
'fryrpgrq'.replace(re14, ''); s57a[i].replace(re14, '');
'fryrpgrq'.replace(re15, ''); s57a[i].replace(re15, '');
} }
for (var i = 0; i < 39; i++) { for (var i = 0; i < 39; i++) {
'vachggrkg uvqqra_ryrz'.replace(/\buvqqra_ryrz\b/g, ''); s58[i].replace(/\buvqqra_ryrz\b/g, '');
re3.exec('vachggrkg '); re3.exec(s59[i]);
re3.exec('vachggrkg'); re3.exec(s60[i]);
re22.exec('HVYvaxOhggba'); re22.exec('HVYvaxOhggba');
re22.exec('HVYvaxOhggba_E'); re22.exec('HVYvaxOhggba_E');
re22.exec('HVYvaxOhggba_EJ'); re22.exec('HVYvaxOhggba_EJ');
@@ -246,28 +349,28 @@ function runRegExpBenchmark() {
re8.exec('s6r4579npn4rn2135s904r0s75pp1o5334p6s6pospo12696'); re8.exec('s6r4579npn4rn2135s904r0s75pp1o5334p6s6pospo12696');
} }
for (var i = 0; i < 32; i++) { for (var i = 0; i < 32; i++) {
/puebzr/i.exec(str0); /puebzr/i.exec(s15[i]);
} }
for (var i = 0; i < 31; i++) { for (var i = 0; i < 31; i++) {
'uggc://jjj.snprobbx.pbz/'.replace(re23, ''); s61[i].replace(re23, '');
re8.exec('SbeprqRkcvengvba=633669358527244818'); re8.exec('SbeprqRkcvengvba=633669358527244818');
re8.exec('VC=66.249.85.130'); re8.exec('VC=66.249.85.130');
re8.exec('FrffvbaQQS2=s15q53p9n372sn76npr13o271n4s3p5r29p235746p908p58'); re8.exec('FrffvbaQQS2=s15q53p9n372sn76npr13o271n4s3p5r29p235746p908p58');
re8.exec('s15q53p9n372sn76npr13o271n4s3p5r29p235746p908p58'); re8.exec('s15q53p9n372sn76npr13o271n4s3p5r29p235746p908p58');
re24.exec('uggc://jjj.snprobbx.pbz/'); re24.exec(s61[i]);
} }
for (var i = 0; i < 30; i++) { for (var i = 0; i < 30; i++) {
'419'.replace(re6, ''); s65[i].replace(re6, '');
/(?:^|\s+)gvzrfgnzc(?:\s+|$)/.exec('gvzrfgnzc'); /(?:^|\s+)gvzrfgnzc(?:\s+|$)/.exec(s66[i]);
re7.exec('419'); re7.exec(s65[i]);
} }
for (var i = 0; i < 29; i++) { for (var i = 0; i < 29; i++) {
'uggc://jjj.snprobbx.pbz/ybtva.cuc'.replace(re23, ''); s62[i].replace(re23, '');
} }
for (var i = 0; i < 28; i++) { for (var i = 0; i < 28; i++) {
'Funer guvf tnqtrg'.replace(re25, ''); s63[i].replace(re25, '');
'Funer guvf tnqtrg'.replace(re12, ''); s63[i].replace(re12, '');
re26.exec('uggc://jjj.tbbtyr.pbz/vt/qverpgbel'); re26.exec(s64[i]);
} }
} }
var re27 = /-\D/g; var re27 = /-\D/g;
@@ -290,13 +393,27 @@ function runRegExpBenchmark() {
var str18 = 'uggc://jjj.yrobapbva.se/yv'; var str18 = 'uggc://jjj.yrobapbva.se/yv';
var str19 = 'ZFPhygher=VC=74.125.75.1&VCPhygher=ra-HF&CersreerqPhygher=ra-HF&Pbhagel=IIZ%3Q&SbeprqRkcvengvba=633669316860113296&gvzrMbar=-8&HFEYBP=DKWyLHAiMTH9AwHjWxAcqUx9GJ91oaEunJ4tIzyyqlMQo3IhqUW5D29xMG1IHlMQo3IhqUW5GzSgMG1Iozy0MJDtH3EuqTImWxEgLHAiMTH9BQN3WxkuqTy0qJEyCGZ3YwDkBGVzGT9hM2y0qJEyCF0kZwVhZQH3APMDo3A0LJkQo2EyCGx0ZQDmWyWyM2yiox5uoJH9D0R%3Q'; var str19 = 'ZFPhygher=VC=74.125.75.1&VCPhygher=ra-HF&CersreerqPhygher=ra-HF&Pbhagel=IIZ%3Q&SbeprqRkcvengvba=633669316860113296&gvzrMbar=-8&HFEYBP=DKWyLHAiMTH9AwHjWxAcqUx9GJ91oaEunJ4tIzyyqlMQo3IhqUW5D29xMG1IHlMQo3IhqUW5GzSgMG1Iozy0MJDtH3EuqTImWxEgLHAiMTH9BQN3WxkuqTy0qJEyCGZ3YwDkBGVzGT9hM2y0qJEyCF0kZwVhZQH3APMDo3A0LJkQo2EyCGx0ZQDmWyWyM2yiox5uoJH9D0R%3Q';
var str20 = 'ZFPhygher=VC=74.125.75.1&VCPhygher=ra-HF&CersreerqPhygher=ra-HF&CersreerqPhygherCraqvat=&Pbhagel=IIZ=&SbeprqRkcvengvba=633669316860113296&gvzrMbar=0&HFEYBP=DKWyLHAiMTH9AwHjWxAcqUx9GJ91oaEunJ4tIzyyqlMQo3IhqUW5D29xMG1IHlMQo3IhqUW5GzSgMG1Iozy0MJDtH3EuqTImWxEgLHAiMTH9BQN3WxkuqTy0qJEyCGZ3YwDkBGVzGT9hM2y0qJEyCF0kZwVhZQH3APMDo3A0LJkQo2EyCGx0ZQDmWyWyM2yiox5uoJH9D0R='; var str20 = 'ZFPhygher=VC=74.125.75.1&VCPhygher=ra-HF&CersreerqPhygher=ra-HF&CersreerqPhygherCraqvat=&Pbhagel=IIZ=&SbeprqRkcvengvba=633669316860113296&gvzrMbar=0&HFEYBP=DKWyLHAiMTH9AwHjWxAcqUx9GJ91oaEunJ4tIzyyqlMQo3IhqUW5D29xMG1IHlMQo3IhqUW5GzSgMG1Iozy0MJDtH3EuqTImWxEgLHAiMTH9BQN3WxkuqTy0qJEyCGZ3YwDkBGVzGT9hM2y0qJEyCF0kZwVhZQH3APMDo3A0LJkQo2EyCGx0ZQDmWyWyM2yiox5uoJH9D0R=';
var s67 = computeInputVariants('e115', 27);
var s68 = computeInputVariants('qvfcynl', 27);
var s69 = computeInputVariants('cbfvgvba', 27);
var s70 = computeInputVariants('uggc://jjj.zlfcnpr.pbz/', 27);
var s71 = computeInputVariants('cntrivrj', 27);
var s72 = computeInputVariants('VC=74.125.75.3', 27);
var s73 = computeInputVariants('ra', 27);
var s74 = computeInputVariants(str10, 27);
var s75 = computeInputVariants(str11, 27);
var s76 = computeInputVariants(str12, 27);
var s77 = computeInputVariants(str17, 27);
var s78 = computeInputVariants(str18, 27);
function runBlock3() { function runBlock3() {
for (var i = 0; i < 27; i++) { for (var i = 0; i < 27; i++) {
'e115'.replace(/[A-Za-z]/g, ''); s67[i].replace(/[A-Za-z]/g, '');
} }
for (var i = 0; i < 23; i++) { for (var i = 0; i < 23; i++) {
'qvfcynl'.replace(re27, ''); s68[i].replace(re27, '');
'cbfvgvba'.replace(re27, ''); s69[i].replace(re27, '');
} }
for (var i = 0; i < 22; i++) { for (var i = 0; i < 22; i++) {
'unaqyr'.replace(re14, ''); 'unaqyr'.replace(re14, '');
@@ -310,23 +427,23 @@ function runRegExpBenchmark() {
re28.exec(''); re28.exec('');
} }
for (var i = 0; i < 21; i++) { for (var i = 0; i < 21; i++) {
'uggc://jjj.zlfcnpr.pbz/'.replace(re12, ''); s70[i].replace(re12, '');
re13.exec('uggc://jjj.zlfcnpr.pbz/'); re13.exec(s70[i]);
} }
for (var i = 0; i < 20; i++) { for (var i = 0; i < 20; i++) {
'cntrivrj'.replace(re29, ''); s71[i].replace(re29, '');
'cntrivrj'.replace(re30, ''); s71[i].replace(re30, '');
re19.exec('ynfg'); re19.exec('ynfg');
re19.exec('ba svefg'); re19.exec('ba svefg');
re8.exec('VC=74.125.75.3'); re8.exec(s72[i]);
} }
for (var i = 0; i < 19; i++) { for (var i = 0; i < 19; i++) {
re31.exec('ra'); re31.exec(s73[i]);
} }
for (var i = 0; i < 18; i++) { for (var i = 0; i < 18; i++) {
str10.split(re32); s74[i].split(re32);
str11.split(re32); s75[i].split(re32);
str12.replace(re33, ''); s76[i].replace(re33, '');
re8.exec('144631658.0.10.1231363570'); re8.exec('144631658.0.10.1231363570');
re8.exec('144631658.1231363570.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)'); re8.exec('144631658.1231363570.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)');
re8.exec('144631658.3426875219718084000.1231363570.1231363570.1231363570.1'); re8.exec('144631658.3426875219718084000.1231363570.1231363570.1231363570.1');
@@ -335,12 +452,12 @@ function runRegExpBenchmark() {
re8.exec('__hgzn=144631658.3426875219718084000.1231363570.1231363570.1231363570.1'); re8.exec('__hgzn=144631658.3426875219718084000.1231363570.1231363570.1231363570.1');
re8.exec('__hgzo=144631658.0.10.1231363570'); re8.exec('__hgzo=144631658.0.10.1231363570');
re8.exec('__hgzm=144631658.1231363570.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)'); re8.exec('__hgzm=144631658.1231363570.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)');
re34.exec(str10); re34.exec(s74[i]);
re34.exec(str11); re34.exec(s75[i]);
} }
for (var i = 0; i < 17; i++) { for (var i = 0; i < 17; i++) {
str0.match(/zfvr/gi); s15[i].match(/zfvr/gi);
str0.match(/bcren/gi); s15[i].match(/bcren/gi);
str15.split(re32); str15.split(re32);
str16.split(re32); str16.split(re32);
'ohggba'.replace(re14, ''); 'ohggba'.replace(re14, '');
@@ -355,11 +472,11 @@ function runRegExpBenchmark() {
'qry'.replace(re15, ''); 'qry'.replace(re15, '');
'uqy_zba'.replace(re14, ''); 'uqy_zba'.replace(re14, '');
'uqy_zba'.replace(re15, ''); 'uqy_zba'.replace(re15, '');
str17.replace(re33, ''); s77[i].replace(re33, '');
str18.replace(/%3P/g, ''); s78[i].replace(/%3P/g, '');
str18.replace(/%3R/g, ''); s78[i].replace(/%3R/g, '');
str18.replace(/%3q/g, ''); s78[i].replace(/%3q/g, '');
str18.replace(re35, ''); s78[i].replace(re35, '');
'yvaxyvfg16'.replace(re14, ''); 'yvaxyvfg16'.replace(re14, '');
'yvaxyvfg16'.replace(re15, ''); 'yvaxyvfg16'.replace(re15, '');
'zvahf'.replace(re14, ''); 'zvahf'.replace(re14, '');
@@ -414,20 +531,25 @@ function runRegExpBenchmark() {
var re47 = /\/\xfc\/t/; var re47 = /\/\xfc\/t/;
var re48 = /\W/g; var re48 = /\W/g;
var re49 = /uers|fep|fglyr/; var re49 = /uers|fep|fglyr/;
var s79 = computeInputVariants(str21, 16);
var s80 = computeInputVariants(str22, 16);
var s81 = computeInputVariants(str23, 16);
var s82 = computeInputVariants(str26, 16);
function runBlock4() { function runBlock4() {
for (var i = 0; i < 16; i++) { for (var i = 0; i < 16; i++) {
''.replace(/\*/g, ''); ''.replace(/\*/g, '');
/\bnpgvir\b/.exec('npgvir'); /\bnpgvir\b/.exec('npgvir');
/sversbk/i.exec(str0); /sversbk/i.exec(s15[i]);
re36.exec('glcr'); re36.exec('glcr');
/zfvr/i.exec(str0); /zfvr/i.exec(s15[i]);
/bcren/i.exec(str0); /bcren/i.exec(s15[i]);
} }
for (var i = 0; i < 15; i++) { for (var i = 0; i < 15; i++) {
str21.split(re32); s79[i].split(re32);
str22.split(re32); s80[i].split(re32);
'uggc://ohyyrgvaf.zlfcnpr.pbz/vaqrk.psz'.replace(re12, ''); 'uggc://ohyyrgvaf.zlfcnpr.pbz/vaqrk.psz'.replace(re12, '');
str23.replace(re33, ''); s81[i].replace(re33, '');
'yv'.replace(re37, ''); 'yv'.replace(re37, '');
'yv'.replace(re18, ''); 'yv'.replace(re18, '');
re8.exec('144631658.0.10.1231367822'); re8.exec('144631658.0.10.1231367822');
@@ -438,9 +560,9 @@ function runRegExpBenchmark() {
re8.exec('__hgzn=144631658.4127520630321984500.1231367822.1231367822.1231367822.1'); re8.exec('__hgzn=144631658.4127520630321984500.1231367822.1231367822.1231367822.1');
re8.exec('__hgzo=144631658.0.10.1231367822'); re8.exec('__hgzo=144631658.0.10.1231367822');
re8.exec('__hgzm=144631658.1231367822.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)'); re8.exec('__hgzm=144631658.1231367822.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)');
re34.exec(str21); re34.exec(s79[i]);
re34.exec(str22); re34.exec(s80[i]);
/\.([\w-]+)|\[(\w+)(?:([!*^$~|]?=)["']?(.*?)["']?)?\]|:([\w-]+)(?:\(["']?(.*?)?["']?\)|$)/g.exec(str26); /\.([\w-]+)|\[(\w+)(?:([!*^$~|]?=)["']?(.*?)["']?)?\]|:([\w-]+)(?:\(["']?(.*?)?["']?\)|$)/g.exec(s82[i]);
re13.exec('uggc://ohyyrgvaf.zlfcnpr.pbz/vaqrk.psz'); re13.exec('uggc://ohyyrgvaf.zlfcnpr.pbz/vaqrk.psz');
re38.exec('yv'); re38.exec('yv');
} }
@@ -502,8 +624,8 @@ function runRegExpBenchmark() {
'fhozvg'.replace(re14, ''); 'fhozvg'.replace(re14, '');
'fhozvg'.replace(re15, ''); 'fhozvg'.replace(re15, '');
re50.exec(''); re50.exec('');
/NccyrJroXvg\/([^\s]*)/.exec(str0); /NccyrJroXvg\/([^\s]*)/.exec(s15[i]);
/XUGZY/.exec(str0); /XUGZY/.exec(s15[i]);
} }
for (var i = 0; i < 12; i++) { for (var i = 0; i < 12; i++) {
'${cebg}://${ubfg}${cngu}/${dz}'.replace(/(\$\{cebg\})|(\$cebg\b)/g, ''); '${cebg}://${ubfg}${cngu}/${dz}'.replace(/(\$\{cebg\})|(\$cebg\b)/g, '');
@@ -518,7 +640,7 @@ function runRegExpBenchmark() {
'9.0 e115'.replace(/^.*e(.*)$/, ''); '9.0 e115'.replace(/^.*e(.*)$/, '');
'<!-- ${nqiHey} -->'.replace(re55, ''); '<!-- ${nqiHey} -->'.replace(re55, '');
'<fpevcg glcr="grkg/wninfpevcg" fep="${nqiHey}"></fpevcg>'.replace(re55, ''); '<fpevcg glcr="grkg/wninfpevcg" fep="${nqiHey}"></fpevcg>'.replace(re55, '');
str1.replace(/^.*\s+(\S+\s+\S+$)/, ''); s21[i].replace(/^.*\s+(\S+\s+\S+$)/, '');
'tzk%2Subzrcntr%2Sfgneg%2Sqr%2S'.replace(re30, ''); 'tzk%2Subzrcntr%2Sfgneg%2Sqr%2S'.replace(re30, '');
'tzk'.replace(re30, ''); 'tzk'.replace(re30, '');
'uggc://${ubfg}${cngu}/${dz}'.replace(/(\$\{ubfg\})|(\$ubfg\b)/g, ''); 'uggc://${ubfg}${cngu}/${dz}'.replace(/(\$\{ubfg\})|(\$ubfg\b)/g, '');
@@ -549,61 +671,70 @@ function runRegExpBenchmark() {
var re62 = /^[^<]*(<(.|\s)+>)[^>]*$|^#(\w+)$/; var re62 = /^[^<]*(<(.|\s)+>)[^>]*$|^#(\w+)$/;
var str34 = '${1}://${2}${3}${4}${5}'; var str34 = '${1}://${2}${3}${4}${5}';
var str35 = ' O=6gnyg0g4znrrn&o=3&f=gc; Q=_lyu=K3bQZGSxnT4lZzD3OS9GNmV3ZGLkAQxRpTyxNmRlZmRmAmNkAQLRqTImqNZjOUEgpTjQnJ5xMKtgoN--; SCF=qy'; var str35 = ' O=6gnyg0g4znrrn&o=3&f=gc; Q=_lyu=K3bQZGSxnT4lZzD3OS9GNmV3ZGLkAQxRpTyxNmRlZmRmAmNkAQLRqTImqNZjOUEgpTjQnJ5xMKtgoN--; SCF=qy';
var s83 = computeInputVariants(str27, 11);
var s84 = computeInputVariants(str28, 11);
var s85 = computeInputVariants(str29, 11);
var s86 = computeInputVariants(str30, 11);
var s87 = computeInputVariants(str31, 11);
var s88 = computeInputVariants(str32, 11);
var s89 = computeInputVariants(str33, 11);
var s90 = computeInputVariants(str34, 11);
function runBlock6() { function runBlock6() {
for (var i = 0; i < 11; i++) { for (var i = 0; i < 11; i++) {
str27.replace(/##yv0##/gi, ''); s83[i].replace(/##yv0##/gi, '');
str27.replace(re57, ''); s83[i].replace(re57, '');
str28.replace(re58, ''); s84[i].replace(re58, '');
str29.replace(re59, ''); s85[i].replace(re59, '');
str30.replace(/##\/o##/gi, ''); s86[i].replace(/##\/o##/gi, '');
str30.replace(/##\/v##/gi, ''); s86[i].replace(/##\/v##/gi, '');
str30.replace(/##\/h##/gi, ''); s86[i].replace(/##\/h##/gi, '');
str30.replace(/##o##/gi, ''); s86[i].replace(/##o##/gi, '');
str30.replace(/##oe##/gi, ''); s86[i].replace(/##oe##/gi, '');
str30.replace(/##v##/gi, ''); s86[i].replace(/##v##/gi, '');
str30.replace(/##h##/gi, ''); s86[i].replace(/##h##/gi, '');
str31.replace(/##n##/gi, ''); s87[i].replace(/##n##/gi, '');
str32.replace(/##\/n##/gi, ''); s88[i].replace(/##\/n##/gi, '');
str33.replace(/#~#argjbexybtb#~#/g, ''); s89[i].replace(/#~#argjbexybtb#~#/g, '');
/ Zbovyr\//.exec(str0); / Zbovyr\//.exec(s15[i]);
/##yv1##/gi.exec(str27); /##yv1##/gi.exec(s83[i]);
/##yv10##/gi.exec(str28); /##yv10##/gi.exec(s84[i]);
/##yv11##/gi.exec(str28); /##yv11##/gi.exec(s84[i]);
/##yv12##/gi.exec(str28); /##yv12##/gi.exec(s84[i]);
/##yv13##/gi.exec(str28); /##yv13##/gi.exec(s84[i]);
/##yv14##/gi.exec(str28); /##yv14##/gi.exec(s84[i]);
/##yv15##/gi.exec(str28); /##yv15##/gi.exec(s84[i]);
re58.exec(str28); re58.exec(s84[i]);
/##yv17##/gi.exec(str29); /##yv17##/gi.exec(s85[i]);
/##yv18##/gi.exec(str29); /##yv18##/gi.exec(s85[i]);
re59.exec(str29); re59.exec(s85[i]);
/##yv2##/gi.exec(str27); /##yv2##/gi.exec(s83[i]);
/##yv20##/gi.exec(str30); /##yv20##/gi.exec(s86[i]);
/##yv21##/gi.exec(str30); /##yv21##/gi.exec(s86[i]);
/##yv22##/gi.exec(str30); /##yv22##/gi.exec(s86[i]);
/##yv23##/gi.exec(str30); /##yv23##/gi.exec(s86[i]);
/##yv3##/gi.exec(str27); /##yv3##/gi.exec(s83[i]);
re57.exec(str27); re57.exec(s83[i]);
/##yv5##/gi.exec(str28); /##yv5##/gi.exec(s84[i]);
/##yv6##/gi.exec(str28); /##yv6##/gi.exec(s84[i]);
/##yv7##/gi.exec(str28); /##yv7##/gi.exec(s84[i]);
/##yv8##/gi.exec(str28); /##yv8##/gi.exec(s84[i]);
/##yv9##/gi.exec(str28); /##yv9##/gi.exec(s84[i]);
re8.exec('473qq1rs0n2r70q9qo1pq48n021s9468ron90nps048p4p29'); re8.exec('473qq1rs0n2r70q9qo1pq48n021s9468ron90nps048p4p29');
re8.exec('SbeprqRkcvengvba=633669325184628362'); re8.exec('SbeprqRkcvengvba=633669325184628362');
re8.exec('FrffvbaQQS2=473qq1rs0n2r70q9qo1pq48n021s9468ron90nps048p4p29'); re8.exec('FrffvbaQQS2=473qq1rs0n2r70q9qo1pq48n021s9468ron90nps048p4p29');
/AbxvnA[^\/]*/.exec(str0); /AbxvnA[^\/]*/.exec(s15[i]);
} }
for (var i = 0; i < 10; i++) { for (var i = 0; i < 10; i++) {
' bss'.replace(/(?:^|\s+)bss(?:\s+|$)/g, ''); ' bss'.replace(/(?:^|\s+)bss(?:\s+|$)/g, '');
str34.replace(/(\$\{0\})|(\$0\b)/g, ''); s90[i].replace(/(\$\{0\})|(\$0\b)/g, '');
str34.replace(/(\$\{1\})|(\$1\b)/g, ''); s90[i].replace(/(\$\{1\})|(\$1\b)/g, '');
str34.replace(/(\$\{pbzcyrgr\})|(\$pbzcyrgr\b)/g, ''); s90[i].replace(/(\$\{pbzcyrgr\})|(\$pbzcyrgr\b)/g, '');
str34.replace(/(\$\{sentzrag\})|(\$sentzrag\b)/g, ''); s90[i].replace(/(\$\{sentzrag\})|(\$sentzrag\b)/g, '');
str34.replace(/(\$\{ubfgcbeg\})|(\$ubfgcbeg\b)/g, ''); s90[i].replace(/(\$\{ubfgcbeg\})|(\$ubfgcbeg\b)/g, '');
str34.replace(re56, ''); s90[i].replace(re56, '');
str34.replace(/(\$\{cebgbpby\})|(\$cebgbpby\b)/g, ''); s90[i].replace(/(\$\{cebgbpby\})|(\$cebgbpby\b)/g, '');
str34.replace(/(\$\{dhrel\})|(\$dhrel\b)/g, ''); s90[i].replace(/(\$\{dhrel\})|(\$dhrel\b)/g, '');
'nqfvmr'.replace(re29, ''); 'nqfvmr'.replace(re29, '');
'nqfvmr'.replace(re30, ''); 'nqfvmr'.replace(re30, '');
'uggc://${2}${3}${4}${5}'.replace(/(\$\{2\})|(\$2\b)/g, ''); 'uggc://${2}${3}${4}${5}'.replace(/(\$\{2\})|(\$2\b)/g, '');
@@ -629,7 +760,7 @@ function runRegExpBenchmark() {
re9.exec('zrqvgobk'); re9.exec('zrqvgobk');
re9.exec('hsgy'); re9.exec('hsgy');
re9.exec('lhv-h'); re9.exec('lhv-h');
/Fnsnev|Xbadhrebe|XUGZY/gi.exec(str0); /Fnsnev|Xbadhrebe|XUGZY/gi.exec(s15[i]);
re61.exec('uggc://wf.hv-cbegny.qr/tzk/ubzr/wf/20080602/onfr.wf'); re61.exec('uggc://wf.hv-cbegny.qr/tzk/ubzr/wf/20080602/onfr.wf');
re62.exec('#Ybtva_rznvy'); re62.exec('#Ybtva_rznvy');
} }
@@ -640,6 +771,9 @@ function runRegExpBenchmark() {
var str38 = 'uggc://tbbtyrnqf.t.qbhoyrpyvpx.arg/cntrnq/nqf?pyvrag=pn-svz_zlfcnpr_zlfcnpr-ubzrcntr_wf&qg=1231364057761&uy=ra&nqfnsr=uvtu&br=hgs8&ahz_nqf=4&bhgchg=wf&nqgrfg=bss&pbeeryngbe=1231364057761&punaary=svz_zlfcnpr_ubzrcntr_abgybttrqva%2Psvz_zlfcnpr_aba_HTP%2Psvz_zlfcnpr_havgrq-fgngrf&hey=uggc%3N%2S%2Ssevraqf.zlfcnpr.pbz%2Svaqrk.psz&nq_glcr=grkg&rvq=6083027&rn=0&sez=0&tn_ivq=1667363813.1231364061&tn_fvq=1231364061&tn_uvq=1917563877&synfu=9.0.115&h_u=768&h_j=1024&h_nu=738&h_nj=1024&h_pq=24&h_gm=-480&h_uvf=2&h_wnin=gehr&h_acyht=7&h_azvzr=22'; var str38 = 'uggc://tbbtyrnqf.t.qbhoyrpyvpx.arg/cntrnq/nqf?pyvrag=pn-svz_zlfcnpr_zlfcnpr-ubzrcntr_wf&qg=1231364057761&uy=ra&nqfnsr=uvtu&br=hgs8&ahz_nqf=4&bhgchg=wf&nqgrfg=bss&pbeeryngbe=1231364057761&punaary=svz_zlfcnpr_ubzrcntr_abgybttrqva%2Psvz_zlfcnpr_aba_HTP%2Psvz_zlfcnpr_havgrq-fgngrf&hey=uggc%3N%2S%2Ssevraqf.zlfcnpr.pbz%2Svaqrk.psz&nq_glcr=grkg&rvq=6083027&rn=0&sez=0&tn_ivq=1667363813.1231364061&tn_fvq=1231364061&tn_uvq=1917563877&synfu=9.0.115&h_u=768&h_j=1024&h_nu=738&h_nj=1024&h_pq=24&h_gm=-480&h_uvf=2&h_wnin=gehr&h_acyht=7&h_azvzr=22';
var str39 = 'ZFPhygher=VC=74.125.75.20&VCPhygher=ra-HF&CersreerqPhygher=ra-HF&Pbhagel=IIZ%3Q&SbeprqRkcvengvba=633669321699093060&gvzrMbar=-8&HFEYBP=DKWyLHAiMTH9AwHjWxAcqUx9GJ91oaEunJ4tIzyyqlMQo3IhqUW5D29xMG1IHlMQo3IhqUW5GzSgMG1Iozy0MJDtH3EuqTImWxEgLHAiMTH9BQN3WxkuqTy0qJEyCGZ3YwDkBGVzGT9hM2y0qJEyCF0kZwVhZQH3APMDo3A0LJkQo2EyCGx0ZQDmWyWyM2yiox5uoJH9D0R%3Q'; var str39 = 'ZFPhygher=VC=74.125.75.20&VCPhygher=ra-HF&CersreerqPhygher=ra-HF&Pbhagel=IIZ%3Q&SbeprqRkcvengvba=633669321699093060&gvzrMbar=-8&HFEYBP=DKWyLHAiMTH9AwHjWxAcqUx9GJ91oaEunJ4tIzyyqlMQo3IhqUW5D29xMG1IHlMQo3IhqUW5GzSgMG1Iozy0MJDtH3EuqTImWxEgLHAiMTH9BQN3WxkuqTy0qJEyCGZ3YwDkBGVzGT9hM2y0qJEyCF0kZwVhZQH3APMDo3A0LJkQo2EyCGx0ZQDmWyWyM2yiox5uoJH9D0R%3Q';
var str40 = 'ZFPhygher=VC=74.125.75.20&VCPhygher=ra-HF&CersreerqPhygher=ra-HF&CersreerqPhygherCraqvat=&Pbhagel=IIZ=&SbeprqRkcvengvba=633669321699093060&gvzrMbar=0&HFEYBP=DKWyLHAiMTH9AwHjWxAcqUx9GJ91oaEunJ4tIzyyqlMQo3IhqUW5D29xMG1IHlMQo3IhqUW5GzSgMG1Iozy0MJDtH3EuqTImWxEgLHAiMTH9BQN3WxkuqTy0qJEyCGZ3YwDkBGVzGT9hM2y0qJEyCF0kZwVhZQH3APMDo3A0LJkQo2EyCGx0ZQDmWyWyM2yiox5uoJH9D0R='; var str40 = 'ZFPhygher=VC=74.125.75.20&VCPhygher=ra-HF&CersreerqPhygher=ra-HF&CersreerqPhygherCraqvat=&Pbhagel=IIZ=&SbeprqRkcvengvba=633669321699093060&gvzrMbar=0&HFEYBP=DKWyLHAiMTH9AwHjWxAcqUx9GJ91oaEunJ4tIzyyqlMQo3IhqUW5D29xMG1IHlMQo3IhqUW5GzSgMG1Iozy0MJDtH3EuqTImWxEgLHAiMTH9BQN3WxkuqTy0qJEyCGZ3YwDkBGVzGT9hM2y0qJEyCF0kZwVhZQH3APMDo3A0LJkQo2EyCGx0ZQDmWyWyM2yiox5uoJH9D0R=';
var s91 = computeInputVariants(str36, 9);
var s92 = computeInputVariants(str37, 9);
var s93 = computeInputVariants(str38, 9);
function runBlock7() { function runBlock7() {
for (var i = 0; i < 9; i++) { for (var i = 0; i < 9; i++) {
'0'.replace(re40, ''); '0'.replace(re40, '');
@@ -660,15 +794,15 @@ function runRegExpBenchmark() {
for (var i = 0; i < 8; i++) { for (var i = 0; i < 8; i++) {
'Pybfr {0}'.replace(re63, ''); 'Pybfr {0}'.replace(re63, '');
'Bcra {0}'.replace(re63, ''); 'Bcra {0}'.replace(re63, '');
str36.split(re32); s91[i].split(re32);
str37.split(re32); s92[i].split(re32);
'puvyq p1 svefg gnournqref'.replace(re14, ''); 'puvyq p1 svefg gnournqref'.replace(re14, '');
'puvyq p1 svefg gnournqref'.replace(re15, ''); 'puvyq p1 svefg gnournqref'.replace(re15, '');
'uqy_fcb'.replace(re14, ''); 'uqy_fcb'.replace(re14, '');
'uqy_fcb'.replace(re15, ''); 'uqy_fcb'.replace(re15, '');
'uvag'.replace(re14, ''); 'uvag'.replace(re14, '');
'uvag'.replace(re15, ''); 'uvag'.replace(re15, '');
str38.replace(re33, ''); s93[i].replace(re33, '');
'yvfg'.replace(re14, ''); 'yvfg'.replace(re14, '');
'yvfg'.replace(re15, ''); 'yvfg'.replace(re15, '');
'at_bhgre'.replace(re30, ''); 'at_bhgre'.replace(re30, '');
@@ -697,8 +831,8 @@ function runRegExpBenchmark() {
re8.exec('__hgzo=144631658.0.10.1231364074'); re8.exec('__hgzo=144631658.0.10.1231364074');
re8.exec('__hgzm=144631658.1231364074.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)'); re8.exec('__hgzm=144631658.1231364074.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)');
re8.exec('p98s8o9q42nr21or1r61pqorn1n002nsss569635984s6qp7'); re8.exec('p98s8o9q42nr21or1r61pqorn1n002nsss569635984s6qp7');
re34.exec(str36); re34.exec(s91[i]);
re34.exec(str37); re34.exec(s92[i]);
} }
} }
var re64 = /\b[a-z]/g; var re64 = /\b[a-z]/g;
@@ -707,7 +841,7 @@ function runRegExpBenchmark() {
var str41 = 'uggc://cebsvyr.zlfcnpr.pbz/Zbqhyrf/Nccyvpngvbaf/Cntrf/Pnainf.nfck'; var str41 = 'uggc://cebsvyr.zlfcnpr.pbz/Zbqhyrf/Nccyvpngvbaf/Cntrf/Pnainf.nfck';
function runBlock8() { function runBlock8() {
for (var i = 0; i < 7; i++) { for (var i = 0; i < 7; i++) {
str1.match(/\d+/g); s21[i].match(/\d+/g);
'nsgre'.replace(re64, ''); 'nsgre'.replace(re64, '');
'orsber'.replace(re64, ''); 'orsber'.replace(re64, '');
'obggbz'.replace(re64, ''); 'obggbz'.replace(re64, '');
@@ -741,9 +875,9 @@ function runRegExpBenchmark() {
re19.exec('gno6'); re19.exec('gno6');
re19.exec('gno7'); re19.exec('gno7');
re19.exec('gno8'); re19.exec('gno8');
/NqborNVE\/([^\s]*)/.exec(str0); /NqborNVE\/([^\s]*)/.exec(s15[i]);
/NccyrJroXvg\/([^ ]*)/.exec(str0); /NccyrJroXvg\/([^ ]*)/.exec(s15[i]);
/XUGZY/gi.exec(str0); /XUGZY/gi.exec(s15[i]);
/^(?:obql|ugzy)$/i.exec('YV'); /^(?:obql|ugzy)$/i.exec('YV');
re38.exec('ohggba'); re38.exec('ohggba');
re38.exec('vachg'); re38.exec('vachg');
@@ -774,14 +908,14 @@ function runRegExpBenchmark() {
'freivpr'.replace(re46, ''); 'freivpr'.replace(re46, '');
'freivpr'.replace(re47, ''); 'freivpr'.replace(re47, '');
'freivpr'.replace(re48, ''); 'freivpr'.replace(re48, '');
/((ZFVR\s+([6-9]|\d\d)\.))/.exec(str0); /((ZFVR\s+([6-9]|\d\d)\.))/.exec(s15[i]);
re66.exec(''); re66.exec('');
re50.exec('fryrpgrq'); re50.exec('fryrpgrq');
re8.exec('8sqq78r9n442851q565599o401385sp3s04r92rnn7o19ssn'); re8.exec('8sqq78r9n442851q565599o401385sp3s04r92rnn7o19ssn');
re8.exec('SbeprqRkcvengvba=633669340386893867'); re8.exec('SbeprqRkcvengvba=633669340386893867');
re8.exec('VC=74.125.75.17'); re8.exec('VC=74.125.75.17');
re8.exec('FrffvbaQQS2=8sqq78r9n442851q565599o401385sp3s04r92rnn7o19ssn'); re8.exec('FrffvbaQQS2=8sqq78r9n442851q565599o401385sp3s04r92rnn7o19ssn');
/Xbadhrebe|Fnsnev|XUGZY/.exec(str0); /Xbadhrebe|Fnsnev|XUGZY/.exec(s15[i]);
re13.exec(str41); re13.exec(str41);
re49.exec('unfsbphf'); re49.exec('unfsbphf');
} }
@@ -826,12 +960,23 @@ function runRegExpBenchmark() {
var str61 = 'uggc://gx2.fgp.f-zfa.pbz/oe/uc/11/ra-hf/pff/v/g.tvs#uggc://gx2.fgo.f-zfa.pbz/v/29/4RQP4969777N048NPS4RRR3PO2S7S.wct'; var str61 = 'uggc://gx2.fgp.f-zfa.pbz/oe/uc/11/ra-hf/pff/v/g.tvs#uggc://gx2.fgo.f-zfa.pbz/v/29/4RQP4969777N048NPS4RRR3PO2S7S.wct';
var str62 = 'uggc://gx2.fgp.f-zfa.pbz/oe/uc/11/ra-hf/pff/v/g.tvs#uggc://gx2.fgo.f-zfa.pbz/v/OQ/63NP9O94NS5OQP1249Q9S1ROP7NS3.wct'; var str62 = 'uggc://gx2.fgp.f-zfa.pbz/oe/uc/11/ra-hf/pff/v/g.tvs#uggc://gx2.fgo.f-zfa.pbz/v/OQ/63NP9O94NS5OQP1249Q9S1ROP7NS3.wct';
var str63 = 'zbmvyyn/5.0 (jvaqbjf; h; jvaqbjf ag 5.1; ra-hf) nccyrjroxvg/528.9 (xugzy, yvxr trpxb) puebzr/2.0.157.0 fnsnev/528.9'; var str63 = 'zbmvyyn/5.0 (jvaqbjf; h; jvaqbjf ag 5.1; ra-hf) nccyrjroxvg/528.9 (xugzy, yvxr trpxb) puebzr/2.0.157.0 fnsnev/528.9';
var s94 = computeInputVariants(str42, 5);
var s95 = computeInputVariants(str43, 5);
var s96 = computeInputVariants(str44, 5);
var s97 = computeInputVariants(str47, 5);
var s98 = computeInputVariants(str48, 5);
var s99 = computeInputVariants(str49, 5);
var s100 = computeInputVariants(str50, 5);
var s101 = computeInputVariants(str51, 5);
var s102 = computeInputVariants(str52, 5);
var s103 = computeInputVariants(str53, 5);
function runBlock9() { function runBlock9() {
for (var i = 0; i < 5; i++) { for (var i = 0; i < 5; i++) {
str42.split(re32); s94[i].split(re32);
str43.split(re32); s95[i].split(re32);
'svz_zlfcnpr_hfre-ivrj-pbzzragf,svz_zlfcnpr_havgrq-fgngrf'.split(re20); 'svz_zlfcnpr_hfre-ivrj-pbzzragf,svz_zlfcnpr_havgrq-fgngrf'.split(re20);
str44.replace(re33, ''); s96[i].replace(re33, '');
'zrah_arj zrah_arj_gbttyr zrah_gbttyr'.replace(re67, ''); 'zrah_arj zrah_arj_gbttyr zrah_gbttyr'.replace(re67, '');
'zrah_byq zrah_byq_gbttyr zrah_gbttyr'.replace(re67, ''); 'zrah_byq zrah_byq_gbttyr zrah_gbttyr'.replace(re67, '');
re8.exec('102n9o0o9pq60132qn0337rr867p75953502q2s27s2s5r98'); re8.exec('102n9o0o9pq60132qn0337rr867p75953502q2s27s2s5r98');
@@ -855,12 +1000,12 @@ function runRegExpBenchmark() {
' yvfg2'.replace(re15, ''); ' yvfg2'.replace(re15, '');
' frneputebhc1'.replace(re14, ''); ' frneputebhc1'.replace(re14, '');
' frneputebhc1'.replace(re15, ''); ' frneputebhc1'.replace(re15, '');
str47.replace(re68, ''); s97[i].replace(re68, '');
str47.replace(re18, ''); s97[i].replace(re18, '');
''.replace(/&/g, ''); ''.replace(/&/g, '');
''.replace(re35, ''); ''.replace(re35, '');
'(..-{0})(\|(\d+)|)'.replace(re63, ''); '(..-{0})(\|(\d+)|)'.replace(re63, '');
str48.replace(re18, ''); s98[i].replace(re18, '');
'//vzt.jro.qr/vij/FC/${cngu}/${anzr}/${inyhr}?gf=${abj}'.replace(re56, ''); '//vzt.jro.qr/vij/FC/${cngu}/${anzr}/${inyhr}?gf=${abj}'.replace(re56, '');
'//vzt.jro.qr/vij/FC/tzk_uc/${anzr}/${inyhr}?gf=${abj}'.replace(/(\$\{anzr\})|(\$anzr\b)/g, ''); '//vzt.jro.qr/vij/FC/tzk_uc/${anzr}/${inyhr}?gf=${abj}'.replace(/(\$\{anzr\})|(\$anzr\b)/g, '');
'<fcna pynff="urnq"><o>Jvaqbjf Yvir Ubgznvy</o></fcna><fcna pynff="zft">{1}</fcna>'.replace(re69, ''); '<fcna pynff="urnq"><o>Jvaqbjf Yvir Ubgznvy</o></fcna><fcna pynff="zft">{1}</fcna>'.replace(re69, '');
@@ -872,8 +1017,8 @@ function runRegExpBenchmark() {
'Zncf'.replace(re15, ''); 'Zncf'.replace(re15, '');
'Zbq-Vasb-Vasb-WninFpevcgUvag'.replace(re39, ''); 'Zbq-Vasb-Vasb-WninFpevcgUvag'.replace(re39, '');
'Arjf'.replace(re15, ''); 'Arjf'.replace(re15, '');
str49.split(re32); s99[i].split(re32);
str50.split(re32); s100[i].split(re32);
'Ivqrb'.replace(re15, ''); 'Ivqrb'.replace(re15, '');
'Jro'.replace(re15, ''); 'Jro'.replace(re15, '');
'n'.replace(re39, ''); 'n'.replace(re39, '');
@@ -907,17 +1052,17 @@ function runRegExpBenchmark() {
'uc_fubccvatobk'.replace(re30, ''); 'uc_fubccvatobk'.replace(re30, '');
'ugzy%2Rvq'.replace(re29, ''); 'ugzy%2Rvq'.replace(re29, '');
'ugzy%2Rvq'.replace(re30, ''); 'ugzy%2Rvq'.replace(re30, '');
str51.replace(re33, ''); s101[i].replace(re33, '');
'uggc://wf.hv-cbegny.qr/tzk/ubzr/wf/20080602/cebgbglcr.wf${4}${5}'.replace(re71, ''); 'uggc://wf.hv-cbegny.qr/tzk/ubzr/wf/20080602/cebgbglcr.wf${4}${5}'.replace(re71, '');
'uggc://wf.hv-cbegny.qr/tzk/ubzr/wf/20080602/cebgbglcr.wf${5}'.replace(re72, ''); 'uggc://wf.hv-cbegny.qr/tzk/ubzr/wf/20080602/cebgbglcr.wf${5}'.replace(re72, '');
str52.replace(re73, ''); s102[i].replace(re73, '');
'uggc://zfacbegny.112.2b7.arg/o/ff/zfacbegnyubzr/1/U.7-cqi-2/f55332979829981?[NDO]&{1}&{2}&[NDR]'.replace(re69, ''); 'uggc://zfacbegny.112.2b7.arg/o/ff/zfacbegnyubzr/1/U.7-cqi-2/f55332979829981?[NDO]&{1}&{2}&[NDR]'.replace(re69, '');
'vztZFSG'.replace(re14, ''); 'vztZFSG'.replace(re14, '');
'vztZFSG'.replace(re15, ''); 'vztZFSG'.replace(re15, '');
'zfasbbg1 ps'.replace(re14, ''); 'zfasbbg1 ps'.replace(re14, '');
'zfasbbg1 ps'.replace(re15, ''); 'zfasbbg1 ps'.replace(re15, '');
str53.replace(re14, ''); s103[i].replace(re14, '');
str53.replace(re15, ''); s103[i].replace(re15, '');
'cnerag puebzr6 fvatyr1 gno fryrpgrq ovaq'.replace(re14, ''); 'cnerag puebzr6 fvatyr1 gno fryrpgrq ovaq'.replace(re14, '');
'cnerag puebzr6 fvatyr1 gno fryrpgrq ovaq'.replace(re15, ''); 'cnerag puebzr6 fvatyr1 gno fryrpgrq ovaq'.replace(re15, '');
'cevznel'.replace(re14, ''); 'cevznel'.replace(re14, '');
@@ -945,11 +1090,11 @@ function runRegExpBenchmark() {
re8.exec('__hgzn=144631658.2770915348920628700.1231367708.1231367708.1231367708.1'); re8.exec('__hgzn=144631658.2770915348920628700.1231367708.1231367708.1231367708.1');
re8.exec('__hgzo=144631658.0.10.1231367708'); re8.exec('__hgzo=144631658.0.10.1231367708');
re8.exec('__hgzm=144631658.1231367708.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)'); re8.exec('__hgzm=144631658.1231367708.1.1.hgzpfe=(qverpg)|hgzppa=(qverpg)|hgzpzq=(abar)');
re34.exec(str49); re34.exec(s99[i]);
re34.exec(str50); re34.exec(s100[i]);
/ZFVR\s+5[.]01/.exec(str0); /ZFVR\s+5[.]01/.exec(s15[i]);
/HF(?=;)/i.exec(str56); /HF(?=;)/i.exec(str56);
re74.exec(str47); re74.exec(s97[i]);
re28.exec('svefg npgvir svefgNpgvir'); re28.exec('svefg npgvir svefgNpgvir');
re28.exec('ynfg'); re28.exec('ynfg');
/\bp:(..)/i.exec('m:94043|yn:37.4154|yb:-122.0585|p:HF'); /\bp:(..)/i.exec('m:94043|yn:37.4154|yb:-122.0585|p:HF');
@@ -967,15 +1112,15 @@ function runRegExpBenchmark() {
re79.exec(str60); re79.exec(str60);
re79.exec(str59); re79.exec(str59);
/\|p:([a-z]{2})/i.exec('m:94043|yn:37.4154|yb:-122.0585|p:HF|ue:1'); /\|p:([a-z]{2})/i.exec('m:94043|yn:37.4154|yb:-122.0585|p:HF|ue:1');
re80.exec(str47); re80.exec(s97[i]);
re61.exec('cebgbglcr.wf'); re61.exec('cebgbglcr.wf');
re68.exec(str47); re68.exec(s97[i]);
re81.exec(str47); re81.exec(s97[i]);
re82.exec(str47); re82.exec(s97[i]);
/^Fubpxjnir Synfu (\d)/.exec(str1); /^Fubpxjnir Synfu (\d)/.exec(s21[i]);
/^Fubpxjnir Synfu (\d+)/.exec(str1); /^Fubpxjnir Synfu (\d+)/.exec(s21[i]);
re83.exec('[bowrpg tybony]'); re83.exec('[bowrpg tybony]');
re62.exec(str47); re62.exec(s97[i]);
re84.exec(str61); re84.exec(str61);
re84.exec(str62); re84.exec(str62);
/jroxvg/.exec(str63); /jroxvg/.exec(str63);
@@ -1597,6 +1742,8 @@ function runRegExpBenchmark() {
/jvaqbjf/.exec(str63); /jvaqbjf/.exec(str63);
} }
} }
function run() {
for (var i = 0; i < 5; i++) { for (var i = 0; i < 5; i++) {
runBlock0(); runBlock0();
runBlock1(); runBlock1();
@@ -1612,3 +1759,6 @@ function runRegExpBenchmark() {
runBlock11(); runBlock11();
} }
} }
this.run = run;
}
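The mechanical pattern in the regexp.js change above: each string literal that was previously passed to exec, replace, or split on every loop iteration is now precomputed once into an array of variants (s26 through s103), and the loop indexes into that array so the engine receives a distinct input object on each pass. The body of computeInputVariants is defined earlier in regexp.js and is not part of this hunk; the sketch below is only an assumption about its general shape, built around the idea of "same characters, distinct string objects":

    // Sketch only: the real computeInputVariants is not shown in this diff.
    function computeInputVariants(str, count) {
      var variants = [ str ];
      for (var i = 1; i < count; i++) {
        // Rebuild the same characters one at a time so each variant is a
        // distinct string object with identical content.
        var copy = '';
        for (var j = 0; j < str.length; j++) copy += str.charAt(j);
        variants[i] = copy;
      }
      return variants;
    }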

4
deps/v8/benchmarks/revisions.html

@@ -26,7 +26,9 @@ the benchmark suite.
typos in the DeltaBlue implementation. Changed the Splay benchmark to typos in the DeltaBlue implementation. Changed the Splay benchmark to
avoid converting the same numeric key to a string over and over again avoid converting the same numeric key to a string over and over again
and to avoid inserting and removing the same element repeatedly thus and to avoid inserting and removing the same element repeatedly thus
increasing pressure on the memory subsystem.</p> increasing pressure on the memory subsystem. Changed the RegExp
benchmark to exercise the regular expression engine on different input
strings.</p>
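Concretely, the RegExp change described in this paragraph replaces loops over a single literal with loops over precomputed variants. Using the computeInputVariants sketch above and a stand-in pattern (the benchmark's own regexes, such as re8, are defined elsewhere in regexp.js), the before/after shape is:

    var re = /\d+(?:\.\d+)+/;  // stand-in pattern, not the benchmark's re8
    // Before: the engine saw the identical string object on every iteration.
    for (var i = 0; i < 81; i++) re.exec('VC=74.125.75.1');
    // After: a different precomputed input object each iteration.
    var inputs = computeInputVariants('VC=74.125.75.1', 81);
    for (var i = 0; i < 81; i++) re.exec(inputs[i]);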
<p>Furthermore, the benchmark runner was changed to run the benchmarks <p>Furthermore, the benchmark runner was changed to run the benchmarks
for at least a few times to stabilize the reported numbers on slower for at least a few times to stabilize the reported numbers on slower

2
deps/v8/benchmarks/run.html

@@ -114,7 +114,7 @@ higher scores means better performance: <em>Bigger is better!</em>
<li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>904 lines</i>).</li> <li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>904 lines</i>).</li>
<li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4684 lines</i>).</li> <li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4684 lines</i>).</li>
<li><b>RegExp</b><br>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages <li><b>RegExp</b><br>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
(<i>1614 lines</i>). (<i>1761 lines</i>).
</li> </li>
<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>394 lines</i>).</li> <li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>394 lines</i>).</li>
</ul> </ul>

5
deps/v8/include/v8-debug.h

@@ -253,9 +253,12 @@ class EXPORT Debug {
static bool SetDebugEventListener(v8::Handle<v8::Object> that, static bool SetDebugEventListener(v8::Handle<v8::Object> that,
Handle<Value> data = Handle<Value>()); Handle<Value> data = Handle<Value>());
// Break execution of JavaScript. // Schedule a debugger break to happen when JavaScript code is run.
static void DebugBreak(); static void DebugBreak();
// Remove scheduled debugger break if it has not happened yet.
static void CancelDebugBreak();
// Break execution of JavaScript (this method can be invoked from a // Break execution of JavaScript (this method can be invoked from a
// non-VM thread) for further client command execution on a VM // non-VM thread) for further client command execution on a VM
// thread. Client data is then passed in EventDetails to // thread. Client data is then passed in EventDetails to

1
deps/v8/src/SConscript

@@ -100,6 +100,7 @@ SOURCES = {
serialize.cc serialize.cc
snapshot-common.cc snapshot-common.cc
spaces.cc spaces.cc
string-search.cc
string-stream.cc string-stream.cc
stub-cache.cc stub-cache.cc
token.cc token.cc

87
deps/v8/src/api.cc

@@ -134,27 +134,27 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.new_space_size = &new_space_size; heap_stats.new_space_size = &new_space_size;
int new_space_capacity; int new_space_capacity;
heap_stats.new_space_capacity = &new_space_capacity; heap_stats.new_space_capacity = &new_space_capacity;
int old_pointer_space_size; intptr_t old_pointer_space_size;
heap_stats.old_pointer_space_size = &old_pointer_space_size; heap_stats.old_pointer_space_size = &old_pointer_space_size;
int old_pointer_space_capacity; intptr_t old_pointer_space_capacity;
heap_stats.old_pointer_space_capacity = &old_pointer_space_capacity; heap_stats.old_pointer_space_capacity = &old_pointer_space_capacity;
int old_data_space_size; intptr_t old_data_space_size;
heap_stats.old_data_space_size = &old_data_space_size; heap_stats.old_data_space_size = &old_data_space_size;
int old_data_space_capacity; intptr_t old_data_space_capacity;
heap_stats.old_data_space_capacity = &old_data_space_capacity; heap_stats.old_data_space_capacity = &old_data_space_capacity;
int code_space_size; intptr_t code_space_size;
heap_stats.code_space_size = &code_space_size; heap_stats.code_space_size = &code_space_size;
int code_space_capacity; intptr_t code_space_capacity;
heap_stats.code_space_capacity = &code_space_capacity; heap_stats.code_space_capacity = &code_space_capacity;
int map_space_size; intptr_t map_space_size;
heap_stats.map_space_size = &map_space_size; heap_stats.map_space_size = &map_space_size;
int map_space_capacity; intptr_t map_space_capacity;
heap_stats.map_space_capacity = &map_space_capacity; heap_stats.map_space_capacity = &map_space_capacity;
int cell_space_size; intptr_t cell_space_size;
heap_stats.cell_space_size = &cell_space_size; heap_stats.cell_space_size = &cell_space_size;
int cell_space_capacity; intptr_t cell_space_capacity;
heap_stats.cell_space_capacity = &cell_space_capacity; heap_stats.cell_space_capacity = &cell_space_capacity;
int lo_space_size; intptr_t lo_space_size;
heap_stats.lo_space_size = &lo_space_size; heap_stats.lo_space_size = &lo_space_size;
int global_handle_count; int global_handle_count;
heap_stats.global_handle_count = &global_handle_count; heap_stats.global_handle_count = &global_handle_count;
@@ -166,9 +166,9 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.near_death_global_handle_count = &near_death_global_handle_count; heap_stats.near_death_global_handle_count = &near_death_global_handle_count;
int destroyed_global_handle_count; int destroyed_global_handle_count;
heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count; heap_stats.destroyed_global_handle_count = &destroyed_global_handle_count;
int memory_allocator_size; intptr_t memory_allocator_size;
heap_stats.memory_allocator_size = &memory_allocator_size; heap_stats.memory_allocator_size = &memory_allocator_size;
int memory_allocator_capacity; intptr_t memory_allocator_capacity;
heap_stats.memory_allocator_capacity = &memory_allocator_capacity; heap_stats.memory_allocator_capacity = &memory_allocator_capacity;
int objects_per_type[LAST_TYPE + 1] = {0}; int objects_per_type[LAST_TYPE + 1] = {0};
heap_stats.objects_per_type = objects_per_type; heap_stats.objects_per_type = objects_per_type;
@@ -767,6 +767,12 @@ int TypeSwitch::match(v8::Handle<Value> value) {
} }
#define SET_FIELD_WRAPPED(obj, setter, cdata) do { \
i::Handle<i::Object> proxy = FromCData(cdata); \
(obj)->setter(*proxy); \
} while (false)
void FunctionTemplate::SetCallHandler(InvocationCallback callback, void FunctionTemplate::SetCallHandler(InvocationCallback callback,
v8::Handle<Value> data) { v8::Handle<Value> data) {
if (IsDeadCheck("v8::FunctionTemplate::SetCallHandler()")) return; if (IsDeadCheck("v8::FunctionTemplate::SetCallHandler()")) return;
@@ -776,7 +782,7 @@ void FunctionTemplate::SetCallHandler(InvocationCallback callback,
i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE); i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj = i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj); i::Handle<i::CallHandlerInfo>::cast(struct_obj);
obj->set_callback(*FromCData(callback)); SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined(); if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data)); obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_call_code(*obj); Utils::OpenHandle(this)->set_call_code(*obj);
@@ -792,8 +798,8 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::PropertyAttribute attributes) { v8::PropertyAttribute attributes) {
i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo(); i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
ASSERT(getter != NULL); ASSERT(getter != NULL);
obj->set_getter(*FromCData(getter)); SET_FIELD_WRAPPED(obj, set_getter, getter);
obj->set_setter(*FromCData(setter)); SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined(); if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data)); obj->set_data(*Utils::OpenHandle(*data));
obj->set_name(*Utils::OpenHandle(*name)); obj->set_name(*Utils::OpenHandle(*name));
@@ -877,11 +883,13 @@ void FunctionTemplate::SetNamedInstancePropertyHandler(
i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE); i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
i::Handle<i::InterceptorInfo> obj = i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj); i::Handle<i::InterceptorInfo>::cast(struct_obj);
if (getter != 0) obj->set_getter(*FromCData(getter));
if (setter != 0) obj->set_setter(*FromCData(setter)); if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
if (query != 0) obj->set_query(*FromCData(query)); if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
if (remover != 0) obj->set_deleter(*FromCData(remover)); if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator)); if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
if (data.IsEmpty()) data = v8::Undefined(); if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data)); obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_named_property_handler(*obj); Utils::OpenHandle(this)->set_named_property_handler(*obj);
@@ -905,11 +913,13 @@ void FunctionTemplate::SetIndexedInstancePropertyHandler(
i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE); i::Factory::NewStruct(i::INTERCEPTOR_INFO_TYPE);
i::Handle<i::InterceptorInfo> obj = i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj); i::Handle<i::InterceptorInfo>::cast(struct_obj);
if (getter != 0) obj->set_getter(*FromCData(getter));
if (setter != 0) obj->set_setter(*FromCData(setter)); if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
if (query != 0) obj->set_query(*FromCData(query)); if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
if (remover != 0) obj->set_deleter(*FromCData(remover)); if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
if (enumerator != 0) obj->set_enumerator(*FromCData(enumerator)); if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
if (data.IsEmpty()) data = v8::Undefined(); if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data)); obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_indexed_property_handler(*obj); Utils::OpenHandle(this)->set_indexed_property_handler(*obj);
@@ -928,7 +938,7 @@ void FunctionTemplate::SetInstanceCallAsFunctionHandler(
i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE); i::Factory::NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj = i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj); i::Handle<i::CallHandlerInfo>::cast(struct_obj);
obj->set_callback(*FromCData(callback)); SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined(); if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data)); obj->set_data(*Utils::OpenHandle(*data));
Utils::OpenHandle(this)->set_instance_call_handler(*obj); Utils::OpenHandle(this)->set_instance_call_handler(*obj);
@@ -1043,8 +1053,10 @@ void ObjectTemplate::SetAccessCheckCallbacks(
i::Factory::NewStruct(i::ACCESS_CHECK_INFO_TYPE); i::Factory::NewStruct(i::ACCESS_CHECK_INFO_TYPE);
i::Handle<i::AccessCheckInfo> info = i::Handle<i::AccessCheckInfo> info =
i::Handle<i::AccessCheckInfo>::cast(struct_info); i::Handle<i::AccessCheckInfo>::cast(struct_info);
info->set_named_callback(*FromCData(named_callback));
info->set_indexed_callback(*FromCData(indexed_callback)); SET_FIELD_WRAPPED(info, set_named_callback, named_callback);
SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback);
if (data.IsEmpty()) data = v8::Undefined(); if (data.IsEmpty()) data = v8::Undefined();
info->set_data(*Utils::OpenHandle(*data)); info->set_data(*Utils::OpenHandle(*data));
@@ -2646,8 +2658,9 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
return; return;
} }
i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data); i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
self->set_map( i::Handle<i::Map> slow_map =
*i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()))); i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
self->set_map(*slow_map);
self->set_elements(*pixels); self->set_elements(*pixels);
} }
@@ -2701,8 +2714,9 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
} }
i::Handle<i::ExternalArray> array = i::Handle<i::ExternalArray> array =
i::Factory::NewExternalArray(length, array_type, data); i::Factory::NewExternalArray(length, array_type, data);
self->set_map( i::Handle<i::Map> slow_map =
*i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()))); i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
self->set_map(*slow_map);
self->set_elements(*array); self->set_elements(*array);
} }
@@ -4251,6 +4265,11 @@ void Debug::DebugBreak() {
} }
void Debug::CancelDebugBreak() {
i::StackGuard::Continue(i::DEBUGBREAK);
}
void Debug::DebugBreakForCommand(ClientData* data) { void Debug::DebugBreakForCommand(ClientData* data) {
if (!i::V8::IsRunning()) return; if (!i::V8::IsRunning()) return;
i::Debugger::EnqueueDebugCommand(data); i::Debugger::EnqueueDebugCommand(data);
@@ -4433,7 +4452,7 @@ double CpuProfileNode::GetSelfSamplesCount() const {
unsigned CpuProfileNode::GetCallUid() const { unsigned CpuProfileNode::GetCallUid() const {
IsDeadCheck("v8::CpuProfileNode::GetCallUid"); IsDeadCheck("v8::CpuProfileNode::GetCallUid");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->call_uid(); return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
} }

65
deps/v8/src/arm/builtins-arm.cc

@@ -521,7 +521,11 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm, static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function) { bool is_api_function,
bool count_constructions) {
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
// Enter a construct frame. // Enter a construct frame.
__ EnterConstructFrame(); __ EnterConstructFrame();
@@ -530,9 +534,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(r0); // Smi-tagged arguments count. __ push(r0); // Smi-tagged arguments count.
__ push(r1); // Constructor function. __ push(r1); // Constructor function.
// Use r7 for holding undefined which is used in several places below.
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
// Try to allocate the object without transitioning into C code. If any of the // Try to allocate the object without transitioning into C code. If any of the
// preconditions is not met, the code bails out to the runtime call. // preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated; Label rt_call, allocated;
@@ -549,7 +550,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Load the initial map and verify that it is in fact a map. // Load the initial map and verify that it is in fact a map.
// r1: constructor function // r1: constructor function
// r7: undefined value
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask)); __ tst(r2, Operand(kSmiTagMask));
__ b(eq, &rt_call); __ b(eq, &rt_call);
@@ -561,14 +561,35 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// instance type would be JS_FUNCTION_TYPE. // instance type would be JS_FUNCTION_TYPE.
// r1: constructor function // r1: constructor function
// r2: initial map // r2: initial map
// r7: undefined value
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call); __ b(eq, &rt_call);
if (count_constructions) {
Label allocate;
// Decrease generous allocation count.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
MemOperand constructor_count =
FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
__ ldrb(r4, constructor_count);
__ sub(r4, r4, Operand(1), SetCC);
__ strb(r4, constructor_count);
__ b(ne, &allocate);
__ Push(r1, r2);
__ push(r1); // constructor
// The call will replace the stub, so the countdown is only done once.
__ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
__ pop(r2);
__ pop(r1);
__ bind(&allocate);
}
// Now allocate the JSObject on the heap. // Now allocate the JSObject on the heap.
// r1: constructor function // r1: constructor function
// r2: initial map // r2: initial map
// r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS); __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
@@ -578,7 +599,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: initial map // r2: initial map
// r3: object size // r3: object size
// r4: JSObject (not tagged) // r4: JSObject (not tagged)
// r7: undefined value
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4); __ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@@ -588,16 +608,21 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex)); __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Fill all the in-object properties with undefined. // Fill all the in-object properties with the appropriate filler.
// r1: constructor function // r1: constructor function
// r2: initial map // r2: initial map
// r3: object size (in words) // r3: object size (in words)
// r4: JSObject (not tagged) // r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged) // r5: First in-object property of JSObject (not tagged)
// r7: undefined value
__ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
{ Label loop, entry; { Label loop, entry;
if (count_constructions) {
// To allow for truncation.
__ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
} else {
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
}
__ b(&entry); __ b(&entry);
__ bind(&loop); __ bind(&loop);
__ str(r7, MemOperand(r5, kPointerSize, PostIndex)); __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
@@ -617,7 +642,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function // r1: constructor function
// r4: JSObject // r4: JSObject
// r5: start of next object (not tagged) // r5: start of next object (not tagged)
// r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset)); __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields and // The field instance sizes contains both pre-allocated property fields and
// in-object properties. // in-object properties.
@@ -637,7 +661,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array // r3: number of elements in properties array
// r4: JSObject // r4: JSObject
// r5: start of next object // r5: start of next object
// r7: undefined value
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize)); __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
__ AllocateInNewSpace( __ AllocateInNewSpace(
r0, r0,
@@ -652,7 +675,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array // r3: number of elements in properties array
// r4: JSObject // r4: JSObject
// r5: FixedArray (not tagged) // r5: FixedArray (not tagged)
// r7: undefined value
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5); __ mov(r2, r5);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@@ -667,10 +689,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array // r3: number of elements in properties array
// r4: JSObject // r4: JSObject
// r5: FixedArray (not tagged) // r5: FixedArray (not tagged)
// r7: undefined
__ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
{ Label loop, entry; { Label loop, entry;
if (count_constructions) {
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
} else if (FLAG_debug_code) {
__ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
__ cmp(r7, r8);
__ Assert(eq, "Undefined value not loaded.");
}
__ b(&entry); __ b(&entry);
__ bind(&loop); __ bind(&loop);
__ str(r7, MemOperand(r2, kPointerSize, PostIndex)); __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
@@ -822,13 +850,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
} }
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false); Generate_JSConstructStubHelper(masm, false, false);
} }
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, true); Generate_JSConstructStubHelper(masm, true, false);
} }

60
deps/v8/src/arm/codegen-arm.cc

@@ -246,7 +246,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
frame_->AssertIsSpilled(); frame_->AssertIsSpilled();
for (int i = 0; i < scope()->num_parameters(); i++) { for (int i = 0; i < scope()->num_parameters(); i++) {
Variable* par = scope()->parameter(i); Variable* par = scope()->parameter(i);
Slot* slot = par->slot(); Slot* slot = par->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) { if (slot != NULL && slot->type() == Slot::CONTEXT) {
ASSERT(!scope()->is_global_scope()); // No params in global scope. ASSERT(!scope()->is_global_scope()); // No params in global scope.
__ ldr(r1, frame_->ParameterAt(i)); __ ldr(r1, frame_->ParameterAt(i));
@ -270,7 +270,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize ThisFunction reference if present. // Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) { if (scope()->is_function_scope() && scope()->function() != NULL) {
frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex); frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT); StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
} }
// Initialize the function return target after the locals are set // Initialize the function return target after the locals are set
@ -608,24 +608,24 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
frame_->EmitPush(r0); frame_->EmitPush(r0);
} }
Variable* arguments = scope()->arguments()->var(); Variable* arguments = scope()->arguments();
Variable* shadow = scope()->arguments_shadow()->var(); Variable* shadow = scope()->arguments_shadow();
ASSERT(arguments != NULL && arguments->slot() != NULL); ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
ASSERT(shadow != NULL && shadow->slot() != NULL); ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
JumpTarget done; JumpTarget done;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) { if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
// We have to skip storing into the arguments slot if it has // We have to skip storing into the arguments slot if it has
// already been written to. This can happen if the a function // already been written to. This can happen if the a function
// has a local variable named 'arguments'. // has a local variable named 'arguments'.
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF); LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
Register arguments = frame_->PopToRegister(); Register arguments = frame_->PopToRegister();
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(arguments, ip); __ cmp(arguments, ip);
done.Branch(ne); done.Branch(ne);
} }
StoreToSlot(arguments->slot(), NOT_CONST_INIT); StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
StoreToSlot(shadow->slot(), NOT_CONST_INIT); StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
} }
@ -641,10 +641,10 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
Property property(&global, &key, RelocInfo::kNoPosition); Property property(&global, &key, RelocInfo::kNoPosition);
Reference ref(this, &property); Reference ref(this, &property);
ref.GetValue(); ref.GetValue();
} else if (variable != NULL && variable->slot() != NULL) { } else if (variable != NULL && variable->AsSlot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate // For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof. // subexpression of a typeof.
LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF); LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
} else { } else {
// Anything else can be handled normally. // Anything else can be handled normally.
Load(expr); Load(expr);
@ -695,7 +695,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
LoadGlobal(); LoadGlobal();
ref->set_type(Reference::NAMED); ref->set_type(Reference::NAMED);
} else { } else {
ASSERT(var->slot() != NULL); ASSERT(var->AsSlot() != NULL);
ref->set_type(Reference::SLOT); ref->set_type(Reference::SLOT);
} }
} else { } else {
@ -1718,7 +1718,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the // Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here. // expression stack. Avoid allocating the arguments object here.
Load(receiver); Load(receiver);
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF); LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
// At this point the top two stack elements are probably in registers // At this point the top two stack elements are probably in registers
// since they were just loaded. Ensure they are in regs and get the // since they were just loaded. Ensure they are in regs and get the
@ -1950,7 +1950,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration"); Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var(); Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot(); Slot* slot = var->AsSlot();
// If it was not possible to allocate the variable at compile time, // If it was not possible to allocate the variable at compile time,
// we need to "declare" it at runtime to make sure it actually // we need to "declare" it at runtime to make sure it actually
@ -2480,7 +2480,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// the bottom check of the loop condition. // the bottom check of the loop condition.
TypeInfoCodeGenState type_info_scope(this, TypeInfoCodeGenState type_info_scope(this,
node->is_fast_smi_loop() ? node->is_fast_smi_loop() ?
node->loop_variable()->slot() : node->loop_variable()->AsSlot() :
NULL, NULL,
TypeInfo::Smi()); TypeInfo::Smi());
@ -2794,8 +2794,8 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// Store the caught exception in the catch variable. // Store the caught exception in the catch variable.
Variable* catch_var = node->catch_var()->var(); Variable* catch_var = node->catch_var()->var();
ASSERT(catch_var != NULL && catch_var->slot() != NULL); ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
StoreToSlot(catch_var->slot(), NOT_CONST_INIT); StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
// Remove the exception from the stack. // Remove the exception from the stack.
frame_->Drop(); frame_->Drop();
@ -3420,7 +3420,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
frame_->SpillAll(); frame_->SpillAll();
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) { if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots. // Generate fast case for locals that rewrite to slots.
@ -3449,7 +3449,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
// variables. Then load the argument from the arguments // variables. Then load the argument from the arguments
// object using keyed load. // object using keyed load.
__ ldr(r0, __ ldr(r0,
ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
r1, r1,
r2, r2,
slow)); slow));
@ -3735,7 +3735,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
Comment cmnt(masm(), "[ Variable Assignment"); Comment cmnt(masm(), "[ Variable Assignment");
Variable* var = node->target()->AsVariableProxy()->AsVariable(); Variable* var = node->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL); ASSERT(var != NULL);
Slot* slot = var->slot(); Slot* slot = var->AsSlot();
ASSERT(slot != NULL); ASSERT(slot != NULL);
// Evaluate the right-hand side. // Evaluate the right-hand side.
@ -4136,14 +4136,14 @@ void CodeGenerator::VisitCall(Call* node) {
// in generated code. If we succeed, there is no need to perform a // in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system. // context lookup in the runtime system.
JumpTarget done; JumpTarget done;
if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) { if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
ASSERT(var->slot()->type() == Slot::LOOKUP); ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
JumpTarget slow; JumpTarget slow;
// Prepare the stack for the call to // Prepare the stack for the call to
// ResolvePossiblyDirectEvalNoLookup by pushing the loaded // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
// function, the first argument to the eval call and the // function, the first argument to the eval call and the
// receiver. // receiver.
LoadFromGlobalSlotCheckExtensions(var->slot(), LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
NOT_INSIDE_TYPEOF, NOT_INSIDE_TYPEOF,
&slow); &slow);
frame_->EmitPush(r0); frame_->EmitPush(r0);
@ -4225,8 +4225,8 @@ void CodeGenerator::VisitCall(Call* node) {
__ ldr(cp, frame_->Context()); __ ldr(cp, frame_->Context());
frame_->EmitPush(r0); frame_->EmitPush(r0);
} else if (var != NULL && var->slot() != NULL && } else if (var != NULL && var->AsSlot() != NULL &&
var->slot()->type() == Slot::LOOKUP) { var->AsSlot()->type() == Slot::LOOKUP) {
// ---------------------------------- // ----------------------------------
// JavaScript examples: // JavaScript examples:
// //
@ -4244,7 +4244,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Generate fast case for loading functions from slots that // Generate fast case for loading functions from slots that
// correspond to local/global variables or arguments unless they // correspond to local/global variables or arguments unless they
// are shadowed by eval-introduced bindings. // are shadowed by eval-introduced bindings.
EmitDynamicLoadFromSlotFastCase(var->slot(), EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
NOT_INSIDE_TYPEOF, NOT_INSIDE_TYPEOF,
&slow, &slow,
&done); &done);
@ -5928,7 +5928,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0); frame_->EmitPush(r0);
} else if (variable != NULL) { } else if (variable != NULL) {
Slot* slot = variable->slot(); Slot* slot = variable->AsSlot();
if (variable->is_global()) { if (variable->is_global()) {
LoadGlobal(); LoadGlobal();
frame_->EmitPush(Operand(variable->name())); frame_->EmitPush(Operand(variable->name()));
@ -6062,7 +6062,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
bool is_const = (var != NULL && var->mode() == Variable::CONST); bool is_const = (var != NULL && var->mode() == Variable::CONST);
bool is_slot = (var != NULL && var->mode() == Variable::VAR); bool is_slot = (var != NULL && var->mode() == Variable::VAR);
if (!is_const && is_slot && type_info(var->slot()).IsSmi()) { if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
// The type info declares that this variable is always a Smi. That // The type info declares that this variable is always a Smi. That
// means it is a Smi both before and after the increment/decrement. // means it is a Smi both before and after the increment/decrement.
// Lets make use of that to make a very minimal count. // Lets make use of that to make a very minimal count.
@ -7207,7 +7207,7 @@ void Reference::GetValue() {
switch (type_) { switch (type_) {
case SLOT: { case SLOT: {
Comment cmnt(masm, "[ Load from Slot"); Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL); ASSERT(slot != NULL);
DupIfPersist(); DupIfPersist();
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
@ -7251,7 +7251,7 @@ void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
switch (type_) { switch (type_) {
case SLOT: { case SLOT: {
Comment cmnt(masm, "[ Store to Slot"); Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
cgen_->StoreToSlot(slot, init_state); cgen_->StoreToSlot(slot, init_state);
set_unloaded(); set_unloaded();
break; break;

13
deps/v8/src/arm/frames-arm.cc

@@ -37,17 +37,8 @@ namespace v8 {
 namespace internal {

-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute frame type and stack pointer.
-  Address sp = fp + ExitFrameConstants::kSPOffset;
-  // Fill in the state.
-  state->sp = sp;
-  state->fp = fp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  ASSERT(*state->pc_address != NULL);
-  return EXIT;
+Address ExitFrame::ComputeStackPointer(Address fp) {
+  return fp + ExitFrameConstants::kSPOffset;
 }
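
The ARM port can drop all of the state-filling logic here because only the stack-pointer computation is actually architecture-specific; judging from the companion changes to src/frames.cc and src/frames.h in this commit, the rest presumably moved into shared code. A hedged sketch of that split (FillState and its exact signature are assumptions for illustration, not quoted from this diff):

// Sketch: shared frames.cc keeps the old behaviour and delegates only the
// stack-pointer computation to the per-architecture hook above.
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
  if (fp == 0) return NONE;
  Address sp = ComputeStackPointer(fp);  // fp + ExitFrameConstants::kSPOffset
                                         // on ARM
  FillState(fp, sp, state);              // assumed shared helper: sets sp, fp
                                         // and pc_address as the removed ARM
                                         // code did
  return EXIT;
}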

869
deps/v8/src/arm/full-codegen-arm.cc

File diff suppressed because it is too large

20
deps/v8/src/arm/ic-arm.cc

@@ -967,6 +967,14 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
 }

+bool LoadIC::PatchInlinedContextualLoad(Address address,
+                                        Object* map,
+                                        Object* cell) {
+  // TODO(<bug#>): implement this.
+  return false;
+}

 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
   // Find the end of the inlined code for the store if there is an
   // inlined version of the store.
@@ -1236,7 +1244,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   //  -- r1    : receiver
   // -----------------------------------
   Label miss;
-  Label index_out_of_range;

   Register receiver = r1;
   Register index = r0;
@@ -1251,7 +1258,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
-                                          &index_out_of_range,
+                                          &miss,  // When index out of range.
                                           STRING_INDEX_IS_ARRAY_INDEX);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -1259,10 +1266,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   ICRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);

-  __ bind(&index_out_of_range);
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  __ Ret();

   __ bind(&miss);
   GenerateMiss(masm);
 }
@@ -1581,8 +1584,9 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   // Check that the receiver isn't a smi.
   __ BranchOnSmi(r1, &slow);

-  // Check that the key is a smi.
-  __ BranchOnNotSmi(r0, &slow);
+  // Check that the key is an array index, that is Uint32.
+  __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
+  __ b(ne, &slow);

   // Get the map of the receiver.
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
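
The rewritten key check folds two rejections into one tst: in V8's 32-bit smi encoding the tag bit (bit 0) is 0 for every smi, and bit 31 is the sign, so any value with either bit set is not a non-negative smi index. A standalone C++ sketch of the same predicate (the constants mirror the V8 names but are redefined here so the snippet compiles on its own):

#include <cstdint>

const uint32_t kSmiTagMask = 1u;            // bit 0: must be 0 for a smi
const uint32_t kSmiSignMask = 0x80000000u;  // bit 31: must be 0 for index >= 0

// Equivalent of `tst r0, #(kSmiTagMask | kSmiSignMask)` + `b ne, &slow`.
bool IsNonNegativeSmi(uint32_t raw_value) {
  return (raw_value & (kSmiTagMask | kSmiSignMask)) == 0;
}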

12
deps/v8/src/arm/simulator-arm.cc

@@ -294,7 +294,7 @@ void Debugger::Debug() {
         } else if (GetVFPSingleValue(arg1, &svalue)) {
           PrintF("%s: %f \n", arg1, svalue);
         } else if (GetVFPDoubleValue(arg1, &dvalue)) {
-          PrintF("%s: %lf \n", arg1, dvalue);
+          PrintF("%s: %f \n", arg1, dvalue);
         } else {
           PrintF("%s unrecognized\n", arg1);
         }
@@ -349,7 +349,8 @@ void Debugger::Debug() {
         end = cur + words;

         while (cur < end) {
-          PrintF("  0x%08x:  0x%08x %10d\n", cur, *cur, *cur);
+          PrintF("  0x%08x:  0x%08x %10d\n",
+                 reinterpret_cast<intptr_t>(cur), *cur, *cur);
           cur++;
         }
       } else if (strcmp(cmd, "disasm") == 0) {
@@ -382,7 +383,8 @@ void Debugger::Debug() {
         while (cur < end) {
           dasm.InstructionDecode(buffer, cur);
-          PrintF("  0x%08x  %s\n", cur, buffer.start());
+          PrintF("  0x%08x  %s\n",
+                 reinterpret_cast<intptr_t>(cur), buffer.start());
           cur += Instr::kInstrSize;
         }
       } else if (strcmp(cmd, "gdb") == 0) {
@@ -1061,7 +1063,7 @@ uintptr_t Simulator::StackLimit() const {
 // Unsupported instructions use Format to print an error and stop execution.
 void Simulator::Format(Instr* instr, const char* format) {
   PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
-         instr, format);
+         reinterpret_cast<intptr_t>(instr), format);
   UNIMPLEMENTED();
 }
@@ -2650,7 +2652,7 @@ void Simulator::InstructionDecode(Instr* instr) {
     v8::internal::EmbeddedVector<char, 256> buffer;
     dasm.InstructionDecode(buffer,
                            reinterpret_cast<byte*>(instr));
-    PrintF("  0x%08x  %s\n", instr, buffer.start());
+    PrintF("  0x%08x  %s\n", reinterpret_cast<intptr_t>(instr), buffer.start());
   }
   if (instr->ConditionField() == special_condition) {
     DecodeUnconditional(instr);
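
Every hunk in this file fixes the same class of bug: passing a pointer (or, in the %lf case, a double under a specifier that C89 printf does not define) to a mismatched conversion. Casting through intptr_t yields an integer the same width as the pointer on both 32- and 64-bit hosts. A minimal standalone version of the pattern, with the C99 PRIxPTR spelling shown for comparison:

#include <cinttypes>
#include <cstdio>

void PrintCodeAddress(const void* pc) {
  // What the simulator hunks do: cast first, then print as an integer.
  std::printf("0x%08" PRIxPTR "\n", reinterpret_cast<uintptr_t>(pc));
}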

150
deps/v8/src/arm/stub-cache-arm.cc

@@ -266,7 +266,12 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Check we're still in the same context.
+  __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ Move(ip, Top::global());
+  __ cmp(prototype, ip);
+  __ b(ne, miss);
   // Get the global function with the given index.
   JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
@@ -1434,7 +1439,8 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(
     // Check that the maps starting from the prototype haven't changed.
     GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                               Context::STRING_FUNCTION_INDEX,
-                                              r0);
+                                              r0,
+                                              &miss);
     ASSERT(object != holder);
     CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
                     r1, r3, r4, name, &miss);
@@ -1505,7 +1511,8 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
     // Check that the maps starting from the prototype haven't changed.
     GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                               Context::STRING_FUNCTION_INDEX,
-                                              r0);
+                                              r0,
+                                              &miss);
     ASSERT(object != holder);
     CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
                     r1, r3, r4, name, &miss);
@@ -1626,6 +1633,118 @@ Object* CallStubCompiler::CompileStringFromCharCodeCall(
 }

+Object* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                               JSObject* holder,
+                                               JSGlobalPropertyCell* cell,
+                                               JSFunction* function,
+                                               String* name) {
+  // TODO(872): implement this.
+  return Heap::undefined_value();
+}

+Object* CallStubCompiler::CompileMathAbsCall(Object* object,
+                                             JSObject* holder,
+                                             JSGlobalPropertyCell* cell,
+                                             JSFunction* function,
+                                             String* name) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : function name
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+  const int argc = arguments().immediate();
+  // If the object is not a JSObject or we got an unexpected number of
+  // arguments, bail out to the regular call.
+  if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
+  Label miss;
+  GenerateNameCheck(name, &miss);
+  if (cell == NULL) {
+    __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+    STATIC_ASSERT(kSmiTag == 0);
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &miss);
+    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+                    &miss);
+  } else {
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
+    GenerateLoadFunctionFromCell(cell, function, &miss);
+  }
+  // Load the (only) argument into r0.
+  __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
+  // Check if the argument is a smi.
+  Label not_smi;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ BranchOnNotSmi(r0, &not_smi);
+  // Do bitwise not or do nothing depending on the sign of the
+  // argument.
+  __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
+  // Add 1 or do nothing depending on the sign of the argument.
+  __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
+  // If the result is still negative, go to the slow case.
+  // This only happens for the most negative smi.
+  Label slow;
+  __ b(mi, &slow);
+  // Smi case done.
+  __ Drop(argc + 1);
+  __ Ret();
+  // Check if the argument is a heap number and load its exponent and
+  // sign.
+  __ bind(&not_smi);
+  __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, true);
+  __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive,
+  // just return it.
+  Label negative_sign;
+  __ tst(r1, Operand(HeapNumber::kSignMask));
+  __ b(ne, &negative_sign);
+  __ Drop(argc + 1);
+  __ Ret();
+  // If the argument is negative, clear the sign, and return a new
+  // number.
+  __ bind(&negative_sign);
+  __ eor(r1, r1, Operand(HeapNumber::kSignMask));
+  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
+  __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+  __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+  __ Drop(argc + 1);
+  __ Ret();
+  // Tail call the full function. We do not have to patch the receiver
+  // because the function makes no use of it.
+  __ bind(&slow);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
+  __ bind(&miss);
+  // r2: function name.
+  Object* obj = GenerateMissBranch();
+  if (obj->IsFailure()) return obj;
+  // Return the generated code.
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
+}
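
The smi fast path in CompileMathAbsCall is the classic branchless absolute value: shifting the argument right arithmetically by kBitsPerInt - 1 bits yields 0 for a non-negative value and -1 for a negative one, so the eor/sub pair either leaves the value untouched or computes ~x + 1 == -x. A standalone C++ rendering of the trick; the most-negative-value overflow is exactly why the stub sets the condition codes and branches to its slow path on mi:

#include <cstdint>

int32_t BranchlessAbs(int32_t x) {
  int32_t mask = x >> 31;    // arithmetic shift, like ASR #31: 0 or -1
  // For the most negative value this overflows (the stub's SetCC + mi
  // branch catches that case); for every other input it yields |x|.
  return (x ^ mask) - mask;
}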
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
@@ -1705,7 +1824,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
         __ b(hs, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, r0);
+            masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, r4, name, &miss);
       }
@@ -1725,7 +1844,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, r0);
+            masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, r4, name, &miss);
       }
@@ -1748,7 +1867,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, r0);
+            masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
                         r1, r4, name, &miss);
       }
@@ -2067,7 +2186,10 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
                                                  name,
                                                  r1,
                                                  &miss);
-    if (cell->IsFailure()) return cell;
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
   }

   // Return undefined if maps of the full prototype chain are still the
@@ -2117,7 +2239,10 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
+    miss.Unuse();
+    return failure;
+  }

   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2212,11 +2337,11 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
   }

   __ mov(r0, r4);
-  __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1, r1, r3);
   __ Ret();

   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1, r1, r3);
   GenerateLoadMiss(masm(), Code::LOAD_IC);

   // Return the generated code.
@@ -2265,7 +2390,10 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
+    miss.Unuse();
+    return failure;
+  }

   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
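
The early-return paths in this file now call miss.Unuse() before bailing out. A label that has been branched to but never bound is "linked", and the assembler asserts at label destruction that no label dies in that state (NearLabel in assembler.h below checks the same invariant), so returning a failure from a half-compiled stub without unusing the label would trip the debug-mode assert. Sketched against the V8 tree rather than as standalone code:

// Inside a stub compiler (V8-tree sketch, not standalone):
//   Label miss;
//   __ b(eq, &miss);        // miss is now linked: referenced, not yet bound
//   if (failed) {
//     miss.Unuse();         // without this, ~Label() ASSERTs !is_linked()
//     return failure;
//   }
//   __ bind(&miss);         // the success path resolves the reference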

4
deps/v8/src/assembler.cc

@@ -465,7 +465,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
 void RelocInfo::Print() {
   PrintF("%p  %s", pc_, RelocModeName(rmode_));
   if (IsComment(rmode_)) {
-    PrintF("  (%s)", data_);
+    PrintF("  (%s)", reinterpret_cast<char*>(data_));
   } else if (rmode_ == EMBEDDED_OBJECT) {
     PrintF("  (");
     target_object()->ShortPrint();
@@ -479,7 +479,7 @@ void RelocInfo::Print() {
     Code* code = Code::GetCodeFromTargetAddress(target_address());
     PrintF(" (%s)  (%p)", Code::Kind2String(code->kind()), target_address());
   } else if (IsPosition(rmode_)) {
-    PrintF("  (%d)", data());
+    PrintF("  (%" V8_PTR_PREFIX "d)", data());
   }
   PrintF("\n");
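
V8_PTR_PREFIX is a printf length prefix spliced into the format string by adjacent-literal concatenation, so the same PrintF line compiles to "%d" where intptr_t is int-sized and "%ld" (or similar) where it is long-sized. The real definition is in src/globals.h, which this commit also touches; the values below are an assumption for illustration:

#include <cstdint>
#include <cstdio>

// Assumed shape of the macro, not the verbatim V8 definition.
#if UINTPTR_MAX > 0xFFFFFFFFu
#define V8_PTR_PREFIX "l"    // 64-bit: intptr_t prints as long
#else
#define V8_PTR_PREFIX ""     // 32-bit: intptr_t prints as int
#endif

void PrintPosition(intptr_t data) {
  std::printf("  (%" V8_PTR_PREFIX "d)\n", data);
}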

51
deps/v8/src/assembler.h

@@ -91,6 +91,57 @@ class Label BASE_EMBEDDED {
 };

+// -----------------------------------------------------------------------------
+// NearLabels are labels used for short jumps (in Intel jargon).
+// NearLabels should be used if it can be guaranteed that the jump range is
+// within -128 to +127. We already use short jumps when jumping backwards,
+// so using a NearLabel will only have performance impact if used for forward
+// jumps.
+class NearLabel BASE_EMBEDDED {
+ public:
+  NearLabel() { Unuse(); }
+  ~NearLabel() { ASSERT(!is_linked()); }
+
+  void Unuse() {
+    pos_ = -1;
+    unresolved_branches_ = 0;
+#ifdef DEBUG
+    for (int i = 0; i < kMaxUnresolvedBranches; i++) {
+      unresolved_positions_[i] = -1;
+    }
+#endif
+  }
+
+  int pos() {
+    ASSERT(is_bound());
+    return pos_;
+  }
+
+  bool is_bound() { return pos_ >= 0; }
+  bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
+  bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
+
+  void bind_to(int position) {
+    ASSERT(!is_bound());
+    pos_ = position;
+  }
+
+  void link_to(int position) {
+    ASSERT(!is_bound());
+    ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
+    unresolved_positions_[unresolved_branches_++] = position;
+  }
+
+ private:
+  static const int kMaxUnresolvedBranches = 8;
+  int pos_;
+  int unresolved_branches_;
+  int unresolved_positions_[kMaxUnresolvedBranches];
+
+  friend class Assembler;
+};
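
A hedged usage sketch: paired with matching Assembler overloads (this commit's ia32 assembler changes presumably add j(...), jmp(...) and bind(...) variants taking a NearLabel*; the exact names here are an assumption), a forward jump known to stay within the -128..+127 byte range can be emitted as a 2-byte short jump instead of the conservative long form an ordinary unbound Label requires:

// V8-tree sketch (ia32), not standalone:
//   NearLabel done;                  // forward target, provably close
//   __ test(eax, Operand(eax));
//   __ j(positive, &done);           // eligible for the short 2-byte encoding
//   __ mov(eax, Immediate(0));
//   __ bind(&done);                  // binds and back-patches the offsets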

// -----------------------------------------------------------------------------
// Relocation information

10
deps/v8/src/ast.cc

@@ -70,6 +70,16 @@ CountOperation* ExpressionStatement::StatementAsCountOperation() {
 }

+VariableProxy::VariableProxy(Variable* var)
+    : name_(var->name()),
+      var_(NULL),  // Will be set by the call to BindTo.
+      is_this_(var->is_this()),
+      inside_with_(false),
+      is_trivial_(false) {
+  BindTo(var);
+}

 VariableProxy::VariableProxy(Handle<String> name,
                              bool is_this,
                              bool inside_with)
204
deps/v8/src/ast.h

@@ -118,35 +118,38 @@ typedef ZoneList<Handle<String> > ZoneStringList;
 typedef ZoneList<Handle<Object> > ZoneObjectList;

+#define DECLARE_NODE_TYPE(type)                                         \
+  virtual void Accept(AstVisitor* v);                                   \
+  virtual AstNode::Type node_type() const { return AstNode::k##type; }  \
+  virtual type* As##type() { return this; }

 class AstNode: public ZoneObject {
  public:
+#define DECLARE_TYPE_ENUM(type) k##type,
+  enum Type {
+    AST_NODE_LIST(DECLARE_TYPE_ENUM)
+    kInvalid = -1
+  };
+#undef DECLARE_TYPE_ENUM

   virtual ~AstNode() { }
   virtual void Accept(AstVisitor* v) = 0;
+  virtual Type node_type() const { return kInvalid; }

+  // Type testing & conversion functions overridden by concrete subclasses.
+#define DECLARE_NODE_FUNCTIONS(type) \
+  virtual type* As##type() { return NULL; }
+  AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
+#undef DECLARE_NODE_FUNCTIONS

-  // Type testing & conversion.
   virtual Statement* AsStatement() { return NULL; }
-  virtual Block* AsBlock() { return NULL; }
-  virtual ExpressionStatement* AsExpressionStatement() { return NULL; }
-  virtual EmptyStatement* AsEmptyStatement() { return NULL; }
   virtual Expression* AsExpression() { return NULL; }
-  virtual Literal* AsLiteral() { return NULL; }
-  virtual Slot* AsSlot() { return NULL; }
-  virtual VariableProxy* AsVariableProxy() { return NULL; }
-  virtual Property* AsProperty() { return NULL; }
-  virtual Call* AsCall() { return NULL; }
   virtual TargetCollector* AsTargetCollector() { return NULL; }
   virtual BreakableStatement* AsBreakableStatement() { return NULL; }
   virtual IterationStatement* AsIterationStatement() { return NULL; }
-  virtual ForStatement* AsForStatement() { return NULL; }
-  virtual UnaryOperation* AsUnaryOperation() { return NULL; }
-  virtual CountOperation* AsCountOperation() { return NULL; }
-  virtual BinaryOperation* AsBinaryOperation() { return NULL; }
-  virtual Assignment* AsAssignment() { return NULL; }
-  virtual FunctionLiteral* AsFunctionLiteral() { return NULL; }
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
-  virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
-  virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
-  virtual CompareOperation* AsCompareOperation() { return NULL; }
 };
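
With both macros in place, DECLARE_NODE_TYPE(Block) stamps out Accept, a node_type() returning AstNode::kBlock, and an AsBlock() override returning this, while every other node class keeps the NULL-returning AsBlock() that DECLARE_NODE_FUNCTIONS generated in the AstNode base. That turns the As##type() family into a cheap checked downcast:

// Sketch of the intended use (V8-tree code, not standalone):
void MaybeVisitBlock(AstNode* node, AstVisitor* visitor) {
  if (Block* block = node->AsBlock()) {
    // Reached only when node's dynamic type really is Block; all other
    // classes inherit the NULL-returning AsBlock() from AstNode.
    block->Accept(visitor);
  }
}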
@@ -155,7 +158,6 @@ class Statement: public AstNode {
   Statement() : statement_pos_(RelocInfo::kNoPosition) {}

   virtual Statement* AsStatement() { return this; }
-  virtual ReturnStatement* AsReturnStatement() { return NULL; }

   virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
   virtual CountOperation* StatementAsCountOperation() { return NULL; }
@@ -172,18 +174,6 @@ class Statement: public AstNode {

 class Expression: public AstNode {
  public:
-  enum Context {
-    // Not assigned a context yet, or else will not be visited during
-    // code generation.
-    kUninitialized,
-    // Evaluated for its side effects.
-    kEffect,
-    // Evaluated for its value (and side effects).
-    kValue,
-    // Evaluated for control flow (and side effects).
-    kTest
-  };

   Expression() : bitfields_(0) {}

   virtual Expression* AsExpression() { return this; }
@@ -325,9 +315,7 @@ class Block: public BreakableStatement {
  public:
   inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);

-  virtual void Accept(AstVisitor* v);
-  virtual Block* AsBlock() { return this; }
+  DECLARE_NODE_TYPE(Block)

   virtual Assignment* StatementAsSimpleAssignment() {
     if (statements_.length() != 1) return NULL;
@@ -361,7 +349,7 @@ class Declaration: public AstNode {
     ASSERT(fun == NULL || mode == Variable::VAR);
   }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(Declaration)

   VariableProxy* proxy() const { return proxy_; }
   Variable::Mode mode() const { return mode_; }
@@ -402,13 +390,13 @@ class DoWhileStatement: public IterationStatement {
  public:
   explicit inline DoWhileStatement(ZoneStringList* labels);

+  DECLARE_NODE_TYPE(DoWhileStatement)

   void Initialize(Expression* cond, Statement* body) {
     IterationStatement::Initialize(body);
     cond_ = cond;
   }

-  virtual void Accept(AstVisitor* v);

   Expression* cond() const { return cond_; }

   // Position where condition expression starts. We need it to make
@@ -426,13 +414,13 @@ class WhileStatement: public IterationStatement {
  public:
   explicit WhileStatement(ZoneStringList* labels);

+  DECLARE_NODE_TYPE(WhileStatement)

   void Initialize(Expression* cond, Statement* body) {
     IterationStatement::Initialize(body);
     cond_ = cond;
   }

-  virtual void Accept(AstVisitor* v);

   Expression* cond() const { return cond_; }
   bool may_have_function_literal() const {
     return may_have_function_literal_;
@@ -452,7 +440,7 @@ class ForStatement: public IterationStatement {
  public:
   explicit inline ForStatement(ZoneStringList* labels);

-  virtual ForStatement* AsForStatement() { return this; }
+  DECLARE_NODE_TYPE(ForStatement)

   void Initialize(Statement* init,
                   Expression* cond,
@@ -464,8 +452,6 @@ class ForStatement: public IterationStatement {
     next_ = next;
   }

-  virtual void Accept(AstVisitor* v);

   Statement* init() const { return init_; }
   void set_init(Statement* stmt) { init_ = stmt; }
   Expression* cond() const { return cond_; }
@@ -498,14 +484,14 @@ class ForInStatement: public IterationStatement {
  public:
   explicit inline ForInStatement(ZoneStringList* labels);

+  DECLARE_NODE_TYPE(ForInStatement)

   void Initialize(Expression* each, Expression* enumerable, Statement* body) {
     IterationStatement::Initialize(body);
     each_ = each;
     enumerable_ = enumerable;
   }

-  virtual void Accept(AstVisitor* v);

   Expression* each() const { return each_; }
   Expression* enumerable() const { return enumerable_; }
@@ -520,10 +506,7 @@ class ExpressionStatement: public Statement {
   explicit ExpressionStatement(Expression* expression)
       : expression_(expression) { }

-  virtual void Accept(AstVisitor* v);
-
-  // Type testing & conversion.
-  virtual ExpressionStatement* AsExpressionStatement() { return this; }
+  DECLARE_NODE_TYPE(ExpressionStatement)

   virtual Assignment* StatementAsSimpleAssignment();
   virtual CountOperation* StatementAsCountOperation();
@@ -541,7 +524,7 @@ class ContinueStatement: public Statement {
   explicit ContinueStatement(IterationStatement* target)
       : target_(target) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(ContinueStatement)

   IterationStatement* target() const { return target_; }
@@ -555,7 +538,7 @@ class BreakStatement: public Statement {
   explicit BreakStatement(BreakableStatement* target)
       : target_(target) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(BreakStatement)

   BreakableStatement* target() const { return target_; }
@@ -569,10 +552,7 @@ class ReturnStatement: public Statement {
   explicit ReturnStatement(Expression* expression)
       : expression_(expression) { }

-  virtual void Accept(AstVisitor* v);
-
-  // Type testing & conversion.
-  virtual ReturnStatement* AsReturnStatement() { return this; }
+  DECLARE_NODE_TYPE(ReturnStatement)

   Expression* expression() { return expression_; }
@@ -586,7 +566,7 @@ class WithEnterStatement: public Statement {
   explicit WithEnterStatement(Expression* expression, bool is_catch_block)
       : expression_(expression), is_catch_block_(is_catch_block) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(WithEnterStatement)

   Expression* expression() const { return expression_; }
@@ -602,7 +582,7 @@ class WithExitStatement: public Statement {
  public:
   WithExitStatement() { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(WithExitStatement)
 };
@@ -629,13 +609,13 @@ class SwitchStatement: public BreakableStatement {
  public:
   explicit inline SwitchStatement(ZoneStringList* labels);

+  DECLARE_NODE_TYPE(SwitchStatement)

   void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
     tag_ = tag;
     cases_ = cases;
   }

-  virtual void Accept(AstVisitor* v);

   Expression* tag() const { return tag_; }
   ZoneList<CaseClause*>* cases() const { return cases_; }
@@ -659,7 +639,7 @@ class IfStatement: public Statement {
         then_statement_(then_statement),
         else_statement_(else_statement) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(IfStatement)

   bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
   bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
@@ -729,7 +709,7 @@ class TryCatchStatement: public TryStatement {
         catch_block_(catch_block) {
   }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(TryCatchStatement)

   VariableProxy* catch_var() const { return catch_var_; }
   Block* catch_block() const { return catch_block_; }
@@ -746,7 +726,7 @@ class TryFinallyStatement: public TryStatement {
       : TryStatement(try_block),
         finally_block_(finally_block) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(TryFinallyStatement)

   Block* finally_block() const { return finally_block_; }
@@ -757,18 +737,13 @@ class TryFinallyStatement: public TryStatement {

 class DebuggerStatement: public Statement {
  public:
-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(DebuggerStatement)
 };

 class EmptyStatement: public Statement {
  public:
-  EmptyStatement() {}
-
-  virtual void Accept(AstVisitor* v);
-
-  // Type testing & conversion.
-  virtual EmptyStatement* AsEmptyStatement() { return this; }
+  DECLARE_NODE_TYPE(EmptyStatement)
 };
@@ -776,13 +751,11 @@ class Literal: public Expression {
  public:
   explicit Literal(Handle<Object> handle) : handle_(handle) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(Literal)

   virtual bool IsTrivial() { return true; }
   virtual bool IsSmiLiteral() { return handle_->IsSmi(); }

-  // Type testing & conversion.
-  virtual Literal* AsLiteral() { return this; }

   // Check if this literal is identical to the other literal.
   bool IsIdenticalTo(const Literal* other) const {
     return handle_.is_identical_to(other->handle_);
@@ -876,8 +849,7 @@ class ObjectLiteral: public MaterializedLiteral {
         properties_(properties),
         fast_elements_(fast_elements) {}

-  virtual ObjectLiteral* AsObjectLiteral() { return this; }
-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(ObjectLiteral)

   Handle<FixedArray> constant_properties() const {
     return constant_properties_;
@@ -903,7 +875,7 @@ class RegExpLiteral: public MaterializedLiteral {
         pattern_(pattern),
         flags_(flags) {}

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(RegExpLiteral)

   Handle<String> pattern() const { return pattern_; }
   Handle<String> flags() const { return flags_; }
@@ -926,8 +898,7 @@ class ArrayLiteral: public MaterializedLiteral {
         constant_elements_(constant_elements),
         values_(values) {}

-  virtual void Accept(AstVisitor* v);
-  virtual ArrayLiteral* AsArrayLiteral() { return this; }
+  DECLARE_NODE_TYPE(ArrayLiteral)

   Handle<FixedArray> constant_elements() const { return constant_elements_; }
   ZoneList<Expression*>* values() const { return values_; }
@@ -947,7 +918,7 @@ class CatchExtensionObject: public Expression {
       : key_(key), value_(value) {
   }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(CatchExtensionObject)

   Literal* key() const { return key_; }
   VariableProxy* value() const { return value_; }
@@ -960,19 +931,20 @@ class CatchExtensionObject: public Expression {

 class VariableProxy: public Expression {
  public:
-  virtual void Accept(AstVisitor* v);
+  explicit VariableProxy(Variable* var);

+  DECLARE_NODE_TYPE(VariableProxy)

   // Type testing & conversion
   virtual Property* AsProperty() {
     return var_ == NULL ? NULL : var_->AsProperty();
   }

-  virtual VariableProxy* AsVariableProxy() {
-    return this;
-  }

   Variable* AsVariable() {
-    return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
+    if (this == NULL || var_ == NULL) return NULL;
+    Expression* rewrite = var_->rewrite();
+    if (rewrite == NULL || rewrite->AsSlot() != NULL) return var_;
+    return NULL;
   }

   virtual bool IsValidLeftHandSide() {
@@ -1062,10 +1034,7 @@ class Slot: public Expression {
     ASSERT(var != NULL);
   }

-  virtual void Accept(AstVisitor* v);
-
-  // Type testing & conversion
-  virtual Slot* AsSlot() { return this; }
+  DECLARE_NODE_TYPE(Slot)

   bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
@@ -1092,10 +1061,7 @@ class Property: public Expression {
   Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
       : obj_(obj), key_(key), pos_(pos), type_(type) { }

-  virtual void Accept(AstVisitor* v);
-
-  // Type testing & conversion
-  virtual Property* AsProperty() { return this; }
+  DECLARE_NODE_TYPE(Property)

   virtual bool IsValidLeftHandSide() { return true; }
@@ -1124,10 +1090,7 @@ class Call: public Expression {
   Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
       : expression_(expression), arguments_(arguments), pos_(pos) { }

-  virtual void Accept(AstVisitor* v);
-
-  // Type testing and conversion.
-  virtual Call* AsCall() { return this; }
+  DECLARE_NODE_TYPE(Call)

   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1149,7 +1112,7 @@ class CallNew: public Expression {
   CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
       : expression_(expression), arguments_(arguments), pos_(pos) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(CallNew)

   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1173,7 +1136,7 @@ class CallRuntime: public Expression {
               ZoneList<Expression*>* arguments)
       : name_(name), function_(function), arguments_(arguments) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(CallRuntime)

   Handle<String> name() const { return name_; }
   Runtime::Function* function() const { return function_; }
@@ -1194,11 +1157,9 @@ class UnaryOperation: public Expression {
     ASSERT(Token::IsUnaryOp(op));
   }

-  virtual void Accept(AstVisitor* v);
-  virtual bool ResultOverwriteAllowed();
+  DECLARE_NODE_TYPE(UnaryOperation)

-  // Type testing & conversion
-  virtual UnaryOperation* AsUnaryOperation() { return this; }
+  virtual bool ResultOverwriteAllowed();

   Token::Value op() const { return op_; }
   Expression* expression() const { return expression_; }
@@ -1222,11 +1183,9 @@ class BinaryOperation: public Expression {
   // Create the binary operation corresponding to a compound assignment.
   explicit BinaryOperation(Assignment* assignment);

-  virtual void Accept(AstVisitor* v);
-  virtual bool ResultOverwriteAllowed();
+  DECLARE_NODE_TYPE(BinaryOperation)

-  // Type testing & conversion
-  virtual BinaryOperation* AsBinaryOperation() { return this; }
+  virtual bool ResultOverwriteAllowed();

   Token::Value op() const { return op_; }
   Expression* left() const { return left_; }
@@ -1248,12 +1207,12 @@ class IncrementOperation: public Expression {
     ASSERT(Token::IsCountOp(op));
   }

+  DECLARE_NODE_TYPE(IncrementOperation)

   Token::Value op() const { return op_; }
   bool is_increment() { return op_ == Token::INC; }
   Expression* expression() const { return expression_; }

-  virtual void Accept(AstVisitor* v);

  private:
   Token::Value op_;
   Expression* expression_;
@@ -1266,9 +1225,7 @@ class CountOperation: public Expression {
   CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
       : is_prefix_(is_prefix), increment_(increment), pos_(pos) { }

-  virtual void Accept(AstVisitor* v);
-
-  virtual CountOperation* AsCountOperation() { return this; }
+  DECLARE_NODE_TYPE(CountOperation)

   bool is_prefix() const { return is_prefix_; }
   bool is_postfix() const { return !is_prefix_; }
@@ -1301,16 +1258,13 @@ class CompareOperation: public Expression {
     ASSERT(Token::IsCompareOp(op));
   }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(CompareOperation)

   Token::Value op() const { return op_; }
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
   int position() const { return pos_; }

-  // Type testing & conversion
-  virtual CompareOperation* AsCompareOperation() { return this; }

  private:
   Token::Value op_;
   Expression* left_;
@@ -1324,7 +1278,7 @@ class CompareToNull: public Expression {
   CompareToNull(bool is_strict, Expression* expression)
       : is_strict_(is_strict), expression_(expression) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(CompareToNull)

   bool is_strict() const { return is_strict_; }
   Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
@@ -1349,7 +1303,7 @@ class Conditional: public Expression {
         then_expression_position_(then_expression_position),
         else_expression_position_(else_expression_position) { }

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(Conditional)

   Expression* condition() const { return condition_; }
   Expression* then_expression() const { return then_expression_; }
@@ -1375,8 +1329,7 @@ class Assignment: public Expression {
     ASSERT(Token::IsAssignmentOp(op));
   }

-  virtual void Accept(AstVisitor* v);
-  virtual Assignment* AsAssignment() { return this; }
+  DECLARE_NODE_TYPE(Assignment)

   Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
@@ -1413,7 +1366,7 @@ class Throw: public Expression {
   Throw(Expression* exception, int pos)
       : exception_(exception), pos_(pos) {}

-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(Throw)

   Expression* exception() const { return exception_; }
   int position() const { return pos_; }
@@ -1459,10 +1412,7 @@ class FunctionLiteral: public Expression {
 #endif
   }

-  virtual void Accept(AstVisitor* v);
-
-  // Type testing & conversion
-  virtual FunctionLiteral* AsFunctionLiteral() { return this; }
+  DECLARE_NODE_TYPE(FunctionLiteral)

   Handle<String> name() const { return name_; }
   Scope* scope() const { return scope_; }
@@ -1529,12 +1479,12 @@ class SharedFunctionInfoLiteral: public Expression {
       Handle<SharedFunctionInfo> shared_function_info)
       : shared_function_info_(shared_function_info) { }

+  DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)

   Handle<SharedFunctionInfo> shared_function_info() const {
     return shared_function_info_;
   }

-  virtual void Accept(AstVisitor* v);

  private:
   Handle<SharedFunctionInfo> shared_function_info_;
 };
@@ -1542,7 +1492,7 @@ class SharedFunctionInfoLiteral: public Expression {

 class ThisFunction: public Expression {
  public:
-  virtual void Accept(AstVisitor* v);
+  DECLARE_NODE_TYPE(ThisFunction)
 };

57
deps/v8/src/bootstrapper.cc

@@ -1064,8 +1064,11 @@ bool Genesis::InstallNatives() {
   // global object.
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-  SetProperty(builtins, Factory::LookupAsciiSymbol("global"),
-              Handle<Object>(global_context()->global()), attributes);
+  Handle<String> global_symbol = Factory::LookupAsciiSymbol("global");
+  SetProperty(builtins,
+              global_symbol,
+              Handle<Object>(global_context()->global()),
+              attributes);

   // Setup the reference from the global object to the builtins object.
   JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
@@ -1344,33 +1347,41 @@ bool Genesis::InstallNatives() {
 }

-static void InstallCustomCallGenerator(
-    Handle<JSFunction> holder_function,
-    CallStubCompiler::CustomGeneratorOwner owner_flag,
+static Handle<JSObject> ResolveCustomCallGeneratorHolder(
+    Handle<Context> global_context,
+    const char* holder_expr) {
+  Handle<GlobalObject> global(global_context->global());
+  const char* period_pos = strchr(holder_expr, '.');
+  if (period_pos == NULL) {
+    return Handle<JSObject>::cast(
+        GetProperty(global, Factory::LookupAsciiSymbol(holder_expr)));
+  }
+  ASSERT_EQ(".prototype", period_pos);
+  Vector<const char> property(holder_expr,
+                              static_cast<int>(period_pos - holder_expr));
+  Handle<JSFunction> function = Handle<JSFunction>::cast(
+      GetProperty(global, Factory::LookupSymbol(property)));
+  return Handle<JSObject>(JSObject::cast(function->prototype()));
+}
static void InstallCustomCallGenerator(Handle<JSObject> holder,
const char* function_name, const char* function_name,
int id) { int id) {
Handle<JSObject> owner;
if (owner_flag == CallStubCompiler::FUNCTION) {
owner = Handle<JSObject>::cast(holder_function);
} else {
ASSERT(owner_flag == CallStubCompiler::INSTANCE_PROTOTYPE);
owner = Handle<JSObject>(
JSObject::cast(holder_function->instance_prototype()));
}
Handle<String> name = Factory::LookupAsciiSymbol(function_name); Handle<String> name = Factory::LookupAsciiSymbol(function_name);
Handle<JSFunction> function(JSFunction::cast(owner->GetProperty(*name))); Handle<JSFunction> function(JSFunction::cast(holder->GetProperty(*name)));
function->shared()->set_function_data(Smi::FromInt(id)); function->shared()->set_function_data(Smi::FromInt(id));
} }
void Genesis::InstallCustomCallGenerators() { void Genesis::InstallCustomCallGenerators() {
HandleScope scope; HandleScope scope;
#define INSTALL_CALL_GENERATOR(holder_fun, owner_flag, fun_name, name) \ #define INSTALL_CALL_GENERATOR(holder_expr, fun_name, name) \
{ \ { \
Handle<JSFunction> holder(global_context()->holder_fun##_function()); \ Handle<JSObject> holder = ResolveCustomCallGeneratorHolder( \
global_context(), #holder_expr); \
const int id = CallStubCompiler::k##name##CallGenerator; \ const int id = CallStubCompiler::k##name##CallGenerator; \
InstallCustomCallGenerator(holder, CallStubCompiler::owner_flag, \ InstallCustomCallGenerator(holder, #fun_name, id); \
#fun_name, id); \
} }
CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR) CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
#undef INSTALL_CALL_GENERATOR #undef INSTALL_CALL_GENERATOR
@ -1405,8 +1416,14 @@ void Genesis::InstallJSFunctionResultCaches() {
Handle<FixedArray> caches = Factory::NewFixedArray(kNumberOfCaches, TENURED); Handle<FixedArray> caches = Factory::NewFixedArray(kNumberOfCaches, TENURED);
int index = 0; int index = 0;
#define F(size, func) caches->set(index++, CreateCache(size, func));
JSFUNCTION_RESULT_CACHE_LIST(F) #define F(size, func) do { \
FixedArray* cache = CreateCache((size), (func)); \
caches->set(index++, cache); \
} while (false)
JSFUNCTION_RESULT_CACHE_LIST(F);
#undef F #undef F
global_context()->set_jsfunction_result_caches(*caches); global_context()->set_jsfunction_result_caches(*caches);
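
The rewritten INSTALL_CALL_GENERATOR consumes a holder expression such as Math or String.prototype instead of a (function, owner-flag) pair; ResolveCustomCallGeneratorHolder accepts either a bare global name or a name followed by exactly ".prototype". Purely as an illustration of the new shape (the real list lives in stub-cache.h and is not part of this hunk), entries in CUSTOM_CALL_IC_GENERATORS would then look like:

// Illustrative entries only -- not the actual list from stub-cache.h.
#define CUSTOM_CALL_IC_GENERATORS(V)                 \
  V(Array.prototype, push, ArrayPush)                \
  V(String.prototype, charCodeAt, StringCharCodeAt)  \
  V(Math, floor, MathFloor)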

2
deps/v8/src/builtins.h

@@ -65,6 +65,7 @@ enum BuiltinExtraArguments {
 #define BUILTIN_LIST_A(V)                                 \
   V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED)   \
   V(JSConstructCall, BUILTIN, UNINITIALIZED)              \
+  V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED)     \
   V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED)       \
   V(JSConstructStubApi, BUILTIN, UNINITIALIZED)           \
   V(JSEntryTrampoline, BUILTIN, UNINITIALIZED)            \
@@ -249,6 +250,7 @@ class Builtins : public AllStatic {
                                  CFunctionId id,
                                  BuiltinExtraArguments extra_args);
   static void Generate_JSConstructCall(MacroAssembler* masm);
+  static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
   static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
   static void Generate_JSConstructStubApi(MacroAssembler* masm);
   static void Generate_JSEntryTrampoline(MacroAssembler* masm);

4
deps/v8/src/codegen.cc

@@ -289,7 +289,7 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
   for (int i = 0; i < length; i++) {
     Declaration* node = declarations->at(i);
     Variable* var = node->proxy()->var();
-    Slot* slot = var->slot();
+    Slot* slot = var->AsSlot();

     // If it was not possible to allocate the variable at compile
     // time, we need to "declare" it at runtime to make sure it
@@ -310,7 +310,7 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
   for (int j = 0, i = 0; i < length; i++) {
     Declaration* node = declarations->at(i);
     Variable* var = node->proxy()->var();
-    Slot* slot = var->slot();
+    Slot* slot = var->AsSlot();
     if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
       // Skip - already processed.

49
deps/v8/src/compilation-cache.cc

@@ -110,6 +110,9 @@ class CompilationCacheScript : public CompilationSubCache {
   void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);

  private:
+  MUST_USE_RESULT Object* TryTablePut(
+      Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
   // Note: Returns a new hash table if operation results in expansion.
   Handle<CompilationCacheTable> TablePut(
       Handle<String> source, Handle<SharedFunctionInfo> function_info);
@@ -137,6 +140,12 @@ class CompilationCacheEval: public CompilationSubCache {
            Handle<SharedFunctionInfo> function_info);

  private:
+  MUST_USE_RESULT Object* TryTablePut(
+      Handle<String> source,
+      Handle<Context> context,
+      Handle<SharedFunctionInfo> function_info);
+
+
   // Note: Returns a new hash table if operation results in expansion.
   Handle<CompilationCacheTable> TablePut(
       Handle<String> source,
@@ -159,6 +168,10 @@ class CompilationCacheRegExp: public CompilationSubCache {
            JSRegExp::Flags flags,
            Handle<FixedArray> data);
  private:
+  MUST_USE_RESULT Object* TryTablePut(Handle<String> source,
+                                      JSRegExp::Flags flags,
+                                      Handle<FixedArray> data);
+
   // Note: Returns a new hash table if operation results in expansion.
   Handle<CompilationCacheTable> TablePut(Handle<String> source,
                                          JSRegExp::Flags flags,
@@ -320,11 +333,18 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
 }


+Object* CompilationCacheScript::TryTablePut(
+    Handle<String> source,
+    Handle<SharedFunctionInfo> function_info) {
+  Handle<CompilationCacheTable> table = GetFirstTable();
+  return table->Put(*source, *function_info);
+}
+
+
 Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
     Handle<String> source,
     Handle<SharedFunctionInfo> function_info) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *function_info),
-                     CompilationCacheTable);
+  CALL_HEAP_FUNCTION(TryTablePut(source, function_info), CompilationCacheTable);
 }
@@ -366,13 +386,20 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
 }


+Object* CompilationCacheEval::TryTablePut(
+    Handle<String> source,
+    Handle<Context> context,
+    Handle<SharedFunctionInfo> function_info) {
+  Handle<CompilationCacheTable> table = GetFirstTable();
+  return table->PutEval(*source, *context, *function_info);
+}
+
+
 Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
     Handle<String> source,
     Handle<Context> context,
     Handle<SharedFunctionInfo> function_info) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source,
-                                              *context,
-                                              *function_info),
+  CALL_HEAP_FUNCTION(TryTablePut(source, context, function_info),
                      CompilationCacheTable);
 }
@@ -415,12 +442,20 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
 }


+Object* CompilationCacheRegExp::TryTablePut(
+    Handle<String> source,
+    JSRegExp::Flags flags,
+    Handle<FixedArray> data) {
+  Handle<CompilationCacheTable> table = GetFirstTable();
+  return table->PutRegExp(*source, flags, *data);
+}
+
+
 Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
     Handle<String> source,
     JSRegExp::Flags flags,
     Handle<FixedArray> data) {
-  CALL_HEAP_FUNCTION(GetFirstTable()->PutRegExp(*source, flags, *data),
-                     CompilationCacheTable);
+  CALL_HEAP_FUNCTION(TryTablePut(source, flags, data), CompilationCacheTable);
 }
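
The refactoring above applies one pattern three times: the raw, possibly-failing table operation moves into a MUST_USE_RESULT Object* TryTablePut(...), and the handle-returning TablePut reduces to CALL_HEAP_FUNCTION(TryTablePut(...), CompilationCacheTable), which supplies the allocation-failure recovery. A minimal standalone sketch of that shape, with hypothetical names standing in for V8's heap machinery:

// Standalone sketch of the "raw try + retrying wrapper" split used above.
// All names here are hypothetical stand-ins, not V8 API.
#include <cassert>

static const int kFailure = -1;   // stand-in for V8's Failure sentinel
static bool heap_full = true;     // pretend the first attempt fails

static int TryAllocate() {        // analogous to TryTablePut
  return heap_full ? kFailure : 42;
}

static void CollectGarbage() { heap_full = false; }

static int Allocate() {           // analogous to CALL_HEAP_FUNCTION
  int result = TryAllocate();
  if (result == kFailure) {
    CollectGarbage();             // recover...
    result = TryAllocate();       // ...and retry
  }
  assert(result != kFailure);
  return result;
}

int main() { return Allocate() == 42 ? 0 : 1; }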

47
deps/v8/src/compiler.cc

@@ -120,8 +120,9 @@ Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) {
   Handle<Context> context = Handle<Context>::null();
   Handle<Code> code = MakeCode(context, info);
   if (!info->shared_info().is_null()) {
-    info->shared_info()->set_scope_info(
-        *SerializedScopeInfo::Create(info->scope()));
+    Handle<SerializedScopeInfo> scope_info =
+        SerializedScopeInfo::Create(info->scope());
+    info->shared_info()->set_scope_info(*scope_info);
   }
   return code;
 }
@@ -145,9 +146,10 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
   bool is_json = (validate == Compiler::VALIDATE_JSON);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   if (is_eval || is_json) {
-    script->set_compilation_type(
-        is_json ? Smi::FromInt(Script::COMPILATION_TYPE_JSON) :
-                  Smi::FromInt(Script::COMPILATION_TYPE_EVAL));
+    Script::CompilationType compilation_type = is_json
+        ? Script::COMPILATION_TYPE_JSON
+        : Script::COMPILATION_TYPE_EVAL;
+    script->set_compilation_type(Smi::FromInt(compilation_type));
     // For eval scripts add information on the function from which eval was
     // called.
     if (is_eval) {
@@ -170,16 +172,16 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
   ASSERT(is_eval || is_global);

   // Build AST.
+  EagerCompilationInfo info(script, is_eval);
   FunctionLiteral* lit =
       MakeAST(is_global, script, extension, pre_data, is_json);

-  LiveEditFunctionTracker live_edit_tracker(lit);
-
   // Check for parse errors.
   if (lit == NULL) {
     ASSERT(Top::has_pending_exception());
     return Handle<SharedFunctionInfo>::null();
   }
+  info.set_function(lit);

   // Measure how long it takes to do the compilation; only take the
   // rest of the function into account to avoid overlap with the
@@ -190,7 +192,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
   HistogramTimerScope timer(rate);

   // Compile the code.
-  CompilationInfo info(lit, script, is_eval);
+  LiveEditFunctionTracker live_edit_tracker(lit);
   Handle<Code> code = MakeCode(context, &info);

   // Check for stack-overflow exceptions.
@@ -375,20 +377,12 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
   // Compute name, source code and script data.
   Handle<SharedFunctionInfo> shared = info->shared_info();
-  Handle<String> name(String::cast(shared->name()));
-
-  int start_position = shared->start_position();
-  int end_position = shared->end_position();
-  bool is_expression = shared->is_expression();
-  Counters::total_compile_size.Increment(end_position - start_position);
+  int compiled_size = shared->end_position() - shared->start_position();
+  Counters::total_compile_size.Increment(compiled_size);

   // Generate the AST for the lazily compiled function. The AST may be
   // NULL in case of parser stack overflow.
-  FunctionLiteral* lit = MakeLazyAST(info->script(),
-                                     name,
-                                     start_position,
-                                     end_position,
-                                     is_expression);
+  FunctionLiteral* lit = MakeLazyAST(shared);

   // Check for parse errors.
   if (lit == NULL) {
@@ -412,18 +406,20 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
   }

   RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
-                            name,
+                            Handle<String>(String::cast(shared->name())),
                             Handle<String>(shared->inferred_name()),
-                            start_position,
+                            shared->start_position(),
                             info->script(),
                             code);

   // Update the shared function info with the compiled code and the scope info.
   // Please note, that the order of the sharedfunction initialization is
-  // important since set_scope_info might trigger a GC, causing the ASSERT
-  // below to be invalid if the code was flushed. By settting the code
+  // important since SerializedScopeInfo::Create might trigger a GC, causing
+  // the ASSERT below to be invalid if the code was flushed. By setting the code
   // object last we avoid this.
-  shared->set_scope_info(*SerializedScopeInfo::Create(info->scope()));
+  Handle<SerializedScopeInfo> scope_info =
+      SerializedScopeInfo::Create(info->scope());
+  shared->set_scope_info(*scope_info);
   shared->set_code(*code);
   if (!info->closure().is_null()) {
     info->closure()->set_code(*code);
@@ -479,7 +475,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
   // Generate code and return it. The way that the compilation mode
   // is controlled by the command-line flags is described in
   // the static helper function MakeCode.
-  CompilationInfo info(literal, script, false);
+  EagerCompilationInfo info(script, false);
+  info.set_function(literal);

   bool is_run_once = literal->try_full_codegen();
   bool use_full = FLAG_full_compiler && !literal->contains_loops();

155
deps/v8/src/compiler.h

@@ -41,118 +41,109 @@ namespace internal {
 // is constructed based on the resources available at compile-time.
 class CompilationInfo BASE_EMBEDDED {
  public:
-  // Lazy compilation of a JSFunction.
-  CompilationInfo(Handle<JSFunction> closure,
-                  int loop_nesting,
-                  Handle<Object> receiver)
-      : closure_(closure),
-        function_(NULL),
-        is_eval_(false),
-        loop_nesting_(loop_nesting),
-        receiver_(receiver) {
-    Initialize();
-    ASSERT(!closure_.is_null() &&
-           shared_info_.is_null() &&
-           script_.is_null());
-  }
-
-  // Lazy compilation based on SharedFunctionInfo.
-  explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info)
-      : shared_info_(shared_info),
-        function_(NULL),
-        is_eval_(false),
-        loop_nesting_(0) {
-    Initialize();
-    ASSERT(closure_.is_null() &&
-           !shared_info_.is_null() &&
-           script_.is_null());
-  }
-
-  // Eager compilation.
-  CompilationInfo(FunctionLiteral* literal, Handle<Script> script, bool is_eval)
-      : script_(script),
-        function_(literal),
-        is_eval_(is_eval),
-        loop_nesting_(0) {
-    Initialize();
-    ASSERT(closure_.is_null() &&
-           shared_info_.is_null() &&
-           !script_.is_null());
-  }
-
-  // We can only get a JSFunction if we actually have one.
-  Handle<JSFunction> closure() { return closure_; }
-
-  // We can get a SharedFunctionInfo from a JSFunction or if we actually
-  // have one.
-  Handle<SharedFunctionInfo> shared_info() {
-    if (!closure().is_null()) {
-      return Handle<SharedFunctionInfo>(closure()->shared());
-    } else {
-      return shared_info_;
-    }
-  }
-
-  // We can always get a script. Either we have one or we can get a shared
-  // function info.
-  Handle<Script> script() {
-    if (!script_.is_null()) {
-      return script_;
-    } else {
-      ASSERT(shared_info()->script()->IsScript());
-      return Handle<Script>(Script::cast(shared_info()->script()));
-    }
-  }
+  virtual ~CompilationInfo() {}
+
+  // Dispatched behavior.
+  virtual Handle<SharedFunctionInfo> shared_info() const = 0;
+
+  virtual Handle<Script> script() const {
+    return Handle<Script>(Script::cast(shared_info()->script()));
+  }
+
+  virtual Handle<JSFunction> closure() const {
+    return Handle<JSFunction>::null();
+  }
+
+  virtual bool is_eval() const { return false; }
+
+  virtual int loop_nesting() const { return 0; }
+
+  virtual bool has_global_object() const { return false; }
+  virtual GlobalObject* global_object() const { return NULL; }

   // There should always be a function literal, but it may be set after
   // construction (for lazy compilation).
   FunctionLiteral* function() { return function_; }
   void set_function(FunctionLiteral* literal) { function_ = literal; }

-  // Simple accessors.
-  bool is_eval() { return is_eval_; }
-  int loop_nesting() { return loop_nesting_; }
-  bool has_receiver() { return !receiver_.is_null(); }
-  Handle<Object> receiver() { return receiver_; }
-
-  bool has_this_properties() { return has_this_properties_; }
-  void set_has_this_properties(bool flag) { has_this_properties_ = flag; }
-
-  bool has_global_object() {
-    return !closure().is_null() && (closure()->context()->global() != NULL);
-  }
-
-  GlobalObject* global_object() {
-    return has_global_object() ? closure()->context()->global() : NULL;
-  }
-
-  bool has_globals() { return has_globals_; }
-  void set_has_globals(bool flag) { has_globals_ = flag; }
-
   // Derived accessors.
   Scope* scope() { return function()->scope(); }

+ protected:
+  CompilationInfo() : function_(NULL) {}
+
  private:
-  void Initialize() {
-    has_this_properties_ = false;
-    has_globals_ = false;
-  }
-
-  Handle<JSFunction> closure_;
-  Handle<SharedFunctionInfo> shared_info_;
-  Handle<Script> script_;
-
   FunctionLiteral* function_;

-  bool is_eval_;
-  int loop_nesting_;
-
-  Handle<Object> receiver_;
-
-  bool has_this_properties_;
-  bool has_globals_;
-
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
+
+
+class EagerCompilationInfo: public CompilationInfo {
+ public:
+  EagerCompilationInfo(Handle<Script> script, bool is_eval)
+      : script_(script), is_eval_(is_eval) {
+    ASSERT(!script.is_null());
+  }
+
+  // Overridden functions from the base class.
+  virtual Handle<SharedFunctionInfo> shared_info() const {
+    return Handle<SharedFunctionInfo>::null();
+  }
+
+  virtual Handle<Script> script() const { return script_; }
+
+  virtual bool is_eval() const { return is_eval_; }
+
+ private:
+  Handle<Script> script_;
+  bool is_eval_;
+};
+
+
+class LazySharedCompilationInfo: public CompilationInfo {
+ public:
+  explicit LazySharedCompilationInfo(Handle<SharedFunctionInfo> shared_info)
+      : shared_info_(shared_info) {
+    ASSERT(!shared_info.is_null());
+  }
+
+  // Overridden functions from the base class.
+  virtual Handle<SharedFunctionInfo> shared_info() const {
+    return shared_info_;
+  }
+
+ private:
+  Handle<SharedFunctionInfo> shared_info_;
+};
+
+
+class LazyFunctionCompilationInfo: public CompilationInfo {
+ public:
+  LazyFunctionCompilationInfo(Handle<JSFunction> closure,
+                              int loop_nesting)
+      : closure_(closure), loop_nesting_(loop_nesting) {
+    ASSERT(!closure.is_null());
+  }
+
+  // Overridden functions from the base class.
+  virtual Handle<SharedFunctionInfo> shared_info() const {
+    return Handle<SharedFunctionInfo>(closure_->shared());
+  }
+
+  virtual int loop_nesting() const { return loop_nesting_; }
+
+  virtual bool has_global_object() const {
+    return closure_->context()->global() != NULL;
+  }
+
+  virtual GlobalObject* global_object() const {
+    return closure_->context()->global();
+  }
+
+ private:
+  Handle<JSFunction> closure_;
+  int loop_nesting_;
+};
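
With CompilationInfo turned into an abstract base class, each call site now names its compilation mode by picking a subclass, as the compiler.cc hunks earlier already show. A condensed usage sketch, mirroring those call sites:

// Usage shape only, mirroring the compiler.cc hunks above.
EagerCompilationInfo info(script, is_eval);   // eager: script known up front
info.set_function(lit);                       // AST attached after parsing

LazySharedCompilationInfo lazy(shared_info);  // lazy: SharedFunctionInfo only
// Unoverridden queries fall back to base-class defaults, e.g.
// lazy.loop_nesting() == 0 and lazy.closure().is_null().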

5
deps/v8/src/contexts.cc

@@ -90,7 +90,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
   do {
     if (FLAG_trace_contexts) {
-      PrintF(" - looking in context %p", *context);
+      PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
       if (context->IsGlobalContext()) PrintF(" (global context)");
       PrintF("\n");
     }
@@ -110,7 +110,8 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
       if (*attributes != ABSENT) {
         // property found
         if (FLAG_trace_contexts) {
-          PrintF("=> found property in context object %p\n", *extension);
+          PrintF("=> found property in context object %p\n",
+                 reinterpret_cast<void*>(*extension));
         }
         return extension;
       }
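
The casts added here are not cosmetic: in C/C++ varargs, %p is only specified for void*, so passing any other pointer type directly is formally undefined. A minimal self-contained illustration:

#include <cstdio>

int main() {
  int x = 0;
  int* p = &x;
  std::printf("%p\n", static_cast<void*>(p));  // well-defined: %p gets a void*
  return 0;
}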

62
deps/v8/src/conversions.cc

@@ -956,8 +956,9 @@ static char* CreateExponentialRepresentation(char* decimal_rep,

 char* DoubleToExponentialCString(double value, int f) {
+  const int kMaxDigitsAfterPoint = 20;
   // f might be -1 to signal that f was undefined in JavaScript.
-  ASSERT(f >= -1 && f <= 20);
+  ASSERT(f >= -1 && f <= kMaxDigitsAfterPoint);

   bool negative = false;
   if (value < 0) {
@@ -969,29 +970,60 @@ char* DoubleToExponentialCString(double value, int f) {
   int decimal_point;
   int sign;
   char* decimal_rep = NULL;
+  bool used_gay_dtoa = false;
+  // f corresponds to the digits after the point. There is always one digit
+  // before the point. The number of requested_digits equals hence f + 1.
+  // And we have to add one character for the null-terminator.
+  const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1;
+  // Make sure that the buffer is big enough, even if we fall back to the
+  // shortest representation (which happens when f equals -1).
+  ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1);
+  char v8_dtoa_buffer[kV8DtoaBufferCapacity];
+  int decimal_rep_length;
+
   if (f == -1) {
-    decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
-    f = StrLength(decimal_rep) - 1;
+    if (DoubleToAscii(value, DTOA_SHORTEST, 0,
+                      Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+                      &sign, &decimal_rep_length, &decimal_point)) {
+      f = decimal_rep_length - 1;
+      decimal_rep = v8_dtoa_buffer;
+    } else {
+      decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
+      decimal_rep_length = StrLength(decimal_rep);
+      f = decimal_rep_length - 1;
+      used_gay_dtoa = true;
+    }
   } else {
-    decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
+    if (DoubleToAscii(value, DTOA_PRECISION, f + 1,
+                      Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+                      &sign, &decimal_rep_length, &decimal_point)) {
+      decimal_rep = v8_dtoa_buffer;
+    } else {
+      decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
+      decimal_rep_length = StrLength(decimal_rep);
+      used_gay_dtoa = true;
+    }
   }
-  int decimal_rep_length = StrLength(decimal_rep);
   ASSERT(decimal_rep_length > 0);
   ASSERT(decimal_rep_length <= f + 1);
+  USE(decimal_rep_length);

   int exponent = decimal_point - 1;
   char* result =
       CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);

-  freedtoa(decimal_rep);
+  if (used_gay_dtoa) {
+    freedtoa(decimal_rep);
+  }

   return result;
 }

 char* DoubleToPrecisionCString(double value, int p) {
-  ASSERT(p >= 1 && p <= 21);
+  const int kMinimalDigits = 1;
+  const int kMaximalDigits = 21;
+  ASSERT(p >= kMinimalDigits && p <= kMaximalDigits);
+  USE(kMinimalDigits);

   bool negative = false;
   if (value < 0) {
@@ -1002,8 +1034,22 @@ char* DoubleToPrecisionCString(double value, int p) {
   // Find a sufficiently precise decimal representation of n.
   int decimal_point;
   int sign;
-  char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
-  int decimal_rep_length = StrLength(decimal_rep);
+  char* decimal_rep = NULL;
+  bool used_gay_dtoa = false;
+  // Add one for the terminating null character.
+  const int kV8DtoaBufferCapacity = kMaximalDigits + 1;
+  char v8_dtoa_buffer[kV8DtoaBufferCapacity];
+  int decimal_rep_length;
+
+  if (DoubleToAscii(value, DTOA_PRECISION, p,
+                    Vector<char>(v8_dtoa_buffer, kV8DtoaBufferCapacity),
+                    &sign, &decimal_rep_length, &decimal_point)) {
+    decimal_rep = v8_dtoa_buffer;
+  } else {
+    decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
+    decimal_rep_length = StrLength(decimal_rep);
+    used_gay_dtoa = true;
+  }
   ASSERT(decimal_rep_length <= p);

   int exponent = decimal_point - 1;
@@ -1047,7 +1093,9 @@ char* DoubleToPrecisionCString(double value, int p) {
     result = builder.Finalize();
   }

-  freedtoa(decimal_rep);
+  if (used_gay_dtoa) {
+    freedtoa(decimal_rep);
+  }

   return result;
 }
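
The buffer sizing in these hunks is worth spelling out: at most 20 digits after the decimal point, one digit before it, and a terminating NUL give 22 bytes. Restated as a tiny standalone check:

// Restates the capacity arithmetic from the hunks above.
static const int kMaxDigitsAfterPoint = 20;
static const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1;

int main() {
  char v8_dtoa_buffer[kV8DtoaBufferCapacity];
  return sizeof(v8_dtoa_buffer) == 22 ? 0 : 1;  // exit 0 when the math holds
}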

5
deps/v8/src/cpu-profiler-inl.h

@@ -82,14 +82,11 @@ TickSample* ProfilerEventsProcessor::TickSampleEvent() {

 bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
     Logger::LogEventsAndTags tag) {
-  // In browser mode, leave only callbacks and non-native JS entries.
-  // We filter out regular expressions as currently we can't tell
-  // whether they origin from native scripts, so let's not confise people by
-  // showing them weird regexes they didn't wrote.
   return FLAG_prof_browser_mode
       && (tag != Logger::CALLBACK_TAG
           && tag != Logger::FUNCTION_TAG
           && tag != Logger::LAZY_COMPILE_TAG
+          && tag != Logger::REG_EXP_TAG
           && tag != Logger::SCRIPT_TAG);
 }

64
deps/v8/src/cpu-profiler.cc

@@ -32,6 +32,7 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING

 #include "frames-inl.h"
+#include "hashmap.h"
 #include "log-inl.h"

 #include "../include/v8-profiler.h"
@@ -50,7 +51,13 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
       ticks_buffer_(sizeof(TickSampleEventRecord),
                     kTickSamplesBufferChunkSize,
                     kTickSamplesBufferChunksCount),
-      enqueue_order_(0) {
+      enqueue_order_(0),
+      known_functions_(new HashMap(AddressesMatch)) {
+}
+
+
+ProfilerEventsProcessor::~ProfilerEventsProcessor() {
+  delete known_functions_;
 }
@@ -152,16 +159,32 @@ void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
   rec->entry = generator_->NewCodeEntry(security_token_id);
   rec->code_start = start;
   events_buffer_.Enqueue(evt_rec);
+
+  known_functions_->Lookup(alias, AddressHash(alias), true);
 }


 void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
   CodeMoveEvent(from, to);
+
+  if (IsKnownFunction(from)) {
+    known_functions_->Remove(from, AddressHash(from));
+    known_functions_->Lookup(to, AddressHash(to), true);
+  }
 }


 void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
   CodeDeleteEvent(from);
+
+  known_functions_->Remove(from, AddressHash(from));
+}
+
+
+bool ProfilerEventsProcessor::IsKnownFunction(Address start) {
+  HashMap::Entry* entry =
+      known_functions_->Lookup(start, AddressHash(start), false);
+  return entry != NULL;
 }
@@ -403,6 +426,40 @@ void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
 }


+void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function,
+                                              HeapObject* source) {
+  // This function is called from GC iterators (during Scavenge,
+  // MC, and MS), so marking bits can be set on objects. That's
+  // why unchecked accessors are used here.
+
+  // The same function can be reported several times.
+  if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
+      || singleton_->processor_->IsKnownFunction(function->address())) return;
+
+  int security_token_id = TokenEnumerator::kNoSecurityToken;
+  // In debug mode, assertions may fail for contexts,
+  // and we can live without security tokens in debug mode.
+#ifndef DEBUG
+  if (function->unchecked_context()->IsContext()) {
+    security_token_id = singleton_->token_enumerator_->GetTokenId(
+        function->context()->global_context()->security_token());
+  }
+  // Security token may not be moved yet.
+  if (security_token_id == TokenEnumerator::kNoSecurityToken) {
+    JSFunction* old_function = reinterpret_cast<JSFunction*>(source);
+    if (old_function->unchecked_context()->IsContext()) {
+      security_token_id = singleton_->token_enumerator_->GetTokenId(
+          old_function->context()->global_context()->security_token());
+    }
+  }
+#endif
+  singleton_->processor_->FunctionCreateEvent(
+      function->address(),
+      function->unchecked_code()->address(),
+      security_token_id);
+}
+
+
 void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
   singleton_->processor_->FunctionMoveEvent(from, to);
 }
@@ -473,7 +530,12 @@ void CpuProfiler::StartProcessorIfNotStarted() {
     processor_->Start();
     // Enumerate stuff we already have in the heap.
     if (Heap::HasBeenSetup()) {
-      Logger::LogCodeObjects();
+      if (!FLAG_prof_browser_mode) {
+        bool saved_log_code_flag = FLAG_log_code;
+        FLAG_log_code = true;
+        Logger::LogCodeObjects();
+        FLAG_log_code = saved_log_code_flag;
+      }
       Logger::LogCompiledFunctions();
       Logger::LogFunctionObjects();
       Logger::LogAccessorCallbacks();
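
The new known_functions_ map is keyed by code address only (its values are unused), i.e. it acts as a set that deduplicates create events and follows functions across moves. A standalone analog of that bookkeeping using the standard library instead of V8's internal HashMap:

// Standalone analog of the known_functions_ logic above (std:: instead of
// V8's HashMap; the Address alias here is a stand-in type).
#include <cstdint>
#include <unordered_set>

using Address = std::uintptr_t;

struct KnownFunctions {
  std::unordered_set<Address> set;
  void Create(Address a) { set.insert(a); }              // FunctionCreateEvent
  void Move(Address from, Address to) {                  // FunctionMoveEvent
    if (set.erase(from)) set.insert(to);                 // only if known
  }
  void Delete(Address a) { set.erase(a); }               // FunctionDeleteEvent
  bool IsKnown(Address a) const { return set.count(a) != 0; }
};

int main() {
  KnownFunctions kf;
  kf.Create(0x1000);
  kf.Move(0x1000, 0x2000);
  return kf.IsKnown(0x2000) && !kf.IsKnown(0x1000) ? 0 : 1;
}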

18
deps/v8/src/cpu-profiler.h

@@ -41,6 +41,7 @@ class CodeEntry;
 class CodeMap;
 class CpuProfile;
 class CpuProfilesCollection;
+class HashMap;
 class ProfileGenerator;
 class TokenEnumerator;
@@ -132,7 +133,7 @@ class TickSampleEventRecord BASE_EMBEDDED {
 class ProfilerEventsProcessor : public Thread {
  public:
   explicit ProfilerEventsProcessor(ProfileGenerator* generator);
-  virtual ~ProfilerEventsProcessor() { }
+  virtual ~ProfilerEventsProcessor();

   // Thread control.
   virtual void Run();
@@ -163,6 +164,7 @@ class ProfilerEventsProcessor : public Thread {
                            Address start, unsigned size);
   // Puts current stack into tick sample events buffer.
   void AddCurrentStack();
+  bool IsKnownFunction(Address start);

   // Tick sample events are filled directly in the buffer of the circular
   // queue (because the structure is of fixed width, but usually not all
@@ -183,6 +185,13 @@ class ProfilerEventsProcessor : public Thread {
   bool ProcessTicks(unsigned dequeue_order);

   INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
+  INLINE(static bool AddressesMatch(void* key1, void* key2)) {
+    return key1 == key2;
+  }
+  INLINE(static uint32_t AddressHash(Address addr)) {
+    return ComputeIntegerHash(
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
+  }

   ProfileGenerator* generator_;
   bool running_;
@@ -190,6 +199,9 @@ class ProfilerEventsProcessor : public Thread {
   SamplingCircularQueue ticks_buffer_;
   UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
   unsigned enqueue_order_;
+
+  // Used from the VM thread.
+  HashMap* known_functions_;
 };

 } }  // namespace v8::internal
@@ -242,6 +254,10 @@ class CpuProfiler {
   static void CodeMoveEvent(Address from, Address to);
   static void CodeDeleteEvent(Address from);
   static void FunctionCreateEvent(JSFunction* function);
+  // Reports function creation in case we had missed it (e.g.
+  // if it was created from compiled code).
+  static void FunctionCreateEventFromMove(JSFunction* function,
+                                          HeapObject* source);
   static void FunctionMoveEvent(Address from, Address to);
   static void FunctionDeleteEvent(Address from);
   static void GetterCallbackEvent(String* name, Address entry_point);

4
deps/v8/src/data-flow.cc

@@ -42,7 +42,7 @@ void BitVector::Print() {
     if (Contains(i)) {
       if (!first) PrintF(",");
       first = false;
-      PrintF("%d");
+      PrintF("%d", i);
     }
   }
   PrintF("}");
@@ -125,7 +125,7 @@ Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
 int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
   ASSERT(var != NULL);
   ASSERT(var->IsStackAllocated());
-  Slot* slot = var->slot();
+  Slot* slot = var->AsSlot();
   if (slot->type() == Slot::PARAMETER) {
     return slot->index();
   } else {
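
The first hunk here fixes a real bug, not a style issue: PrintF("%d") consumes a vararg that was never passed, which is undefined behavior. For contrast:

#include <cstdio>

int main() {
  int i = 7;
  // std::printf("%d\n");  // undefined: "%d" reads a missing argument
  std::printf("%d\n", i);  // the corrected form, as in the hunk above
  return 0;
}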

34
deps/v8/src/debug-debugger.js

@@ -45,7 +45,7 @@ Debug.DebugEvent = { Break: 1,
                      ScriptCollected: 6 };

 // Types of exceptions that can be broken upon.
-Debug.ExceptionBreak = { All : 0,
+Debug.ExceptionBreak = { Caught : 0,
                          Uncaught: 1 };

 // The different types of steps.
@@ -87,7 +87,27 @@ var debugger_flags = {
       this.value = !!value;
       %SetDisableBreak(!this.value);
     }
-  }
+  },
+  breakOnCaughtException: {
+    getValue: function() { return Debug.isBreakOnException(); },
+    setValue: function(value) {
+      if (value) {
+        Debug.setBreakOnException();
+      } else {
+        Debug.clearBreakOnException();
+      }
+    }
+  },
+  breakOnUncaughtException: {
+    getValue: function() { return Debug.isBreakOnUncaughtException(); },
+    setValue: function(value) {
+      if (value) {
+        Debug.setBreakOnUncaughtException();
+      } else {
+        Debug.clearBreakOnUncaughtException();
+      }
+    }
+  },
 };
@@ -781,11 +801,15 @@ Debug.clearStepping = function() {
 }

 Debug.setBreakOnException = function() {
-  return %ChangeBreakOnException(Debug.ExceptionBreak.All, true);
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
 };

 Debug.clearBreakOnException = function() {
-  return %ChangeBreakOnException(Debug.ExceptionBreak.All, false);
+  return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
+};
+
+Debug.isBreakOnException = function() {
+  return !!%IsBreakOnException(Debug.ExceptionBreak.Caught);
 };

 Debug.setBreakOnUncaughtException = function() {
@@ -796,6 +820,10 @@ Debug.clearBreakOnUncaughtException = function() {
   return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
 };

+Debug.isBreakOnUncaughtException = function() {
+  return !!%IsBreakOnException(Debug.ExceptionBreak.Uncaught);
+};
+
 Debug.showBreakPoints = function(f, full) {
   if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
   var source = full ? this.scriptSource(f) : this.source(f);

17
deps/v8/src/debug.cc

@@ -1034,10 +1034,12 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
   if (!break_point_object->IsJSObject()) return true;

   // Get the function CheckBreakPoint (defined in debug.js).
+  Handle<String> is_break_point_triggered_symbol =
+      Factory::LookupAsciiSymbol("IsBreakPointTriggered");
   Handle<JSFunction> check_break_point =
     Handle<JSFunction>(JSFunction::cast(
       debug_context()->global()->GetProperty(
-          *Factory::LookupAsciiSymbol("IsBreakPointTriggered"))));
+          *is_break_point_triggered_symbol)));

   // Get the break id as an object.
   Handle<Object> break_id = Factory::NewNumberFromInt(Debug::break_id());
@@ -1200,6 +1202,15 @@ void Debug::ChangeBreakOnException(ExceptionBreakType type, bool enable) {
 }


+bool Debug::IsBreakOnException(ExceptionBreakType type) {
+  if (type == BreakUncaughtException) {
+    return break_on_uncaught_exception_;
+  } else {
+    return break_on_exception_;
+  }
+}
+
+
 void Debug::PrepareStep(StepAction step_action, int step_count) {
   HandleScope scope;
   ASSERT(Debug::InDebugger());
@@ -2167,9 +2178,11 @@ void Debugger::OnAfterCompile(Handle<Script> script,
   // script. Make sure that these break points are set.

   // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
+  Handle<String> update_script_break_points_symbol =
+      Factory::LookupAsciiSymbol("UpdateScriptBreakPoints");
   Handle<Object> update_script_break_points =
       Handle<Object>(Debug::debug_context()->global()->GetProperty(
-          *Factory::LookupAsciiSymbol("UpdateScriptBreakPoints")));
+          *update_script_break_points_symbol));
   if (!update_script_break_points->IsJSFunction()) {
     return;
   }
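
Hoisting the Factory::LookupAsciiSymbol(...) calls out of the larger expressions (here and in the bootstrapper.cc "global" hunk earlier) is most likely a GC-safety cleanup: symbol lookup can allocate, and C++ leaves evaluation order within a full expression unspecified, so a raw pointer computed in the same expression may be stale by the time it is used. Holding the symbol in a Handle<String> first forces the allocation to happen before any raw-pointer work. A self-contained illustration of the unspecified-order half of that argument:

#include <cstdio>

static int order = 0;
static int g() { std::printf("g ran %d\n", ++order); return 0; }
static int h() { std::printf("h ran %d\n", ++order); return 0; }
static void f(int, int) {}

int main() {
  f(g(), h());  // whether g or h runs first is up to the compiler
  return 0;
}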

1
deps/v8/src/debug.h

@@ -236,6 +236,7 @@ class Debug {
   static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
   static void FloodHandlerWithOneShot();
   static void ChangeBreakOnException(ExceptionBreakType type, bool enable);
+  static bool IsBreakOnException(ExceptionBreakType type);
   static void PrepareStep(StepAction step_action, int step_count);
   static void ClearStepping();
   static bool StepNextContinue(BreakLocationIterator* break_location_iterator,

5
deps/v8/src/disassembler.cc

@@ -44,7 +44,10 @@ namespace internal {
 void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
   for (byte* pc = begin; pc < end; pc++) {
     if (f == NULL) {
-      PrintF("%" V8PRIxPTR "  %4" V8PRIdPTR "  %02x\n", pc, pc - begin, *pc);
+      PrintF("%" V8PRIxPTR "  %4" V8PRIdPTR "  %02x\n",
+             reinterpret_cast<intptr_t>(pc),
+             pc - begin,
+             *pc);
     } else {
       fprintf(f, "%" V8PRIxPTR "  %4" V8PRIdPTR "  %02x\n",
               reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);

7
deps/v8/src/dtoa.cc

@@ -65,11 +65,12 @@ bool DoubleToAscii(double v, DtoaMode mode, int requested_digits,

   switch (mode) {
     case DTOA_SHORTEST:
-      return FastDtoa(v, buffer, length, point);
+      return FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, length, point);
     case DTOA_FIXED:
       return FastFixedDtoa(v, requested_digits, buffer, length, point);
-    default:
-      break;
+    case DTOA_PRECISION:
+      return FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
+                      buffer, length, point);
   }
   return false;
 }
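
After this change DoubleToAscii has a fast path for all three modes; the conversions.cc callers shown earlier treat a false return as "fall back to Gay's dtoa". A hedged usage sketch against the signature visible above (shape only, not a standalone program, since it needs V8's Vector type):

// Usage shape only; buffer sizing as in the conversions.cc hunks above.
char store[32];
Vector<char> buffer(store, sizeof(store));
int sign, length, point;
if (DoubleToAscii(3.14159265, DTOA_PRECISION, 6, buffer,
                  &sign, &length, &point)) {
  // buffer holds the digits "314159" and point == 1,
  // i.e. value ~= 0.314159 * 10^1.
} else {
  // Precision could not be guaranteed; fall back to Gay's dtoa.
}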

323
deps/v8/src/fast-dtoa.cc

@@ -42,8 +42,8 @@ namespace internal {
 //
 // A different range might be chosen on a different platform, to optimize digit
 // generation, but a smaller range requires more powers of ten to be cached.
-static const int minimal_target_exponent = -60;
-static const int maximal_target_exponent = -32;
+static const int kMinimalTargetExponent = -60;
+static const int kMaximalTargetExponent = -32;
@@ -61,7 +61,7 @@ static const int maximal_target_exponent = -32;
 // Output: returns true if the buffer is guaranteed to contain the closest
 //    representable number to the input.
 // Modifies the generated digits in the buffer to approach (round towards) w.
-bool RoundWeed(Vector<char> buffer,
+static bool RoundWeed(Vector<char> buffer,
                int length,
                uint64_t distance_too_high_w,
                uint64_t unsafe_interval,
@@ -75,7 +75,7 @@ static bool RoundWeed(Vector<char> buffer,
   // Note: w_low < w < w_high
   //
   // The real w (* unit) must lie somewhere inside the interval
-  // ]w_low; w_low[ (often written as "(w_low; w_low)")
+  // ]w_low; w_high[ (often written as "(w_low; w_high)")

   // Basically the buffer currently contains a number in the unsafe interval
   // ]too_low; too_high[ with too_low < w < too_high
@@ -122,10 +122,10 @@ static bool RoundWeed(Vector<char> buffer,
   // inside the safe interval then we simply do not know and bail out (returning
   // false).
   //
-  // Similarly we have to take into account the imprecision of 'w' when rounding
-  // the buffer. If we have two potential representations we need to make sure
-  // that the chosen one is closer to w_low and w_high since v can be anywhere
-  // between them.
+  // Similarly we have to take into account the imprecision of 'w' when finding
+  // the closest representation of 'w'. If we have two potential
+  // representations, and one is closer to both w_low and w_high, then we know
+  // it is closer to the actual value v.
   //
   // By generating the digits of too_high we got the largest (closest to
   // too_high) buffer that is still in the unsafe interval. In the case where
@@ -139,6 +139,9 @@ static bool RoundWeed(Vector<char> buffer,
   //      (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
   // Instead of using the buffer directly we use its distance to too_high.
   // Conceptually rest ~= too_high - buffer
+  // We need to do the following tests in this order to avoid over- and
+  // underflows.
+  ASSERT(rest <= unsafe_interval);
   while (rest < small_distance &&  // Negated condition 1
          unsafe_interval - rest >= ten_kappa &&  // Negated condition 2
          (rest + ten_kappa < small_distance ||  // buffer{-1} > w_high
@@ -166,6 +169,62 @@ static bool RoundWeed(Vector<char> buffer,
 }


+// Rounds the buffer upwards if the result is closer to v by possibly adding
+// 1 to the buffer. If the precision of the calculation is not sufficient to
+// round correctly, return false.
+// The rounding might shift the whole buffer in which case the kappa is
+// adjusted. For example "99", kappa = 3 might become "10", kappa = 4.
+//
+// If 2*rest > ten_kappa then the buffer needs to be round up.
+// rest can have an error of +/- 1 unit. This function accounts for the
+// imprecision and returns false, if the rounding direction cannot be
+// unambiguously determined.
+//
+// Precondition: rest < ten_kappa.
+static bool RoundWeedCounted(Vector<char> buffer,
+                             int length,
+                             uint64_t rest,
+                             uint64_t ten_kappa,
+                             uint64_t unit,
+                             int* kappa) {
+  ASSERT(rest < ten_kappa);
+  // The following tests are done in a specific order to avoid overflows. They
+  // will work correctly with any uint64 values of rest < ten_kappa and unit.
+  //
+  // If the unit is too big, then we don't know which way to round. For example
+  // a unit of 50 means that the real number lies within rest +/- 50. If
+  // 10^kappa == 40 then there is no way to tell which way to round.
+  if (unit >= ten_kappa) return false;
+  // Even if unit is just half the size of 10^kappa we are already completely
+  // lost. (And after the previous test we know that the expression will not
+  // over/underflow.)
+  if (ten_kappa - unit <= unit) return false;
+  // If 2 * (rest + unit) <= 10^kappa we can safely round down.
+  if ((ten_kappa - rest > rest) && (ten_kappa - 2 * rest >= 2 * unit)) {
+    return true;
+  }
+  // If 2 * (rest - unit) >= 10^kappa, then we can safely round up.
+  if ((rest > unit) && (ten_kappa - (rest - unit) <= (rest - unit))) {
+    // Increment the last digit recursively until we find a non '9' digit.
+    buffer[length - 1]++;
+    for (int i = length - 1; i > 0; --i) {
+      if (buffer[i] != '0' + 10) break;
+      buffer[i] = '0';
+      buffer[i - 1]++;
+    }
+    // If the first digit is now '0'+ 10 we had a buffer with all '9's. With the
+    // exception of the first digit all digits are now '0'. Simply switch the
+    // first digit to '1' and adjust the kappa. Example: "99" becomes "10" and
+    // the power (the kappa) is increased.
+    if (buffer[0] == '0' + 10) {
+      buffer[0] = '1';
+      (*kappa) += 1;
+    }
+    return true;
+  }
+  return false;
+}
+
+
 static const uint32_t kTen4 = 10000;
 static const uint32_t kTen5 = 100000;
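
The trickiest part of the new RoundWeedCounted is the carry loop: rounding up can overflow every digit, in which case the buffer shrinks back to a leading '1' and kappa grows. A standalone check of exactly that path, copied from the loop quoted above:

// Standalone check of the carry handling above: rounding "99" up yields
// "10" with kappa increased by one.
#include <cassert>

int main() {
  char buffer[] = {'9', '9'};
  int length = 2, kappa = 3;
  buffer[length - 1]++;                  // last digit overflows past '9'
  for (int i = length - 1; i > 0; --i) {
    if (buffer[i] != '0' + 10) break;
    buffer[i] = '0';
    buffer[i - 1]++;
  }
  if (buffer[0] == '0' + 10) {           // every digit was '9'
    buffer[0] = '1';
    kappa += 1;
  }
  assert(buffer[0] == '1' && buffer[1] == '0' && kappa == 4);
  return 0;
}
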
@ -178,7 +237,7 @@ static const uint32_t kTen9 = 1000000000;
// number. We furthermore receive the maximum number of bits 'number' has. // number. We furthermore receive the maximum number of bits 'number' has.
// If number_bits == 0 then 0^-1 is returned // If number_bits == 0 then 0^-1 is returned
// The number of bits must be <= 32. // The number of bits must be <= 32.
// Precondition: (1 << number_bits) <= number < (1 << (number_bits + 1)). // Precondition: number < (1 << (number_bits + 1)).
static void BiggestPowerTen(uint32_t number, static void BiggestPowerTen(uint32_t number,
int number_bits, int number_bits,
uint32_t* power, uint32_t* power,
@ -281,18 +340,18 @@ static void BiggestPowerTen(uint32_t number,
// Generates the digits of input number w. // Generates the digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an // w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by minimal_target_exponent and // exponent. Its exponent is bounded by kMinimalTargetExponent and
// maximal_target_exponent. // kMaximalTargetExponent.
// Hence -60 <= w.e() <= -32. // Hence -60 <= w.e() <= -32.
// //
// Returns false if it fails, in which case the generated digits in the buffer // Returns false if it fails, in which case the generated digits in the buffer
// should not be used. // should not be used.
// Preconditions: // Preconditions:
// * low, w and high are correct up to 1 ulp (unit in the last place). That // * low, w and high are correct up to 1 ulp (unit in the last place). That
// is, their error must be less that a unit of their last digits. // is, their error must be less than a unit of their last digits.
// * low.e() == w.e() == high.e() // * low.e() == w.e() == high.e()
// * low < w < high, and taking into account their error: low~ <= high~ // * low < w < high, and taking into account their error: low~ <= high~
// * minimal_target_exponent <= w.e() <= maximal_target_exponent // * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
// Postconditions: returns false if procedure fails. // Postconditions: returns false if procedure fails.
// otherwise: // otherwise:
// * buffer is not null-terminated, but len contains the number of digits. // * buffer is not null-terminated, but len contains the number of digits.
@ -321,7 +380,7 @@ static void BiggestPowerTen(uint32_t number,
// represent 'w' we can stop. Everything inside the interval low - high // represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's // represents w. However we have to pay attention to low, high and w's
// imprecision. // imprecision.
bool DigitGen(DiyFp low, static bool DigitGen(DiyFp low,
DiyFp w, DiyFp w,
DiyFp high, DiyFp high,
Vector<char> buffer, Vector<char> buffer,
@ -329,7 +388,7 @@ bool DigitGen(DiyFp low,
int* kappa) { int* kappa) {
ASSERT(low.e() == w.e() && w.e() == high.e()); ASSERT(low.e() == w.e() && w.e() == high.e());
ASSERT(low.f() + 1 <= high.f() - 1); ASSERT(low.f() + 1 <= high.f() - 1);
ASSERT(minimal_target_exponent <= w.e() && w.e() <= maximal_target_exponent); ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
// low, w and high are imprecise, but by less than one ulp (unit in the last // low, w and high are imprecise, but by less than one ulp (unit in the last
// place). // place).
// If we remove (resp. add) 1 ulp from low (resp. high) we are certain that // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
@ -359,23 +418,23 @@ bool DigitGen(DiyFp low,
uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e()); uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
// Modulo by one is an and. // Modulo by one is an and.
uint64_t fractionals = too_high.f() & (one.f() - 1); uint64_t fractionals = too_high.f() & (one.f() - 1);
uint32_t divider; uint32_t divisor;
int divider_exponent; int divisor_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()), BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
&divider, &divider_exponent); &divisor, &divisor_exponent);
*kappa = divider_exponent + 1; *kappa = divisor_exponent + 1;
*length = 0; *length = 0;
// Loop invariant: buffer = too_high / 10^kappa (integer division) // Loop invariant: buffer = too_high / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized // The invariant holds for the first iteration: kappa has been initialized
// with the divider exponent + 1. And the divider is the biggest power of ten // with the divisor exponent + 1. And the divisor is the biggest power of ten
// that is smaller than integrals. // that is smaller than integrals.
while (*kappa > 0) { while (*kappa > 0) {
int digit = integrals / divider; int digit = integrals / divisor;
buffer[*length] = '0' + digit; buffer[*length] = '0' + digit;
(*length)++; (*length)++;
integrals %= divider; integrals %= divisor;
(*kappa)--; (*kappa)--;
// Note that kappa now equals the exponent of the divider and that the // Note that kappa now equals the exponent of the divisor and that the
// invariant thus holds again. // invariant thus holds again.
uint64_t rest = uint64_t rest =
(static_cast<uint64_t>(integrals) << -one.e()) + fractionals; (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
@ -386,32 +445,24 @@ bool DigitGen(DiyFp low,
// that lies within the unsafe interval. // that lies within the unsafe interval.
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(), return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
unsafe_interval.f(), rest, unsafe_interval.f(), rest,
static_cast<uint64_t>(divider) << -one.e(), unit); static_cast<uint64_t>(divisor) << -one.e(), unit);
} }
divider /= 10; divisor /= 10;
} }
// The integrals have been generated. We are at the point of the decimal
// separator. In the following loop we simply multiply the remaining digits by
// 10 and divide by one. We just need to pay attention to multiply associated
// data (like the interval or 'unit'), too.
- // Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
- // increase its (imaginary) exponent. At the same time we decrease the
- // divider's (one's) exponent and shift its significand.
- // Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
- //   fractionals.f *= 10;
- //   fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
- //   one.f >>= 1; one.e++;                 // value remains unchanged.
- // and we have again fractionals.e == one.e which allows us to divide
- //   fractionals.f() by one.f()
- // We simply combine the *= 10 and the >>= 1.
+ // Note that the multiplication by 10 does not overflow, because w.e >= -60
+ // and thus one.e >= -60.
+ ASSERT(one.e() >= -60);
+ ASSERT(fractionals < one.f());
+ ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
while (true) {
-   fractionals *= 5;
-   unit *= 5;
-   unsafe_interval.set_f(unsafe_interval.f() * 5);
-   unsafe_interval.set_e(unsafe_interval.e() + 1);  // Will be optimized out.
-   one.set_f(one.f() >> 1);
-   one.set_e(one.e() + 1);
+   fractionals *= 10;
+   unit *= 10;
+   unsafe_interval.set_f(unsafe_interval.f() * 10);
    // Integer division by one.
    int digit = static_cast<int>(fractionals >> -one.e());
    buffer[*length] = '0' + digit;
@@ -426,6 +477,113 @@ bool DigitGen(DiyFp low,
}
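To make the fixed-point digit extraction above concrete, here is a minimal standalone sketch (illustrative values, not V8 code): the significand is read against one = 2^-e, so the integer part of fractionals * 10 falls out of a shift, and the remainder is kept with a mask.

#include <cstdint>
#include <cstdio>

int main() {
  const int e = -60;                        // within Grisu3's target range
  const uint64_t one = uint64_t(1) << -e;   // "1.0" in this fixed-point format
  uint64_t fractionals = one / 4;           // represents 0.25
  for (int i = 0; i < 5; i++) {
    fractionals *= 10;                                // bring next digit up
    int digit = static_cast<int>(fractionals >> -e);  // integer part = digit
    fractionals &= one - 1;                           // keep fractional part
    printf("%d", digit);
  }
  printf("\n");                             // prints "25000"
  return 0;
}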
// Generates (at most) requested_digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by kMinimalTargetExponent and
// kMaximalTargetExponent.
// Hence -60 <= w.e() <= -32.
//
// Returns false if it fails, in which case the generated digits in the buffer
// should not be used.
// Preconditions:
// * w is correct up to 1 ulp (unit in the last place). That
// is, its error must be strictly less than a unit of its last digit.
// * kMinimalTargetExponent <= w.e() <= kMaximalTargetExponent
//
// Postconditions: returns false if the procedure fails;
//   otherwise:
// * buffer is not null-terminated, but length contains the number of
// digits.
// * the representation in buffer is the most precise representation of
// requested_digits digits.
// * buffer contains at most requested_digits digits of w. If there are fewer
//   than requested_digits digits then some trailing '0's have been removed.
// * kappa is such that
// w = buffer * 10^kappa + eps with |eps| < 10^kappa / 2.
//
// Remark: This procedure takes into account the imprecision of its input
// numbers. If the precision is not enough to guarantee all the postconditions
// then false is returned. This happens rarely, but the failure rate
// increases with higher requested_digits.
static bool DigitGenCounted(DiyFp w,
int requested_digits,
Vector<char> buffer,
int* length,
int* kappa) {
ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
ASSERT(kMinimalTargetExponent >= -60);
ASSERT(kMaximalTargetExponent <= -32);
// w is assumed to have an error less than 1 unit. Whenever w is scaled we
// also scale its error.
uint64_t w_error = 1;
// We cut the input number into two parts: the integral digits and the
// fractional digits. We don't emit any decimal separator, but adapt kappa
// instead. Example: instead of writing "1.2" we put "12" into the buffer and
// increase kappa by 1.
DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
// Division by one is a shift.
uint32_t integrals = static_cast<uint32_t>(w.f() >> -one.e());
// Modulo by one is an and.
uint64_t fractionals = w.f() & (one.f() - 1);
uint32_t divisor;
int divisor_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
&divisor, &divisor_exponent);
*kappa = divisor_exponent + 1;
*length = 0;
// Loop invariant: buffer = w / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized
// with the divisor exponent + 1. And the divisor is the biggest power of ten
// that is smaller than 'integrals'.
while (*kappa > 0) {
int digit = integrals / divisor;
buffer[*length] = '0' + digit;
(*length)++;
requested_digits--;
integrals %= divisor;
(*kappa)--;
// Note that kappa now equals the exponent of the divisor and that the
// invariant thus holds again.
if (requested_digits == 0) break;
divisor /= 10;
}
if (requested_digits == 0) {
uint64_t rest =
(static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
return RoundWeedCounted(buffer, *length, rest,
static_cast<uint64_t>(divisor) << -one.e(), w_error,
kappa);
}
// The integrals have been generated. We are at the point of the decimal
// separator. In the following loop we simply multiply the remaining digits by
// 10 and divide by one. We just need to pay attention to multiply associated
// data (the 'unit'), too.
// Note that the multiplication by 10 does not overflow, because w.e >= -60
// and thus one.e >= -60.
ASSERT(one.e() >= -60);
ASSERT(fractionals < one.f());
ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
while (requested_digits > 0 && fractionals > w_error) {
fractionals *= 10;
w_error *= 10;
// Integer division by one.
int digit = static_cast<int>(fractionals >> -one.e());
buffer[*length] = '0' + digit;
(*length)++;
requested_digits--;
fractionals &= one.f() - 1; // Modulo by one.
(*kappa)--;
}
if (requested_digits != 0) return false;
return RoundWeedCounted(buffer, *length, fractionals, one.f(), w_error,
kappa);
}
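A quick way to read the (buffer, length, kappa) postcondition above: the emitted digits form an integer that is scaled by 10^kappa. A tiny standalone decoder, with made-up output values for illustration:

#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical result of a DigitGenCounted-style call:
  const char buffer[] = "123";
  int length = 3;
  int kappa = -1;
  double digits = 0;
  for (int i = 0; i < length; i++) digits = digits * 10 + (buffer[i] - '0');
  double w = digits * std::pow(10.0, kappa);  // 123 * 10^-1 = 12.3
  printf("%g\n", w);
  return 0;
}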
// Provides a decimal representation of v.
// Returns true if it succeeds, otherwise the result cannot be trusted.
// There will be *length digits inside the buffer (not null-terminated).
@@ -437,7 +595,10 @@ bool DigitGen(DiyFp low,
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
-bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
+static bool Grisu3(double v,
+                   Vector<char> buffer,
+                   int* length,
+                   int* decimal_exponent) {
  DiyFp w = Double(v).AsNormalizedDiyFp();
  // boundary_minus and boundary_plus are the boundaries between v and its
  // closest floating-point neighbors. Any number strictly between
@@ -448,12 +609,12 @@ bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
  ASSERT(boundary_plus.e() == w.e());
  DiyFp ten_mk;  // Cached power of ten: 10^-k
  int mk;        // -k
- GetCachedPower(w.e() + DiyFp::kSignificandSize, minimal_target_exponent,
-                maximal_target_exponent, &mk, &ten_mk);
- ASSERT(minimal_target_exponent <= w.e() + ten_mk.e() +
-        DiyFp::kSignificandSize &&
-        maximal_target_exponent >= w.e() + ten_mk.e() +
-        DiyFp::kSignificandSize);
+ GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
+                kMaximalTargetExponent, &mk, &ten_mk);
+ ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+         DiyFp::kSignificandSize) &&
+        (kMaximalTargetExponent >= w.e() + ten_mk.e() +
+         DiyFp::kSignificandSize));
  // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
  // 64 bit significand and ten_mk is thus only precise up to 64 bits.
@@ -488,17 +649,75 @@ bool grisu3(double v, Vector<char> buffer, int* length, int* decimal_exponent) {
}
// The "counted" version of grisu3 (see above) only generates requested_digits
// number of digits. This version does not generate the shortest representation,
// and with enough requested digits 0.1 will at some point print as 0.9999999...
// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
// therefore the rounding strategy for halfway cases is irrelevant.
static bool Grisu3Counted(double v,
int requested_digits,
Vector<char> buffer,
int* length,
int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
DiyFp ten_mk; // Cached power of ten: 10^-k
int mk; // -k
GetCachedPower(w.e() + DiyFp::kSignificandSize, kMinimalTargetExponent,
kMaximalTargetExponent, &mk, &ten_mk);
ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
DiyFp::kSignificandSize) &&
(kMaximalTargetExponent >= w.e() + ten_mk.e() +
DiyFp::kSignificandSize));
// Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
// 64 bit significand and ten_mk is thus only precise up to 64 bits.
// The DiyFp::Times procedure rounds its result, and ten_mk is approximated
// too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
// off by a small amount.
// In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
// In other words: let f = scaled_w.f() and e = scaled_w.e(), then
// (f-1) * 2^e < w*10^k < (f+1) * 2^e
DiyFp scaled_w = DiyFp::Times(w, ten_mk);
// We now have (double) (scaled_w * 10^-mk).
// DigitGen will generate the first requested_digits digits of scaled_w and
// return together with a kappa such that scaled_w ~= buffer * 10^kappa. (It
// will not always be exactly the same since DigitGenCounted only produces a
// limited number of digits.)
int kappa;
bool result = DigitGenCounted(scaled_w, requested_digits,
buffer, length, &kappa);
*decimal_exponent = -mk + kappa;
return result;
}
bool FastDtoa(double v,
+             FastDtoaMode mode,
+             int requested_digits,
              Vector<char> buffer,
              int* length,
-             int* point) {
+             int* decimal_point) {
  ASSERT(v > 0);
  ASSERT(!Double(v).IsSpecial());
- int decimal_exponent;
- bool result = grisu3(v, buffer, length, &decimal_exponent);
- *point = *length + decimal_exponent;
+ bool result = false;
+ int decimal_exponent = 0;
+ switch (mode) {
+   case FAST_DTOA_SHORTEST:
+     result = Grisu3(v, buffer, length, &decimal_exponent);
+     break;
+   case FAST_DTOA_PRECISION:
+     result = Grisu3Counted(v, requested_digits,
+                            buffer, length, &decimal_exponent);
+     break;
+   default:
+     UNREACHABLE();
+ }
+ if (result) {
+   *decimal_point = *length + decimal_exponent;
    buffer[*length] = '\0';
+ }
  return result;
}
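For reference, a sketch of how a caller inside v8::internal might invoke the two modes, per the declarations in fast-dtoa.h below. The buffer sizing and the expected values in the comments are my assumptions, and a false return still has to be handled by a slower fallback elsewhere.

char digits[kFastDtoaMaximalLength + 1];
int length, decimal_point;
// Shortest mode: requested_digits is ignored (pass 0).
if (FastDtoa(3.5, FAST_DTOA_SHORTEST, 0,
             Vector<char>(digits, sizeof(digits)),
             &length, &decimal_point)) {
  // Expect digits = "35", length = 2, decimal_point = 1,
  // i.e. 3.5 == 35 * 10^(decimal_point - length).
}
// Fixed-precision mode: at most 4 significant digits.
if (FastDtoa(3.5, FAST_DTOA_PRECISION, 4,
             Vector<char>(digits, sizeof(digits)),
             &length, &decimal_point)) {
  // v - digits * 10^(decimal_point - length) is as close to zero as
  // any 4-digit representation allows.
}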

43
deps/v8/src/fast-dtoa.h

@@ -31,27 +31,52 @@
namespace v8 {
namespace internal {
enum FastDtoaMode {
// Computes the shortest representation of the given input. The returned
// result will be the most accurate number of this length. Longer
// representations might be more accurate.
FAST_DTOA_SHORTEST,
// Computes a representation where the precision (number of digits) is
// given as input. The precision is independent of the decimal point.
FAST_DTOA_PRECISION
};
// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
// include the terminating '\0' character.
static const int kFastDtoaMaximalLength = 17;
// Provides a decimal representation of v.
-// v must be a strictly positive finite double.
+// The result should be interpreted as buffer * 10^(point - length).
+//
+// Precondition:
+//   * v must be a strictly positive finite double.
+//
// Returns true if it succeeds, otherwise the result can not be trusted.
// There will be *length digits inside the buffer followed by a null terminator.
-// If the function returns true then
+// If the function returns true and mode equals
+//   - FAST_DTOA_SHORTEST, then
+//     the parameter requested_digits is ignored.
+//     The result satisfies
//       v == (double) (buffer * 10^(point - length)).
-// The digits in the buffer are the shortest representation possible: no
-// 0.099999999999 instead of 0.1.
+//     The digits in the buffer are the shortest representation possible. E.g.
+//     if 0.099999999999 and 0.1 represent the same double then "1" is returned
+//     with point = 0.
// The last digit will be closest to the actual v. That is, even if several
-// digits might correctly yield 'v' when read again, the buffer will contain the
-// one closest to v.
-// The variable 'sign' will be '0' if the given number is positive, and '1'
-//   otherwise.
+// digits might correctly yield 'v' when read again, the buffer will contain
+// the one closest to v.
+//   - FAST_DTOA_PRECISION, then
+//     the buffer contains requested_digits digits.
+//     the difference v - (buffer * 10^(point-length)) is closest to zero for
+//     all possible representations of requested_digits digits.
+//     If there are two values that are equally close, then FastDtoa returns
+//     false.
+// For both modes the buffer must be large enough to hold the result.
bool FastDtoa(double d,
+             FastDtoaMode mode,
+             int requested_digits,
              Vector<char> buffer,
              int* length,
-             int* point);
+             int* decimal_point);
} }  // namespace v8::internal

6
deps/v8/src/flag-definitions.h

@@ -108,6 +108,8 @@ DEFINE_bool(enable_sse2, true,
            "enable use of SSE2 instructions if available")
DEFINE_bool(enable_sse3, true,
            "enable use of SSE3 instructions if available")
+DEFINE_bool(enable_sse4_1, true,
+           "enable use of SSE4.1 instructions if available")
DEFINE_bool(enable_cmov, true,
            "enable use of CMOV instruction if available")
DEFINE_bool(enable_rdtsc, true,
@@ -179,8 +181,8 @@ DEFINE_bool(always_inline_smi_code, false,
            "always inline smi code in non-opt code")

// heap.cc
-DEFINE_int(max_new_space_size, 0, "max size of the new generation")
-DEFINE_int(max_old_space_size, 0, "max size of the old generation")
+DEFINE_int(max_new_space_size, 0, "max size of the new generation (in kBytes)")
+DEFINE_int(max_old_space_size, 0, "max size of the old generation (in Mbytes)")
DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_bool(trace_gc, false,
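As an aside, embedders can toggle the flags above at startup; one way is V8's string-based flag parser (a sketch, assuming the embedding API of this V8 line; the flag values are illustrative).

#include <cstring>
#include <v8.h>

int main() {
  // Units follow the updated help text: new space in kBytes, old in Mbytes.
  const char flags[] = "--max_old_space_size=128 --noenable_sse4_1";
  v8::V8::SetFlagsFromString(flags, static_cast<int>(std::strlen(flags)));
  return 0;
}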

52
deps/v8/src/frames.cc

@@ -143,8 +143,8 @@ void StackFrameIterator::Reset() {
    state.pc_address =
        reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
    type = StackFrame::ComputeType(&state);
+   if (SingletonFor(type) == NULL) return;
  }
- if (SingletonFor(type) == NULL) return;
  frame_ = SingletonFor(type, &state);
}

@@ -203,13 +203,24 @@ bool StackTraceFrameIterator::IsValidFrame() {
// -------------------------------------------------------------------------
bool SafeStackFrameIterator::ExitFrameValidator::IsValidFP(Address fp) {
if (!validator_.IsValid(fp)) return false;
Address sp = ExitFrame::ComputeStackPointer(fp);
if (!validator_.IsValid(sp)) return false;
StackFrame::State state;
ExitFrame::FillState(fp, sp, &state);
if (!validator_.IsValid(reinterpret_cast<Address>(state.pc_address))) {
return false;
}
return *state.pc_address != NULL;
}
SafeStackFrameIterator::SafeStackFrameIterator(
    Address fp, Address sp, Address low_bound, Address high_bound) :
-   maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
-   is_valid_top_(
-       IsWithinBounds(low_bound, high_bound,
-                      Top::c_entry_fp(Top::GetCurrentThread())) &&
-       Top::handler(Top::GetCurrentThread()) != NULL),
+   maintainer_(),
+   stack_validator_(low_bound, high_bound),
+   is_valid_top_(IsValidTop(low_bound, high_bound)),
    is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
    is_working_iterator_(is_valid_top_ || is_valid_fp_),
    iteration_done_(!is_working_iterator_),
@@ -217,6 +228,14 @@ SafeStackFrameIterator::SafeStackFrameIterator(
}
bool SafeStackFrameIterator::IsValidTop(Address low_bound, Address high_bound) {
Address fp = Top::c_entry_fp(Top::GetCurrentThread());
ExitFrameValidator validator(low_bound, high_bound);
if (!validator.IsValidFP(fp)) return false;
return Top::handler(Top::GetCurrentThread()) != NULL;
}
void SafeStackFrameIterator::Advance() {
  ASSERT(is_working_iterator_);
  ASSERT(!done());
@@ -258,9 +277,8 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
    // sure that caller FP address is valid.
    Address caller_fp = Memory::Address_at(
        frame->fp() + EntryFrameConstants::kCallerFPOffset);
-   if (!IsValidStackAddress(caller_fp)) {
-     return false;
-   }
+   ExitFrameValidator validator(stack_validator_);
+   if (!validator.IsValidFP(caller_fp)) return false;
  } else if (frame->is_arguments_adaptor()) {
    // See ArgumentsAdaptorFrame::GetCallerStackPointer. It assumes that
    // the number of arguments is stored on stack as Smi. We need to check
@@ -415,6 +433,22 @@ Address ExitFrame::GetCallerStackPointer() const {
}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
Address sp = ComputeStackPointer(fp);
FillState(fp, sp, state);
ASSERT(*state->pc_address != NULL);
return EXIT;
}
void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
}
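FillState() encodes the exit-frame layout: the saved return address lives one pointer-size slot below the stack pointer. A standalone illustration of that address arithmetic (all values made up, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kPointerSize = sizeof(void*);
  uintptr_t fp = 0x7fff00c0;              // hypothetical frame pointer
  uintptr_t sp = fp - 4 * kPointerSize;   // hypothetical stack pointer
  uintptr_t pc_address = sp - 1 * kPointerSize;  // pc slot, as in FillState()
  printf("fp=%#llx sp=%#llx pc slot=%#llx\n",
         (unsigned long long)fp,
         (unsigned long long)sp,
         (unsigned long long)pc_address);
  return 0;
}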
Address StandardFrame::GetExpressionAddress(int n) const {
  const int offset = StandardFrameConstants::kExpressionsOffset;
  return fp() + offset - n * kPointerSize;

47
deps/v8/src/frames.h

@@ -67,7 +67,7 @@ class PcToCodeCache : AllStatic {
  static PcToCodeCacheEntry* GetCacheEntry(Address pc);

 private:
- static const int kPcToCodeCacheSize = 256;
+ static const int kPcToCodeCacheSize = 1024;
  static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
};

@@ -141,6 +141,13 @@ class StackFrame BASE_EMBEDDED {
    NO_ID = 0
  };
struct State {
State() : sp(NULL), fp(NULL), pc_address(NULL) { }
Address sp;
Address fp;
Address* pc_address;
};
  // Copy constructor; it breaks the connection to host iterator.
  StackFrame(const StackFrame& original) {
    this->state_ = original.state_;
@@ -201,12 +208,6 @@ class StackFrame BASE_EMBEDDED {
                                 int index) const { }

 protected:
- struct State {
-   Address sp;
-   Address fp;
-   Address* pc_address;
- };
-
  explicit StackFrame(StackFrameIterator* iterator) : iterator_(iterator) { }
  virtual ~StackFrame() { }
@@ -318,6 +319,8 @@ class ExitFrame: public StackFrame {
  // pointer. Used when constructing the first stack frame seen by an
  // iterator and the frames following entry frames.
  static Type GetStateForFramePointer(Address fp, State* state);
static Address ComputeStackPointer(Address fp);
static void FillState(Address fp, Address sp, State* state);
 protected:
  explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }

@@ -443,6 +446,7 @@ class JavaScriptFrame: public StandardFrame {
  inline Object* function_slot_object() const;

  friend class StackFrameIterator;
+ friend class StackTracer;
};

@@ -654,12 +658,36 @@ class SafeStackFrameIterator BASE_EMBEDDED {
  }

 private:
- bool IsValidStackAddress(Address addr) const {
+ class StackAddressValidator {
+  public:
+   StackAddressValidator(Address low_bound, Address high_bound)
+       : low_bound_(low_bound), high_bound_(high_bound) { }
+   bool IsValid(Address addr) const {
      return IsWithinBounds(low_bound_, high_bound_, addr);
    }
private:
Address low_bound_;
Address high_bound_;
};
class ExitFrameValidator {
public:
explicit ExitFrameValidator(const StackAddressValidator& validator)
: validator_(validator) { }
ExitFrameValidator(Address low_bound, Address high_bound)
: validator_(low_bound, high_bound) { }
bool IsValidFP(Address fp);
private:
StackAddressValidator validator_;
};
bool IsValidStackAddress(Address addr) const {
return stack_validator_.IsValid(addr);
}
  bool CanIterateHandles(StackFrame* frame, StackHandler* handler);
  bool IsValidFrame(StackFrame* frame) const;
  bool IsValidCaller(StackFrame* frame);
+ static bool IsValidTop(Address low_bound, Address high_bound);

  // This is a nasty hack to make sure the active count is incremented
  // before the constructor for the embedded iterator is invoked. This
@@ -674,8 +702,7 @@ class SafeStackFrameIterator BASE_EMBEDDED {
  ActiveCountMaintainer maintainer_;
  static int active_count_;
- Address low_bound_;
- Address high_bound_;
+ StackAddressValidator stack_validator_;
  const bool is_valid_top_;
  const bool is_valid_fp_;
  const bool is_working_iterator_;

241
deps/v8/src/full-codegen.cc

@@ -332,30 +332,93 @@ bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
}

-void FullCodeGenerator::PrepareTest(Label* materialize_true,
+void FullCodeGenerator::EffectContext::Plug(Register reg) const {
}
void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
// Move value into place.
__ Move(result_register(), reg);
}
void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
// Move value into place.
__ push(reg);
}
void FullCodeGenerator::TestContext::Plug(Register reg) const {
// For simplicity we always test the accumulator register.
__ Move(result_register(), reg);
codegen()->DoTest(true_label_, false_label_, fall_through_);
}
void FullCodeGenerator::EffectContext::PlugTOS() const {
__ Drop(1);
}
void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
__ pop(result_register());
}
void FullCodeGenerator::StackValueContext::PlugTOS() const {
}
void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
__ pop(result_register());
codegen()->DoTest(true_label_, false_label_, fall_through_);
}
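The Plug() overloads above are the heart of the new design: the *context* decides what happens to a value, instead of a switch at every use site. A toy model of that virtual dispatch (not V8 code, names invented for illustration):

#include <cstdio>

struct Context {
  virtual ~Context() {}
  virtual void Plug(int reg_value) const = 0;
};
struct EffectContext : Context {
  void Plug(int) const { /* value discarded */ }
};
struct AccumulatorContext : Context {
  void Plug(int reg_value) const { printf("accumulator = %d\n", reg_value); }
};
struct StackContext : Context {
  void Plug(int reg_value) const { printf("push %d\n", reg_value); }
};
struct TestContext : Context {
  void Plug(int reg_value) const {
    printf("branch on %s\n", reg_value ? "true" : "false");
  }
};

int main() {
  AccumulatorContext acc;
  TestContext test;
  const Context* c = &acc;
  c->Plug(7);   // accumulator = 7
  c = &test;
  c->Plug(0);   // branch on false
  return 0;
}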
void FullCodeGenerator::EffectContext::PrepareTest(
Label* materialize_true,
    Label* materialize_false,
    Label** if_true,
    Label** if_false,
-   Label** fall_through) {
- switch (context_) {
-   case Expression::kUninitialized:
-     UNREACHABLE();
-     break;
-   case Expression::kEffect:
+   Label** fall_through) const {
  // In an effect context, the true and the false case branch to the
  // same label.
  *if_true = *if_false = *fall_through = materialize_true;
- break;
- case Expression::kValue:
+}
void FullCodeGenerator::AccumulatorValueContext::PrepareTest(
Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const {
  *if_true = *fall_through = materialize_true;
  *if_false = materialize_false;
- break;
- case Expression::kTest:
+}
void FullCodeGenerator::StackValueContext::PrepareTest(
Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const {
*if_true = *fall_through = materialize_true;
*if_false = materialize_false;
}
void FullCodeGenerator::TestContext::PrepareTest(
Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const {
  *if_true = true_label_;
  *if_false = false_label_;
  *fall_through = fall_through_;
- break;
- }
}
@@ -366,7 +429,7 @@ void FullCodeGenerator::VisitDeclarations(
  for (int i = 0; i < length; i++) {
    Declaration* decl = declarations->at(i);
    Variable* var = decl->proxy()->var();
-   Slot* slot = var->slot();
+   Slot* slot = var->AsSlot();

    // If it was not possible to allocate the variable at compile
    // time, we need to "declare" it at runtime to make sure it
@@ -386,7 +449,7 @@ void FullCodeGenerator::VisitDeclarations(
    for (int j = 0, i = 0; i < length; i++) {
      Declaration* decl = declarations->at(i);
      Variable* var = decl->proxy()->var();
-     Slot* slot = var->slot();
+     Slot* slot = var->AsSlot();
      if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
        array->set(j++, *(var->name()));
@@ -576,20 +639,20 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
      // Load only the operands that we need to materialize.
      if (constant == kNoConstants) {
-       VisitForValue(left, kStack);
-       VisitForValue(right, kAccumulator);
+       VisitForStackValue(left);
+       VisitForAccumulatorValue(right);
      } else if (constant == kRightConstant) {
-       VisitForValue(left, kAccumulator);
+       VisitForAccumulatorValue(left);
      } else {
        ASSERT(constant == kLeftConstant);
-       VisitForValue(right, kAccumulator);
+       VisitForAccumulatorValue(right);
      }

      SetSourcePosition(expr->position());
      if (ShouldInlineSmiCase(op)) {
-       EmitInlineSmiBinaryOp(expr, op, context_, mode, left, right, constant);
+       EmitInlineSmiBinaryOp(expr, op, mode, left, right, constant);
      } else {
-       EmitBinaryOp(op, context_, mode);
+       EmitBinaryOp(op, mode);
      }
      break;
    }
@@ -603,39 +666,7 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
  Label eval_right, done;

- // Set up the appropriate context for the left subexpression based
- // on the operation and our own context. Initially assume we can
- // inherit both true and false labels from our context.
- if (expr->op() == Token::OR) {
-   switch (context_) {
-     case Expression::kUninitialized:
-       UNREACHABLE();
-     case Expression::kEffect:
-       VisitForControl(expr->left(), &done, &eval_right, &eval_right);
-       break;
-     case Expression::kValue:
-       VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
-       break;
-     case Expression::kTest:
-       VisitForControl(expr->left(), true_label_, &eval_right, &eval_right);
-       break;
-   }
- } else {
-   ASSERT_EQ(Token::AND, expr->op());
-   switch (context_) {
-     case Expression::kUninitialized:
-       UNREACHABLE();
-     case Expression::kEffect:
-       VisitForControl(expr->left(), &eval_right, &done, &eval_right);
-       break;
-     case Expression::kValue:
-       VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
-       break;
-     case Expression::kTest:
-       VisitForControl(expr->left(), &eval_right, false_label_, &eval_right);
-       break;
-   }
- }
+ context()->EmitLogicalLeft(expr, &eval_right, &done);
  __ bind(&eval_right);
  Visit(expr->right());

@@ -644,43 +675,75 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
}
-void FullCodeGenerator::VisitLogicalForValue(Expression* expr,
-                                             Token::Value op,
-                                             Location where,
-                                             Label* done) {
- ASSERT(op == Token::AND || op == Token::OR);
- VisitForValue(expr, kAccumulator);
- __ push(result_register());
-
- Label discard;
- switch (where) {
-   case kAccumulator: {
-     Label restore;
-     if (op == Token::OR) {
-       DoTest(&restore, &discard, &restore);
-     } else {
-       DoTest(&discard, &restore, &restore);
-     }
-     __ bind(&restore);
-     __ pop(result_register());
-     __ jmp(done);
-     break;
-   }
-   case kStack: {
-     if (op == Token::OR) {
-       DoTest(done, &discard, &discard);
-     } else {
-       DoTest(&discard, done, &discard);
-     }
-     break;
-   }
- }
- __ bind(&discard);
- __ Drop(1);
-}
+void FullCodeGenerator::EffectContext::EmitLogicalLeft(BinaryOperation* expr,
+                                                       Label* eval_right,
+                                                       Label* done) const {
+ if (expr->op() == Token::OR) {
+   codegen()->VisitForControl(expr->left(), done, eval_right, eval_right);
+ } else {
+   ASSERT(expr->op() == Token::AND);
+   codegen()->VisitForControl(expr->left(), eval_right, done, eval_right);
+ }
+}
+
+void FullCodeGenerator::AccumulatorValueContext::EmitLogicalLeft(
+   BinaryOperation* expr,
+   Label* eval_right,
+   Label* done) const {
+ codegen()->Visit(expr->left());
+ // We want the value in the accumulator for the test, and on the stack in case
+ // we need it.
+ __ push(result_register());
+ Label discard, restore;
+ if (expr->op() == Token::OR) {
+   codegen()->DoTest(&restore, &discard, &restore);
+ } else {
+   ASSERT(expr->op() == Token::AND);
+   codegen()->DoTest(&discard, &restore, &restore);
+ }
+ __ bind(&restore);
+ __ pop(result_register());
+ __ jmp(done);
+ __ bind(&discard);
+ __ Drop(1);
+}
+
+void FullCodeGenerator::StackValueContext::EmitLogicalLeft(
+   BinaryOperation* expr,
+   Label* eval_right,
+   Label* done) const {
+ codegen()->VisitForAccumulatorValue(expr->left());
+ // We want the value in the accumulator for the test, and on the stack in case
+ // we need it.
+ __ push(result_register());
+ Label discard;
+ if (expr->op() == Token::OR) {
+   codegen()->DoTest(done, &discard, &discard);
+ } else {
+   ASSERT(expr->op() == Token::AND);
+   codegen()->DoTest(&discard, done, &discard);
+ }
+ __ bind(&discard);
+ __ Drop(1);
+}
+
+void FullCodeGenerator::TestContext::EmitLogicalLeft(BinaryOperation* expr,
+                                                     Label* eval_right,
+                                                     Label* done) const {
+ if (expr->op() == Token::OR) {
+   codegen()->VisitForControl(expr->left(),
+                              true_label_, eval_right, eval_right);
+ } else {
+   ASSERT(expr->op() == Token::AND);
+   codegen()->VisitForControl(expr->left(),
+                              eval_right, false_label_, eval_right);
+ }
+}
void FullCodeGenerator::VisitBlock(Block* stmt) {
  Comment cmnt(masm_, "[ Block");
  Breakable nested_statement(this, stmt);
@@ -761,7 +824,7 @@ void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
  Comment cmnt(masm_, "[ ReturnStatement");
  SetStatementPosition(stmt);
  Expression* expr = stmt->expression();
- VisitForValue(expr, kAccumulator);
+ VisitForAccumulatorValue(expr);

  // Exit all nested statements.
  NestedStatement* current = nesting_stack_;

@@ -780,7 +843,7 @@ void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
  Comment cmnt(masm_, "[ WithEnterStatement");
  SetStatementPosition(stmt);

- VisitForValue(stmt->expression(), kStack);
+ VisitForStackValue(stmt->expression());
  if (stmt->is_catch_block()) {
    __ CallRuntime(Runtime::kPushCatchContext, 1);
  } else {

@@ -955,7 +1018,7 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
  // The catch variable is *always* a variable proxy for a local variable.
  Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
  ASSERT_NOT_NULL(catch_var);
- Slot* variable_slot = catch_var->slot();
+ Slot* variable_slot = catch_var->AsSlot();
  ASSERT_NOT_NULL(variable_slot);
  ASSERT_EQ(Slot::LOCAL, variable_slot->type());
  StoreToFrameField(SlotOffset(variable_slot), result_register());
@@ -1061,7 +1124,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
                           expr->then_expression_position());
  Visit(expr->then_expression());
  // If control flow falls through Visit, jump to done.
- if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+ if (!context()->IsTest()) {
    __ jmp(&done);
  }

@@ -1070,7 +1133,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
                           expr->else_expression_position());
  Visit(expr->else_expression());
  // If control flow falls through Visit, merge it with true case here.
- if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+ if (!context()->IsTest()) {
    __ bind(&done);
  }
}

@@ -1084,7 +1147,7 @@ void FullCodeGenerator::VisitSlot(Slot* expr) {
void FullCodeGenerator::VisitLiteral(Literal* expr) {
  Comment cmnt(masm_, "[ Literal");
- Apply(context_, expr);
+ context()->Plug(expr->handle());
}

@@ -1110,17 +1173,17 @@ void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
  // Call runtime routine to allocate the catch extension object and
  // assign the exception value to the catch variable.
  Comment cmnt(masm_, "[ CatchExtensionObject");
- VisitForValue(expr->key(), kStack);
- VisitForValue(expr->value(), kStack);
+ VisitForStackValue(expr->key());
+ VisitForStackValue(expr->value());
  // Create catch extension object.
  __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
- Apply(context_, result_register());
+ context()->Plug(result_register());
}

void FullCodeGenerator::VisitThrow(Throw* expr) {
  Comment cmnt(masm_, "[ Throw");
- VisitForValue(expr->exception(), kStack);
+ VisitForStackValue(expr->exception());
  __ CallRuntime(Runtime::kThrow, 1);
  // Never returns here.
}

@@ -1150,9 +1213,9 @@ int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
- VisitForValue(args->at(0), kStack);
+ VisitForStackValue(args->at(0));
  __ CallRuntime(Runtime::kRegExpCloneResult, 1);
- Apply(context_, result_register());
+ context()->Plug(result_register());
}

#undef __

272
deps/v8/src/full-codegen.h

@@ -71,10 +71,7 @@ class FullCodeGenerator: public AstVisitor {
        info_(NULL),
        nesting_stack_(NULL),
        loop_depth_(0),
-       location_(kStack),
-       true_label_(NULL),
-       false_label_(NULL),
-       fall_through_(NULL) {
+       context_(NULL) {
  }

  static Handle<Code> MakeCode(CompilationInfo* info);

@@ -232,11 +229,6 @@ class FullCodeGenerator: public AstVisitor {
    DISALLOW_COPY_AND_ASSIGN(ForIn);
  };
- enum Location {
-   kAccumulator,
-   kStack
- };
-
  enum ConstantOperand {
    kNoConstants,
    kLeftConstant,

@@ -262,39 +254,6 @@ class FullCodeGenerator: public AstVisitor {
                                      Expression* left,
                                      Expression* right);

- // Emit code to convert a pure value (in a register, slot, as a literal,
- // or on top of the stack) into the result expected according to an
- // expression context.
- void Apply(Expression::Context context, Register reg);
-
- // Slot cannot have type Slot::LOOKUP.
- void Apply(Expression::Context context, Slot* slot);
-
- void Apply(Expression::Context context, Literal* lit);
- void ApplyTOS(Expression::Context context);
-
- // Emit code to discard count elements from the top of stack, then convert
- // a pure value into the result expected according to an expression
- // context.
- void DropAndApply(int count, Expression::Context context, Register reg);
-
- // Set up branch labels for a test expression.
- void PrepareTest(Label* materialize_true,
-                  Label* materialize_false,
-                  Label** if_true,
-                  Label** if_false,
-                  Label** fall_through);
-
- // Emit code to convert pure control flow to a pair of labels into the
- // result expected according to an expression context.
- void Apply(Expression::Context context,
-            Label* materialize_true,
-            Label* materialize_false);
-
- // Emit code to convert constant control flow (true or false) into
- // the result expected according to an expression context.
- void Apply(Expression::Context context, bool flag);
-
  // Helper function to convert a pure value into a test context. The value
  // is expected on the stack or the accumulator, depending on the platform.
  // See the platform-specific implementation for details.
@@ -316,39 +275,26 @@ class FullCodeGenerator: public AstVisitor {
  MemOperand EmitSlotSearch(Slot* slot, Register scratch);
  void VisitForEffect(Expression* expr) {
-   Expression::Context saved_context = context_;
-   context_ = Expression::kEffect;
+   EffectContext context(this);
    Visit(expr);
-   context_ = saved_context;
  }

- void VisitForValue(Expression* expr, Location where) {
-   Expression::Context saved_context = context_;
-   Location saved_location = location_;
-   context_ = Expression::kValue;
-   location_ = where;
+ void VisitForAccumulatorValue(Expression* expr) {
+   AccumulatorValueContext context(this);
+   Visit(expr);
+ }
+
+ void VisitForStackValue(Expression* expr) {
+   StackValueContext context(this);
    Visit(expr);
-   context_ = saved_context;
-   location_ = saved_location;
  }

  void VisitForControl(Expression* expr,
                       Label* if_true,
                       Label* if_false,
                       Label* fall_through) {
-   Expression::Context saved_context = context_;
-   Label* saved_true = true_label_;
-   Label* saved_false = false_label_;
-   Label* saved_fall_through = fall_through_;
-   context_ = Expression::kTest;
-   true_label_ = if_true;
-   false_label_ = if_false;
-   fall_through_ = fall_through;
+   TestContext context(this, if_true, if_false, fall_through);
    Visit(expr);
-   context_ = saved_context;
-   true_label_ = saved_true;
-   false_label_ = saved_false;
-   fall_through_ = saved_fall_through;
  }
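These helpers rely on the ExpressionContext constructor/destructor pair to install and restore the current context, so the explicit save/restore boilerplate on the left disappears. A self-contained toy version of that RAII pattern (not V8 code):

#include <cstdio>

class CodeGen;

class Ctx {
 public:
  explicit Ctx(CodeGen* gen);
  ~Ctx();
 private:
  CodeGen* gen_;
  const Ctx* old_;
};

class CodeGen {
 public:
  CodeGen() : context_(0) {}
  const Ctx* context() const { return context_; }
  void set_context(const Ctx* c) { context_ = c; }
  void VisitForValue() {
    Ctx context(this);          // installed here...
    printf("current context: %p\n", static_cast<const void*>(context_));
  }                             // ...restored automatically here
 private:
  const Ctx* context_;
};

Ctx::Ctx(CodeGen* gen) : gen_(gen), old_(gen->context()) {
  gen->set_context(this);
}
Ctx::~Ctx() { gen_->set_context(old_); }

int main() {
  CodeGen gen;
  gen.VisitForValue();
  // Back to the previous (null) context after the visit.
  printf("after visit: %p\n", static_cast<const void*>(gen.context()));
  return 0;
}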
  void VisitDeclarations(ZoneList<Declaration*>* declarations);

@@ -398,7 +344,7 @@ class FullCodeGenerator: public AstVisitor {
                                 TypeofState typeof_state,
                                 Label* slow,
                                 Label* done);
- void EmitVariableLoad(Variable* expr, Expression::Context context);
+ void EmitVariableLoad(Variable* expr);

  // Platform-specific support for allocating a new closure based on
  // the given function info.
@@ -417,14 +363,12 @@ class FullCodeGenerator: public AstVisitor {
  // Apply the compound assignment operator. Expects the left operand on top
  // of the stack and the right one in the accumulator.
  void EmitBinaryOp(Token::Value op,
-                   Expression::Context context,
                    OverwriteMode mode);

  // Helper functions for generating inlined smi code for certain
  // binary operations.
  void EmitInlineSmiBinaryOp(Expression* expr,
                             Token::Value op,
-                            Expression::Context context,
                             OverwriteMode mode,
                             Expression* left,
                             Expression* right,
@@ -432,31 +376,26 @@ class FullCodeGenerator: public AstVisitor {
  void EmitConstantSmiBinaryOp(Expression* expr,
                               Token::Value op,
-                              Expression::Context context,
                               OverwriteMode mode,
                               bool left_is_constant_smi,
                               Smi* value);

  void EmitConstantSmiBitOp(Expression* expr,
                            Token::Value op,
-                           Expression::Context context,
                            OverwriteMode mode,
                            Smi* value);

  void EmitConstantSmiShiftOp(Expression* expr,
                              Token::Value op,
-                             Expression::Context context,
                              OverwriteMode mode,
                              Smi* value);

  void EmitConstantSmiAdd(Expression* expr,
-                         Expression::Context context,
                          OverwriteMode mode,
                          bool left_is_constant_smi,
                          Smi* value);

  void EmitConstantSmiSub(Expression* expr,
-                         Expression::Context context,
                          OverwriteMode mode,
                          bool left_is_constant_smi,
                          Smi* value);
@@ -468,8 +407,7 @@ class FullCodeGenerator: public AstVisitor {
  // Complete a variable assignment. The right-hand-side value is expected
  // in the accumulator.
  void EmitVariableAssignment(Variable* var,
-                             Token::Value op,
-                             Expression::Context context);
+                             Token::Value op);

  // Complete a named property assignment. The receiver is expected on top
  // of the stack and the right-hand-side value in the accumulator.
@@ -501,6 +439,10 @@ class FullCodeGenerator: public AstVisitor {
  MacroAssembler* masm() { return masm_; }

+ class ExpressionContext;
+ const ExpressionContext* context() { return context_; }
+ void set_new_context(const ExpressionContext* context) { context_ = context; }
+
  Handle<Script> script() { return info_->script(); }
  bool is_eval() { return info_->is_eval(); }
  FunctionLiteral* function() { return info_->function(); }
@@ -509,6 +451,9 @@ class FullCodeGenerator: public AstVisitor {
  static Register result_register();
  static Register context_register();

+ // Helper for calling an IC stub.
+ void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
+
  // Set fields in the stack frame. Offsets are the frame pointer relative
  // offsets defined in, e.g., StandardFrameConstants.
  void StoreToFrameField(int frame_offset, Register value);
@@ -527,13 +472,7 @@ class FullCodeGenerator: public AstVisitor {
  // Handles the shortcutted logical binary operations in VisitBinaryOperation.
  void EmitLogicalOperation(BinaryOperation* expr);

- void VisitForTypeofValue(Expression* expr, Location where);
-
- void VisitLogicalForValue(Expression* expr,
-                           Token::Value op,
-                           Location where,
-                           Label* done);
+ void VisitForTypeofValue(Expression* expr);

  MacroAssembler* masm_;
  CompilationInfo* info_;
@@ -542,11 +481,178 @@ class FullCodeGenerator: public AstVisitor {
  NestedStatement* nesting_stack_;
  int loop_depth_;

- Expression::Context context_;
- Location location_;
  class ExpressionContext {
   public:
explicit ExpressionContext(FullCodeGenerator* codegen)
: masm_(codegen->masm()), old_(codegen->context()), codegen_(codegen) {
codegen->set_new_context(this);
}
virtual ~ExpressionContext() {
codegen_->set_new_context(old_);
}
// Convert constant control flow (true or false) to the result expected for
// this expression context.
virtual void Plug(bool flag) const = 0;
// Emit code to convert a pure value (in a register, slot, as a literal,
// or on top of the stack) into the result expected according to this
// expression context.
virtual void Plug(Register reg) const = 0;
virtual void Plug(Slot* slot) const = 0;
virtual void Plug(Handle<Object> lit) const = 0;
virtual void Plug(Heap::RootListIndex index) const = 0;
virtual void PlugTOS() const = 0;
// Emit code to convert pure control flow to a pair of unbound labels into
// the result expected according to this expression context. The
// implementation may decide to bind either of the labels.
virtual void Plug(Label* materialize_true,
Label* materialize_false) const = 0;
// Emit code to discard count elements from the top of stack, then convert
// a pure value into the result expected according to this expression
// context.
virtual void DropAndPlug(int count, Register reg) const = 0;
// For shortcutting operations || and &&.
virtual void EmitLogicalLeft(BinaryOperation* expr,
Label* eval_right,
Label* done) const = 0;
// Set up branch labels for a test expression. The three Label** parameters
// are output parameters.
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const = 0;
// Returns true if we are evaluating only for side effects (ie if the result
// will be discarded.
virtual bool IsEffect() const { return false; }
// Returns true if we are branching on the value rather than materializing
// it.
virtual bool IsTest() const { return false; }
protected:
FullCodeGenerator* codegen() const { return codegen_; }
MacroAssembler* masm() const { return masm_; }
MacroAssembler* masm_;
private:
const ExpressionContext* old_;
FullCodeGenerator* codegen_;
};
class AccumulatorValueContext : public ExpressionContext {
public:
explicit AccumulatorValueContext(FullCodeGenerator* codegen)
: ExpressionContext(codegen) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
virtual void Plug(Label* materialize_true, Label* materialize_false) const;
virtual void Plug(Slot* slot) const;
virtual void Plug(Handle<Object> lit) const;
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
virtual void EmitLogicalLeft(BinaryOperation* expr,
Label* eval_right,
Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
};
class StackValueContext : public ExpressionContext {
public:
explicit StackValueContext(FullCodeGenerator* codegen)
: ExpressionContext(codegen) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
virtual void Plug(Label* materialize_true, Label* materialize_false) const;
virtual void Plug(Slot* slot) const;
virtual void Plug(Handle<Object> lit) const;
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
virtual void EmitLogicalLeft(BinaryOperation* expr,
Label* eval_right,
Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
};
class TestContext : public ExpressionContext {
public:
explicit TestContext(FullCodeGenerator* codegen,
Label* true_label,
Label* false_label,
Label* fall_through)
: ExpressionContext(codegen),
true_label_(true_label),
false_label_(false_label),
fall_through_(fall_through) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
virtual void Plug(Label* materialize_true, Label* materialize_false) const;
virtual void Plug(Slot* slot) const;
virtual void Plug(Handle<Object> lit) const;
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
virtual void EmitLogicalLeft(BinaryOperation* expr,
Label* eval_right,
Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
virtual bool IsTest() const { return true; }
private:
  Label* true_label_;
  Label* false_label_;
  Label* fall_through_;
};
class EffectContext : public ExpressionContext {
public:
explicit EffectContext(FullCodeGenerator* codegen)
: ExpressionContext(codegen) { }
virtual void Plug(bool flag) const;
virtual void Plug(Register reg) const;
virtual void Plug(Label* materialize_true, Label* materialize_false) const;
virtual void Plug(Slot* slot) const;
virtual void Plug(Handle<Object> lit) const;
virtual void Plug(Heap::RootListIndex) const;
virtual void PlugTOS() const;
virtual void DropAndPlug(int count, Register reg) const;
virtual void EmitLogicalLeft(BinaryOperation* expr,
Label* eval_right,
Label* done) const;
virtual void PrepareTest(Label* materialize_true,
Label* materialize_false,
Label** if_true,
Label** if_false,
Label** fall_through) const;
virtual bool IsEffect() const { return true; }
};
const ExpressionContext* context_;
  friend class NestedStatement;

8
deps/v8/src/global-handles.cc

@@ -486,7 +486,7 @@ void GlobalHandles::PrintStats() {
  }

  PrintF("Global Handle Statistics:\n");
- PrintF(" allocated memory = %dB\n", sizeof(Node) * total);
+ PrintF(" allocated memory = %" V8_PTR_PREFIX "dB\n", sizeof(Node) * total);
  PrintF(" # weak = %d\n", weak);
  PrintF(" # pending = %d\n", pending);
  PrintF(" # near_death = %d\n", near_death);

@@ -497,8 +497,10 @@ void GlobalHandles::PrintStats() {
void GlobalHandles::Print() {
  PrintF("Global handles:\n");
  for (Node* current = head_; current != NULL; current = current->next()) {
-   PrintF(" handle %p to %p (weak=%d)\n", current->handle().location(),
-          *current->handle(), current->state_ == Node::WEAK);
+   PrintF(" handle %p to %p (weak=%d)\n",
+          reinterpret_cast<void*>(current->handle().location()),
+          reinterpret_cast<void*>(*current->handle()),
+          current->state_ == Node::WEAK);
  }
}

10
deps/v8/src/globals.h

@@ -214,6 +214,12 @@ const intptr_t kMapAlignmentBits = kObjectAlignmentBits + 3;
const intptr_t kMapAlignment = (1 << kMapAlignmentBits);
const intptr_t kMapAlignmentMask = kMapAlignment - 1;

+// Desired alignment for generated code is 32 bytes (to improve cache line
+// utilization).
+const int kCodeAlignmentBits = 5;
+const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
+const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
+
// Tag information for Failure.
const int kFailureTag = 3;
const int kFailureTagSize = 2;
@@ -588,6 +594,10 @@ enum StateTag {
#define MAP_POINTER_ALIGN(value) \
  (((value) + kMapAlignmentMask) & ~kMapAlignmentMask)

+// CODE_POINTER_ALIGN returns the value aligned as a generated code segment.
+#define CODE_POINTER_ALIGN(value) \
+  (((value) + kCodeAlignmentMask) & ~kCodeAlignmentMask)
+
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This
// corresponds to 'offsetof' (in stddef.h), except that it doesn't
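CODE_POINTER_ALIGN rounds up to the next kCodeAlignment (32-byte) boundary. A standalone check of the arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kCodeAlignment = 1 << 5;                  // 32
  const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;  // 0x1f
  for (intptr_t v = 30; v <= 65; v += 35) {
    intptr_t aligned = (v + kCodeAlignmentMask) & ~kCodeAlignmentMask;
    printf("%ld -> %ld\n", static_cast<long>(v), static_cast<long>(aligned));
    // 30 -> 32, 65 -> 96
  }
  return 0;
}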

37
deps/v8/src/handles.cc

@@ -142,6 +142,13 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
void SetExpectedNofProperties(Handle<JSFunction> func, int nof) {
+ // If objects constructed from this function exist then changing
+ // 'estimated_nof_properties' is dangerous since the previous value might
+ // have been compiled into the fast construct stub. Moreover, the inobject
+ // slack tracking logic might have adjusted the previous value, so even
+ // passing the same value is risky.
+ if (func->shared()->live_objects_may_exist()) return;
+
  func->shared()->set_expected_nof_properties(nof);
  if (func->has_initial_map()) {
    Handle<Map> new_initial_map =
@ -158,16 +165,25 @@ void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value) {
static int ExpectedNofPropertiesFromEstimate(int estimate) { static int ExpectedNofPropertiesFromEstimate(int estimate) {
// TODO(1231235): We need dynamic feedback to estimate the number // If no properties are added in the constructor, they are more likely
// of expected properties in an object. The static hack below // to be added later.
// is barely a solution. if (estimate == 0) estimate = 2;
if (estimate == 0) return 4;
return estimate + 2; // We do not shrink objects that go into a snapshot (yet), so we adjust
// the estimate conservatively.
if (Serializer::enabled()) return estimate + 2;
// Inobject slack tracking will reclaim redundant inobject space later,
// so we can afford to adjust the estimate generously.
return estimate + 6;
} }
void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared, void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
int estimate) { int estimate) {
// See the comment in SetExpectedNofProperties.
if (shared->live_objects_may_exist()) return;
shared->set_expected_nof_properties( shared->set_expected_nof_properties(
ExpectedNofPropertiesFromEstimate(estimate)); ExpectedNofPropertiesFromEstimate(estimate));
} }
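The rewritten estimate has three cases: empty constructors get a small base, snapshot builds stay conservative because snapshotted objects are never shrunk, and everything else is padded generously because inobject slack tracking will reclaim the excess. A hedged sketch of that decision table, with Serializer::enabled() replaced by a plain parameter:

// Illustration only; serializer_enabled stands in for Serializer::enabled().
int ExpectedFromEstimate(int estimate, bool serializer_enabled) {
  if (estimate == 0) estimate = 2;              // likely to grow later
  if (serializer_enabled) return estimate + 2;  // snapshots are never shrunk
  return estimate + 6;                          // slack tracking reclaims this
}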
@@ -466,7 +482,8 @@ void InitScriptLineEnds(Handle<Script> script) {
   if (!script->source()->IsString()) {
     ASSERT(script->source()->IsUndefined());
-    script->set_line_ends(*(Factory::NewFixedArray(0)));
+    Handle<FixedArray> empty = Factory::NewFixedArray(0);
+    script->set_line_ends(*empty);
     ASSERT(script->line_ends()->IsFixedArray());
     return;
   }
@@ -762,20 +779,19 @@ static bool CompileLazyHelper(CompilationInfo* info,
 bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
                        ClearExceptionFlag flag) {
-  CompilationInfo info(shared);
+  LazySharedCompilationInfo info(shared);
   return CompileLazyHelper(&info, flag);
 }
 
 bool CompileLazy(Handle<JSFunction> function,
-                 Handle<Object> receiver,
                  ClearExceptionFlag flag) {
   if (function->shared()->is_compiled()) {
     function->set_code(function->shared()->code());
     function->shared()->set_code_age(0);
     return true;
   } else {
-    CompilationInfo info(function, 0, receiver);
+    LazyFunctionCompilationInfo info(function, 0);
     bool result = CompileLazyHelper(&info, flag);
     PROFILE(FunctionCreateEvent(*function));
     return result;
@@ -784,14 +800,13 @@ bool CompileLazy(Handle<JSFunction> function,
 bool CompileLazyInLoop(Handle<JSFunction> function,
-                       Handle<Object> receiver,
                        ClearExceptionFlag flag) {
   if (function->shared()->is_compiled()) {
     function->set_code(function->shared()->code());
     function->shared()->set_code_age(0);
     return true;
   } else {
-    CompilationInfo info(function, 1, receiver);
+    LazyFunctionCompilationInfo info(function, 1);
     bool result = CompileLazyHelper(&info, flag);
     PROFILE(FunctionCreateEvent(*function));
     return result;

8
deps/v8/src/handles.h

@@ -345,13 +345,9 @@ bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
 bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
                        ClearExceptionFlag flag);
 
-bool CompileLazy(Handle<JSFunction> function,
-                 Handle<Object> receiver,
-                 ClearExceptionFlag flag);
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
 
-bool CompileLazyInLoop(Handle<JSFunction> function,
-                       Handle<Object> receiver,
-                       ClearExceptionFlag flag);
+bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
 
 class NoHandleAllocation BASE_EMBEDDED {
  public:

7
deps/v8/src/heap-inl.h

@@ -36,7 +36,7 @@ namespace v8 {
 namespace internal {
 
 void Heap::UpdateOldSpaceLimits() {
-  int old_gen_size = PromotedSpaceSize();
+  intptr_t old_gen_size = PromotedSpaceSize();
   old_gen_promotion_limit_ =
       old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
   old_gen_allocation_limit_ =
@@ -59,6 +59,11 @@ Object* Heap::AllocateSymbol(Vector<const char> str,
 }
 
+Object* Heap::CopyFixedArray(FixedArray* src) {
+  return CopyFixedArrayWithMap(src, src->map());
+}
+
 Object* Heap::AllocateRaw(int size_in_bytes,
                           AllocationSpace space,
                           AllocationSpace retry_space) {
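The limit formula in UpdateOldSpaceLimits is old-generation size plus a third of itself, floored at kMinimumPromotionLimit; widening old_gen_size to intptr_t keeps that arithmetic from overflowing int on very large heaps. A standalone sketch of the promotion-limit arithmetic, assuming the 2 MB constant from heap.h:

#include <algorithm>
#include <cstdint>

const intptr_t MB = 1024 * 1024;
const intptr_t kMinimumPromotionLimit = 2 * MB;  // value from heap.h

// E.g. a 30 MB old generation yields a 40 MB limit; tiny heaps are
// floored at old_gen_size + 2 MB.
intptr_t PromotionLimit(intptr_t old_gen_size) {
  return old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3);
}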

197
deps/v8/src/heap.cc

@@ -63,8 +63,8 @@ MapSpace* Heap::map_space_ = NULL;
 CellSpace* Heap::cell_space_ = NULL;
 LargeObjectSpace* Heap::lo_space_ = NULL;
 
-int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
-int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
+intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
+intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
 
 int Heap::old_gen_exhausted_ = false;
@@ -75,19 +75,19 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
 int Heap::max_semispace_size_ = 2*MB;
-int Heap::max_old_generation_size_ = 192*MB;
+intptr_t Heap::max_old_generation_size_ = 192*MB;
 int Heap::initial_semispace_size_ = 128*KB;
-size_t Heap::code_range_size_ = 0;
+intptr_t Heap::code_range_size_ = 0;
 #elif defined(V8_TARGET_ARCH_X64)
 int Heap::max_semispace_size_ = 16*MB;
-int Heap::max_old_generation_size_ = 1*GB;
+intptr_t Heap::max_old_generation_size_ = 1*GB;
 int Heap::initial_semispace_size_ = 1*MB;
-size_t Heap::code_range_size_ = 512*MB;
+intptr_t Heap::code_range_size_ = 512*MB;
 #else
 int Heap::max_semispace_size_ = 8*MB;
-int Heap::max_old_generation_size_ = 512*MB;
+intptr_t Heap::max_old_generation_size_ = 512*MB;
 int Heap::initial_semispace_size_ = 512*KB;
-size_t Heap::code_range_size_ = 0;
+intptr_t Heap::code_range_size_ = 0;
 #endif
 
 // The snapshot semispace size will be the default semispace size if
@@ -108,7 +108,7 @@ HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;
 // Will be 4 * reserved_semispace_size_ to ensure that young
 // generation can be aligned to its size.
 int Heap::survived_since_last_expansion_ = 0;
-int Heap::external_allocation_limit_ = 0;
+intptr_t Heap::external_allocation_limit_ = 0;
 
 Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
@@ -137,13 +137,13 @@ int Heap::allocation_timeout_ = 0;
 bool Heap::disallow_allocation_failure_ = false;
 #endif  // DEBUG
 
-int GCTracer::alive_after_last_gc_ = 0;
+intptr_t GCTracer::alive_after_last_gc_ = 0;
 double GCTracer::last_gc_end_timestamp_ = 0.0;
 int GCTracer::max_gc_pause_ = 0;
-int GCTracer::max_alive_after_gc_ = 0;
+intptr_t GCTracer::max_alive_after_gc_ = 0;
 int GCTracer::min_in_mutator_ = kMaxInt;
 
-int Heap::Capacity() {
+intptr_t Heap::Capacity() {
   if (!HasBeenSetup()) return 0;
 
   return new_space_.Capacity() +
@@ -155,7 +155,7 @@ int Heap::Capacity() {
 }
 
-int Heap::CommittedMemory() {
+intptr_t Heap::CommittedMemory() {
   if (!HasBeenSetup()) return 0;
 
   return new_space_.CommittedMemory() +
@@ -168,7 +168,7 @@ int Heap::CommittedMemory() {
 }
 
-int Heap::Available() {
+intptr_t Heap::Available() {
   if (!HasBeenSetup()) return 0;
 
   return new_space_.Available() +
@@ -289,33 +289,46 @@ void Heap::ReportStatisticsBeforeGC() {
 #if defined(ENABLE_LOGGING_AND_PROFILING)
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
-  PrintF("Memory allocator, used: %8d, available: %8d\n",
+  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
+         ", available: %8" V8_PTR_PREFIX "d\n",
         MemoryAllocator::Size(),
         MemoryAllocator::Available());
-  PrintF("New space, used: %8d, available: %8d\n",
+  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
+         ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
-  PrintF("Old pointers, used: %8d, available: %8d, waste: %8d\n",
+  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
+         ", available: %8" V8_PTR_PREFIX "d"
+         ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
        old_pointer_space_->Available(),
        old_pointer_space_->Waste());
-  PrintF("Old data space, used: %8d, available: %8d, waste: %8d\n",
+  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
+         ", available: %8" V8_PTR_PREFIX "d"
+         ", waste: %8" V8_PTR_PREFIX "d\n",
        old_data_space_->Size(),
        old_data_space_->Available(),
        old_data_space_->Waste());
-  PrintF("Code space, used: %8d, available: %8d, waste: %8d\n",
+  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
+         ", available: %8" V8_PTR_PREFIX "d"
+         ", waste: %8" V8_PTR_PREFIX "d\n",
        code_space_->Size(),
        code_space_->Available(),
        code_space_->Waste());
-  PrintF("Map space, used: %8d, available: %8d, waste: %8d\n",
+  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
+         ", available: %8" V8_PTR_PREFIX "d"
+         ", waste: %8" V8_PTR_PREFIX "d\n",
        map_space_->Size(),
        map_space_->Available(),
        map_space_->Waste());
-  PrintF("Cell space, used: %8d, available: %8d, waste: %8d\n",
+  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
+         ", available: %8" V8_PTR_PREFIX "d"
+         ", waste: %8" V8_PTR_PREFIX "d\n",
        cell_space_->Size(),
        cell_space_->Available(),
        cell_space_->Waste());
-  PrintF("Large object space, used: %8d, avaialble: %8d\n",
+  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
+         ", available: %8" V8_PTR_PREFIX "d\n",
        lo_space_->Size(),
        lo_space_->Available());
 }
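V8_PTR_PREFIX works by string-literal concatenation: the macro expands to the length modifier for intptr_t on the current platform, so one format string serves 32- and 64-bit builds. The standard-library analogue of the same trick is PRIdPTR from <cinttypes>; a small runnable sketch:

#include <cinttypes>
#include <cstdio>

int main() {
  intptr_t used = 123456789;
  // Adjacent string literals splice the platform-specific length
  // modifier into the conversion, exactly as V8_PTR_PREFIX does above.
  std::printf("used: %8" PRIdPTR "\n", used);
  return 0;
}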
@@ -364,8 +377,8 @@ void Heap::GarbageCollectionPrologue() {
 #endif
 }
 
-int Heap::SizeOfObjects() {
-  int total = 0;
+intptr_t Heap::SizeOfObjects() {
+  intptr_t total = 0;
   AllSpaces spaces;
   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     total += space->Size();
@@ -388,7 +401,7 @@ void Heap::GarbageCollectionEpilogue() {
   if (FLAG_code_stats) ReportCodeStatistics("After GC");
 #endif
 
-  Counters::alive_after_last_gc.Set(SizeOfObjects());
+  Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));
 
   Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
   Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
@@ -690,7 +703,7 @@ void Heap::PerformGarbageCollection(GarbageCollector collector,
   EnsureFromSpaceIsCommitted();
 
-  int start_new_space_size = Heap::new_space()->Size();
+  int start_new_space_size = Heap::new_space()->SizeAsInt();
 
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
@@ -962,7 +975,7 @@ void Heap::Scavenge() {
   DescriptorLookupCache::Clear();
 
   // Used for updating survived_since_last_expansion_ at function end.
-  int survived_watermark = PromotedSpaceSize();
+  intptr_t survived_watermark = PromotedSpaceSize();
 
   CheckNewSpaceExpansionCriteria();
@@ -1032,8 +1045,8 @@ void Heap::Scavenge() {
   new_space_.set_age_mark(new_space_.top());
 
   // Update how much has survived scavenge.
-  IncrementYoungSurvivorsCounter(
-      (PromotedSpaceSize() - survived_watermark) + new_space_.Size());
+  IncrementYoungSurvivorsCounter(static_cast<int>(
+      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
 
   LOG(ResourceEvent("scavenge", "end"));
@@ -1218,7 +1231,14 @@ class ScavengingVisitor : public StaticVisitorBase {
       RecordCopiedObject(target);
 #endif
       HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
+#if defined(ENABLE_LOGGING_AND_PROFILING)
+      if (Logger::is_logging() || CpuProfiler::is_profiling()) {
+        if (target->IsJSFunction()) {
+          PROFILE(FunctionMoveEvent(source->address(), target->address()));
+          PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target), source));
+        }
+      }
+#endif
       return target;
     }
@@ -2068,6 +2088,7 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
   share->set_debug_info(undefined_value());
   share->set_inferred_name(empty_string());
   share->set_compiler_hints(0);
+  share->set_initial_map(undefined_value());
   share->set_this_property_assignments_count(0);
   share->set_this_property_assignments(undefined_value());
   share->set_num_literals(0);
@@ -2436,7 +2457,7 @@ Object* Heap::CreateCode(const CodeDesc& desc,
   // Compute size
   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
   int obj_size = Code::SizeFor(body_size);
-  ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
+  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
   Object* result;
   if (obj_size > MaxObjectSizeInPagedSpace()) {
     result = lo_space_->AllocateRawCode(obj_size);
@@ -2650,6 +2671,20 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
 }
 
+static bool HasDuplicates(DescriptorArray* descriptors) {
+  int count = descriptors->number_of_descriptors();
+  if (count > 1) {
+    String* prev_key = descriptors->GetKey(0);
+    for (int i = 1; i != count; i++) {
+      String* current_key = descriptors->GetKey(i);
+      if (prev_key == current_key) return true;
+      prev_key = current_key;
+    }
+  }
+  return false;
+}
+
 Object* Heap::AllocateInitialMap(JSFunction* fun) {
   ASSERT(!fun->has_initial_map());
@@ -2683,8 +2718,9 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
   if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
     int count = fun->shared()->this_property_assignments_count();
     if (count > in_object_properties) {
-      count = in_object_properties;
-    }
+      // Inline constructor can only handle inobject properties.
+      fun->shared()->ForbidInlineConstructor();
+    } else {
       Object* descriptors_obj = DescriptorArray::Allocate(count);
       if (descriptors_obj->IsFailure()) return descriptors_obj;
       DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
@@ -2696,11 +2732,24 @@ Object* Heap::AllocateInitialMap(JSFunction* fun) {
         descriptors->Set(i, &field);
       }
       descriptors->SetNextEnumerationIndex(count);
-      descriptors->Sort();
+      descriptors->SortUnchecked();
+
+      // The descriptors may contain duplicates because the compiler does not
+      // guarantee the uniqueness of property names (it would have required
+      // quadratic time). Once the descriptors are sorted we can check for
+      // duplicates in linear time.
+      if (HasDuplicates(descriptors)) {
+        fun->shared()->ForbidInlineConstructor();
+      } else {
        map->set_instance_descriptors(descriptors);
        map->set_pre_allocated_property_fields(count);
        map->set_unused_property_fields(in_object_properties - count);
+      }
+    }
+  }
+
+  fun->shared()->StartInobjectSlackTracking(map);
+
   return map;
 }
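SortUnchecked plus HasDuplicates replaces a quadratic uniqueness guarantee with sort-then-scan: once the keys are ordered, any duplicate must sit next to its twin, so one linear pass suffices. A generic standalone version of the same scan (value comparison here, where the V8 code compares interned String pointers):

#include <string>
#include <vector>

// Assumes `keys` is already sorted, as the descriptors are after
// SortUnchecked(); any duplicates are therefore adjacent.
bool HasAdjacentDuplicates(const std::vector<std::string>& keys) {
  for (size_t i = 1; i < keys.size(); i++) {
    if (keys[i - 1] == keys[i]) return true;
  }
  return false;
}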
@@ -2717,7 +2766,20 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj,
   // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
   // verification code has to cope with (temporarily) invalid objects. See
   // for example, JSArray::JSArrayVerify).
-  obj->InitializeBody(map->instance_size());
+  Object* filler;
+  // We cannot always fill with one_pointer_filler_map because objects
+  // created from API functions expect their internal fields to be initialized
+  // with undefined_value.
+  if (map->constructor()->IsJSFunction() &&
+      JSFunction::cast(map->constructor())->shared()->
+          IsInobjectSlackTrackingInProgress()) {
+    // We might want to shrink the object later.
+    ASSERT(obj->GetInternalFieldCount() == 0);
+    filler = Heap::one_pointer_filler_map();
+  } else {
+    filler = Heap::undefined_value();
+  }
+  obj->InitializeBody(map->instance_size(), filler);
 }
@@ -2900,19 +2962,13 @@ Object* Heap::CopyJSObject(JSObject* source) {
 Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                         JSGlobalProxy* object) {
-  // Allocate initial map if absent.
-  if (!constructor->has_initial_map()) {
-    Object* initial_map = AllocateInitialMap(constructor);
-    if (initial_map->IsFailure()) return initial_map;
-    constructor->set_initial_map(Map::cast(initial_map));
-    Map::cast(initial_map)->set_constructor(constructor);
-  }
+  ASSERT(constructor->has_initial_map());
   Map* map = constructor->initial_map();
 
-  // Check that the already allocated object has the same size as
+  // Check that the already allocated object has the same size and type as
   // objects allocated using the constructor.
   ASSERT(map->instance_size() == object->map()->instance_size());
+  ASSERT(map->instance_type() == object->map()->instance_type());
 
   // Allocate the backing storage for the properties.
   int prop_size = map->unused_property_fields() - map->inobject_properties();
@@ -3159,6 +3215,7 @@ Object* Heap::AllocateRawFixedArray(int length) {
   if (length < 0 || length > FixedArray::kMaxLength) {
     return Failure::OutOfMemoryException();
   }
+  ASSERT(length > 0);
   // Use the general function if we're forced to always allocate.
   if (always_allocate()) return AllocateFixedArray(length, TENURED);
   // Allocate the raw data for a fixed array.
@@ -3169,16 +3226,19 @@ Object* Heap::AllocateRawFixedArray(int length) {
 }
 
-Object* Heap::CopyFixedArray(FixedArray* src) {
+Object* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
   int len = src->length();
   Object* obj = AllocateRawFixedArray(len);
   if (obj->IsFailure()) return obj;
   if (Heap::InNewSpace(obj)) {
     HeapObject* dst = HeapObject::cast(obj);
-    CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
+    dst->set_map(map);
+    CopyBlock(dst->address() + kPointerSize,
+              src->address() + kPointerSize,
+              FixedArray::SizeFor(len) - kPointerSize);
     return obj;
   }
-  HeapObject::cast(obj)->set_map(src->map());
+  HeapObject::cast(obj)->set_map(map);
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
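For the new-space path the copy writes the destination map first and then block-copies everything after the first word, so the source map is never cloned over the caller-supplied one. A schematic of the offset arithmetic over plain memory (not real heap objects):

#include <cstddef>
#include <cstdint>
#include <cstring>

// Schematic only: copy a heap-object-like block while preserving the
// destination's already-written first word (the map slot).
void CopyAllButFirstWord(uint8_t* dst, const uint8_t* src, size_t size) {
  const size_t kPointerSize = sizeof(void*);
  std::memcpy(dst + kPointerSize, src + kPointerSize, size - kPointerSize);
}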
@@ -3449,8 +3509,10 @@ void Heap::ReportHeapStatistics(const char* title) {
   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
          title, gc_count_);
   PrintF("mark-compact GC : %d\n", mc_count_);
-  PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
-  PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
+  PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
+         old_gen_promotion_limit_);
+  PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
+         old_gen_allocation_limit_);
 
   PrintF("\n");
   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4022,15 +4084,16 @@ bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
 
 bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
+  return ConfigureHeap(
+      FLAG_max_new_space_size * (KB / 2), FLAG_max_old_space_size * MB);
 }
 
 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
   *stats->start_marker = HeapStats::kStartMarker;
   *stats->end_marker = HeapStats::kEndMarker;
-  *stats->new_space_size = new_space_.Size();
-  *stats->new_space_capacity = new_space_.Capacity();
+  *stats->new_space_size = new_space_.SizeAsInt();
+  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
   *stats->old_pointer_space_size = old_pointer_space_->Size();
   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
   *stats->old_data_space_size = old_data_space_->Size();
@@ -4064,7 +4127,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
 }
 
-int Heap::PromotedSpaceSize() {
+intptr_t Heap::PromotedSpaceSize() {
   return old_pointer_space_->Size()
       + old_data_space_->Size()
      + code_space_->Size()
@@ -4175,8 +4238,8 @@ bool Heap::Setup(bool create_heap_objects) {
     if (!CreateInitialObjects()) return false;
   }
 
-  LOG(IntEvent("heap-capacity", Capacity()));
-  LOG(IntEvent("heap-available", Available()));
+  LOG(IntPtrTEvent("heap-capacity", Capacity()));
+  LOG(IntPtrTEvent("heap-available", Available()));
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   // This should be called only after initial objects have been created.
@@ -4210,7 +4273,8 @@ void Heap::TearDown() {
     PrintF("mark_compact_count=%d ", mc_count_);
     PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
     PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
-    PrintF("max_alive_after_gc=%d ", GCTracer::get_max_alive_after_gc());
+    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
+           GCTracer::get_max_alive_after_gc());
     PrintF("\n\n");
   }
@@ -4336,7 +4400,9 @@ class PrintHandleVisitor: public ObjectVisitor {
  public:
   void VisitPointers(Object** start, Object** end) {
     for (Object** p = start; p < end; p++)
-      PrintF(" handle %p to %p\n", p, *p);
+      PrintF(" handle %p to %p\n",
+             reinterpret_cast<void*>(p),
+             reinterpret_cast<void*>(*p));
   }
 };
@@ -4689,8 +4755,8 @@ void Heap::TracePathToGlobal() {
 #endif
 
-static int CountTotalHolesSize() {
-  int holes_size = 0;
+static intptr_t CountTotalHolesSize() {
+  intptr_t holes_size = 0;
   OldSpaces spaces;
   for (OldSpace* space = spaces.next();
        space != NULL;
@@ -4788,13 +4854,14 @@ GCTracer::~GCTracer() {
     PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
     PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
 
-    PrintF("total_size_before=%d ", start_size_);
-    PrintF("total_size_after=%d ", Heap::SizeOfObjects());
-    PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_);
-    PrintF("holes_size_after=%d ", CountTotalHolesSize());
+    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
+    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
+    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
+           in_free_list_or_wasted_before_gc_);
+    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
 
-    PrintF("allocated=%d ", allocated_since_last_gc_);
-    PrintF("promoted=%d ", promoted_objects_size_);
+    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
+    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
 
     PrintF("\n");
   }

75
deps/v8/src/heap.h

@@ -245,31 +245,31 @@ class Heap : public AllStatic {
   // semi space. The young generation consists of two semi spaces and
   // we reserve twice the amount needed for those in order to ensure
   // that new space can be aligned to its size.
-  static int MaxReserved() {
+  static intptr_t MaxReserved() {
     return 4 * reserved_semispace_size_ + max_old_generation_size_;
   }
   static int MaxSemiSpaceSize() { return max_semispace_size_; }
   static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
   static int InitialSemiSpaceSize() { return initial_semispace_size_; }
-  static int MaxOldGenerationSize() { return max_old_generation_size_; }
+  static intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
 
   // Returns the capacity of the heap in bytes w/o growing. Heap grows when
   // more spaces are needed until it reaches the limit.
-  static int Capacity();
+  static intptr_t Capacity();
 
   // Returns the amount of memory currently committed for the heap.
-  static int CommittedMemory();
+  static intptr_t CommittedMemory();
 
   // Returns the available bytes in space w/o growing.
   // Heap doesn't guarantee that it can allocate an object that requires
   // all available bytes. Check MaxHeapObjectSize() instead.
-  static int Available();
+  static intptr_t Available();
 
   // Returns the maximum object size in paged space.
   static inline int MaxObjectSizeInPagedSpace();
 
   // Returns the size of all objects residing in the heap.
-  static int SizeOfObjects();
+  static intptr_t SizeOfObjects();
 
   // Return the starting address and a mask for the new space. And-masking an
   // address with the mask will result in the start address of the new space
@@ -498,7 +498,12 @@ class Heap : public AllStatic {
   // Make a copy of src and return it. Returns
   // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT static Object* CopyFixedArray(FixedArray* src);
+  MUST_USE_RESULT static inline Object* CopyFixedArray(FixedArray* src);
+
+  // Make a copy of src, set the map, and return the copy. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT static Object* CopyFixedArrayWithMap(FixedArray* src,
+                                                       Map* map);
 
   // Allocates a fixed array initialized with the hole values.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -1064,8 +1069,8 @@ class Heap : public AllStatic {
   static int reserved_semispace_size_;
   static int max_semispace_size_;
   static int initial_semispace_size_;
-  static int max_old_generation_size_;
-  static size_t code_range_size_;
+  static intptr_t max_old_generation_size_;
+  static intptr_t code_range_size_;
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
@@ -1093,7 +1098,7 @@ class Heap : public AllStatic {
   static HeapState gc_state_;
 
   // Returns the size of objects residing in non new spaces.
-  static int PromotedSpaceSize();
+  static intptr_t PromotedSpaceSize();
 
   // Returns the amount of external memory registered since last global gc.
   static int PromotedExternalMemorySize();
@@ -1128,16 +1133,16 @@ class Heap : public AllStatic {
   // Limit that triggers a global GC on the next (normally caused) GC. This
   // is checked when we have already decided to do a GC to help determine
   // which collector to invoke.
-  static int old_gen_promotion_limit_;
+  static intptr_t old_gen_promotion_limit_;
 
   // Limit that triggers a global GC as soon as is reasonable. This is
   // checked before expanding a paged space in the old generation and on
   // every allocation in large object space.
-  static int old_gen_allocation_limit_;
+  static intptr_t old_gen_allocation_limit_;
 
   // Limit on the amount of externally allocated memory allowed
   // between global GCs. If reached a global GC is forced.
-  static int external_allocation_limit_;
+  static intptr_t external_allocation_limit_;
 
   // The amount of external memory registered through the API kept alive
   // by global handles
@@ -1226,8 +1231,8 @@ class Heap : public AllStatic {
                                 GCTracer* tracer,
                                 CollectionPolicy collectionPolicy);
 
-  static const int kMinimumPromotionLimit = 2 * MB;
-  static const int kMinimumAllocationLimit = 8 * MB;
+  static const intptr_t kMinimumPromotionLimit = 2 * MB;
+  static const intptr_t kMinimumAllocationLimit = 8 * MB;
 
   inline static void UpdateOldSpaceLimits();
@@ -1380,24 +1385,24 @@ class HeapStats {
   int* start_marker;  // 0
   int* new_space_size;  // 1
   int* new_space_capacity;  // 2
-  int* old_pointer_space_size;  // 3
-  int* old_pointer_space_capacity;  // 4
-  int* old_data_space_size;  // 5
-  int* old_data_space_capacity;  // 6
-  int* code_space_size;  // 7
-  int* code_space_capacity;  // 8
-  int* map_space_size;  // 9
-  int* map_space_capacity;  // 10
-  int* cell_space_size;  // 11
-  int* cell_space_capacity;  // 12
-  int* lo_space_size;  // 13
+  intptr_t* old_pointer_space_size;  // 3
+  intptr_t* old_pointer_space_capacity;  // 4
+  intptr_t* old_data_space_size;  // 5
+  intptr_t* old_data_space_capacity;  // 6
+  intptr_t* code_space_size;  // 7
+  intptr_t* code_space_capacity;  // 8
+  intptr_t* map_space_size;  // 9
+  intptr_t* map_space_capacity;  // 10
+  intptr_t* cell_space_size;  // 11
+  intptr_t* cell_space_capacity;  // 12
+  intptr_t* lo_space_size;  // 13
   int* global_handle_count;  // 14
   int* weak_global_handle_count;  // 15
   int* pending_global_handle_count;  // 16
   int* near_death_global_handle_count;  // 17
   int* destroyed_global_handle_count;  // 18
-  int* memory_allocator_size;  // 19
-  int* memory_allocator_capacity;  // 20
+  intptr_t* memory_allocator_size;  // 19
+  intptr_t* memory_allocator_capacity;  // 20
   int* objects_per_type;  // 21
   int* size_per_type;  // 22
   int* os_error;  // 23
@@ -1832,7 +1837,7 @@ class GCTracer BASE_EMBEDDED {
   static int get_max_gc_pause() { return max_gc_pause_; }
 
   // Returns maximum size of objects alive after GC.
-  static int get_max_alive_after_gc() { return max_alive_after_gc_; }
+  static intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
 
   // Returns minimal interval between two subsequent collections.
   static int get_min_in_mutator() { return min_in_mutator_; }
@@ -1847,7 +1852,7 @@ class GCTracer BASE_EMBEDDED {
   }
 
   double start_time_;  // Timestamp set in the constructor.
-  int start_size_;  // Size of objects in heap set in constructor.
+  intptr_t start_size_;  // Size of objects in heap set in constructor.
   GarbageCollector collector_;  // Type of collector.
 
   // A count (including this one, eg, the first collection is 1) of the
@@ -1879,30 +1884,30 @@ class GCTracer BASE_EMBEDDED {
   // Total amount of space either wasted or contained in one of free lists
   // before the current GC.
-  int in_free_list_or_wasted_before_gc_;
+  intptr_t in_free_list_or_wasted_before_gc_;
 
   // Difference between space used in the heap at the beginning of the current
   // collection and the end of the previous collection.
-  int allocated_since_last_gc_;
+  intptr_t allocated_since_last_gc_;
 
   // Amount of time spent in mutator that is time elapsed between end of the
   // previous collection and the beginning of the current one.
   double spent_in_mutator_;
 
   // Size of objects promoted during the current collection.
-  int promoted_objects_size_;
+  intptr_t promoted_objects_size_;
 
   // Maximum GC pause.
   static int max_gc_pause_;
 
   // Maximum size of objects alive after GC.
-  static int max_alive_after_gc_;
+  static intptr_t max_alive_after_gc_;
 
   // Minimal interval between two subsequent collections.
   static int min_in_mutator_;
 
   // Size of objects alive after last GC.
-  static int alive_after_last_gc_;
+  static intptr_t alive_after_last_gc_;
 
   static double last_gc_end_timestamp_;
 };

133
deps/v8/src/ia32/assembler-ia32.cc

@@ -993,6 +993,14 @@ void Assembler::dec_b(Register dst) {
 }
 
+void Assembler::dec_b(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xFE);
+  emit_operand(ecx, dst);
+}
+
 void Assembler::dec(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1511,32 +1519,6 @@ void Assembler::bind_to(Label* L, int pos) {
 }
 
-void Assembler::link_to(Label* L, Label* appendix) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = NULL;
-  if (appendix->is_linked()) {
-    if (L->is_linked()) {
-      // Append appendix to L's list.
-      Label p;
-      Label q = *L;
-      do {
-        p = q;
-        Displacement disp = disp_at(&q);
-        disp.next(&q);
-      } while (q.is_linked());
-      Displacement disp = disp_at(&p);
-      disp.link_to(appendix);
-      disp_at_put(&p, disp);
-      p.Unuse();  // to avoid assertion failure in ~Label
-    } else {
-      // L is empty, simply use appendix.
-      *L = *appendix;
-    }
-  }
-  appendix->Unuse();  // appendix should not be used anymore
-}
-
 void Assembler::bind(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = NULL;
@@ -1545,6 +1527,19 @@ void Assembler::bind(Label* L) {
 }
 
+void Assembler::bind(NearLabel* L) {
+  ASSERT(!L->is_bound());
+  last_pc_ = NULL;
+  while (L->unresolved_branches_ > 0) {
+    int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
+    int disp = pc_offset() - branch_pos;
+    ASSERT(is_int8(disp));
+    set_byte_at(branch_pos - sizeof(int8_t), disp);
+    L->unresolved_branches_--;
+  }
+  L->bind_to(pc_offset());
+}
+
 void Assembler::call(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1641,6 +1636,24 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
 }
 
+void Assembler::jmp(NearLabel* L) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (L->is_bound()) {
+    const int short_size = 2;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    ASSERT(is_int8(offs - short_size));
+    // 1110 1011 #8-bit disp.
+    EMIT(0xEB);
+    EMIT((offs - short_size) & 0xFF);
+  } else {
+    EMIT(0xEB);
+    EMIT(0x00);  // The displacement will be resolved later.
+    L->link_to(pc_offset());
+  }
+}
+
 void Assembler::j(Condition cc, Label* L, Hint hint) {
   EnsureSpace ensure_space(this);
@@ -1696,6 +1709,27 @@ void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
 }
 
+void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(0 <= cc && cc < 16);
+  if (FLAG_emit_branch_hints && hint != no_hint) EMIT(hint);
+  if (L->is_bound()) {
+    const int short_size = 2;
+    int offs = L->pos() - pc_offset();
+    ASSERT(offs <= 0);
+    ASSERT(is_int8(offs - short_size));
+    // 0111 tttn #8-bit disp
+    EMIT(0x70 | cc);
+    EMIT((offs - short_size) & 0xFF);
+  } else {
+    EMIT(0x70 | cc);
+    EMIT(0x00);  // The displacement will be resolved later.
+    L->link_to(pc_offset());
+  }
+}
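Both NearLabel emitters use the same layout: one opcode byte (plus an optional hint prefix for j), then a single displacement byte measured from the end of the two-byte instruction, which is why a bound label subtracts short_size. Unbound branches emit 0x00 and are backpatched by bind(NearLabel*) through set_byte_at. A standalone sketch of that rel8 fixup, under the same two-byte-instruction assumption:

#include <cassert>
#include <cstdint>
#include <vector>

// branch_end_pos is the offset just past the two-byte short jump;
// code[branch_end_pos - 1] is its displacement byte.
void PatchShortJump(std::vector<uint8_t>& code, int branch_end_pos,
                    int target_pos) {
  int disp = target_pos - branch_end_pos;  // relative to next instruction
  assert(disp >= -128 && disp <= 127);     // must fit in a signed byte
  code[branch_end_pos - 1] = static_cast<uint8_t>(disp & 0xFF);
}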
 // FPU instructions.
 
 void Assembler::fld(int i) {
@@ -2179,6 +2213,16 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
 }
 
+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x54);
+  emit_sse_operand(dst, src);
+}
+
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2201,6 +2245,28 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
 }
 
+void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0xC2);
+  emit_sse_operand(dst, src);
+  EMIT(1);  // LT == 1
+}
+
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x0F);
+  EMIT(0x28);
+  emit_sse_operand(dst, src);
+}
+
 void Assembler::movdqa(const Operand& dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2348,7 +2414,7 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) {
 
 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
-  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2358,6 +2424,19 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
   emit_sse_operand(dst, src);
 }
 
+void Assembler::psllq(XMMRegister reg, int8_t imm8) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x73);
+  emit_sse_operand(esi, reg);  // esi == 6
+  EMIT(imm8);
+}
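psllq-by-immediate (66 0F 73 /6 ib) has no second register operand: the reg field of the ModR/M byte carries the opcode extension /6, which is why the code above passes esi, whose register code happens to be 6, to emit_sse_operand. A sketch of the bytes that encoding produces (illustration, not the V8 emitter):

#include <cstdint>
#include <vector>

// Encode "psllq xmm<n>, imm8". mod=11 selects the register-direct form;
// the reg field holds the /6 extension rather than a register.
void EmitPsllqImm(std::vector<uint8_t>& code, int xmm_reg, uint8_t imm8) {
  code.push_back(0x66);
  code.push_back(0x0F);
  code.push_back(0x73);
  code.push_back(static_cast<uint8_t>(0xC0 | (6 << 3) | (xmm_reg & 7)));
  code.push_back(imm8);
}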
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
   emit_operand(ireg, adr);

19
deps/v8/src/ia32/assembler-ia32.h

@@ -376,6 +376,7 @@ class CpuFeatures : public AllStatic {
   static bool IsSupported(CpuFeature f) {
     if (f == SSE2 && !FLAG_enable_sse2) return false;
     if (f == SSE3 && !FLAG_enable_sse3) return false;
+    if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
     if (f == CMOV && !FLAG_enable_cmov) return false;
     if (f == RDTSC && !FLAG_enable_rdtsc) return false;
     return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
@@ -595,6 +596,7 @@ class Assembler : public Malloced {
   void cmp(const Operand& op, Handle<Object> handle);
 
   void dec_b(Register dst);
+  void dec_b(const Operand& dst);
 
   void dec(Register dst);
   void dec(const Operand& dst);
@@ -687,6 +689,7 @@ class Assembler : public Malloced {
   // but it may be bound only once.
 
   void bind(Label* L);  // binds an unbound label L to the current code position
+  void bind(NearLabel* L);
 
   // Calls
   void call(Label* L);
@@ -701,11 +704,17 @@ class Assembler : public Malloced {
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);
 
+  // Short jump
+  void jmp(NearLabel* L);
+
   // Conditional jumps
   void j(Condition cc, Label* L, Hint hint = no_hint);
   void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
   void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
 
+  // Conditional short jump
+  void j(Condition cc, NearLabel* L, Hint hint = no_hint);
+
   // Floating-point operations
   void fld(int i);
   void fstp(int i);
@@ -788,9 +797,15 @@ class Assembler : public Malloced {
   void xorpd(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
+  void andpd(XMMRegister dst, XMMRegister src);
+
   void ucomisd(XMMRegister dst, XMMRegister src);
   void movmskpd(Register dst, XMMRegister src);
 
+  void cmpltsd(XMMRegister dst, XMMRegister src);
+
+  void movaps(XMMRegister dst, XMMRegister src);
+
   void movdqa(XMMRegister dst, const Operand& src);
   void movdqa(const Operand& dst, XMMRegister src);
   void movdqu(XMMRegister dst, const Operand& src);
@@ -806,6 +821,8 @@ class Assembler : public Malloced {
   void pxor(XMMRegister dst, XMMRegister src);
   void ptest(XMMRegister dst, XMMRegister src);
 
+  void psllq(XMMRegister reg, int8_t imm8);
+
   // Parallel XMM operations.
   void movntdqa(XMMRegister src, const Operand& dst);
   void movntdq(const Operand& dst, XMMRegister src);
@@ -868,6 +885,7 @@ class Assembler : public Malloced {
  private:
   byte* addr_at(int pos) { return buffer_ + pos; }
   byte byte_at(int pos) { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos) {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
@@ -902,7 +920,6 @@ class Assembler : public Malloced {
   // labels
   void print(Label* L);
   void bind_to(Label* L, int pos);
-  void link_to(Label* L, Label* appendix);
 
   // displacements
   inline Displacement disp_at(Label* L);

40
deps/v8/src/ia32/builtins-ia32.cc

@@ -105,7 +105,11 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
 
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function) {
+                                           bool is_api_function,
+                                           bool count_constructions) {
+  // Should never count constructions for api objects.
+  ASSERT(!is_api_function || !count_constructions);
+
   // Enter a construct frame.
   __ EnterConstructFrame();
@@ -148,6 +152,26 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
     __ j(equal, &rt_call);
 
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+      __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
+      __ j(not_zero, &allocate);
+
+      __ push(eax);
+      __ push(edi);
+
+      __ push(edi);  // constructor
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(edi);
+      __ pop(eax);
+
+      __ bind(&allocate);
+    }
+
     // Now allocate the JSObject on the heap.
     // edi: constructor
     // eax: initial map
@@ -167,7 +191,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // ebx: JSObject
     // edi: start of next object
     { Label loop, entry;
+      // To allow for truncation.
+      if (count_constructions) {
+        __ mov(edx, Factory::one_pointer_filler_map());
+      } else {
        __ mov(edx, Factory::undefined_value());
+      }
       __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
       __ jmp(&entry);
       __ bind(&loop);
@@ -351,13 +380,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 }
 
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, true);
+}
+
 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, false);
+  Generate_JSConstructStubHelper(masm, false, false);
 }
 
 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
-  Generate_JSConstructStubHelper(masm, true);
+  Generate_JSConstructStubHelper(masm, true, false);
 }
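The countdown stub is the ia32 half of inobject slack tracking: each construction decrements a one-byte counter in the SharedFunctionInfo, and only when it reaches zero does the stub call into the runtime, which finalizes (shrinks) the instance size and, per the comment in the hunk, replaces the stub so the slow path runs once. A high-level sketch of the fast-path test the dec_b/j(not_zero) pair implements (names illustrative, not the actual runtime interface):

#include <cstdint>

struct SharedInfoSketch {
  uint8_t construction_count;  // the byte dec_b decrements above
};

// Mirrors dec_b + j(not_zero, &allocate): fall through to the runtime
// call only when the generous-allocation countdown hits zero.
bool ShouldFinalizeInstanceSize(SharedInfoSketch* shared) {
  return --shared->construction_count == 0;
}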

102
deps/v8/src/ia32/code-stubs-ia32.cc

@ -208,7 +208,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined). // NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
void ToBooleanStub::Generate(MacroAssembler* masm) { void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string; NearLabel false_result, true_result, not_string;
__ mov(eax, Operand(esp, 1 * kPointerSize)); __ mov(eax, Operand(esp, 1 * kPointerSize));
// 'null' => false. // 'null' => false.
@ -966,7 +966,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ bind(&non_smi_result); __ bind(&non_smi_result);
// Allocate a heap number if needed. // Allocate a heap number if needed.
__ mov(ebx, Operand(eax)); // ebx: result __ mov(ebx, Operand(eax)); // ebx: result
Label skip_allocation; NearLabel skip_allocation;
switch (mode_) { switch (mode_) {
case OVERWRITE_LEFT: case OVERWRITE_LEFT:
case OVERWRITE_RIGHT: case OVERWRITE_RIGHT:
@ -1036,7 +1036,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
} }
// Test if left operand is a string. // Test if left operand is a string.
Label lhs_not_string; NearLabel lhs_not_string;
__ test(lhs, Immediate(kSmiTagMask)); __ test(lhs, Immediate(kSmiTagMask));
__ j(zero, &lhs_not_string); __ j(zero, &lhs_not_string);
__ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx); __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
@ -1045,7 +1045,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
__ TailCallStub(&string_add_left_stub); __ TailCallStub(&string_add_left_stub);
Label call_runtime_with_args; NearLabel call_runtime_with_args;
// Left operand is not a string, test right. // Left operand is not a string, test right.
__ bind(&lhs_not_string); __ bind(&lhs_not_string);
__ test(rhs, Immediate(kSmiTagMask)); __ test(rhs, Immediate(kSmiTagMask));
@ -1221,8 +1221,8 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Test that eax is a number. // Test that eax is a number.
Label runtime_call; Label runtime_call;
Label runtime_call_clear_stack; Label runtime_call_clear_stack;
Label input_not_smi; NearLabel input_not_smi;
Label loaded; NearLabel loaded;
__ mov(eax, Operand(esp, kPointerSize)); __ mov(eax, Operand(esp, kPointerSize));
__ test(eax, Immediate(kSmiTagMask)); __ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &input_not_smi); __ j(not_zero, &input_not_smi);
@ -1295,7 +1295,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ lea(ecx, Operand(ecx, ecx, times_2, 0)); __ lea(ecx, Operand(ecx, ecx, times_2, 0));
__ lea(ecx, Operand(eax, ecx, times_4, 0)); __ lea(ecx, Operand(eax, ecx, times_4, 0));
// Check if cache matches: Double value is stored in uint32_t[2] array. // Check if cache matches: Double value is stored in uint32_t[2] array.
Label cache_miss; NearLabel cache_miss;
__ cmp(ebx, Operand(ecx, 0)); __ cmp(ebx, Operand(ecx, 0));
__ j(not_equal, &cache_miss); __ j(not_equal, &cache_miss);
__ cmp(edx, Operand(ecx, kIntSize)); __ cmp(edx, Operand(ecx, kIntSize));
@ -1338,7 +1338,7 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Only free register is edi. // Only free register is edi.
Label done; NearLabel done;
ASSERT(type_ == TranscendentalCache::SIN || ASSERT(type_ == TranscendentalCache::SIN ||
type_ == TranscendentalCache::COS); type_ == TranscendentalCache::COS);
// More transcendental types can be added later. // More transcendental types can be added later.
@ -1346,7 +1346,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Both fsin and fcos require arguments in the range +/-2^63 and // Both fsin and fcos require arguments in the range +/-2^63 and
// return NaN for infinities and NaN. They can share all code except // return NaN for infinities and NaN. They can share all code except
// the actual fsin/fcos operation. // the actual fsin/fcos operation.
Label in_range; NearLabel in_range;
// If argument is outside the range -2^63..2^63, fsin/cos doesn't // If argument is outside the range -2^63..2^63, fsin/cos doesn't
// work. We must reduce it to the appropriate range. // work. We must reduce it to the appropriate range.
__ mov(edi, edx); __ mov(edi, edx);
@ -1357,7 +1357,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ j(below, &in_range, taken); __ j(below, &in_range, taken);
// Check for infinity and NaN. Both return NaN for sin. // Check for infinity and NaN. Both return NaN for sin.
__ cmp(Operand(edi), Immediate(0x7ff00000)); __ cmp(Operand(edi), Immediate(0x7ff00000));
Label non_nan_result; NearLabel non_nan_result;
__ j(not_equal, &non_nan_result, taken); __ j(not_equal, &non_nan_result, taken);
// Input is +/-Infinity or NaN. Result is NaN. // Input is +/-Infinity or NaN. Result is NaN.
__ fstp(0); __ fstp(0);
@ -1377,7 +1377,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
__ fld(1); __ fld(1);
// FPU Stack: input, 2*pi, input. // FPU Stack: input, 2*pi, input.
{ {
Label no_exceptions; NearLabel no_exceptions;
__ fwait(); __ fwait();
__ fnstsw_ax(); __ fnstsw_ax();
// Clear if Illegal Operand or Zero Division exceptions are set. // Clear if Illegal Operand or Zero Division exceptions are set.
@ -1389,7 +1389,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
// Compute st(0) % st(1) // Compute st(0) % st(1)
{ {
Label partial_remainder_loop; NearLabel partial_remainder_loop;
__ bind(&partial_remainder_loop); __ bind(&partial_remainder_loop);
__ fprem1(); __ fprem1();
__ fwait(); __ fwait();
@ -1552,7 +1552,7 @@ void IntegerConvert(MacroAssembler* masm,
__ shr_cl(scratch2); __ shr_cl(scratch2);
// Now the unsigned answer is in scratch2. We need to move it to ecx and // Now the unsigned answer is in scratch2. We need to move it to ecx and
// we may need to fix the sign. // we may need to fix the sign.
Label negative; NearLabel negative;
__ xor_(ecx, Operand(ecx)); __ xor_(ecx, Operand(ecx));
__ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
__ j(greater, &negative); __ j(greater, &negative);
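
IntegerConvert implements ECMAScript ToInt32 for heap numbers: shift the mantissa by the exponent, keep the low 32 bits, and negate if the sign was set (the &negative path above). A portable model of the result, assuming the usual modulo-2^32 wrap:

    #include <cmath>
    #include <cstdint>
    // Models the value IntegerConvert leaves in ecx for a finite double.
    int32_t ToInt32(double d) {
      if (!std::isfinite(d)) return 0;
      const double two32 = 4294967296.0;  // 2^32
      double m = std::fmod(std::trunc(d), two32);
      if (m < 0) m += two32;              // wrap into [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }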
@ -1702,7 +1702,7 @@ void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) { Register number) {
Label load_smi, done; NearLabel load_smi, done;
__ test(number, Immediate(kSmiTagMask)); __ test(number, Immediate(kSmiTagMask));
__ j(zero, &load_smi, not_taken); __ j(zero, &load_smi, not_taken);
@ -1720,7 +1720,7 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
Label load_smi_edx, load_eax, load_smi_eax, done; NearLabel load_smi_edx, load_eax, load_smi_eax, done;
// Load operand in edx into xmm0. // Load operand in edx into xmm0.
__ test(edx, Immediate(kSmiTagMask)); __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
@ -1750,7 +1750,7 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
Label* not_numbers) { Label* not_numbers) {
Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done; NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
// Load operand in edx into xmm0, or branch to not_numbers. // Load operand in edx into xmm0, or branch to not_numbers.
__ test(edx, Immediate(kSmiTagMask)); __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
@ -1798,7 +1798,7 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register scratch, Register scratch,
ArgLocation arg_location) { ArgLocation arg_location) {
Label load_smi_1, load_smi_2, done_load_1, done; NearLabel load_smi_1, load_smi_2, done_load_1, done;
if (arg_location == ARGS_IN_REGISTERS) { if (arg_location == ARGS_IN_REGISTERS) {
__ mov(scratch, edx); __ mov(scratch, edx);
} else { } else {
@ -1857,7 +1857,7 @@ void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float, Label* non_float,
Register scratch) { Register scratch) {
Label test_other, done; NearLabel test_other, done;
// Test if both operands are floats or smi -> scratch=k_is_float; // Test if both operands are floats or smi -> scratch=k_is_float;
// Otherwise scratch = k_not_float. // Otherwise scratch = k_not_float.
__ test(edx, Immediate(kSmiTagMask)); __ test(edx, Immediate(kSmiTagMask));
@ -1884,7 +1884,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
if (op_ == Token::SUB) { if (op_ == Token::SUB) {
if (include_smi_code_) { if (include_smi_code_) {
// Check whether the value is a smi. // Check whether the value is a smi.
Label try_float; NearLabel try_float;
__ test(eax, Immediate(kSmiTagMask)); __ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &try_float, not_taken); __ j(not_zero, &try_float, not_taken);
@ -1953,7 +1953,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
&slow); &slow);
// Do the bitwise operation and check if the result fits in a smi. // Do the bitwise operation and check if the result fits in a smi.
Label try_float; NearLabel try_float;
__ not_(ecx); __ not_(ecx);
__ cmp(ecx, 0xc0000000); __ cmp(ecx, 0xc0000000);
__ j(sign, &try_float, not_taken); __ j(sign, &try_float, not_taken);
@ -2026,7 +2026,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ j(not_zero, &slow, not_taken); __ j(not_zero, &slow, not_taken);
// Check if the calling frame is an arguments adaptor frame. // Check if the calling frame is an arguments adaptor frame.
Label adaptor; NearLabel adaptor;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@ -2103,7 +2103,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Try the new space allocation. Start out with computing the size of // Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array. // the arguments object and the elements array.
Label add_arguments_object; NearLabel add_arguments_object;
__ bind(&try_allocate); __ bind(&try_allocate);
__ test(ecx, Operand(ecx)); __ test(ecx, Operand(ecx));
__ j(zero, &add_arguments_object); __ j(zero, &add_arguments_object);
@ -2155,7 +2155,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ SmiUntag(ecx); __ SmiUntag(ecx);
// Copy the fixed array slots. // Copy the fixed array slots.
Label loop; NearLabel loop;
__ bind(&loop); __ bind(&loop);
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
@ -2383,7 +2383,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 4: End of string data // Argument 4: End of string data
// Argument 3: Start of string data // Argument 3: Start of string data
Label setup_two_byte, setup_rest; NearLabel setup_two_byte, setup_rest;
__ test(edi, Operand(edi)); __ test(edi, Operand(edi));
__ mov(edi, FieldOperand(eax, String::kLengthOffset)); __ mov(edi, FieldOperand(eax, String::kLengthOffset));
__ j(zero, &setup_two_byte); __ j(zero, &setup_two_byte);
@ -2477,7 +2477,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ebx: last_match_info backing store (FixedArray) // ebx: last_match_info backing store (FixedArray)
// ecx: offsets vector // ecx: offsets vector
// edx: number of capture registers // edx: number of capture registers
Label next_capture, done; NearLabel next_capture, done;
// Capture register counter starts from the number of capture registers and // Capture register counter starts from the number of capture registers and
// counts down until wrapping after zero. // counts down until wrapping after zero.
__ bind(&next_capture); __ bind(&next_capture);
@ -2533,13 +2533,13 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// number string cache for smis is just the smi value, and the hash for // number string cache for smis is just the smi value, and the hash for
// doubles is the xor of the upper and lower words. See // doubles is the xor of the upper and lower words. See
// Heap::GetNumberStringCache. // Heap::GetNumberStringCache.
Label smi_hash_calculated; NearLabel smi_hash_calculated;
Label load_result_from_cache; NearLabel load_result_from_cache;
if (object_is_smi) { if (object_is_smi) {
__ mov(scratch, object); __ mov(scratch, object);
__ SmiUntag(scratch); __ SmiUntag(scratch);
} else { } else {
Label not_smi, hash_calculated; NearLabel not_smi, hash_calculated;
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(object, Immediate(kSmiTagMask)); __ test(object, Immediate(kSmiTagMask));
__ j(not_zero, &not_smi); __ j(not_zero, &not_smi);
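
Per the comment above, the number-string cache hashes smis by value and doubles by xoring the two halves of the IEEE representation. A sketch for the double case, under the assumption that the cache size is a power of two so masking replaces modulo (the real constant lives in Heap::GetNumberStringCache):

    #include <cstdint>
    #include <cstring>
    uint32_t DoubleNumberStringHash(double value, uint32_t cache_size) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);  // the uint32_t[2] view
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      return (lo ^ hi) & (cache_size - 1);      // smis: value & mask
    }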
@ -2663,7 +2663,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
if (cc_ != equal) { if (cc_ != equal) {
// Check for undefined. undefined OP undefined is false even though // Check for undefined. undefined OP undefined is false even though
// undefined == undefined. // undefined == undefined.
Label check_for_nan; NearLabel check_for_nan;
__ cmp(edx, Factory::undefined_value()); __ cmp(edx, Factory::undefined_value());
__ j(not_equal, &check_for_nan); __ j(not_equal, &check_for_nan);
__ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
@ -2678,7 +2678,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(Smi::FromInt(EQUAL))); __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0); __ ret(0);
} else { } else {
Label heap_number; NearLabel heap_number;
__ cmp(FieldOperand(edx, HeapObject::kMapOffset), __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map())); Immediate(Factory::heap_number_map()));
__ j(equal, &heap_number); __ j(equal, &heap_number);
@ -2713,7 +2713,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ setcc(above_equal, eax); __ setcc(above_equal, eax);
__ ret(0); __ ret(0);
} else { } else {
Label nan; NearLabel nan;
__ j(above_equal, &nan); __ j(above_equal, &nan);
__ Set(eax, Immediate(Smi::FromInt(EQUAL))); __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0); __ ret(0);
@ -2730,7 +2730,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Non-strict object equality is slower, so it is handled later in the stub. // Non-strict object equality is slower, so it is handled later in the stub.
if (cc_ == equal && strict_) { if (cc_ == equal && strict_) {
Label slow; // Fallthrough label. Label slow; // Fallthrough label.
Label not_smis; NearLabel not_smis;
// If we're doing a strict equality comparison, we don't have to do // If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects // type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual // and oddballs. Non-smi numbers and strings still go through the usual
@ -2771,13 +2771,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Get the type of the first operand. // Get the type of the first operand.
// If the first object is a JS object, we have done pointer comparison. // If the first object is a JS object, we have done pointer comparison.
Label first_non_object; NearLabel first_non_object;
STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
__ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
__ j(below, &first_non_object); __ j(below, &first_non_object);
// Return non-zero (eax is not zero) // Return non-zero (eax is not zero)
Label return_not_equal; NearLabel return_not_equal;
STATIC_ASSERT(kHeapObjectTag != 0); STATIC_ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal); __ bind(&return_not_equal);
__ ret(0); __ ret(0);
@ -2828,7 +2828,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Don't base result on EFLAGS when a NaN is involved. // Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, not_taken); __ j(parity_even, &unordered, not_taken);
Label below_label, above_label; NearLabel below_label, above_label;
// Return a result of -1, 0, or 1, based on EFLAGS. // Return a result of -1, 0, or 1, based on EFLAGS.
__ j(below, &below_label, not_taken); __ j(below, &below_label, not_taken);
__ j(above, &above_label, not_taken); __ j(above, &above_label, not_taken);
@ -2893,8 +2893,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Non-strict equality. Objects are unequal if // Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable, // they are both JSObjects and not undetectable,
// and their pointers are different. // and their pointers are different.
Label not_both_objects; NearLabel not_both_objects;
Label return_unequal; NearLabel return_unequal;
// At most one is a smi, so we can test for smi by adding the two. // At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus // A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear. // a heap object has the low bit clear.
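
The add-and-test trick in the comment above relies on the ia32 tagging scheme: smis have the tag bit clear, heap objects have it set. A sketch, assuming kSmiTag == 0 and kHeapObjectTag == 1:

    #include <cstdint>
    // True iff exactly one of the two operands is a smi (at most one can
    // be, per the comment): smi + heap object -> low bit 1 (0 + 1);
    // heap + heap object -> 1 + 1 == 2, low bit 0.
    bool OneOperandIsSmi(uint32_t a, uint32_t b) {
      return ((a + b) & 1) != 0;
    }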
@ -3056,7 +3056,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// not NULL. The frame pointer is NULL in the exception handler of // not NULL. The frame pointer is NULL in the exception handler of
// a JS entry frame. // a JS entry frame.
__ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL. __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
Label skip; NearLabel skip;
__ cmp(ebp, 0); __ cmp(ebp, 0);
__ j(equal, &skip, not_taken); __ j(equal, &skip, not_taken);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@ -3188,7 +3188,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Make sure we're not trying to return 'the hole' from the runtime // Make sure we're not trying to return 'the hole' from the runtime
// call as this may lead to crashes in the IC code later. // call as this may lead to crashes in the IC code later.
if (FLAG_debug_code) { if (FLAG_debug_code) {
Label okay; NearLabel okay;
__ cmp(eax, Factory::the_hole_value()); __ cmp(eax, Factory::the_hole_value());
__ j(not_equal, &okay); __ j(not_equal, &okay);
__ int3(); __ int3();
@ -3250,7 +3250,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
__ mov(esp, Operand::StaticVariable(handler_address)); __ mov(esp, Operand::StaticVariable(handler_address));
// Unwind the handlers until the ENTRY handler is found. // Unwind the handlers until the ENTRY handler is found.
Label loop, done; NearLabel loop, done;
__ bind(&loop); __ bind(&loop);
// Load the type of the current stack handler. // Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset; const int kStateOffset = StackHandlerConstants::kStateOffset;
@ -3468,7 +3468,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// edx is function, eax is map. // edx is function, eax is map.
// Look up the function and the map in the instanceof cache. // Look up the function and the map in the instanceof cache.
Label miss; NearLabel miss;
ExternalReference roots_address = ExternalReference::roots_address(); ExternalReference roots_address = ExternalReference::roots_address();
__ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address)); __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
@ -3500,7 +3500,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset)); __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype. // Loop through the prototype chain looking for the function prototype.
Label loop, is_instance, is_not_instance; NearLabel loop, is_instance, is_not_instance;
__ bind(&loop); __ bind(&loop);
__ cmp(ecx, Operand(ebx)); __ cmp(ecx, Operand(ebx));
__ j(equal, &is_instance); __ j(equal, &is_instance);
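
The loop above is the usual instanceof prototype walk: follow the receiver's prototype chain until it either hits the function's prototype (instance) or null (not an instance). A structural sketch with a hypothetical Object node, not V8's real object model:

    struct Object { const Object* prototype; };
    // Walks the chain the loop above walks: ecx starts at the receiver's
    // prototype and follows prototype pointers until a match or null.
    bool IsInstance(const Object* first_prototype,
                    const Object* function_prototype) {
      for (const Object* p = first_prototype; p != nullptr; p = p->prototype) {
        if (p == function_prototype) return true;  // &is_instance
      }
      return false;                                // &is_not_instance
    }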
@ -3837,7 +3837,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: first string // eax: first string
// edx: second string // edx: second string
// Check if either of the strings is empty. In that case return the other. // Check if either of the strings is empty. In that case return the other.
Label second_not_zero_length, both_not_zero_length; NearLabel second_not_zero_length, both_not_zero_length;
__ mov(ecx, FieldOperand(edx, String::kLengthOffset)); __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
__ test(ecx, Operand(ecx)); __ test(ecx, Operand(ecx));
@ -4123,7 +4123,7 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register count, Register count,
Register scratch, Register scratch,
bool ascii) { bool ascii) {
Label loop; NearLabel loop;
__ bind(&loop); __ bind(&loop);
// This loop just copies one character at a time, as it is only used for very // This loop just copies one character at a time, as it is only used for very
// short strings. // short strings.
@ -4170,7 +4170,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
} }
// Don't enter the rep movs if there are fewer than 4 bytes to copy. // Don't enter the rep movs if there are fewer than 4 bytes to copy.
Label last_bytes; NearLabel last_bytes;
__ test(count, Immediate(~3)); __ test(count, Immediate(~3));
__ j(zero, &last_bytes); __ j(zero, &last_bytes);
@ -4190,7 +4190,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ j(zero, &done); __ j(zero, &done);
// Copy remaining characters. // Copy remaining characters.
Label loop; NearLabel loop;
__ bind(&loop); __ bind(&loop);
__ mov_b(scratch, Operand(src, 0)); __ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch); __ mov_b(Operand(dest, 0), scratch);
@ -4216,7 +4216,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Make sure that both characters are not digits, as such strings have a // Make sure that both characters are not digits, as such strings have a
// different hash algorithm. Don't try to look for these in the symbol table. // different hash algorithm. Don't try to look for these in the symbol table.
Label not_array_index; NearLabel not_array_index;
__ mov(scratch, c1); __ mov(scratch, c1);
__ sub(Operand(scratch), Immediate(static_cast<int>('0'))); __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
__ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0'))); __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
@ -4374,7 +4374,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ add(hash, Operand(scratch)); __ add(hash, Operand(scratch));
// if (hash == 0) hash = 27; // if (hash == 0) hash = 27;
Label hash_not_zero; NearLabel hash_not_zero;
__ test(hash, Operand(hash)); __ test(hash, Operand(hash));
__ j(not_zero, &hash_not_zero); __ j(not_zero, &hash_not_zero);
__ mov(hash, Immediate(27)); __ mov(hash, Immediate(27));
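
This is the tail of V8's running string hash: after the final mixing steps, a result of zero is replaced by 27 so that zero can be reserved to mean "hash not yet computed". A scalar version; the mixing steps are taken from StringHasher in this V8 vintage and should be treated as an assumption, since only the zero guard is visible in the hunk:

    #include <cstdint>
    uint32_t GetHash(uint32_t running_hash) {
      uint32_t hash = running_hash;
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;  // the &hash_not_zero guard above
      return hash;
    }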
@ -4543,7 +4543,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ IncrementCounter(&Counters::string_compare_native, 1); __ IncrementCounter(&Counters::string_compare_native, 1);
// Find minimum length. // Find minimum length.
Label left_shorter; NearLabel left_shorter;
__ mov(scratch1, FieldOperand(left, String::kLengthOffset)); __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
__ mov(scratch3, scratch1); __ mov(scratch3, scratch1);
__ sub(scratch3, FieldOperand(right, String::kLengthOffset)); __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
@ -4579,7 +4579,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
{ {
// Compare loop. // Compare loop.
Label loop; NearLabel loop;
__ bind(&loop); __ bind(&loop);
// Compare characters. // Compare characters.
__ mov_b(scratch2, Operand(left, index, times_1, 0)); __ mov_b(scratch2, Operand(left, index, times_1, 0));
@ -4625,7 +4625,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 2 * kPointerSize)); // left __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
__ mov(eax, Operand(esp, 1 * kPointerSize)); // right __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
Label not_same; NearLabel not_same;
__ cmp(edx, Operand(eax)); __ cmp(edx, Operand(eax));
__ j(not_equal, &not_same); __ j(not_equal, &not_same);
STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(EQUAL == 0);

142 deps/v8/src/ia32/codegen-ia32.cc

@ -249,7 +249,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// the function. // the function.
for (int i = 0; i < scope()->num_parameters(); i++) { for (int i = 0; i < scope()->num_parameters(); i++) {
Variable* par = scope()->parameter(i); Variable* par = scope()->parameter(i);
Slot* slot = par->slot(); Slot* slot = par->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) { if (slot != NULL && slot->type() == Slot::CONTEXT) {
// The use of SlotOperand below is safe in unspilled code // The use of SlotOperand below is safe in unspilled code
// because the slot is guaranteed to be a context slot. // because the slot is guaranteed to be a context slot.
@ -285,7 +285,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Initialize ThisFunction reference if present. // Initialize ThisFunction reference if present.
if (scope()->is_function_scope() && scope()->function() != NULL) { if (scope()->is_function_scope() && scope()->function() != NULL) {
frame_->Push(Factory::the_hole_value()); frame_->Push(Factory::the_hole_value());
StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT); StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
} }
@ -717,10 +717,10 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
Property property(&global, &key, RelocInfo::kNoPosition); Property property(&global, &key, RelocInfo::kNoPosition);
Reference ref(this, &property); Reference ref(this, &property);
ref.GetValue(); ref.GetValue();
} else if (variable != NULL && variable->slot() != NULL) { } else if (variable != NULL && variable->AsSlot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate // For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof. // subexpression of a typeof.
LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF); LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
} else { } else {
// Anything else can be handled normally. // Anything else can be handled normally.
Load(expr); Load(expr);
@ -759,17 +759,17 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
frame_->Push(&result); frame_->Push(&result);
} }
Variable* arguments = scope()->arguments()->var(); Variable* arguments = scope()->arguments();
Variable* shadow = scope()->arguments_shadow()->var(); Variable* shadow = scope()->arguments_shadow();
ASSERT(arguments != NULL && arguments->slot() != NULL); ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
ASSERT(shadow != NULL && shadow->slot() != NULL); ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
JumpTarget done; JumpTarget done;
bool skip_arguments = false; bool skip_arguments = false;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) { if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
// We have to skip storing into the arguments slot if it has // We have to skip storing into the arguments slot if it has
// already been written to. This can happen if the function // already been written to. This can happen if the function
// has a local variable named 'arguments'. // has a local variable named 'arguments'.
LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF); LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
Result probe = frame_->Pop(); Result probe = frame_->Pop();
if (probe.is_constant()) { if (probe.is_constant()) {
// We have to skip updating the arguments object if it has // We have to skip updating the arguments object if it has
@ -782,10 +782,10 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
} }
} }
if (!skip_arguments) { if (!skip_arguments) {
StoreToSlot(arguments->slot(), NOT_CONST_INIT); StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
} }
StoreToSlot(shadow->slot(), NOT_CONST_INIT); StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
return frame_->Pop(); return frame_->Pop();
} }
@ -842,7 +842,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
LoadGlobal(); LoadGlobal();
ref->set_type(Reference::NAMED); ref->set_type(Reference::NAMED);
} else { } else {
ASSERT(var->slot() != NULL); ASSERT(var->AsSlot() != NULL);
ref->set_type(Reference::SLOT); ref->set_type(Reference::SLOT);
} }
} else { } else {
@ -3274,7 +3274,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the // Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here. // expression stack. Avoid allocating the arguments object here.
Load(receiver); Load(receiver);
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF); LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the // Emit the source position information after having loaded the
// receiver and the arguments. // receiver and the arguments.
@ -3536,7 +3536,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration"); Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var(); Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved ASSERT(var != NULL); // must have been resolved
Slot* slot = var->slot(); Slot* slot = var->AsSlot();
// If it was not possible to allocate the variable at compile time, // If it was not possible to allocate the variable at compile time,
// we need to "declare" it at runtime to make sure it actually // we need to "declare" it at runtime to make sure it actually
@ -4252,7 +4252,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// the bottom check of the loop condition. // the bottom check of the loop condition.
if (node->is_fast_smi_loop()) { if (node->is_fast_smi_loop()) {
// Set number type of the loop variable to smi. // Set number type of the loop variable to smi.
SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi()); SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
} }
Visit(node->body()); Visit(node->body());
@ -4278,7 +4278,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// expression if we are in a fast smi loop condition. // expression if we are in a fast smi loop condition.
if (node->is_fast_smi_loop() && has_valid_frame()) { if (node->is_fast_smi_loop() && has_valid_frame()) {
// Set number type of the loop variable to smi. // Set number type of the loop variable to smi.
SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi()); SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
} }
// Based on the condition analysis, compile the backward jump as // Based on the condition analysis, compile the backward jump as
@ -4577,8 +4577,8 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// Store the caught exception in the catch variable. // Store the caught exception in the catch variable.
Variable* catch_var = node->catch_var()->var(); Variable* catch_var = node->catch_var()->var();
ASSERT(catch_var != NULL && catch_var->slot() != NULL); ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
StoreToSlot(catch_var->slot(), NOT_CONST_INIT); StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
// Remove the exception from the stack. // Remove the exception from the stack.
frame_->Drop(); frame_->Drop();
@ -5173,7 +5173,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
done->Jump(result); done->Jump(result);
} else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
if (potential_slot != NULL) { if (potential_slot != NULL) {
// Generate fast case for locals that rewrite to slots. // Generate fast case for locals that rewrite to slots.
@ -5206,7 +5206,7 @@ void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
Result arguments = allocator()->Allocate(); Result arguments = allocator()->Allocate();
ASSERT(arguments.is_valid()); ASSERT(arguments.is_valid());
__ mov(arguments.reg(), __ mov(arguments.reg(),
ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(), ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
arguments, arguments,
slow)); slow));
frame_->Push(&arguments); frame_->Push(&arguments);
@ -5714,7 +5714,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
Comment cmnt(masm(), "[ Variable Assignment"); Comment cmnt(masm(), "[ Variable Assignment");
Variable* var = node->target()->AsVariableProxy()->AsVariable(); Variable* var = node->target()->AsVariableProxy()->AsVariable();
ASSERT(var != NULL); ASSERT(var != NULL);
Slot* slot = var->slot(); Slot* slot = var->AsSlot();
ASSERT(slot != NULL); ASSERT(slot != NULL);
// Evaluate the right-hand side. // Evaluate the right-hand side.
@ -6063,14 +6063,14 @@ void CodeGenerator::VisitCall(Call* node) {
// in generated code. If we succeed, there is no need to perform a // in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system. // context lookup in the runtime system.
JumpTarget done; JumpTarget done;
if (var->slot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) { if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
ASSERT(var->slot()->type() == Slot::LOOKUP); ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
JumpTarget slow; JumpTarget slow;
// Prepare the stack for the call to // Prepare the stack for the call to
// ResolvePossiblyDirectEvalNoLookup by pushing the loaded // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
// function, the first argument to the eval call and the // function, the first argument to the eval call and the
// receiver. // receiver.
Result fun = LoadFromGlobalSlotCheckExtensions(var->slot(), Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
NOT_INSIDE_TYPEOF, NOT_INSIDE_TYPEOF,
&slow); &slow);
frame_->Push(&fun); frame_->Push(&fun);
@ -6153,8 +6153,8 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->RestoreContextRegister(); frame_->RestoreContextRegister();
frame_->Push(&result); frame_->Push(&result);
} else if (var != NULL && var->slot() != NULL && } else if (var != NULL && var->AsSlot() != NULL &&
var->slot()->type() == Slot::LOOKUP) { var->AsSlot()->type() == Slot::LOOKUP) {
// ---------------------------------- // ----------------------------------
// JavaScript examples: // JavaScript examples:
// //
@ -6173,7 +6173,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Generate fast case for loading functions from slots that // Generate fast case for loading functions from slots that
// correspond to local/global variables or arguments unless they // correspond to local/global variables or arguments unless they
// are shadowed by eval-introduced bindings. // are shadowed by eval-introduced bindings.
EmitDynamicLoadFromSlotFastCase(var->slot(), EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
NOT_INSIDE_TYPEOF, NOT_INSIDE_TYPEOF,
&function, &function,
&slow, &slow,
@ -8053,7 +8053,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) { if (variable != NULL) {
Slot* slot = variable->slot(); Slot* slot = variable->AsSlot();
if (variable->is_global()) { if (variable->is_global()) {
LoadGlobal(); LoadGlobal();
frame_->Push(variable->name()); frame_->Push(variable->name());
@ -9144,9 +9144,15 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
public: public:
DeferredReferenceGetNamedValue(Register dst, DeferredReferenceGetNamedValue(Register dst,
Register receiver, Register receiver,
Handle<String> name) Handle<String> name,
: dst_(dst), receiver_(receiver), name_(name) { bool is_contextual)
set_comment("[ DeferredReferenceGetNamedValue"); : dst_(dst),
receiver_(receiver),
name_(name),
is_contextual_(is_contextual) {
set_comment(is_contextual
? "[ DeferredReferenceGetNamedValue (contextual)"
: "[ DeferredReferenceGetNamedValue");
} }
virtual void Generate(); virtual void Generate();
@ -9158,6 +9164,7 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
Register dst_; Register dst_;
Register receiver_; Register receiver_;
Handle<String> name_; Handle<String> name_;
bool is_contextual_;
}; };
@ -9167,9 +9174,15 @@ void DeferredReferenceGetNamedValue::Generate() {
} }
__ Set(ecx, Immediate(name_)); __ Set(ecx, Immediate(name_));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET); RelocInfo::Mode mode = is_contextual_
// The call must be followed by a test eax instruction to indicate ? RelocInfo::CODE_TARGET_CONTEXT
// that the inobject property case was inlined. : RelocInfo::CODE_TARGET;
__ call(ic, mode);
// The call must be followed by:
// - a test eax instruction to indicate that the inobject property
// case was inlined.
// - a mov ecx instruction to indicate that the contextual property
// load was inlined.
// //
// Store the delta to the map check instruction here in the test // Store the delta to the map check instruction here in the test
// instruction. Use masm_-> instead of the __ macro since the // instruction. Use masm_-> instead of the __ macro since the
@ -9177,8 +9190,13 @@ void DeferredReferenceGetNamedValue::Generate() {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the __ macro because this is the // Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way. // instruction that gets patched and coverage code gets in the way.
if (is_contextual_) {
masm_->mov(ecx, -delta_to_patch_site);
__ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
} else {
masm_->test(eax, Immediate(-delta_to_patch_site)); masm_->test(eax, Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::named_load_inline_miss, 1); __ IncrementCounter(&Counters::named_load_inline_miss, 1);
}
if (!dst_.is(eax)) __ mov(dst_, eax); if (!dst_.is(eax)) __ mov(dst_, eax);
} }
@ -9349,12 +9367,17 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame()->height(); int original_height = frame()->height();
#endif #endif
bool contextual_load_in_builtin =
is_contextual &&
(Bootstrapper::IsActive() ||
(!info_->closure().is_null() && info_->closure()->IsBuiltin()));
Result result; Result result;
// Do not inline the inobject property case for loads from the global // Do not inline in the global code or when not in a loop.
// object. Also do not inline for unoptimized code. This saves time in if (scope()->is_global_scope() ||
// the code generator. Unoptimized code is toplevel code or code that is loop_nesting() == 0 ||
// not in a loop. contextual_load_in_builtin) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property"); Comment cmnt(masm(), "[ Load from named Property");
frame()->Push(name); frame()->Push(name);
@ -9367,19 +9390,26 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// instruction here. // instruction here.
__ nop(); __ nop();
} else { } else {
// Inline the inobject property case. // Inline the property load.
Comment cmnt(masm(), "[ Inlined named property load"); Comment cmnt(masm(), is_contextual
? "[ Inlined contextual property load"
: "[ Inlined named property load");
Result receiver = frame()->Pop(); Result receiver = frame()->Pop();
receiver.ToRegister(); receiver.ToRegister();
result = allocator()->Allocate(); result = allocator()->Allocate();
ASSERT(result.is_valid()); ASSERT(result.is_valid());
DeferredReferenceGetNamedValue* deferred = DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name); new DeferredReferenceGetNamedValue(result.reg(),
receiver.reg(),
name,
is_contextual);
if (!is_contextual) {
// Check that the receiver is a heap object. // Check that the receiver is a heap object.
__ test(receiver.reg(), Immediate(kSmiTagMask)); __ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->Branch(zero); deferred->Branch(zero);
}
__ bind(deferred->patch_site()); __ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't // This is the map check instruction that will be patched (so we can't
@ -9391,17 +9421,33 @@ Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// which allows the assert below to succeed and patching to work. // which allows the assert below to succeed and patching to work.
deferred->Branch(not_equal); deferred->Branch(not_equal);
// The delta from the patch label to the load offset must be statically // The delta from the patch label to the actual load must be
// known. // statically known.
ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) == ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
LoadIC::kOffsetToLoadInstruction); LoadIC::kOffsetToLoadInstruction);
if (is_contextual) {
// Load the (initially invalid) cell and get its value.
masm()->mov(result.reg(), Factory::null_value());
if (FLAG_debug_code) {
__ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
Factory::global_property_cell_map());
__ Assert(equal, "Uninitialized inlined contextual load");
}
__ mov(result.reg(),
FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
__ cmp(result.reg(), Factory::the_hole_value());
deferred->Branch(equal);
__ IncrementCounter(&Counters::named_load_global_inline, 1);
} else {
// The initial (invalid) offset has to be large enough to force a 32-bit // The initial (invalid) offset has to be large enough to force a 32-bit
// instruction encoding to allow patching with an arbitrary offset. Use // instruction encoding to allow patching with an arbitrary offset. Use
// kMaxInt (minus kHeapObjectTag). // kMaxInt (minus kHeapObjectTag).
int offset = kMaxInt; int offset = kMaxInt;
masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset)); masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
__ IncrementCounter(&Counters::named_load_inline, 1); __ IncrementCounter(&Counters::named_load_inline, 1);
}
deferred->BindExit(); deferred->BindExit();
} }
ASSERT(frame()->height() == original_height - 1); ASSERT(frame()->height() == original_height - 1);
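
Once patched, the inlined contextual load above boils down to: read the value slot of a JSGlobalPropertyCell and fall back to the deferred code if it holds the hole sentinel (a deleted or uninitialized global). A runtime model with a hypothetical Cell struct standing in for JSGlobalPropertyCell; the_hole is passed in rather than referenced globally, in V8 it is Factory::the_hole_value():

    struct Cell { void* value; };
    void* LoadGlobalFast(Cell* patched_cell, void* the_hole, bool* miss) {
      void* v = patched_cell->value;  // mov reg, [cell + kValueOffset]
      *miss = (v == the_hole);        // deferred->Branch(equal)
      return v;
    }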
@ -9741,7 +9787,7 @@ void Reference::GetValue() {
switch (type_) { switch (type_) {
case SLOT: { case SLOT: {
Comment cmnt(masm, "[ Load from Slot"); Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL); ASSERT(slot != NULL);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
if (!persist_after_get_) set_unloaded(); if (!persist_after_get_) set_unloaded();
@ -9786,7 +9832,7 @@ void Reference::TakeValue() {
return; return;
} }
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL); ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP || if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT || slot->type() == Slot::CONTEXT ||
@ -9819,7 +9865,7 @@ void Reference::SetValue(InitState init_state) {
switch (type_) { switch (type_) {
case SLOT: { case SLOT: {
Comment cmnt(masm, "[ Store to Slot"); Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
ASSERT(slot != NULL); ASSERT(slot != NULL);
cgen_->StoreToSlot(slot, init_state); cgen_->StoreToSlot(slot, init_state);
set_unloaded(); set_unloaded();

57 deps/v8/src/ia32/disasm-ia32.cc

@ -685,7 +685,8 @@ int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
case 0xDD: switch (regop) { case 0xDD: switch (regop) {
case 0: mnem = "fld_d"; break; case 0: mnem = "fld_d"; break;
case 2: mnem = "fstp"; break; case 1: mnem = "fisttp_d"; break;
case 2: mnem = "fst_d"; break;
case 3: mnem = "fstp_d"; break; case 3: mnem = "fstp_d"; break;
default: UnimplementedInstruction(); default: UnimplementedInstruction();
} }
@ -717,6 +718,10 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xD9: case 0xD9:
switch (modrm_byte & 0xF8) { switch (modrm_byte & 0xF8) {
case 0xC0:
mnem = "fld";
has_register = true;
break;
case 0xC8: case 0xC8:
mnem = "fxch"; mnem = "fxch";
has_register = true; has_register = true;
@ -957,6 +962,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (f0byte == 0xA2 || f0byte == 0x31) { } else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem); AppendToBuffer("%s", f0mnem);
data += 2; data += 2;
} else if (f0byte == 0x28) {
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movaps %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if ((f0byte & 0xF0) == 0x80) { } else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint); data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 || } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
@ -1156,6 +1169,23 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop), NameOfXMMRegister(regop),
NameOfXMMRegister(rm)); NameOfXMMRegister(rm));
data++; data++;
} else if (*data == 0x73) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("psllq %s,%d",
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x54) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("andpd %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else { } else {
UnimplementedInstruction(); UnimplementedInstruction();
} }
@ -1168,12 +1198,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
{ data++; { data++;
int mod, regop, rm; int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm); get_modrm(*data, &mod, &regop, &rm);
if (mod == 3 && regop == ecx) { if (regop == ecx) {
AppendToBuffer("dec_b %s", NameOfCPURegister(rm)); AppendToBuffer("dec_b ");
data += PrintRightOperand(data);
} else { } else {
UnimplementedInstruction(); UnimplementedInstruction();
} }
data++;
} }
break; break;
@ -1274,6 +1304,23 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm)); NameOfXMMRegister(rm));
data++; data++;
} }
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
const char* const pseudo_op[] = {
"cmpeqsd",
"cmpltsd",
"cmplesd",
"cmpunordsd",
"cmpneqsd",
"cmpnltsd",
"cmpnlesd",
"cmpordsd"
};
AppendToBuffer("%s %s,%s",
pseudo_op[data[1]],
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data += 2;
} else { } else {
if (mod != 0x3) { if (mod != 0x3) {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop)); AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
@ -1367,7 +1414,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
" %s", " %s",
tmp_buffer_.start()); tmp_buffer_.start());
return instr_len; return instr_len;
} } // NOLINT (function is too long)
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
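
For the 0xC2 case above, the trailing imm8 selects one of eight comparison predicates (Intel SDM 2A, Table 3-18), which is why the disassembler prints a pseudo-op rather than a raw cmpsd. A sketch of that mapping, with a bounds check the table-indexing code above assumes implicitly:

    #include <cstdint>
    const char* CmpsdMnemonic(uint8_t imm8) {
      static const char* const kNames[] = {
          "cmpeqsd",  "cmpltsd",  "cmplesd",  "cmpunordsd",
          "cmpneqsd", "cmpnltsd", "cmpnlesd", "cmpordsd"};
      return imm8 < 8 ? kNames[imm8] : "cmpsd/??";  // imm8 > 7 is invalid
    }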

12 deps/v8/src/ia32/frames-ia32.cc

@ -35,16 +35,8 @@ namespace v8 {
namespace internal { namespace internal {
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) { Address ExitFrame::ComputeStackPointer(Address fp) {
if (fp == 0) return NONE; return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
// Compute the stack pointer.
Address sp = Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
// Fill in the state.
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
ASSERT(*state->pc_address != NULL);
return EXIT;
} }

986 deps/v8/src/ia32/full-codegen-ia32.cc (file diff suppressed because it is too large)

43 deps/v8/src/ia32/ic-ia32.cc

@ -692,7 +692,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- esp[0] : return address // -- esp[0] : return address
// ----------------------------------- // -----------------------------------
Label miss; Label miss;
Label index_out_of_range;
Register receiver = edx; Register receiver = edx;
Register index = eax; Register index = eax;
@ -707,7 +706,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
result, result,
&miss, // When not a string. &miss, // When not a string.
&miss, // When not a number. &miss, // When not a number.
&index_out_of_range, &miss, // When index out of range.
STRING_INDEX_IS_ARRAY_INDEX); STRING_INDEX_IS_ARRAY_INDEX);
char_at_generator.GenerateFast(masm); char_at_generator.GenerateFast(masm);
__ ret(0); __ ret(0);
@ -715,10 +714,6 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
ICRuntimeCallHelper call_helper; ICRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper); char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::undefined_value()));
__ ret(0);
__ bind(&miss); __ bind(&miss);
GenerateMiss(masm); GenerateMiss(masm);
} }
@ -890,8 +885,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ test(edx, Immediate(kSmiTagMask)); __ test(edx, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken); __ j(zero, &slow, not_taken);
// Check that the key is a smi. // Check that the key is an array index, that is, a Uint32.
__ test(eax, Immediate(kSmiTagMask)); __ test(eax, Immediate(kSmiTagMask | kSmiSignMask));
__ j(not_zero, &slow, not_taken); __ j(not_zero, &slow, not_taken);
// Get the map of the receiver. // Get the map of the receiver.
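
Testing kSmiTagMask | kSmiSignMask in one instruction accepts exactly the non-negative smis: bit 0 must be clear (smi tag) and bit 31 must be clear (non-negative payload), which is what "array index, that is, a Uint32" means here. A sketch, assuming the 31-bit smi layout on ia32:

    #include <cstdint>
    bool IsNonNegativeSmi(uint32_t tagged) {
      const uint32_t kSmiTagMask  = 0x00000001u;  // low bit: heap-object tag
      const uint32_t kSmiSignMask = 0x80000000u;  // sign of the smi payload
      return (tagged & (kSmiTagMask | kSmiSignMask)) == 0;
    }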
@ -1666,6 +1661,38 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
} }
// One byte opcode for mov ecx,0xXXXXXXXX.
static const byte kMovEcxByte = 0xB9;
bool LoadIC::PatchInlinedContextualLoad(Address address,
Object* map,
Object* cell) {
// The address of the instruction following the call.
Address mov_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a mov ecx, nothing
// was inlined.
if (*mov_instruction_address != kMovEcxByte) return false;
Address delta_address = mov_instruction_address + 1;
// The delta to the start of the map check instruction.
int delta = *reinterpret_cast<int*>(delta_address);
// The map address is the last 4 bytes of the 7-byte
// operand-immediate compare instruction, so we add 3 to get the
// offset to the last 4 bytes.
Address map_address = mov_instruction_address + delta + 3;
*(reinterpret_cast<Object**>(map_address)) = map;
// The cell is in the last 4 bytes of a five-byte mov reg, imm32
// instruction, so we add 1 to get the offset to the last 4 bytes.
Address offset_address =
mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
*reinterpret_cast<Object**>(offset_address) = cell;
return true;
}
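
PatchInlinedContextualLoad navigates the code purely by byte offsets: the mov ecx marker's imm32 holds the (negative) delta back to the map check, the map sits in the last 4 bytes of the 7-byte cmp-with-imm32, and the cell in the last 4 bytes of the 5-byte mov reg, imm32. A little-endian byte-level model using only the offsets visible in the code above; everything else is hypothetical:

    #include <cstdint>
    #include <cstring>
    bool LooksLikeInlinedContextualLoad(const uint8_t* after_call) {
      return after_call[0] == 0xB9;  // the mov ecx, imm32 marker
    }
    int32_t DeltaToMapCheck(const uint8_t* after_call) {
      int32_t delta;  // negative: the map check precedes the call
      std::memcpy(&delta, after_call + 1, sizeof delta);
      return delta;
    }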
bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) { bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
// The address of the instruction following the call. // The address of the instruction following the call.
Address test_instruction_address = Address test_instruction_address =

18 deps/v8/src/ia32/macro-assembler-ia32.cc

@ -1361,6 +1361,13 @@ void MacroAssembler::Drop(int stack_elements) {
} }
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
mov(dst, src);
}
}
void MacroAssembler::Move(Register dst, Handle<Object> value) { void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, value); mov(dst, value);
} }
@ -1553,6 +1560,17 @@ void MacroAssembler::ConvertToInt32(Register dst,
} }
void MacroAssembler::LoadPowerOf2(XMMRegister dst,
Register scratch,
int power) {
ASSERT(is_uintn(power + HeapNumber::kExponentBias,
HeapNumber::kExponentBits));
mov(scratch, Immediate(power + HeapNumber::kExponentBias));
movd(dst, Operand(scratch));
psllq(dst, HeapNumber::kMantissaBits);
}
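
LoadPowerOf2 builds 2^power without touching memory or the FPU: the biased exponent goes into a GPR, movd moves it into an XMM register, and psllq shifts it into the exponent field (the mantissa stays zero). The scalar equivalent, valid whenever power + 1023 fits in the 11 exponent bits, as the ASSERT checks:

    #include <cstdint>
    #include <cstring>
    double PowerOf2(int power) {
      const int kExponentBias = 1023;  // HeapNumber::kExponentBias
      const int kMantissaBits = 52;    // HeapNumber::kMantissaBits
      uint64_t bits =
          static_cast<uint64_t>(power + kExponentBias) << kMantissaBits;
      double d;
      std::memcpy(&d, &bits, sizeof d);  // reinterpret the bit pattern
      return d;
    }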
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type, Register instance_type,
Register scratch, Register scratch,

5 deps/v8/src/ia32/macro-assembler-ia32.h

@ -258,6 +258,8 @@ class MacroAssembler: public Assembler {
TypeInfo info, TypeInfo info,
Label* on_not_int32); Label* on_not_int32);
void LoadPowerOf2(XMMRegister dst, Register scratch, int power);
// Abort execution if argument is not a number. Used in debug code. // Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object); void AbortIfNotNumber(Register object);
@ -503,6 +505,9 @@ class MacroAssembler: public Assembler {
void Call(Label* target) { call(target); } void Call(Label* target) { call(target); }
// Move if the registers are not identical.
void Move(Register target, Register source);
void Move(Register target, Handle<Object> value); void Move(Register target, Handle<Object> value);
Handle<Object> CodeObject() { return code_object_; } Handle<Object> CodeObject() { return code_object_; }

266 deps/v8/src/ia32/stub-cache-ia32.cc

@ -265,7 +265,11 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
MacroAssembler* masm, int index, Register prototype) { MacroAssembler* masm, int index, Register prototype, Label* miss) {
// Check we're still in the same context.
__ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
Top::global());
__ j(not_equal, miss);
// Get the global function with the given index. // Get the global function with the given index.
JSFunction* function = JSFunction::cast(Top::global_context()->get(index)); JSFunction* function = JSFunction::cast(Top::global_context()->get(index));
// Load its initial map. The global functions all have initial maps. // Load its initial map. The global functions all have initial maps.
@ -1626,7 +1630,8 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(
// Check that the maps starting from the prototype haven't changed. // Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(), GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX, Context::STRING_FUNCTION_INDEX,
eax); eax,
&miss);
ASSERT(object != holder); ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder, CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss); ebx, edx, edi, name, &miss);
@ -1695,7 +1700,8 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
// Check that the maps starting from the prototype haven't changed. // Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(), GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX, Context::STRING_FUNCTION_INDEX,
eax); eax,
&miss);
ASSERT(object != holder); ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder, CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
ebx, edx, edi, name, &miss); ebx, edx, edi, name, &miss);
@ -1813,6 +1819,234 @@ Object* CallStubCompiler::CompileStringFromCharCodeCall(
} }
Object* CallStubCompiler::CompileMathFloorCall(Object* object,
JSObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
if (!CpuFeatures::IsSupported(SSE2)) return Heap::undefined_value();
CpuFeatures::Scope use_sse2(SSE2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
if (cell == NULL) {
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
} else {
ASSERT(cell->value() == function);
GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into eax.
__ mov(eax, Operand(esp, 1 * kPointerSize));
// Check if the argument is a smi.
Label smi;
STATIC_ASSERT(kSmiTag == 0);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &smi);
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
__ CheckMap(eax, Factory::heap_number_map(), &slow, true);
__ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
// discards NaN.
__ xorpd(xmm1, xmm1);
__ ucomisd(xmm0, xmm1);
__ j(below_equal, &slow);
// Do a truncating conversion.
__ cvttsd2si(eax, Operand(xmm0));
// Check if the result fits into a smi. Note this also checks for
// 0x80000000 which signals a failed conversion.
Label wont_fit_into_smi;
__ test(eax, Immediate(0xc0000000));
__ j(not_zero, &wont_fit_into_smi);
// Smi tag and return.
__ SmiTag(eax);
__ bind(&smi);
__ ret(2 * kPointerSize);
// Check if the argument is < 2^kMantissaBits.
Label already_round;
__ bind(&wont_fit_into_smi);
__ LoadPowerOf2(xmm1, ebx, HeapNumber::kMantissaBits);
__ ucomisd(xmm0, xmm1);
__ j(above_equal, &already_round);
// Save a copy of the argument.
__ movaps(xmm2, xmm0);
// Compute (argument + 2^kMantissaBits) - 2^kMantissaBits.
__ addsd(xmm0, xmm1);
__ subsd(xmm0, xmm1);
// Compare the argument and the tentative result to get the right mask:
// if xmm2 < xmm0:
// xmm2 = 1...1
// else:
// xmm2 = 0...0
__ cmpltsd(xmm2, xmm0);
// Subtract 1 if the argument was less than the tentative result.
__ LoadPowerOf2(xmm1, ebx, 0);
__ andpd(xmm1, xmm2);
__ subsd(xmm0, xmm1);
// Return a new heap number.
__ AllocateHeapNumber(eax, ebx, edx, &slow);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(2 * kPointerSize);
// Return the argument (when it's an already round heap number).
__ bind(&already_round);
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ ret(2 * kPointerSize);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
__ bind(&miss);
// ecx: function name.
Object* obj = GenerateMissBranch();
if (obj->IsFailure()) return obj;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
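
The SSE2 path above floors without a dedicated instruction (roundsd is SSE4.1): adding and then subtracting 2^52 forces rounding to an integer, and the cmpltsd/andpd/subsd triple subtracts 1 exactly when that rounding went up. A scalar model, assuming the default round-to-nearest mode and an argument in (0, 2^52), the range this path handles:

    double FloorViaMantissaTrick(double x) {
      const double k2to52 = 4503599627370496.0;  // 2^52
      double r = (x + k2to52) - k2to52;  // rounds x to the nearest integer
      if (x < r) r -= 1.0;               // the mask-and-subtract correction
      return r;
    }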
Object* CallStubCompiler::CompileMathAbsCall(Object* object,
JSObject* holder,
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
if (cell == NULL) {
__ mov(edx, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &miss);
CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
&miss);
} else {
ASSERT(cell->value() == function);
GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
GenerateLoadFunctionFromCell(cell, function, &miss);
}
// Load the (only) argument into eax.
__ mov(eax, Operand(esp, 1 * kPointerSize));
// Check if the argument is a smi.
Label not_smi;
STATIC_ASSERT(kSmiTag == 0);
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &not_smi);
// Set ebx to 1...1 (== -1) if the argument is negative, or to 0...0
// otherwise.
__ mov(ebx, eax);
__ sar(ebx, kBitsPerInt - 1);
// Do bitwise not or do nothing depending on ebx.
__ xor_(eax, Operand(ebx));
// Add 1 or do nothing depending on ebx.
__ sub(eax, Operand(ebx));
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
Label slow;
__ j(negative, &slow);
// Smi case done.
__ ret(2 * kPointerSize);
// Check if the argument is a heap number and load its exponent and
// sign into ebx.
__ bind(&not_smi);
__ CheckMap(eax, Factory::heap_number_map(), &slow, true);
__ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive,
// just return it.
Label negative_sign;
__ test(ebx, Immediate(HeapNumber::kSignMask));
__ j(not_zero, &negative_sign);
__ ret(2 * kPointerSize);
// If the argument is negative, clear the sign, and return a new
// number.
__ bind(&negative_sign);
__ and_(ebx, ~HeapNumber::kSignMask);
__ mov(ecx, FieldOperand(eax, HeapNumber::kMantissaOffset));
__ AllocateHeapNumber(eax, edi, edx, &slow);
__ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ebx);
__ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
__ ret(2 * kPointerSize);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);
__ bind(&miss);
// ecx: function name.
Object* obj = GenerateMissBranch();
if (obj->IsFailure()) return obj;
// Return the generated code.
return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
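
The smi fast path above is the classic branchless absolute value: an arithmetic shift smears the sign bit into a mask, then xor-and-subtract either leaves the value unchanged or negates it in two's complement. A minimal C++ sketch (note the shift of a negative value is arithmetic on the compilers V8 targets, but formally implementation-defined):

#include <cstdint>
#include <cstdio>

// Branchless integer abs, as in the sar/xor/sub sequence above:
// mask = x >> 31 is all ones for negative x and all zeros otherwise, so
// (x ^ mask) - mask equals ~x + 1 == -x for negative x and x otherwise.
// INT32_MIN still overflows, which is why the stub falls through to the
// slow case when the result stays negative.
static int32_t BranchlessAbs(int32_t x) {
  int32_t mask = x >> 31;  // kBitsPerInt - 1
  return (x ^ mask) - mask;
}

int main() {
  printf("%d %d\n", BranchlessAbs(-42), BranchlessAbs(42));  // 42 42
}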
Object* CallStubCompiler::CompileCallConstant(Object* object,
                                              JSObject* holder,
                                              JSFunction* function,

@@ -1894,7 +2128,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
       __ j(above_equal, &miss, not_taken);
       // Check that the maps starting from the prototype haven't changed.
       GenerateDirectLoadGlobalFunctionPrototype(
-          masm(), Context::STRING_FUNCTION_INDEX, eax);
+          masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
       CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                       ebx, edx, edi, name, &miss);
     }

@@ -1914,7 +2148,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
       __ bind(&fast);
       // Check that the maps starting from the prototype haven't changed.
       GenerateDirectLoadGlobalFunctionPrototype(
-          masm(), Context::NUMBER_FUNCTION_INDEX, eax);
+          masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
       CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                       ebx, edx, edi, name, &miss);
     }

@@ -1935,7 +2169,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
      __ bind(&fast);
      // Check that the maps starting from the prototype haven't changed.
      GenerateDirectLoadGlobalFunctionPrototype(
-         masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
+         masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
      CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
                      ebx, edx, edi, name, &miss);
    }

@@ -2324,7 +2558,10 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
                                  name,
                                  edx,
                                  &miss);
-    if (cell->IsFailure()) return cell;
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
   }

   // Return undefined if maps of the full prototype chain are still the

@@ -2374,7 +2611,10 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
+    miss.Unuse();
+    return failure;
+  }

   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);

@@ -2474,12 +2714,12 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
     __ Check(not_equal, "DontDelete cells can't contain the hole");
   }

-  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub, 1);
   __ mov(eax, ebx);
   __ ret(0);

   __ bind(&miss);
-  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  __ IncrementCounter(&Counters::named_load_global_stub_miss, 1);
   GenerateLoadMiss(masm(), Code::LOAD_IC);

   // Return the generated code.

@@ -2535,9 +2775,13 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi,
                                       callback, name, &miss, &failure);
-  if (!success) return failure;
+  if (!success) {
+    miss.Unuse();
+    return failure;
+  }

   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_callback, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

2
deps/v8/src/ia32/virtual-frame-ia32.cc

@@ -1313,7 +1313,7 @@ void VirtualFrame::Push(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
   if (proxy != NULL) {
-    Slot* slot = proxy->var()->slot();
+    Slot* slot = proxy->var()->AsSlot();
     if (slot->type() == Slot::LOCAL) {
       PushLocalAt(slot->index());
       return;

72
deps/v8/src/ic.cc

@@ -299,6 +299,7 @@ void LoadIC::ClearInlinedVersion(Address address) {
   // present) to guarantee failure by holding an invalid map (the null
   // value). The offset can be patched to anything.
   PatchInlinedLoad(address, Heap::null_value(), 0);
+  PatchInlinedContextualLoad(address, Heap::null_value(), Heap::null_value());
 }

@@ -720,6 +721,14 @@ Object* KeyedCallIC::LoadFunction(State state,
 }

+#ifdef DEBUG
+#define TRACE_IC_NAMED(msg, name) \
+  if (FLAG_trace_ic) PrintF(msg, *(name)->ToCString())
+#else
+#define TRACE_IC_NAMED(msg, name)
+#endif

 Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
   // If the object is undefined or null it's illegal to try to get any
   // of its properties; throw a TypeError in that case.

@@ -797,15 +806,24 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
     LOG(SuspectReadEvent(*name, *object));
   }

-  bool can_be_inlined =
+  bool can_be_inlined_precheck =
       FLAG_use_ic &&
-      state == PREMONOMORPHIC &&
       lookup.IsProperty() &&
       lookup.IsCacheable() &&
       lookup.holder() == *object &&
-      lookup.type() == FIELD &&
       !object->IsAccessCheckNeeded();

+  bool can_be_inlined =
+      can_be_inlined_precheck &&
+      state == PREMONOMORPHIC &&
+      lookup.type() == FIELD;
+
+  bool can_be_inlined_contextual =
+      can_be_inlined_precheck &&
+      state == UNINITIALIZED &&
+      lookup.holder()->IsGlobalObject() &&
+      lookup.type() == NORMAL;
+
   if (can_be_inlined) {
     Map* map = lookup.holder()->map();
     // Property's index in the properties array. If negative we have

@@ -816,32 +834,29 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
       int offset = map->instance_size() + (index * kPointerSize);
       if (PatchInlinedLoad(address(), map, offset)) {
         set_target(megamorphic_stub());
-#ifdef DEBUG
-        if (FLAG_trace_ic) {
-          PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
-        }
-#endif
+        TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
         return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
-#ifdef DEBUG
       } else {
-        if (FLAG_trace_ic) {
-          PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
-                 *name->ToCString());
-        }
+        TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
+                       name);
       }
     } else {
-      if (FLAG_trace_ic) {
-        PrintF("[LoadIC : no inline patch %s (not inobject)]\n",
-               *name->ToCString());
-      }
+      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
+    }
+  } else if (can_be_inlined_contextual) {
+    Map* map = lookup.holder()->map();
+    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+        lookup.holder()->property_dictionary()->ValueAt(
+            lookup.GetDictionaryEntry()));
+    if (PatchInlinedContextualLoad(address(), map, cell)) {
+      set_target(megamorphic_stub());
+      TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
+      ASSERT(cell->value() != Heap::the_hole_value());
+      return cell->value();
     }
   } else {
     if (FLAG_use_ic && state == PREMONOMORPHIC) {
-      if (FLAG_trace_ic) {
-        PrintF("[LoadIC : no inline patch %s (not inlinable)]\n",
-               *name->ToCString());
-      }
-#endif
+      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
     }
   }
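
The new contextual branch works because a global variable's value lives in a JSGlobalPropertyCell whose address is stable, so the inlined load can be patched to dereference the cell directly and skip the dictionary lookup on every later read. A toy C++ illustration of why the cell indirection stays correct under stores (hypothetical types, not V8's):

#include <cstdio>
#include <string>
#include <unordered_map>

// Toy version of the idea behind PatchInlinedContextualLoad: a global's
// value lives in a heap-allocated cell with a stable address, so a call
// site can be "patched" to read through the cell and never repeat the
// name lookup. Cell and GlobalObject are illustrative stand-ins.
struct Cell { int value; };

struct GlobalObject {
  std::unordered_map<std::string, Cell> dictionary;
  Cell* FindCell(const std::string& name) { return &dictionary[name]; }
};

int main() {
  GlobalObject global;
  global.dictionary["x"] = Cell{1};

  Cell* patched = global.FindCell("x");  // one slow lookup, then cached
  global.dictionary["x"].value = 42;     // stores go through the same cell
  printf("%d\n", patched->value);        // 42: reads see updates, no lookup
}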
@@ -1526,18 +1541,17 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
 // Static IC stub generators.
 //

-static Object* CompileFunction(Object* result,
-                               Handle<Object> object,
-                               InLoopFlag in_loop) {
+static JSFunction* CompileFunction(JSFunction* function,
+                                   InLoopFlag in_loop) {
   // Compile now with optimization.
   HandleScope scope;
-  Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(result));
+  Handle<JSFunction> function_handle(function);
   if (in_loop == IN_LOOP) {
-    CompileLazyInLoop(function, object, CLEAR_EXCEPTION);
+    CompileLazyInLoop(function_handle, CLEAR_EXCEPTION);
   } else {
-    CompileLazy(function, object, CLEAR_EXCEPTION);
+    CompileLazy(function_handle, CLEAR_EXCEPTION);
   }
-  return *function;
+  return *function_handle;
 }

@@ -1560,7 +1574,7 @@ Object* CallIC_Miss(Arguments args) {
   if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
     return result;
   }
-  return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
+  return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
 }

@@ -1576,7 +1590,7 @@ Object* KeyedCallIC_Miss(Arguments args) {
   if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
     return result;
   }
-  return CompileFunction(result, args.at<Object>(0), ic.target()->ic_in_loop());
+  return CompileFunction(JSFunction::cast(result), ic.target()->ic_in_loop());
 }

4
deps/v8/src/ic.h

@@ -298,6 +298,10 @@ class LoadIC: public IC {
   static bool PatchInlinedLoad(Address address, Object* map, int index);

+  static bool PatchInlinedContextualLoad(Address address,
+                                         Object* map,
+                                         Object* cell);
+
   friend class IC;
 };

10
deps/v8/src/liveedit.cc

@@ -408,6 +408,7 @@ static void CompileScriptForTracker(Handle<Script> script) {
   // Build AST.
   ScriptDataImpl* pre_data = NULL;
+  EagerCompilationInfo info(script, is_eval);
   FunctionLiteral* lit = MakeAST(is_global, script, extension, pre_data);

   // Check for parse errors.

@@ -415,10 +416,9 @@ static void CompileScriptForTracker(Handle<Script> script) {
     ASSERT(Top::has_pending_exception());
     return;
   }
+  info.set_function(lit);

   // Compile the code.
-  CompilationInfo info(lit, script, is_eval);
   LiveEditFunctionTracker tracker(lit);
   Handle<Code> code = MakeCodeForLiveEdit(&info);

@@ -664,7 +664,7 @@ class FunctionInfoListener {
     int j = 0;
     for (int i = 0; i < list.length(); i++) {
       Variable* var1 = list[i];
-      Slot* slot = var1->slot();
+      Slot* slot = var1->AsSlot();
       if (slot != NULL && slot->type() == Slot::CONTEXT) {
         if (j != i) {
           list[j] = var1;

@@ -677,7 +677,7 @@ class FunctionInfoListener {
     for (int k = 1; k < j; k++) {
       int l = k;
       for (int m = k + 1; m < j; m++) {
-        if (list[l]->slot()->index() > list[m]->slot()->index()) {
+        if (list[l]->AsSlot()->index() > list[m]->AsSlot()->index()) {
          l = m;
        }
      }

@@ -687,7 +687,7 @@ class FunctionInfoListener {
       SetElement(scope_info_list, scope_info_length, list[i]->name());
       scope_info_length++;
       SetElement(scope_info_list, scope_info_length,
-                 Handle<Smi>(Smi::FromInt(list[i]->slot()->index())));
+                 Handle<Smi>(Smi::FromInt(list[i]->AsSlot()->index())));
       scope_info_length++;
     }
     SetElement(scope_info_list, scope_info_length,

43
deps/v8/src/log.cc

@@ -171,7 +171,9 @@ void StackTracer::Trace(TickSample* sample) {
   SafeStackTraceFrameIterator it(sample->fp, sample->sp,
                                  sample->sp, js_entry_sp);
   while (!it.done() && i < TickSample::kMaxFramesCount) {
-    sample->stack[i++] = reinterpret_cast<Address>(it.frame()->function());
+    sample->stack[i++] =
+        reinterpret_cast<Address>(it.frame()->function_slot_object()) -
+        kHeapObjectTag;
     it.Advance();
   }
   sample->frames_count = i;
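
The sampled stack now stores the raw contents of the function slot minus kHeapObjectTag: V8 distinguishes heap-object pointers from smis by a low tag bit, so untagging recovers the real address. A small C++ sketch of the tagging arithmetic (the constants mirror V8's ia32 layout but are illustrative, not engine code):

#include <cassert>
#include <cstdint>

// Heap pointers carry a low tag bit so they can be told apart from smis;
// subtracting the tag, as the profiler does above, yields the untagged
// address. Values are illustrative.
const intptr_t kHeapObjectTag = 1;
const intptr_t kHeapObjectTagMask = 3;

intptr_t Tag(intptr_t address) { return address + kHeapObjectTag; }
intptr_t Untag(intptr_t tagged) { return tagged - kHeapObjectTag; }

int main() {
  intptr_t address = 0x1000;  // word-aligned, so the low bits are free
  intptr_t tagged = Tag(address);
  assert((tagged & kHeapObjectTagMask) == kHeapObjectTag);
  assert(Untag(tagged) == address);
}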
@@ -391,6 +393,13 @@ void Logger::IntEvent(const char* name, int value) {
 }

+void Logger::IntPtrTEvent(const char* name, intptr_t value) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (FLAG_log) UncheckedIntPtrTEvent(name, value);
+#endif
+}
+

 #ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::UncheckedIntEvent(const char* name, int value) {
   if (!Log::IsEnabled()) return;

@@ -401,6 +410,16 @@ void Logger::UncheckedIntEvent(const char* name, int value) {
 #endif

+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
+  if (!Log::IsEnabled()) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
+  msg.WriteToLogFile();
+}
+#endif
+

 void Logger::HandleEvent(const char* name, Object** location) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_handles) return;

@@ -869,14 +888,17 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
 void Logger::FunctionCreateEvent(JSFunction* function) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
+  // This function can be called from GC iterators (during Scavenge,
+  // MC, and MS), so marking bits can be set on objects. That's
+  // why unchecked accessors are used here.
   static Address prev_code = NULL;
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
   msg.AppendAddress(function->address());
   msg.Append(',');
-  msg.AppendAddress(function->code()->address(), prev_code);
-  prev_code = function->code()->address();
+  msg.AppendAddress(function->unchecked_code()->address(), prev_code);
+  prev_code = function->unchecked_code()->address();
   if (FLAG_compress_log) {
     ASSERT(compression_helper_ != NULL);
     if (!compression_helper_->HandleMessage(&msg)) return;

@@ -887,6 +909,16 @@ void Logger::FunctionCreateEvent(JSFunction* function) {
 }

+void Logger::FunctionCreateEventFromMove(JSFunction* function,
+                                         HeapObject*) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) {
+    FunctionCreateEvent(function);
+  }
+#endif
+}
+

 void Logger::FunctionMoveEvent(Address from, Address to) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);

@@ -990,11 +1022,12 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
 void Logger::HeapSampleStats(const char* space, const char* kind,
-                             int capacity, int used) {
+                             intptr_t capacity, intptr_t used) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg;
-  msg.Append("heap-sample-stats,\"%s\",\"%s\",%d,%d\n",
+  msg.Append("heap-sample-stats,\"%s\",\"%s\","
+             "%" V8_PTR_PREFIX "d,%" V8_PTR_PREFIX "d\n",
              space, kind, capacity, used);
   msg.WriteToLogFile();
 #endif
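
Widening the counters to intptr_t forces a format specifier that matches the pointer width on both 32- and 64-bit builds, which is what the "%" V8_PTR_PREFIX "d" idiom supplies. The standard-library equivalent, as a hedged standalone example:

#include <cinttypes>
#include <cstdio>

// PRIdPTR expands to the right length modifier for intptr_t on the
// current platform, so one format string works on 32- and 64-bit
// builds -- the same role V8_PTR_PREFIX plays above.
int main() {
  intptr_t capacity = 1 << 20;
  intptr_t used = 123456;
  printf("heap-sample-stats,\"old\",\"alive\",%" PRIdPTR ",%" PRIdPTR "\n",
         capacity, used);
}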

6
deps/v8/src/log.h

@@ -159,6 +159,7 @@ class Logger {
   // Emits an event with an int value -> (name, value).
   static void IntEvent(const char* name, int value);
+  static void IntPtrTEvent(const char* name, intptr_t value);

   // Emits an event with a handle value -> (name, location).
   static void HandleEvent(const char* name, Object** location);

@@ -216,6 +217,8 @@ class Logger {
   static void CodeDeleteEvent(Address from);
   // Emits a function object create event.
   static void FunctionCreateEvent(JSFunction* function);
+  static void FunctionCreateEventFromMove(JSFunction* function,
+                                          HeapObject*);
   // Emits a function move event.
   static void FunctionMoveEvent(Address from, Address to);
   // Emits a function delete event.

@@ -235,7 +238,7 @@ class Logger {
   static void HeapSampleJSProducerEvent(const char* constructor,
                                         Address* stack);
   static void HeapSampleStats(const char* space, const char* kind,
-                              int capacity, int used);
+                              intptr_t capacity, intptr_t used);

   static void SharedLibraryEvent(const char* library_path,
                                  uintptr_t start,

@@ -324,6 +327,7 @@ class Logger {
   // Logs an IntEvent regardless of whether FLAG_log is true.
   static void UncheckedIntEvent(const char* name, int value);
+  static void UncheckedIntPtrTEvent(const char* name, intptr_t value);

   // Stops logging and profiling in case of insufficient resources.
   static void StopLoggingAndProfiling();

40
deps/v8/src/mark-compact.cc

@@ -167,8 +167,8 @@ void MarkCompactCollector::Finish() {
   // reclaiming the waste and free list blocks).
   static const int kFragmentationLimit = 15;        // Percent.
   static const int kFragmentationAllowed = 1 * MB;  // Absolute.
-  int old_gen_recoverable = 0;
-  int old_gen_used = 0;
+  intptr_t old_gen_recoverable = 0;
+  intptr_t old_gen_used = 0;

   OldSpaces spaces;
   for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {

@@ -282,10 +282,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
                     FixedArray::BodyDescriptor,
                     void>::Visit);

-    table_.Register(kVisitSharedFunctionInfo,
-                    &FixedBodyVisitor<StaticMarkingVisitor,
-                                      SharedFunctionInfo::BodyDescriptor,
-                                      void>::Visit);
+    table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);

     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);

@@ -537,6 +534,17 @@ class StaticMarkingVisitor : public StaticVisitorBase {
   }

+  static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
+    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
+    if (shared->IsInobjectSlackTrackingInProgress()) {
+      shared->DetachInitialMap();
+    }
+    FixedBodyVisitor<StaticMarkingVisitor,
+                     SharedFunctionInfo::BodyDescriptor,
+                     void>::Visit(map, object);
+  }
+

   static void VisitCodeEntry(Address entry_address) {
     Object* code = Code::GetObjectFromEntryAddress(entry_address);
     Object* old_code = code;

@@ -1139,6 +1147,14 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
     // Only JSObject and subtypes have map transitions and back pointers.
     if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
     if (map->instance_type() > JS_FUNCTION_TYPE) continue;
+
+    if (map->IsMarked() && map->attached_to_shared_function_info()) {
+      // This map is used for inobject slack tracking and has been detached
+      // from SharedFunctionInfo during the mark phase.
+      // Since it survived the GC, reattach it now.
+      map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
+    }
+
     // Follow the chain of back pointers to find the prototype.
     Map* current = map;
     while (SafeIsMap(current)) {

@@ -1992,8 +2008,10 @@ class MapCompact {
 #ifdef DEBUG
     if (FLAG_gc_verbose) {
-      PrintF("update %p : %p -> %p\n", obj->address(),
-             map, new_map);
+      PrintF("update %p : %p -> %p\n",
+             obj->address(),
+             reinterpret_cast<void*>(map),
+             reinterpret_cast<void*>(new_map));
     }
 #endif
   }

@@ -2052,8 +2070,8 @@ void MarkCompactCollector::SweepSpaces() {
                                        &UpdatePointerToNewGen,
                                        Heap::WATERMARK_SHOULD_BE_VALID);

-  int live_maps_size = Heap::map_space()->Size();
-  int live_maps = live_maps_size / Map::kSize;
+  intptr_t live_maps_size = Heap::map_space()->Size();
+  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
   ASSERT(live_map_objects_size_ == live_maps_size);

   if (Heap::map_space()->NeedsCompaction(live_maps)) {

@@ -2504,6 +2522,7 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
+    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
   }
   HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

@@ -2596,6 +2615,7 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsJSFunction()) {
     PROFILE(FunctionMoveEvent(old_addr, new_addr));
+    PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to), obj));
   }
   HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

26
deps/v8/src/messages.js

@@ -684,6 +684,11 @@ CallSite.prototype.getEvalOrigin = function () {
   return FormatEvalOrigin(script);
 };

+CallSite.prototype.getScriptNameOrSourceURL = function () {
+  var script = %FunctionGetScript(this.fun);
+  return script ? script.nameOrSourceURL() : null;
+};
+

 CallSite.prototype.getFunction = function () {
   return this.fun;
 };

@@ -775,7 +780,11 @@ CallSite.prototype.isConstructor = function () {
 };

 function FormatEvalOrigin(script) {
-  var eval_origin = "";
+  var sourceURL = script.nameOrSourceURL();
+  if (sourceURL)
+    return sourceURL;
+
+  var eval_origin = "eval at ";
   if (script.eval_from_function_name) {
     eval_origin += script.eval_from_function_name;
   } else {

@@ -786,9 +795,9 @@ function FormatEvalOrigin(script) {
   if (eval_from_script) {
     if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) {
       // eval script originated from another eval.
-      eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")";
+      eval_origin += " (" + FormatEvalOrigin(eval_from_script) + ")";
     } else {
-      // eval script originated from "real" scource.
+      // eval script originated from "real" source.
       if (eval_from_script.name) {
         eval_origin += " (" + eval_from_script.name;
         var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);

@@ -807,13 +816,18 @@ function FormatEvalOrigin(script) {
 };

 function FormatSourcePosition(frame) {
+  var fileName;
   var fileLocation = "";
   if (frame.isNative()) {
     fileLocation = "native";
   } else if (frame.isEval()) {
-    fileLocation = "eval at " + frame.getEvalOrigin();
+    fileName = frame.getScriptNameOrSourceURL();
+    if (!fileName)
+      fileLocation = frame.getEvalOrigin();
   } else {
-    var fileName = frame.getFileName();
+    fileName = frame.getFileName();
+  }
+
   if (fileName) {
     fileLocation += fileName;
     var lineNumber = frame.getLineNumber();

@@ -825,7 +839,7 @@ function FormatSourcePosition(frame) {
       }
     }
   }
-  }
   if (!fileLocation) {
     fileLocation = "unknown source";
   }

1
deps/v8/src/mips/assembler-mips.h

@@ -665,4 +665,3 @@ class Assembler : public Malloced {
 } }  // namespace v8::internal

-
 #endif  // V8_ARM_ASSEMBLER_MIPS_H_

10
deps/v8/src/mips/frames-mips.cc

@@ -52,9 +52,7 @@ StackFrame::Type StackFrame::ComputeType(State* state) {
 }

-StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
-  if (fp == 0) return NONE;
-  // Compute frame type and stack pointer.
+Address ExitFrame::ComputeStackPointer(Address fp) {
   Address sp = fp + ExitFrameConstants::kSPDisplacement;
   const int offset = ExitFrameConstants::kCodeOffset;
   Object* code = Memory::Object_at(fp + offset);

@@ -62,11 +60,7 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
   if (is_debug_exit) {
     sp -= kNumJSCallerSaved * kPointerSize;
   }
-  // Fill in the state.
-  state->sp = sp;
-  state->fp = fp;
-  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  return EXIT;
+  return sp;
 }

23
deps/v8/src/objects-debug.cc

@@ -89,7 +89,7 @@ void Failure::FailureVerify() {

 void HeapObject::PrintHeader(const char* id) {
-  PrintF("%p: [%s]\n", this, id);
+  PrintF("%p: [%s]\n", reinterpret_cast<void*>(this), id);
 }

@@ -522,9 +522,9 @@ void JSObject::PrintElements() {
 void JSObject::JSObjectPrint() {
-  PrintF("%p: [JSObject]\n", this);
-  PrintF(" - map = %p\n", map());
-  PrintF(" - prototype = %p\n", GetPrototype());
+  PrintF("%p: [JSObject]\n", reinterpret_cast<void*>(this));
+  PrintF(" - map = %p\n", reinterpret_cast<void*>(map()));
+  PrintF(" - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
   PrintF(" {\n");
   PrintProperties();
   PrintElements();

@@ -649,8 +649,9 @@ void Map::MapVerify() {
 }

-void Map::NormalizedMapVerify() {
+void Map::SharedMapVerify() {
   MapVerify();
+  ASSERT(is_shared());
   ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors());
   ASSERT_EQ(Heap::empty_fixed_array(), code_cache());
   ASSERT_EQ(0, pre_allocated_property_fields());

@@ -743,7 +744,7 @@ void String::StringVerify() {
 void JSFunction::JSFunctionPrint() {
   HeapObject::PrintHeader("Function");
-  PrintF(" - map = 0x%p\n", map());
+  PrintF(" - map = 0x%p\n", reinterpret_cast<void*>(map()));
   PrintF(" - initial_map = ");
   if (has_initial_map()) {
     initial_map()->ShortPrint();

@@ -904,7 +905,7 @@ void Code::CodePrint() {
 void Code::CodeVerify() {
   CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
-                  static_cast<intptr_t>(kCodeAlignment)));
+                  kCodeAlignment));
   Address last_gc_pc = NULL;
   for (RelocIterator it(this); !it.done(); it.next()) {
     it.rinfo()->Verify();

@@ -1223,9 +1224,9 @@ void BreakPointInfo::BreakPointInfoVerify() {
 void BreakPointInfo::BreakPointInfoPrint() {
   HeapObject::PrintHeader("BreakPointInfo");
-  PrintF("\n - code_position: %d", code_position());
-  PrintF("\n - source_position: %d", source_position());
-  PrintF("\n - statement_position: %d", statement_position());
+  PrintF("\n - code_position: %d", code_position()->value());
+  PrintF("\n - source_position: %d", source_position()->value());
+  PrintF("\n - statement_position: %d", statement_position()->value());
   PrintF("\n - break_point_objects: ");
   break_point_objects()->ShortPrint();
 }

@@ -1381,7 +1382,7 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
   for (int i = 0; i < length(); i++) {
     Object* e = get(i);
     if (e->IsMap()) {
-      Map::cast(e)->NormalizedMapVerify();
+      Map::cast(e)->SharedMapVerify();
     } else {
       ASSERT(e->IsUndefined());
     }

71
deps/v8/src/objects-inl.h

@@ -83,7 +83,6 @@ PropertyDetails PropertyDetails::AsDeleted() {
 }

-
 #define SMI_ACCESSORS(holder, name, offset)             \
   int holder::name() {                                  \
     Object* value = READ_FIELD(this, offset);           \

@@ -1343,8 +1342,8 @@ Object* JSObject::InObjectPropertyAtPut(int index,

-void JSObject::InitializeBody(int object_size) {
-  Object* value = Heap::undefined_value();
+void JSObject::InitializeBody(int object_size, Object* value) {
+  ASSERT(!value->IsHeapObject() || !Heap::InNewSpace(value));
   for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
     WRITE_FIELD(this, offset, value);
   }

@@ -2279,6 +2278,36 @@ bool Map::is_extensible() {
 }

+void Map::set_attached_to_shared_function_info(bool value) {
+  if (value) {
+    set_bit_field2(bit_field2() | (1 << kAttachedToSharedFunctionInfo));
+  } else {
+    set_bit_field2(bit_field2() & ~(1 << kAttachedToSharedFunctionInfo));
+  }
+}
+
+bool Map::attached_to_shared_function_info() {
+  return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0;
+}
+
+void Map::set_is_shared(bool value) {
+  if (value) {
+    set_bit_field2(bit_field2() | (1 << kIsShared));
+  } else {
+    set_bit_field2(bit_field2() & ~(1 << kIsShared));
+  }
+}
+
+bool Map::is_shared() {
+  return ((1 << kIsShared) & bit_field2()) != 0;
+}
+
+JSFunction* Map::unchecked_constructor() {
+  return reinterpret_cast<JSFunction*>(READ_FIELD(this, kConstructorOffset));
+}
+

 Code::Flags Code::flags() {
   return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));

@@ -2571,6 +2600,7 @@ ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
 ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)

@@ -2662,6 +2692,37 @@ PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, this_property_assignments_count,
                         kThisPropertyAssignmentsCountOffset)
 #endif

+int SharedFunctionInfo::construction_count() {
+  return READ_BYTE_FIELD(this, kConstructionCountOffset);
+}
+
+void SharedFunctionInfo::set_construction_count(int value) {
+  ASSERT(0 <= value && value < 256);
+  WRITE_BYTE_FIELD(this, kConstructionCountOffset, static_cast<byte>(value));
+}
+
+bool SharedFunctionInfo::live_objects_may_exist() {
+  return (compiler_hints() & (1 << kLiveObjectsMayExist)) != 0;
+}
+
+void SharedFunctionInfo::set_live_objects_may_exist(bool value) {
+  if (value) {
+    set_compiler_hints(compiler_hints() | (1 << kLiveObjectsMayExist));
+  } else {
+    set_compiler_hints(compiler_hints() & ~(1 << kLiveObjectsMayExist));
+  }
+}
+
+bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
+  return initial_map() != Heap::undefined_value();
+}
+

 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)

@@ -3138,9 +3199,9 @@ Object* JSObject::EnsureWritableFastElements() {
   ASSERT(HasFastElements());
   FixedArray* elems = FixedArray::cast(elements());
   if (elems->map() != Heap::fixed_cow_array_map()) return elems;
-  Object* writable_elems = Heap::CopyFixedArray(elems);
+  Object* writable_elems = Heap::CopyFixedArrayWithMap(elems,
+                                                       Heap::fixed_array_map());
   if (writable_elems->IsFailure()) return writable_elems;
-  FixedArray::cast(writable_elems)->set_map(Heap::fixed_array_map());
   set_elements(FixedArray::cast(writable_elems));
   Counters::cow_arrays_converted.Increment();
   return writable_elems;

257
deps/v8/src/objects.cc

@@ -2099,32 +2099,15 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) {
 }

-bool NormalizedMapCache::IsCacheable(JSObject* object) {
-  // Caching for global objects is not worth it (there are too few of them).
-  return !object->IsGlobalObject();
-}
-
 Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
-  Object* result;
   Map* fast = obj->map();
-  if (!IsCacheable(obj)) {
-    result = fast->CopyNormalized(mode);
-    if (result->IsFailure()) return result;
-  } else {
-    int index = Hash(fast) % kEntries;
-    result = get(index);
-    if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+  int index = Hash(fast) % kEntries;
+  Object* result = get(index);
+  if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
 #ifdef DEBUG
-      if (FLAG_enable_slow_asserts) {
-        // Make sure that the new slow map has exactly the same hash as the
-        // original fast map. This way we can use hash to check if a slow map
-        // is already in the hash (see Contains method).
-        ASSERT(Hash(fast) == Hash(Map::cast(result)));
-        // The cached map should match newly created normalized map bit-by-bit.
-        Object* fresh = fast->CopyNormalized(mode);
-        if (!fresh->IsFailure()) {
-          ASSERT(memcmp(Map::cast(fresh)->address(),
-                        Map::cast(result)->address(),
+    if (FLAG_enable_slow_asserts) {
+      // The cached map should match newly created normalized map bit-by-bit.
+      Object* fresh = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+      if (!fresh->IsFailure()) {
+        ASSERT(memcmp(Map::cast(fresh)->address(),
+                      Map::cast(result)->address(),

@@ -2135,25 +2118,15 @@ Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
     return result;
   }

-    result = fast->CopyNormalized(mode);
-    if (result->IsFailure()) return result;
-    set(index, result);
-  }
+  result = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+  if (result->IsFailure()) return result;
+  set(index, result);
   Counters::normalized_maps.Increment();

   return result;
 }

-bool NormalizedMapCache::Contains(Map* map) {
-  // If the map is present in the cache it can only be at one place:
-  // at the index calculated from the hash. We assume that a slow map has the
-  // same hash as a fast map it has been generated from.
-  int index = Hash(map) % kEntries;
-  return get(index) == map;
-}
-

 void NormalizedMapCache::Clear() {
   int entries = length();
   for (int i = 0; i != entries; i++) {
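
NormalizedMapCache::Get above is a direct-mapped cache: each map hashes to exactly one of kEntries slots, a hit is verified with CheckHit, and a miss simply rebuilds the value and overwrites the slot. A generic C++ sketch of that shape (placeholder key, value, and hash function; not V8's class):

#include <cstdio>

// Direct-mapped cache in the shape of NormalizedMapCache::Get: one slot
// per hash bucket, overwrite on miss. Key, Value, and the make() callback
// stand in for Map*, the normalized map, and CopyNormalized.
template <typename Key, typename Value, int kEntries>
class DirectMappedCache {
 public:
  Value Get(Key key, Value (*make)(Key)) {
    unsigned index = Hash(key) % kEntries;
    if (valid_[index] && keys_[index] == key) return values_[index];  // hit
    values_[index] = make(key);  // miss: build, cache, possibly evict
    keys_[index] = key;
    valid_[index] = true;
    return values_[index];
  }

 private:
  static unsigned Hash(Key key) {
    return static_cast<unsigned>(key) * 2654435761u;  // Fibonacci hashing
  }
  Key keys_[kEntries] = {};
  Value values_[kEntries] = {};
  bool valid_[kEntries] = {};
};

static int Square(int key) { return key * key; }

int main() {
  DirectMappedCache<int, int, 64> cache;
  printf("%d %d\n", cache.Get(7, &Square), cache.Get(7, &Square));  // 49 49
}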
@@ -2184,7 +2157,7 @@ bool NormalizedMapCache::CheckHit(Map* slow,
                                   Map* fast,
                                   PropertyNormalizationMode mode) {
 #ifdef DEBUG
-  slow->NormalizedMapVerify();
+  slow->SharedMapVerify();
 #endif
   return
     slow->constructor() == fast->constructor() &&

@@ -2194,17 +2167,17 @@ bool NormalizedMapCache::CheckHit(Map* slow,
                                  fast->inobject_properties()) &&
     slow->instance_type() == fast->instance_type() &&
     slow->bit_field() == fast->bit_field() &&
-    slow->bit_field2() == fast->bit_field2();
+    (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2();
 }

 Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
-  if (!HasFastProperties() &&
-      NormalizedMapCache::IsCacheable(this) &&
-      Top::context()->global_context()->normalized_map_cache()->
-          Contains(map())) {
-    // Replace the map with the identical copy that can be safely modified.
-    Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES);
+  if (map()->is_shared()) {
+    // Fast case maps are never marked as shared.
+    ASSERT(!HasFastProperties());
+    // Replace the map with an identical copy that can be safely modified.
+    Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
+                                        UNIQUE_NORMALIZED_MAP);
     if (obj->IsFailure()) return obj;
     Counters::normalized_maps.Increment();

@@ -3189,12 +3162,14 @@ Object* Map::CopyDropDescriptors() {
   }
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->set_is_shared(false);
   Map::cast(result)->ClearCodeCache();
   return result;
 }

-Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
+Object* Map::CopyNormalized(PropertyNormalizationMode mode,
+                            NormalizedMapSharingMode sharing) {
   int new_instance_size = instance_size();
   if (mode == CLEAR_INOBJECT_PROPERTIES) {
     new_instance_size -= inobject_properties() * kPointerSize;

@@ -3213,8 +3188,12 @@ Object* Map::CopyNormalized(PropertyNormalizationMode mode,
   Map::cast(result)->set_bit_field(bit_field());
   Map::cast(result)->set_bit_field2(bit_field2());
+  Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+
 #ifdef DEBUG
-  Map::cast(result)->NormalizedMapVerify();
+  if (Map::cast(result)->is_shared()) {
+    Map::cast(result)->SharedMapVerify();
+  }
 #endif

   return result;

@@ -3271,6 +3250,47 @@ void Map::RemoveFromCodeCache(String* name, Code* code, int index) {
 }

+void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
+  Map* current = this;
+  while (current != Heap::meta_map()) {
+    DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
+        *RawField(current, Map::kInstanceDescriptorsOffset));
+    if (d == Heap::empty_descriptor_array()) {
+      Map* prev = current->map();
+      current->set_map(Heap::meta_map());
+      callback(current, data);
+      current = prev;
+      continue;
+    }
+
+    FixedArray* contents = reinterpret_cast<FixedArray*>(
+        d->get(DescriptorArray::kContentArrayIndex));
+    Object** map_or_index_field = RawField(contents, HeapObject::kMapOffset);
+    Object* map_or_index = *map_or_index_field;
+    bool map_done = true;
+    for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
+         i < contents->length();
+         i += 2) {
+      PropertyDetails details(Smi::cast(contents->get(i + 1)));
+      if (details.IsTransition()) {
+        Map* next = reinterpret_cast<Map*>(contents->get(i));
+        next->set_map(current);
+        *map_or_index_field = Smi::FromInt(i + 2);
+        current = next;
+        map_done = false;
+        break;
+      }
+    }
+    if (!map_done) continue;
+
+    *map_or_index_field = Heap::fixed_array_map();
+    Map* prev = current->map();
+    current->set_map(Heap::meta_map());
+    callback(current, data);
+    current = prev;
+  }
+}
+

 Object* CodeCache::Update(String* name, Code* code) {
   ASSERT(code->ic_state() == MONOMORPHIC);
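
TraverseTransitionTree above visits every map in the transition tree without a stack or mark bits: it temporarily overwrites each child's map word with a back pointer to its parent and keeps a Smi cursor in the content array, Deutsch-Schorr-Waite style, restoring both on the way back out. A sketch of the same link-reversal idea on a plain binary tree (the back and next fields stand in for the overwritten map word and the Smi cursor):

#include <cstdio>

// Link-reversal (Deutsch-Schorr-Waite style) traversal: instead of
// recursing, point each visited child back at its parent and keep a
// per-node cursor of which child to try next, so the walk needs O(1)
// extra space beyond the fields repurposed during the traversal.
struct Node {
  int value;
  Node* child[2];
  Node* back;  // plays the role of the temporarily overwritten map word
  int next;    // plays the role of the Smi cursor in the content array
};

static void Traverse(Node* root, void (*callback)(Node*)) {
  Node* current = root;
  while (current != nullptr) {
    if (current->next < 2) {
      Node* child = current->child[current->next++];
      if (child != nullptr) {  // descend, reversing the link
        child->back = current;
        current = child;
      }
    } else {                   // all children done: visit, back out
      callback(current);
      current = current->back;
    }
  }
}

static void Print(Node* n) { printf("%d ", n->value); }

int main() {
  Node leaf1 = {1, {nullptr, nullptr}, nullptr, 0};
  Node leaf2 = {2, {nullptr, nullptr}, nullptr, 0};
  Node root = {3, {&leaf1, &leaf2}, nullptr, 0};
  Traverse(&root, &Print);  // post-order: 1 2 3
}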
@@ -3825,7 +3845,7 @@ Object* DescriptorArray::RemoveTransitions() {
 }

-void DescriptorArray::Sort() {
+void DescriptorArray::SortUnchecked() {
   // In-place heap sort.
   int len = number_of_descriptors();

@@ -3875,7 +3895,11 @@ void DescriptorArray::SortUnchecked() {
       parent_index = child_index;
     }
   }
+}
+
+
+void DescriptorArray::Sort() {
+  SortUnchecked();
   SLOW_ASSERT(IsSortedNoDuplicates());
 }

@@ -5269,6 +5293,13 @@ bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
 }

+void SharedFunctionInfo::ForbidInlineConstructor() {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kHasOnlySimpleThisPropertyAssignments,
+                                     false));
+}
+

 void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
     bool only_simple_this_property_assignments,
     FixedArray* assignments) {

@@ -5366,6 +5397,107 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
 }

+void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
+  ASSERT(!IsInobjectSlackTrackingInProgress());
+
+  // Only initiate the tracking the first time.
+  if (live_objects_may_exist()) return;
+  set_live_objects_may_exist(true);
+
+  // No tracking during the snapshot construction phase.
+  if (Serializer::enabled()) return;
+
+  if (map->unused_property_fields() == 0) return;
+
+  // Nonzero counter is a leftover from the previous attempt interrupted
+  // by GC, keep it.
+  if (construction_count() == 0) {
+    set_construction_count(kGenerousAllocationCount);
+  }
+  set_initial_map(map);
+  ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+            construct_stub());
+  set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+}
+
+
+// Called from GC, hence reinterpret_cast and unchecked accessors.
+void SharedFunctionInfo::DetachInitialMap() {
+  Map* map = reinterpret_cast<Map*>(initial_map());
+
+  // Make the map remember to restore the link if it survives the GC.
+  map->set_bit_field2(
+      map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo));
+
+  // Undo state changes made by StartInobjectTracking (except the
+  // construction_count). This way if the initial map does not survive the GC
+  // then StartInobjectTracking will be called again the next time the
+  // constructor is called. The countdown will continue and (possibly after
+  // several more GCs) CompleteInobjectSlackTracking will eventually be called.
+  set_initial_map(Heap::raw_unchecked_undefined_value());
+  ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+            *RawField(this, kConstructStubOffset));
+  set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+  // It is safe to clear the flag: it will be set again if the map is live.
+  set_live_objects_may_exist(false);
+}
+
+
+// Called from GC, hence reinterpret_cast and unchecked accessors.
+void SharedFunctionInfo::AttachInitialMap(Map* map) {
+  map->set_bit_field2(
+      map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo));
+
+  // Resume inobject slack tracking.
+  set_initial_map(map);
+  ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+            *RawField(this, kConstructStubOffset));
+  set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+  // The map survived the gc, so there may be objects referencing it.
+  set_live_objects_may_exist(true);
+}
+
+
+static void GetMinInobjectSlack(Map* map, void* data) {
+  int slack = map->unused_property_fields();
+  if (*reinterpret_cast<int*>(data) > slack) {
+    *reinterpret_cast<int*>(data) = slack;
+  }
+}
+
+
+static void ShrinkInstanceSize(Map* map, void* data) {
+  int slack = *reinterpret_cast<int*>(data);
+  map->set_inobject_properties(map->inobject_properties() - slack);
+  map->set_unused_property_fields(map->unused_property_fields() - slack);
+  map->set_instance_size(map->instance_size() - slack * kPointerSize);
+
+  // Visitor id might depend on the instance size, recalculate it.
+  map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
+}
+
+
+void SharedFunctionInfo::CompleteInobjectSlackTracking() {
+  ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
+  Map* map = Map::cast(initial_map());
+
+  set_initial_map(Heap::undefined_value());
+  ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+            construct_stub());
+  set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+
+  int slack = map->unused_property_fields();
+  map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
+  if (slack != 0) {
+    // Resize the initial map and all maps in its transition tree.
+    map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
+    // Give the correct expected_nof_properties to initial maps created later.
+    ASSERT(expected_nof_properties() >= slack);
+    set_expected_nof_properties(expected_nof_properties() - slack);
+  }
+}
+

 void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
   Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
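
Taken together, the functions above implement inobject slack tracking: instances start generously sized, the countdown construct stub decrements construction_count, and once it reaches zero the transition tree is walked twice, first to find the minimum slack any map still has, then to shrink every instance by it. A back-of-the-envelope C++ sketch of that arithmetic (the counts are made up for illustration, not V8's values):

#include <algorithm>
#include <cstdio>

// Toy model of inobject slack tracking: over-allocate in-object property
// slots, observe the first few constructions, then shrink the instance
// size by the slots no construction ever used.
int main() {
  const int kGenerousAllocationCount = 8;  // countdown length (illustrative)
  const int unused_property_fields = 10;   // initial over-allocation

  // In-object fields actually used by the first tracked constructions.
  int used_fields[kGenerousAllocationCount] = {3, 4, 3, 2, 4, 3, 3, 4};

  int min_slack = unused_property_fields;
  for (int used : used_fields) {
    min_slack = std::min(min_slack, unused_property_fields - used);
  }

  // CompleteInobjectSlackTracking: every map in the transition tree gives
  // back min_slack words of instance size.
  const int kPointerSize = 4;
  printf("shrink each instance by %d bytes\n", min_slack * kPointerSize);
}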
@@ -5919,21 +6051,24 @@ bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
 }

-bool JSObject::HasLocalElement(uint32_t index) {
+JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
       !Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
     Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+    return UNDEFINED_ELEMENT;
   }

   // Check for lookup interceptor
   if (HasIndexedInterceptor()) {
-    return HasElementWithInterceptor(this, index);
+    return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
+                                                  : UNDEFINED_ELEMENT;
   }

   // Handle [] on String objects.
-  if (this->IsStringObjectWithCharacterAt(index)) return true;
+  if (this->IsStringObjectWithCharacterAt(index)) {
+    return STRING_CHARACTER_ELEMENT;
+  }

   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {

@@ -5941,12 +6076,16 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
           static_cast<uint32_t>
               (Smi::cast(JSArray::cast(this)->length())->value()) :
           static_cast<uint32_t>(FixedArray::cast(elements())->length());
-      return (index < length) &&
-             !FixedArray::cast(elements())->get(index)->IsTheHole();
+      if ((index < length) &&
+          !FixedArray::cast(elements())->get(index)->IsTheHole()) {
+        return FAST_ELEMENT;
+      }
+      break;
     }
     case PIXEL_ELEMENTS: {
       PixelArray* pixels = PixelArray::cast(elements());
-      return (index < static_cast<uint32_t>(pixels->length()));
+      if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
+      break;
     }
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:

@@ -5956,18 +6095,22 @@ JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
     case EXTERNAL_FLOAT_ELEMENTS: {
       ExternalArray* array = ExternalArray::cast(elements());
-      return (index < static_cast<uint32_t>(array->length()));
+      if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
+      break;
     }
     case DICTIONARY_ELEMENTS: {
-      return element_dictionary()->FindEntry(index)
-          != NumberDictionary::kNotFound;
+      if (element_dictionary()->FindEntry(index) !=
+          NumberDictionary::kNotFound) {
+        return DICTIONARY_ELEMENT;
+      }
+      break;
     }
     default:
       UNREACHABLE();
       break;
   }

-  UNREACHABLE();
-  return Heap::null_value();
+  return UNDEFINED_ELEMENT;
 }

@@ -8710,11 +8853,11 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
   // No free slot - extend break point info array.
   Handle<FixedArray> old_break_points =
       Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
-  debug_info->set_break_points(*Factory::NewFixedArray(
-      old_break_points->length() +
-      Debug::kEstimatedNofBreakPointsInFunction));
   Handle<FixedArray> new_break_points =
-      Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+      Factory::NewFixedArray(old_break_points->length() +
+                             Debug::kEstimatedNofBreakPointsInFunction);
+
+  debug_info->set_break_points(*new_break_points);
   for (int i = 0; i < old_break_points->length(); i++) {
     new_break_points->set(i, old_break_points->get(i));
   }

193
deps/v8/src/objects.h

@@ -200,6 +200,14 @@ enum PropertyNormalizationMode {
 };

+// NormalizedMapSharingMode is used to specify whether a map may be shared
+// by different objects with normalized properties.
+enum NormalizedMapSharingMode {
+  UNIQUE_NORMALIZED_MAP,
+  SHARED_NORMALIZED_MAP
+};
+

 // Instance size sentinel for objects of variable size.
 static const int kVariableSizeSentinel = 0;

@@ -1417,7 +1425,26 @@ class JSObject: public HeapObject {
   // Tells whether the index'th element is present.
   inline bool HasElement(uint32_t index);
   bool HasElementWithReceiver(JSObject* receiver, uint32_t index);
-  bool HasLocalElement(uint32_t index);
+
+  // Tells whether the index'th element is present and how it is stored.
+  enum LocalElementType {
+    // There is no element with given index.
+    UNDEFINED_ELEMENT,
+
+    // Element with given index is handled by interceptor.
+    INTERCEPTED_ELEMENT,
+
+    // Element with given index is character in string.
+    STRING_CHARACTER_ELEMENT,
+
+    // Element with given index is stored in fast backing store.
+    FAST_ELEMENT,
+
+    // Element with given index is stored in slow backing store.
+    DICTIONARY_ELEMENT
+  };
+
+  LocalElementType HasLocalElement(uint32_t index);

   bool HasElementWithInterceptor(JSObject* receiver, uint32_t index);
   bool HasElementPostInterceptor(JSObject* receiver, uint32_t index);
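
Replacing the old boolean result with LocalElementType lets callers distinguish an element that physically exists from one that is only visible through an interceptor. A standalone sketch of how a caller might branch on the result (the enum is restated so the example compiles on its own; the object query itself is mocked away):

#include <cstdio>

// Mirrors the LocalElementType declared above; in real code the value
// would come from JSObject::HasLocalElement(index).
enum LocalElementType {
  UNDEFINED_ELEMENT,
  INTERCEPTED_ELEMENT,
  STRING_CHARACTER_ELEMENT,
  FAST_ELEMENT,
  DICTIONARY_ELEMENT
};

static const char* Describe(LocalElementType type) {
  switch (type) {
    case UNDEFINED_ELEMENT:        return "absent";
    case INTERCEPTED_ELEMENT:      return "handled by an interceptor";
    case STRING_CHARACTER_ELEMENT: return "a character in a String object";
    case FAST_ELEMENT:             return "in the fast backing store";
    case DICTIONARY_ELEMENT:       return "in the slow backing store";
  }
  return "unknown";
}

int main() {
  printf("element is %s\n", Describe(FAST_ELEMENT));
}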
@ -1576,7 +1603,7 @@ class JSObject: public HeapObject {
// initialized by set_properties // initialized by set_properties
// Note: this call does not update write barrier, it is caller's // Note: this call does not update write barrier, it is caller's
// reponsibility to ensure that *v* can be collected without WB here. // reponsibility to ensure that *v* can be collected without WB here.
inline void InitializeBody(int object_size); inline void InitializeBody(int object_size, Object* value);
// Check whether this object references another object // Check whether this object references another object
bool ReferencesObject(Object* obj); bool ReferencesObject(Object* obj);
@ -1892,6 +1919,11 @@ class DescriptorArray: public FixedArray {
MUST_USE_RESULT Object* RemoveTransitions(); MUST_USE_RESULT Object* RemoveTransitions();
// Sort the instance descriptors by the hash codes of their keys. // Sort the instance descriptors by the hash codes of their keys.
// Does not check for duplicates.
void SortUnchecked();
// Sort the instance descriptors by the hash codes of their keys.
// Checks the result for duplicates.
void Sort(); void Sort();
// Search the instance descriptors for given name. // Search the instance descriptors for given name.
@ -2485,12 +2517,8 @@ class NormalizedMapCache: public FixedArray {
public: public:
static const int kEntries = 64; static const int kEntries = 64;
static bool IsCacheable(JSObject* object);
Object* Get(JSObject* object, PropertyNormalizationMode mode); Object* Get(JSObject* object, PropertyNormalizationMode mode);
bool Contains(Map* map);
void Clear(); void Clear();
// Casting // Casting
@ -2985,11 +3013,6 @@ class Code: public HeapObject {
void CodePrint(); void CodePrint();
void CodeVerify(); void CodeVerify();
#endif #endif
// Code entry points are aligned to 32 bytes.
static const int kCodeAlignmentBits = 5;
static const int kCodeAlignment = 1 << kCodeAlignmentBits;
static const int kCodeAlignmentMask = kCodeAlignment - 1;
// Layout description. // Layout description.
static const int kInstructionSizeOffset = HeapObject::kHeaderSize; static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize; static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
@ -2998,8 +3021,7 @@ class Code: public HeapObject {
// Add padding to align the instruction start following right after // Add padding to align the instruction start following right after
// the Code object header. // the Code object header.
static const int kHeaderSize = static const int kHeaderSize =
(kKindSpecificFlagsOffset + kIntSize + kCodeAlignmentMask) & CODE_POINTER_ALIGN(kKindSpecificFlagsOffset + kIntSize);
~kCodeAlignmentMask;
// Byte offsets within kKindSpecificFlagsOffset. // Byte offsets within kKindSpecificFlagsOffset.
static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1; static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
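
The CODE_POINTER_ALIGN macro now does what the deleted kCodeAlignment constants spelled out by hand: round an offset up to the next power-of-two boundary. A standalone sketch of that round-up arithmetic, assuming the 32-byte value the old constants encoded (names below are illustrative, not V8's):

    #include <cassert>

    // Assumed 32-byte code alignment, mirroring the deleted constants.
    static const int kCodeAlignmentBits = 5;
    static const int kCodeAlignment = 1 << kCodeAlignmentBits;  // 32
    static const int kCodeAlignmentMask = kCodeAlignment - 1;   // 31

    // Round up to the next multiple of kCodeAlignment: adding the mask and
    // then clearing the low bits rounds up, never down.
    static int CodePointerAlign(int value) {
      return (value + kCodeAlignmentMask) & ~kCodeAlignmentMask;
    }

    int main() {
      assert(CodePointerAlign(0) == 0);
      assert(CodePointerAlign(1) == 32);
      assert(CodePointerAlign(32) == 32);  // already aligned: unchanged
      assert(CodePointerAlign(33) == 64);
      return 0;
    }
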
@@ -3146,6 +3168,19 @@ class Map: public HeapObject {
     return ((1 << kHasFastElements) & bit_field2()) != 0;
   }

+  // Tells whether the map is attached to SharedFunctionInfo
+  // (for inobject slack tracking).
+  inline void set_attached_to_shared_function_info(bool value);
+
+  inline bool attached_to_shared_function_info();
+
+  // Tells whether the map is shared between objects that may have different
+  // behavior. If true, the map should never be modified, instead a clone
+  // should be created and modified.
+  inline void set_is_shared(bool value);
+
+  inline bool is_shared();
+
   // Tells whether the instance needs security checks when accessing its
   // properties.
   inline void set_is_access_check_needed(bool access_check_needed);

@@ -3157,6 +3192,8 @@ class Map: public HeapObject {
   // [constructor]: points back to the function responsible for this map.
   DECL_ACCESSORS(constructor, Object)

+  inline JSFunction* unchecked_constructor();
+
   // [instance descriptors]: describes the object.
   DECL_ACCESSORS(instance_descriptors, DescriptorArray)

@@ -3165,7 +3202,8 @@ class Map: public HeapObject {
   MUST_USE_RESULT Object* CopyDropDescriptors();

-  MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode);
+  MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode,
+                                         NormalizedMapSharingMode sharing);

   // Returns a copy of the map, with all transitions dropped from the
   // instance descriptors.

@@ -3229,12 +3267,16 @@ class Map: public HeapObject {
 #ifdef DEBUG
   void MapPrint();
   void MapVerify();
-  void NormalizedMapVerify();
+  void SharedMapVerify();
 #endif

   inline int visitor_id();
   inline void set_visitor_id(int visitor_id);

+  typedef void (*TraverseCallback)(Map* map, void* data);
+
+  void TraverseTransitionTree(TraverseCallback callback, void* data);
+
   static const int kMaxPreAllocatedPropertyFields = 255;

   // Layout description.

@@ -3288,6 +3330,8 @@ class Map: public HeapObject {
   static const int kFunctionWithPrototype = 1;
   static const int kHasFastElements = 2;
   static const int kStringWrapperSafeForDefaultValueOf = 3;
+  static const int kAttachedToSharedFunctionInfo = 4;
+  static const int kIsShared = 5;

   // Layout of the default cache. It holds alternating name and code objects.
   static const int kCodeCacheEntrySize = 2;
@@ -3442,6 +3486,100 @@ class SharedFunctionInfo: public HeapObject {
   inline int expected_nof_properties();
   inline void set_expected_nof_properties(int value);

+  // Inobject slack tracking is the way to reclaim unused inobject space.
+  //
+  // The instance size is initially determined by adding some slack to
+  // expected_nof_properties (to allow for a few extra properties added
+  // after the constructor). There is no guarantee that the extra space
+  // will not be wasted.
+  //
+  // Here is the algorithm to reclaim the unused inobject space:
+  // - Detect the first constructor call for this SharedFunctionInfo.
+  //   When it happens enter the "in progress" state: remember the
+  //   constructor's initial_map and install a special construct stub that
+  //   counts constructor calls.
+  // - While the tracking is in progress create objects filled with
+  //   one_pointer_filler_map instead of undefined_value. This way they can be
+  //   resized quickly and safely.
+  // - Once enough (kGenerousAllocationCount) objects have been created
+  //   compute the 'slack' (traverse the map transition tree starting from the
+  //   initial_map and find the lowest value of unused_property_fields).
+  // - Traverse the transition tree again and decrease the instance size
+  //   of every map. Existing objects will resize automatically (they are
+  //   filled with one_pointer_filler_map). All further allocations will
+  //   use the adjusted instance size.
+  // - Decrease expected_nof_properties so that an allocations made from
+  //   another context will use the adjusted instance size too.
+  // - Exit "in progress" state by clearing the reference to the initial_map
+  //   and setting the regular construct stub (generic or inline).
+  //
+  // The above is the main event sequence. Some special cases are possible
+  // while the tracking is in progress:
+  //
+  // - GC occurs.
+  //   Check if the initial_map is referenced by any live objects (except this
+  //   SharedFunctionInfo). If it is, continue tracking as usual.
+  //   If it is not, clear the reference and reset the tracking state. The
+  //   tracking will be initiated again on the next constructor call.
+  //
+  // - The constructor is called from another context.
+  //   Immediately complete the tracking, perform all the necessary changes
+  //   to maps. This is necessary because there is no efficient way to track
+  //   multiple initial_maps.
+  //   Proceed to create an object in the current context (with the adjusted
+  //   size).
+  //
+  // - A different constructor function sharing the same SharedFunctionInfo is
+  //   called in the same context. This could be another closure in the same
+  //   context, or the first function could have been disposed.
+  //   This is handled the same way as the previous case.
+  //
+  // Important: inobject slack tracking is not attempted during the snapshot
+  // creation.
+
+  static const int kGenerousAllocationCount = 16;
+
+  // [construction_count]: Counter for constructor calls made during
+  // the tracking phase.
+  inline int construction_count();
+  inline void set_construction_count(int value);
+
+  // [initial_map]: initial map of the first function called as a constructor.
+  // Saved for the duration of the tracking phase.
+  // This is a weak link (GC resets it to undefined_value if no other live
+  // object reference this map).
+  DECL_ACCESSORS(initial_map, Object)
+
+  // True if the initial_map is not undefined and the countdown stub is
+  // installed.
+  inline bool IsInobjectSlackTrackingInProgress();
+
+  // Starts the tracking.
+  // Stores the initial map and installs the countdown stub.
+  // IsInobjectSlackTrackingInProgress is normally true after this call,
+  // except when tracking have not been started (e.g. the map has no unused
+  // properties or the snapshot is being built).
+  void StartInobjectSlackTracking(Map* map);
+
+  // Completes the tracking.
+  // IsInobjectSlackTrackingInProgress is false after this call.
+  void CompleteInobjectSlackTracking();
+
+  // Clears the initial_map before the GC marking phase to ensure the reference
+  // is weak. IsInobjectSlackTrackingInProgress is false after this call.
+  void DetachInitialMap();
+
+  // Restores the link to the initial map after the GC marking phase.
+  // IsInobjectSlackTrackingInProgress is true after this call.
+  void AttachInitialMap(Map* map);
+
+  // False if there are definitely no live objects created from this function.
+  // True if live objects _may_ exist (existence not guaranteed).
+  // May go back from true to false after GC.
+  inline bool live_objects_may_exist();
+  inline void set_live_objects_may_exist(bool value);
+
   // [instance class name]: class name for instances.
   DECL_ACCESSORS(instance_class_name, Object)
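
The comment block above describes the full protocol; as a rough illustration of the countdown idea only (every name here is a hypothetical toy, not V8 code), a per-function constructor counter can drive a one-time shrink of the instance size once enough slack samples have been seen:

    #include <algorithm>
    #include <cstdio>

    // Toy model of the countdown; no maps, transitions, or GC interaction.
    struct ToyMap { int instance_size; int unused_property_fields; };

    struct ToyShared {
      static const int kGenerousAllocationCount = 16;  // mirrors the constant
      int construction_count = kGenerousAllocationCount;

      void ConstructorCalled(ToyMap* map, int slack_seen) {
        // Track the lowest slack observed across constructed objects.
        map->unused_property_fields =
            std::min(map->unused_property_fields, slack_seen);
        if (--construction_count == 0) {
          // Enough samples: shrink the instance size by the observed slack.
          map->instance_size -= map->unused_property_fields;
          map->unused_property_fields = 0;
        }
      }
    };

    int main() {
      ToyMap map = { 64, 8 };
      ToyShared shared;
      for (int i = 0; i < ToyShared::kGenerousAllocationCount; i++) {
        shared.ConstructorCalled(&map, 4);  // each object leaves 4 words unused
      }
      std::printf("final instance_size=%d\n", map.instance_size);  // 60
      return 0;
    }
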
@@ -3542,6 +3680,10 @@ class SharedFunctionInfo: public HeapObject {
   // prototype.
   bool CanGenerateInlineConstructor(Object* prototype);

+  // Prevents further attempts to generate inline constructors.
+  // To be called if generation failed for any reason.
+  void ForbidInlineConstructor();
+
   // For functions which only contains this property assignments this provides
   // access to the names for the properties assigned.
   DECL_ACCESSORS(this_property_assignments, Object)

@@ -3589,8 +3731,10 @@ class SharedFunctionInfo: public HeapObject {
   static const int kScriptOffset = kFunctionDataOffset + kPointerSize;
   static const int kDebugInfoOffset = kScriptOffset + kPointerSize;
   static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
-  static const int kThisPropertyAssignmentsOffset =
+  static const int kInitialMapOffset =
       kInferredNameOffset + kPointerSize;
+  static const int kThisPropertyAssignmentsOffset =
+      kInitialMapOffset + kPointerSize;
 #if V8_HOST_ARCH_32_BIT
   // Smi fields.
   static const int kLengthOffset =

@@ -3614,7 +3758,7 @@ class SharedFunctionInfo: public HeapObject {
   static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
 #else
   // The only reason to use smi fields instead of int fields
-  // is to allow interation without maps decoding during
+  // is to allow iteration without maps decoding during
   // garbage collections.
   // To avoid wasting space on 64-bit architectures we use
   // the following trick: we group integer fields into pairs

@@ -3649,6 +3793,18 @@ class SharedFunctionInfo: public HeapObject {
   static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
 #endif

+  // The construction counter for inobject slack tracking is stored in the
+  // most significant byte of compiler_hints which is otherwise unused.
+  // Its offset depends on the endian-ness of the architecture.
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+  static const int kConstructionCountOffset = kCompilerHintsOffset + 3;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+  static const int kConstructionCountOffset = kCompilerHintsOffset + 0;
+#else
+#error Unknown byte ordering
+#endif
+
   static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);

   typedef FixedBodyDescriptor<kNameOffset,
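
The +3 / +0 offsets above pick out the most significant byte of a 32-bit field, which moves with host byte order. A small self-contained check of that arithmetic (independent of V8):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // A 32-bit field whose most significant byte holds a small counter.
      uint32_t compiler_hints = 0;
      compiler_hints |= 16u << 24;  // store counter value 16 in the MSB

      uint8_t bytes[4];
      std::memcpy(bytes, &compiler_hints, 4);

      // On little-endian hosts the MSB lives at byte offset 3, on big-endian
      // hosts at byte offset 0 -- exactly the +3 / +0 choice made above.
      uint16_t probe = 1;
      bool little_endian = *reinterpret_cast<uint8_t*>(&probe) == 1;
      int msb_offset = little_endian ? 3 : 0;
      std::printf("counter byte = %d\n", bytes[msb_offset]);  // prints 16
      return 0;
    }
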
@@ -3668,7 +3824,8 @@ class SharedFunctionInfo: public HeapObject {
   static const int kHasOnlySimpleThisPropertyAssignments = 0;
   static const int kTryFullCodegen = 1;
   static const int kAllowLazyCompilation = 2;
-  static const int kCodeAgeShift = 3;
+  static const int kLiveObjectsMayExist = 3;
+  static const int kCodeAgeShift = 4;
   static const int kCodeAgeMask = 7;

   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);

157  deps/v8/src/parser.cc

@@ -115,11 +115,7 @@ class Parser {
   // Returns NULL if parsing failed.
   FunctionLiteral* ParseProgram(Handle<String> source,
                                 bool in_global_context);
-  FunctionLiteral* ParseLazy(Handle<String> source,
-                             Handle<String> name,
-                             int start_position,
-                             int end_position,
-                             bool is_expression);
+  FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info);
   FunctionLiteral* ParseJson(Handle<String> source);

   // The minimum number of contiguous assignment that will
@@ -877,12 +873,30 @@ class ParserLog BASE_EMBEDDED {
   virtual int function_position() { return 0; }
   virtual int symbol_position() { return 0; }
   virtual int symbol_ids() { return 0; }
+  virtual void PauseRecording() {}
+  virtual void ResumeRecording() {}
   virtual Vector<unsigned> ExtractData() {
     return Vector<unsigned>();
   };
 };

+class ConditionalLogPauseScope {
+ public:
+  ConditionalLogPauseScope(bool pause, ParserLog* log)
+      : log_(log), pause_(pause) {
+    if (pause) log->PauseRecording();
+  }
+  ~ConditionalLogPauseScope() {
+    if (pause_) log_->ResumeRecording();
+  }
+ private:
+  ParserLog* log_;
+  bool pause_;
+};
+
 class AstBuildingParserFactory : public ParserFactory {
  public:
   explicit AstBuildingParserFactory(int expected_symbols)
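
ConditionalLogPauseScope is a standard RAII guard: pause on construction when the flag is set, resume on destruction no matter how the scope is left. A self-contained sketch of the same pattern (the Log type and its counter are stand-ins, not V8 code):

    // Stand-in log with a pause counter, mirroring Pause/ResumeRecording.
    struct Log {
      int pause_depth = 0;
      void PauseRecording() { pause_depth++; }
      void ResumeRecording() { pause_depth--; }
    };

    class ConditionalPauseScope {
     public:
      ConditionalPauseScope(bool pause, Log* log) : log_(log), pause_(pause) {
        if (pause) log->PauseRecording();
      }
      // The destructor runs on every exit path, so pauses always balance.
      ~ConditionalPauseScope() {
        if (pause_) log_->ResumeRecording();
      }
     private:
      Log* log_;
      bool pause_;
    };

    int main() {
      Log log;
      {
        ConditionalPauseScope guard(true, &log);  // paused inside this block
      }
      return log.pause_depth;  // 0: pause and resume balanced
    }
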
@@ -970,15 +984,31 @@ class PartialParserRecorder: public ParserLog {
     return data;
   }

+  virtual void PauseRecording() {
+    pause_count_++;
+    is_recording_ = false;
+  }
+
+  virtual void ResumeRecording() {
+    ASSERT(pause_count_ > 0);
+    if (--pause_count_ == 0) is_recording_ = !has_error();
+  }
+
  protected:
   bool has_error() {
     return static_cast<bool>(preamble_[ScriptDataImpl::kHasErrorOffset]);
   }

+  bool is_recording() {
+    return is_recording_;
+  }
+
   void WriteString(Vector<const char> str);

   Collector<unsigned> function_store_;
   unsigned preamble_[ScriptDataImpl::kHeaderSize];
+  bool is_recording_;
+  int pause_count_;
 #ifdef DEBUG
   int prev_start;
 #endif
@@ -991,6 +1021,7 @@ class CompleteParserRecorder: public PartialParserRecorder {
   CompleteParserRecorder();

   virtual void LogSymbol(int start, Vector<const char> literal) {
+    if (!is_recording_) return;
     int hash = vector_hash(literal);
     HashMap::Entry* entry = symbol_table_.Lookup(&literal, hash, true);
     int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));

@@ -1001,7 +1032,7 @@ class CompleteParserRecorder: public PartialParserRecorder {
       Vector<Vector<const char> > symbol = symbol_entries_.AddBlock(1, literal);
       entry->key = &symbol[0];
     }
-    symbol_store_.Add(id - 1);
+    WriteNumber(id - 1);
   }

   virtual Vector<unsigned> ExtractData() {
@@ -1061,13 +1092,6 @@ class CompleteParserRecorder: public PartialParserRecorder {
 };

-void ScriptDataImpl::SkipFunctionEntry(int start) {
-  ASSERT(function_index_ + FunctionEntry::kSize <= store_.length());
-  ASSERT(static_cast<int>(store_[function_index_]) == start);
-  function_index_ += FunctionEntry::kSize;
-}
-
 FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
   // The current pre-data entry must be a FunctionEntry with the given
   // start position.
@@ -1126,7 +1150,10 @@ bool ScriptDataImpl::SanityCheck() {
-PartialParserRecorder::PartialParserRecorder() : function_store_(0) {
+PartialParserRecorder::PartialParserRecorder()
+    : function_store_(0),
+      is_recording_(true),
+      pause_count_(0) {
   preamble_[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
   preamble_[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
   preamble_[ScriptDataImpl::kHasErrorOffset] = false;

@@ -1202,6 +1229,7 @@ void PartialParserRecorder::LogMessage(Scanner::Location loc,
   for (int i = 0; i < args.length(); i++) {
     WriteString(CStrVector(args[i]));
   }
+  is_recording_ = false;
 }
@@ -1248,7 +1276,7 @@ FunctionEntry PartialParserRecorder::LogFunction(int start) {
   ASSERT(start > prev_start);
   prev_start = start;
 #endif
-  if (has_error()) return FunctionEntry();
+  if (!is_recording_) return FunctionEntry();
   FunctionEntry result(function_store_.AddBlock(FunctionEntry::kSize, 0));
   result.set_start_pos(start);
   return result;

@@ -1343,6 +1371,8 @@ Scope* ParserFactory::NewScope(Scope* parent, Scope::Type type,
                                bool inside_with) {
   ASSERT(parent != NULL);
   parent->type_ = type;
+  // Initialize function is hijacked by DummyScope to increment scope depth.
+  parent->Initialize(inside_with);
   return parent;
 }
@@ -1415,6 +1445,7 @@ class LexicalScope BASE_EMBEDDED {
   }

   ~LexicalScope() {
+    parser_->top_scope_->Leave();
     parser_->top_scope_ = prev_scope_;
     parser_->with_nesting_level_ = prev_level_;
   }

@@ -1457,7 +1488,7 @@ Parser::Parser(Handle<Script> script,
                ParserLog* log,
                ScriptDataImpl* pre_data)
     : script_(script),
-      scanner_(is_pre_parsing),
+      scanner_(),
       top_scope_(NULL),
       with_nesting_level_(0),
       temp_scope_(NULL),
@@ -1480,7 +1511,8 @@ bool Parser::PreParseProgram(Handle<String> source,
   NoHandleAllocation no_handle_allocation;
   scanner_.Initialize(source, stream, JAVASCRIPT);
   ASSERT(target_stack_ == NULL);
-  mode_ = PARSE_EAGERLY;
+  mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
+  if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
   DummyScope top_scope;
   LexicalScope scope(this, &top_scope);
   TemporaryScope temp_scope(this);

@@ -1503,6 +1535,7 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
   source->TryFlatten();
   scanner_.Initialize(source, JAVASCRIPT);
   ASSERT(target_stack_ == NULL);
+  if (pre_data_ != NULL) pre_data_->Initialize();

   // Compute the parsing mode.
   mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
@@ -1550,21 +1583,20 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
 }

-FunctionLiteral* Parser::ParseLazy(Handle<String> source,
-                                   Handle<String> name,
-                                   int start_position,
-                                   int end_position,
-                                   bool is_expression) {
+FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info) {
   CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(&Counters::parse_lazy);
+  Handle<String> source(String::cast(script_->source()));
   Counters::total_parse_size.Increment(source->length());
+  Handle<String> name(String::cast(info->name()));
   fni_ = new FuncNameInferrer();
   fni_->PushEnclosingName(name);

   // Initialize parser state.
   source->TryFlatten();
-  scanner_.Initialize(source, start_position, end_position, JAVASCRIPT);
+  scanner_.Initialize(source, info->start_position(), info->end_position(),
+                      JAVASCRIPT);
   ASSERT(target_stack_ == NULL);
   mode_ = PARSE_EAGERLY;

@@ -1579,7 +1611,8 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
   LexicalScope lexical_scope(this, scope);
   TemporaryScope temp_scope(this);

-  FunctionLiteralType type = is_expression ? EXPRESSION : DECLARATION;
+  FunctionLiteralType type =
+      info->is_expression() ? EXPRESSION : DECLARATION;
   bool ok = true;
   result = ParseFunctionLiteral(name, RelocInfo::kNoPosition, type, &ok);
   // Make sure the results agree.

@@ -1600,6 +1633,7 @@ FunctionLiteral* Parser::ParseLazy(Handle<String> source,
   return result;
 }

+
 FunctionLiteral* Parser::ParseJson(Handle<String> source) {
   CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
@@ -1657,7 +1691,10 @@ void Parser::ReportMessage(const char* type, Vector<const char*> args) {

 Handle<String> Parser::GetSymbol(bool* ok) {
+  if (is_pre_parsing_) {
     log()->LogSymbol(scanner_.location().beg_pos, scanner_.literal());
+    return Handle<String>::null();
+  }
   int symbol_id = -1;
   if (pre_data() != NULL) {
     symbol_id = pre_data()->GetSymbolIdentifier();

@@ -1970,7 +2007,7 @@ void* Parser::ParseSourceElements(ZoneListWrapper<Statement>* processor,
   }

   // Propagate the collected information on this property assignments.
-  if (top_scope_->is_function_scope()) {
+  if (!is_pre_parsing_ && top_scope_->is_function_scope()) {
     bool only_simple_this_property_assignments =
         this_property_assignment_finder.only_simple_this_property_assignments()
         && top_scope_->declarations()->length() == 0;
@@ -4122,8 +4159,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
   int num_parameters = 0;
   // Parse function body.
-  { Scope::Type type = Scope::FUNCTION_SCOPE;
-    Scope* scope = factory()->NewScope(top_scope_, type, inside_with());
+  { Scope* scope =
+        factory()->NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
     LexicalScope lexical_scope(this, scope);
     TemporaryScope temp_scope(this);
     top_scope_->SetScopeName(name);

@@ -4154,7 +4191,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
     // NOTE: We create a proxy and resolve it here so that in the
     // future we can change the AST to only refer to VariableProxies
     // instead of Variables and Proxis as is the case now.
-    if (!function_name.is_null() && function_name->length() > 0) {
+    if (!is_pre_parsing_
+        && !function_name.is_null()
+        && function_name->length() > 0) {
       Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
       VariableProxy* fproxy =
           top_scope_->NewUnresolved(function_name, inside_with());
@@ -4188,22 +4227,18 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
       }
       Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
       scanner_.SeekForward(end_pos);
-      pre_data()->Skip(entry.predata_function_skip(),
-                       entry.predata_symbol_skip());
       materialized_literal_count = entry.literal_count();
       expected_property_count = entry.property_count();
       only_simple_this_property_assignments = false;
       this_property_assignments = Factory::empty_fixed_array();
       Expect(Token::RBRACE, CHECK_OK);
     } else {
-      if (pre_data() != NULL) {
-        // Skip pre-data entry for non-lazily compiled function.
-        pre_data()->SkipFunctionEntry(function_block_pos);
-      }
-      FunctionEntry entry = log()->LogFunction(function_block_pos);
-      int predata_function_position_before = log()->function_position();
-      int predata_symbol_position_before = log()->symbol_position();
-      ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
+      FunctionEntry entry;
+      if (is_lazily_compiled) entry = log()->LogFunction(function_block_pos);
+      {
+        ConditionalLogPauseScope pause_if(is_lazily_compiled, log());
+        ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
+      }
       materialized_literal_count = temp_scope.materialized_literal_count();
       expected_property_count = temp_scope.expected_property_count();
       only_simple_this_property_assignments =

@@ -4213,13 +4248,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
       Expect(Token::RBRACE, CHECK_OK);
       end_pos = scanner_.location().end_pos;
       if (entry.is_valid()) {
+        ASSERT(is_lazily_compiled);
+        ASSERT(is_pre_parsing_);
         entry.set_end_pos(end_pos);
         entry.set_literal_count(materialized_literal_count);
         entry.set_property_count(expected_property_count);
-        entry.set_predata_function_skip(
-            log()->function_position() - predata_function_position_before);
-        entry.set_predata_symbol_skip(
-            log()->symbol_position() - predata_symbol_position_before);
       }
     }
@@ -5439,12 +5472,6 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
 // ----------------------------------------------------------------------------
 // The Parser interface.

-// MakeAST() is just a wrapper for the corresponding Parser calls
-// so we don't have to expose the entire Parser class in the .h file.
-
-static bool always_allow_natives_syntax = false;
-
 ParserMessage::~ParserMessage() {
   for (int i = 0; i < args().length(); i++)
     DeleteArray(args()[i]);

@@ -5479,9 +5506,7 @@ ScriptDataImpl* PartialPreParse(Handle<String> source,
                                 v8::Extension* extension) {
   Handle<Script> no_script;
   bool allow_natives_syntax =
-      always_allow_natives_syntax ||
-      FLAG_allow_natives_syntax ||
-      Bootstrapper::IsActive();
+      FLAG_allow_natives_syntax || Bootstrapper::IsActive();
   PartialPreParser parser(no_script, allow_natives_syntax, extension);
   if (!parser.PreParseProgram(source, stream)) return NULL;
   // Extract the accumulated data from the recorder as a single
@@ -5492,7 +5517,9 @@ ScriptDataImpl* PartialPreParse(Handle<String> source,

 void ScriptDataImpl::Initialize() {
+  // Prepares state for use.
   if (store_.length() >= kHeaderSize) {
+    function_index_ = kHeaderSize;
     int symbol_data_offset = kHeaderSize + store_[kFunctionsSizeOffset];
     if (store_.length() > symbol_data_offset) {
       symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]);

@@ -5537,9 +5564,7 @@ ScriptDataImpl* PreParse(Handle<String> source,
                          v8::Extension* extension) {
   Handle<Script> no_script;
   bool allow_natives_syntax =
-      always_allow_natives_syntax ||
-      FLAG_allow_natives_syntax ||
-      Bootstrapper::IsActive();
+      FLAG_allow_natives_syntax || Bootstrapper::IsActive();
   CompletePreParser parser(no_script, allow_natives_syntax, extension);
   if (!parser.PreParseProgram(source, stream)) return NULL;
   // Extract the accumulated data from the recorder as a single
@@ -5571,15 +5596,15 @@ bool ParseRegExp(FlatStringReader* input,
 }

+// MakeAST is just a wrapper for the corresponding Parser calls so we don't
+// have to expose the entire Parser class in the .h file.
 FunctionLiteral* MakeAST(bool compile_in_global_context,
                          Handle<Script> script,
                          v8::Extension* extension,
                          ScriptDataImpl* pre_data,
                          bool is_json) {
   bool allow_natives_syntax =
-      always_allow_natives_syntax ||
-      FLAG_allow_natives_syntax ||
-      Bootstrapper::IsActive();
+      FLAG_allow_natives_syntax || Bootstrapper::IsActive();
   AstBuildingParser parser(script, allow_natives_syntax, extension, pre_data);
   if (pre_data != NULL && pre_data->has_error()) {
     Scanner::Location loc = pre_data->MessageLocation();
@@ -5605,25 +5630,13 @@ FunctionLiteral* MakeAST(bool compile_in_global_context,
 }

-FunctionLiteral* MakeLazyAST(Handle<Script> script,
-                             Handle<String> name,
-                             int start_position,
-                             int end_position,
-                             bool is_expression) {
-  bool allow_natives_syntax_before = always_allow_natives_syntax;
-  always_allow_natives_syntax = true;
-  AstBuildingParser parser(script, true, NULL, NULL);  // always allow
-  always_allow_natives_syntax = allow_natives_syntax_before;
-  // Parse the function by pointing to the function source in the script source.
-  Handle<String> script_source(String::cast(script->source()));
-  FunctionLiteral* result =
-      parser.ParseLazy(script_source, name,
-                       start_position, end_position, is_expression);
+FunctionLiteral* MakeLazyAST(Handle<SharedFunctionInfo> info) {
+  Handle<Script> script(Script::cast(info->script()));
+  AstBuildingParser parser(script, true, NULL, NULL);
+  FunctionLiteral* result = parser.ParseLazy(info);
   return result;
 }

 #undef NEW

 } }  // namespace v8::internal

67  deps/v8/src/parser.h

@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -72,19 +72,9 @@ class FunctionEntry BASE_EMBEDDED {
     backing_[kPropertyCountOffset] = value;
   }

-  int predata_function_skip() { return backing_[kPredataFunctionSkipOffset]; }
-  void set_predata_function_skip(int value) {
-    backing_[kPredataFunctionSkipOffset] = value;
-  }
-  int predata_symbol_skip() { return backing_[kPredataSymbolSkipOffset]; }
-  void set_predata_symbol_skip(int value) {
-    backing_[kPredataSymbolSkipOffset] = value;
-  }
-
   bool is_valid() { return backing_.length() > 0; }

-  static const int kSize = 6;
+  static const int kSize = 4;

  private:
   Vector<unsigned> backing_;

@@ -92,8 +82,6 @@ class FunctionEntry BASE_EMBEDDED {
   static const int kEndPosOffset = 1;
   static const int kLiteralCountOffset = 2;
   static const int kPropertyCountOffset = 3;
-  static const int kPredataFunctionSkipOffset = 4;
-  static const int kPredataSymbolSkipOffset = 5;
 };
@@ -101,10 +89,7 @@ class ScriptDataImpl : public ScriptData {
  public:
   explicit ScriptDataImpl(Vector<unsigned> store)
       : store_(store),
-        function_index_(kHeaderSize),
-        owns_store_(true) {
-    Initialize();
-  }
+        owns_store_(true) { }

   // Create an empty ScriptDataImpl that is guaranteed to not satisfy
   // a SanityCheck.

@@ -120,7 +105,6 @@ class ScriptDataImpl : public ScriptData {
   FunctionEntry GetFunctionEntry(int start);
   int GetSymbolIdentifier();
-  void SkipFunctionEntry(int start);
   bool SanityCheck();

   Scanner::Location MessageLocation();
@@ -136,28 +120,8 @@ class ScriptDataImpl : public ScriptData {
   unsigned magic() { return store_[kMagicOffset]; }
   unsigned version() { return store_[kVersionOffset]; }

-  // Skip forward in the preparser data by the given number
-  // of unsigned ints of function entries and the given number of bytes of
-  // symbol id encoding.
-  void Skip(int function_entries, int symbol_entries) {
-    ASSERT(function_entries >= 0);
-    ASSERT(function_entries
-           <= (static_cast<int>(store_[kFunctionsSizeOffset])
-               - (function_index_ - kHeaderSize)));
-    ASSERT(symbol_entries >= 0);
-    ASSERT(symbol_entries <= symbol_data_end_ - symbol_data_);
-
-    unsigned max_function_skip = store_[kFunctionsSizeOffset] -
-        static_cast<unsigned>(function_index_ - kHeaderSize);
-    function_index_ +=
-        Min(static_cast<unsigned>(function_entries), max_function_skip);
-    symbol_data_ +=
-        Min(static_cast<unsigned>(symbol_entries),
-            static_cast<unsigned>(symbol_data_end_ - symbol_data_));
-  }
-
   static const unsigned kMagicNumber = 0xBadDead;
-  static const unsigned kCurrentVersion = 3;
+  static const unsigned kCurrentVersion = 4;

   static const int kMagicOffset = 0;
   static const int kVersionOffset = 1;
@@ -189,11 +153,10 @@ class ScriptDataImpl : public ScriptData {
   ScriptDataImpl(const char* backing_store, int length)
       : store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
-               length / sizeof(unsigned)),
-        function_index_(kHeaderSize),
+               length / static_cast<int>(sizeof(unsigned))),
         owns_store_(false) {
-    ASSERT_EQ(0, reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned));
-    Initialize();
+    ASSERT_EQ(0, static_cast<int>(
+        reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned)));
   }

   // Read strings written by ParserRecorder::WriteString.
@@ -229,20 +192,8 @@ bool ParseRegExp(FlatStringReader* input,
                  RegExpCompileData* result);

-// Support for doing lazy compilation. The script is the script containing full
-// source of the script where the function is declared. The start_position and
-// end_position specifies the part of the script source which has the source
-// for the function declaration in the form:
-//
-//    (<formal parameters>) { <function body> }
-//
-// without any function keyword or name.
-//
-FunctionLiteral* MakeLazyAST(Handle<Script> script,
-                             Handle<String> name,
-                             int start_position,
-                             int end_position,
-                             bool is_expression);
+// Support for doing lazy compilation.
+FunctionLiteral* MakeLazyAST(Handle<SharedFunctionInfo> info);

 // Support for handling complex values (array and object literals) that
6  deps/v8/src/profile-generator-inl.h

@@ -46,8 +46,7 @@ const char* StringsStorage::GetFunctionName(const char* name) {

 CodeEntry::CodeEntry(int security_token_id)
-    : call_uid_(0),
-      tag_(Logger::FUNCTION_TAG),
+    : tag_(Logger::FUNCTION_TAG),
       name_prefix_(kEmptyNamePrefix),
       name_(""),
       resource_name_(""),

@@ -62,8 +61,7 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
                      const char* resource_name,
                      int line_number,
                      int security_token_id)
-    : call_uid_(next_call_uid_++),
-      tag_(tag),
+    : tag_(tag),
       name_prefix_(name_prefix),
       name_(name),
       resource_name_(resource_name),

28  deps/v8/src/profile-generator.cc

@@ -121,11 +121,9 @@ const char* StringsStorage::GetName(String* name) {

 const char* CodeEntry::kEmptyNamePrefix = "";
-unsigned CodeEntry::next_call_uid_ = 1;

 void CodeEntry::CopyData(const CodeEntry& source) {
-  call_uid_ = source.call_uid_;
   tag_ = source.tag_;
   name_prefix_ = source.name_prefix_;
   name_ = source.name_;

@@ -134,6 +132,29 @@ void CodeEntry::CopyData(const CodeEntry& source) {
 }

+uint32_t CodeEntry::GetCallUid() const {
+  uint32_t hash = ComputeIntegerHash(tag_);
+  hash ^= ComputeIntegerHash(
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
+  hash ^= ComputeIntegerHash(
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
+  hash ^= ComputeIntegerHash(
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
+  hash ^= ComputeIntegerHash(line_number_);
+  return hash;
+}
+
+bool CodeEntry::IsSameAs(CodeEntry* entry) const {
+  return this == entry
+      || (tag_ == entry->tag_
+          && name_prefix_ == entry->name_prefix_
+          && name_ == entry->name_
+          && resource_name_ == entry->resource_name_
+          && line_number_ == entry->line_number_);
+}
+
 ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
   HashMap::Entry* map_entry =
       children_.Lookup(entry, CodeEntryHash(entry), false);
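
GetCallUid and IsSameAs now form the hash/equality pair for the profiler's hash maps: any two entries that IsSameAs must produce the same GetCallUid. A self-contained sketch of the same XOR-combine scheme (MixHash below is a stand-in for V8's ComputeIntegerHash; the Entry type is hypothetical):

    #include <cstdint>
    #include <cstdio>

    // Stand-in integer mixer; any decent 32-bit integer hash works here.
    static uint32_t MixHash(uint32_t key) {
      key = ~key + (key << 15);
      key = key ^ (key >> 12);
      key = key + (key << 2);
      key = key ^ (key >> 4);
      key = key * 2057;
      key = key ^ (key >> 16);
      return key;
    }

    struct Entry { int tag; const char* name; int line; };

    // XOR-combine the per-field hashes, as GetCallUid does above.
    static uint32_t EntryHash(const Entry& e) {
      uint32_t h = MixHash(static_cast<uint32_t>(e.tag));
      h ^= MixHash(static_cast<uint32_t>(reinterpret_cast<uintptr_t>(e.name)));
      h ^= MixHash(static_cast<uint32_t>(e.line));
      return h;
    }

    // Matching equality: equal fields imply equal hashes -- the contract
    // a hash map requires of its key functions.
    static bool EntrySame(const Entry& a, const Entry& b) {
      return a.tag == b.tag && a.name == b.name && a.line == b.line;
    }

    int main() {
      const char* name = "foo";
      Entry a = {1, name, 10}, b = {1, name, 10};
      std::printf("same=%d hash_equal=%d\n",
                  static_cast<int>(EntrySame(a, b)),
                  static_cast<int>(EntryHash(a) == EntryHash(b)));
      return 0;
    }
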
@@ -424,11 +445,12 @@ void CodeMap::AddAlias(Address start, CodeEntry* entry, Address code_start) {
   CodeTree::Locator locator;
   if (tree_.Find(code_start, &locator)) {
     const CodeEntryInfo& code_info = locator.value();
-    entry->CopyData(*code_info.entry);
-    tree_.Insert(start, &locator);
-    locator.set_value(CodeEntryInfo(entry, code_info.size));
+    if (tree_.Insert(start, &locator)) {
+      entry->CopyData(*code_info.entry);
+      locator.set_value(CodeEntryInfo(entry, code_info.size));
+    }
   }
 }

 CodeEntry* CodeMap::FindEntry(Address addr) {

20  deps/v8/src/profile-generator.h

@@ -100,17 +100,17 @@ class CodeEntry {
   INLINE(const char* name() const) { return name_; }
   INLINE(const char* resource_name() const) { return resource_name_; }
   INLINE(int line_number() const) { return line_number_; }
-  INLINE(unsigned call_uid() const) { return call_uid_; }
   INLINE(int security_token_id() const) { return security_token_id_; }

   INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));

   void CopyData(const CodeEntry& source);
+  uint32_t GetCallUid() const;
+  bool IsSameAs(CodeEntry* entry) const;

   static const char* kEmptyNamePrefix;

  private:
-  unsigned call_uid_;
   Logger::LogEventsAndTags tag_;
   const char* name_prefix_;
   const char* name_;

@@ -118,8 +118,6 @@ class CodeEntry {
   int line_number_;
   int security_token_id_;

-  static unsigned next_call_uid_;
-
   DISALLOW_COPY_AND_ASSIGN(CodeEntry);
 };

@@ -147,11 +145,12 @@ class ProfileNode {
  private:
   INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
-    return entry1 == entry2;
+    return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
+        reinterpret_cast<CodeEntry*>(entry2));
   }

   INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
-    return static_cast<int32_t>(reinterpret_cast<intptr_t>(entry));
+    return entry->GetCallUid();
   }

   ProfileTree* tree_;
@@ -746,7 +745,8 @@ class HeapObjectsMap {
   }

   static uint32_t AddressHash(Address addr) {
-    return static_cast<int32_t>(reinterpret_cast<intptr_t>(addr));
+    return ComputeIntegerHash(
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
   }

   bool initial_fill_mode_;

@@ -889,7 +889,8 @@ class HeapEntriesMap {
   };

   uint32_t Hash(HeapObject* object) {
-    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
+    return ComputeIntegerHash(
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object)));
   }
   static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; }

@@ -996,7 +997,8 @@ class HeapSnapshotJSONSerializer {
   }

   INLINE(static uint32_t ObjectHash(const void* key)) {
-    return static_cast<int32_t>(reinterpret_cast<intptr_t>(key));
+    return ComputeIntegerHash(
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)));
   }

   void EnumerateNodes();

57  deps/v8/src/regexp-macro-assembler-tracer.cc

@@ -47,8 +47,15 @@ RegExpMacroAssemblerTracer::~RegExpMacroAssemblerTracer() {
 }

+// This is used for printing out debugging information. It makes an integer
+// that is closely related to the address of an object.
+static int LabelToInt(Label* label) {
+  return static_cast<int>(reinterpret_cast<intptr_t>(label));
+}
+
 void RegExpMacroAssemblerTracer::Bind(Label* label) {
-  PrintF("label[%08x]: (Bind)\n", label, label);
+  PrintF("label[%08x]: (Bind)\n", LabelToInt(label));
   assembler_->Bind(label);
 }

@@ -60,7 +67,7 @@ void RegExpMacroAssemblerTracer::AdvanceCurrentPosition(int by) {

 void RegExpMacroAssemblerTracer::CheckGreedyLoop(Label* label) {
-  PrintF(" CheckGreedyLoop(label[%08x]);\n\n", label);
+  PrintF(" CheckGreedyLoop(label[%08x]);\n\n", LabelToInt(label));
   assembler_->CheckGreedyLoop(label);
 }

@@ -84,14 +91,13 @@ void RegExpMacroAssemblerTracer::Backtrack() {

 void RegExpMacroAssemblerTracer::GoTo(Label* label) {
-  PrintF(" GoTo(label[%08x]);\n\n", label);
+  PrintF(" GoTo(label[%08x]);\n\n", LabelToInt(label));
   assembler_->GoTo(label);
 }

 void RegExpMacroAssemblerTracer::PushBacktrack(Label* label) {
-  PrintF(" PushBacktrack(label[%08x]);\n",
-         label);
+  PrintF(" PushBacktrack(label[%08x]);\n", LabelToInt(label));
   assembler_->PushBacktrack(label);
 }
@@ -176,7 +182,7 @@ void RegExpMacroAssemblerTracer::LoadCurrentCharacter(int cp_offset,
   const char* check_msg = check_bounds ? "" : " (unchecked)";
   PrintF(" LoadCurrentCharacter(cp_offset=%d, label[%08x]%s (%d chars));\n",
          cp_offset,
-         on_end_of_input,
+         LabelToInt(on_end_of_input),
          check_msg,
          characters);
   assembler_->LoadCurrentCharacter(cp_offset,

@@ -187,39 +193,43 @@ void RegExpMacroAssemblerTracer::LoadCurrentCharacter(int cp_offset,

 void RegExpMacroAssemblerTracer::CheckCharacterLT(uc16 limit, Label* on_less) {
-  PrintF(" CheckCharacterLT(c='u%04x', label[%08x]);\n", limit, on_less);
+  PrintF(" CheckCharacterLT(c='u%04x', label[%08x]);\n",
+         limit, LabelToInt(on_less));
   assembler_->CheckCharacterLT(limit, on_less);
 }

 void RegExpMacroAssemblerTracer::CheckCharacterGT(uc16 limit,
                                                   Label* on_greater) {
-  PrintF(" CheckCharacterGT(c='u%04x', label[%08x]);\n", limit, on_greater);
+  PrintF(" CheckCharacterGT(c='u%04x', label[%08x]);\n",
+         limit, LabelToInt(on_greater));
   assembler_->CheckCharacterGT(limit, on_greater);
 }

 void RegExpMacroAssemblerTracer::CheckCharacter(uint32_t c, Label* on_equal) {
-  PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n", c, on_equal);
+  PrintF(" CheckCharacter(c='u%04x', label[%08x]);\n",
+         c, LabelToInt(on_equal));
   assembler_->CheckCharacter(c, on_equal);
 }

 void RegExpMacroAssemblerTracer::CheckAtStart(Label* on_at_start) {
-  PrintF(" CheckAtStart(label[%08x]);\n", on_at_start);
+  PrintF(" CheckAtStart(label[%08x]);\n", LabelToInt(on_at_start));
   assembler_->CheckAtStart(on_at_start);
 }

 void RegExpMacroAssemblerTracer::CheckNotAtStart(Label* on_not_at_start) {
-  PrintF(" CheckNotAtStart(label[%08x]);\n", on_not_at_start);
+  PrintF(" CheckNotAtStart(label[%08x]);\n", LabelToInt(on_not_at_start));
   assembler_->CheckNotAtStart(on_not_at_start);
 }

 void RegExpMacroAssemblerTracer::CheckNotCharacter(uint32_t c,
                                                    Label* on_not_equal) {
-  PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n", c, on_not_equal);
+  PrintF(" CheckNotCharacter(c='u%04x', label[%08x]);\n",
+         c, LabelToInt(on_not_equal));
   assembler_->CheckNotCharacter(c, on_not_equal);
 }
@@ -231,7 +241,7 @@ void RegExpMacroAssemblerTracer::CheckCharacterAfterAnd(
   PrintF(" CheckCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
          c,
          mask,
-         on_equal);
+         LabelToInt(on_equal));
   assembler_->CheckCharacterAfterAnd(c, mask, on_equal);
 }

@@ -243,7 +253,7 @@ void RegExpMacroAssemblerTracer::CheckNotCharacterAfterAnd(
   PrintF(" CheckNotCharacterAfterAnd(c='u%04x', mask=0x%04x, label[%08x]);\n",
          c,
          mask,
-         on_not_equal);
+         LabelToInt(on_not_equal));
   assembler_->CheckNotCharacterAfterAnd(c, mask, on_not_equal);
 }

@@ -258,7 +268,7 @@ void RegExpMacroAssemblerTracer::CheckNotCharacterAfterMinusAnd(
          c,
          minus,
          mask,
-         on_not_equal);
+         LabelToInt(on_not_equal));
   assembler_->CheckNotCharacterAfterMinusAnd(c, minus, mask, on_not_equal);
 }
@@ -266,7 +276,7 @@ void RegExpMacroAssemblerTracer::CheckNotCharacterAfterMinusAnd(

 void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
                                                        Label* on_no_match) {
   PrintF(" CheckNotBackReference(register=%d, label[%08x]);\n", start_reg,
-         on_no_match);
+         LabelToInt(on_no_match));
   assembler_->CheckNotBackReference(start_reg, on_no_match);
 }

@@ -275,7 +285,7 @@ void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
     int start_reg,
     Label* on_no_match) {
   PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, label[%08x]);\n",
-         start_reg, on_no_match);
+         start_reg, LabelToInt(on_no_match));
   assembler_->CheckNotBackReferenceIgnoreCase(start_reg, on_no_match);
 }

@@ -286,7 +296,7 @@ void RegExpMacroAssemblerTracer::CheckNotRegistersEqual(int reg1,
   PrintF(" CheckNotRegistersEqual(reg1=%d, reg2=%d, label[%08x]);\n",
          reg1,
          reg2,
-         on_not_equal);
+         LabelToInt(on_not_equal));
   assembler_->CheckNotRegistersEqual(reg1, reg2, on_not_equal);
 }
@@ -300,7 +310,8 @@ void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
   for (int i = 0; i < str.length(); i++) {
     PrintF("u%04x", str[i]);
   }
-  PrintF("\", cp_offset=%d, label[%08x])\n", cp_offset, on_failure);
+  PrintF("\", cp_offset=%d, label[%08x])\n",
+         cp_offset, LabelToInt(on_failure));
   assembler_->CheckCharacters(str, cp_offset, on_failure, check_end_of_string);
 }

@@ -312,7 +323,7 @@ bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
                                                             on_no_match);
   PrintF(" CheckSpecialCharacterClass(type='%c', label[%08x]): %s;\n",
          type,
-         on_no_match,
+         LabelToInt(on_no_match),
          supported ? "true" : "false");
   return supported;
 }

@@ -321,7 +332,7 @@ bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
 void RegExpMacroAssemblerTracer::IfRegisterLT(int register_index,
                                               int comparand, Label* if_lt) {
   PrintF(" IfRegisterLT(register=%d, number=%d, label[%08x]);\n",
-         register_index, comparand, if_lt);
+         register_index, comparand, LabelToInt(if_lt));
   assembler_->IfRegisterLT(register_index, comparand, if_lt);
 }

@@ -329,7 +340,7 @@ void RegExpMacroAssemblerTracer::IfRegisterLT(int register_index,
 void RegExpMacroAssemblerTracer::IfRegisterEqPos(int register_index,
                                                  Label* if_eq) {
   PrintF(" IfRegisterEqPos(register=%d, label[%08x]);\n",
-         register_index, if_eq);
+         register_index, LabelToInt(if_eq));
   assembler_->IfRegisterEqPos(register_index, if_eq);
 }

@@ -337,7 +348,7 @@ void RegExpMacroAssemblerTracer::IfRegisterEqPos(int register_index,
 void RegExpMacroAssemblerTracer::IfRegisterGE(int register_index,
                                               int comparand, Label* if_ge) {
   PrintF(" IfRegisterGE(register=%d, number=%d, label[%08x]);\n",
-         register_index, comparand, if_ge);
+         register_index, comparand, LabelToInt(if_ge));
   assembler_->IfRegisterGE(register_index, comparand, if_ge);
 }

8  deps/v8/src/regexp.js

@@ -186,6 +186,10 @@ function RegExpExec(string) {
       %_IsRegExpEquivalent(cache.regExp, this) &&
       %_ObjectEquals(cache.subject, string)) {
     if (cache.answerSaved) {
+      // If this regexp is not global, cache.lastIndex is zero, so we only get
+      // here if this.lastIndex is zero, and resulting this.lastIndex
+      // must be zero too, so no change is necessary.
+      if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
       return %_RegExpCloneResult(cache.answer);
     } else {
       saveAnswer = true;

@@ -282,6 +286,10 @@ function RegExpTest(string) {
       %_IsRegExpEquivalent(cache.regExp, this) &&
       %_ObjectEquals(cache.subject, string) &&
       %_ObjectEquals(cache.lastIndex, lastIndex)) {
+    // If this regexp is not global, cache.lastIndex is zero, so we only get
+    // here if this.lastIndex is zero, and resulting this.lastIndex
+    // must be zero too, so no change is necessary.
+    if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
     return cache.answer;
   }
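The four lines added to each of the two cache hits above close the same hole: when a global regexp's answer is served from the regexp cache, lastIndex must still advance to the end of the match (lastMatchInfo[CAPTURE1]), exactly as if the match had been re-run. A behavioral sketch of what this preserves (values illustrative):

    var re = /ab/g;
    re.exec("xxab");       // matches at index 2
    re.lastIndex;          // 4 -- one past the end of the match
    re.lastIndex = 0;
    re.exec("xxab");       // may now be answered from the cache...
    re.lastIndex;          // ...but must still read 4 afterwards

For a non-global regexp the cache only hits when this.lastIndex is already zero, so no update is needed; that is what the added comment spells out.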

4
deps/v8/src/rewriter.cc

@@ -525,8 +525,8 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
         Variable* rvar = rvar_proxy->AsVariable();
         if (lvar != NULL && rvar != NULL) {
           if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
-            Slot* lslot = lvar->slot();
-            Slot* rslot = rvar->slot();
+            Slot* lslot = lvar->AsSlot();
+            Slot* rslot = rvar->AsSlot();
             if (lslot->type() == rslot->type() &&
                 (lslot->type() == Slot::PARAMETER ||
                  lslot->type() == Slot::LOCAL) &&

293
deps/v8/src/runtime.cc

@@ -638,56 +638,78 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
   Handle<FixedArray> elms = Factory::NewFixedArray(DESCRIPTOR_SIZE);
   Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
   LookupResult result;
-  CONVERT_CHECKED(JSObject, obj, args[0]);
-  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);

   // This could be an element.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    if (!obj->HasLocalElement(index)) {
-      return Heap::undefined_value();
-    }
-
-    // Special handling of string objects according to ECMAScript 5 15.5.5.2.
-    // Note that this might be a string object with elements other than the
-    // actual string value. This is covered by the subsequent cases.
-    if (obj->IsStringObjectWithCharacterAt(index)) {
-      JSValue* js_value = JSValue::cast(obj);
-      String* str = String::cast(js_value->value());
-      elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-      elms->set(VALUE_INDEX, str->SubString(index, index+1));
-      elms->set(WRITABLE_INDEX, Heap::false_value());
-      elms->set(ENUMERABLE_INDEX, Heap::false_value());
-      elms->set(CONFIGURABLE_INDEX, Heap::false_value());
-      return *desc;
-    }
-
-    // This can potentially be an element in the elements dictionary or
-    // a fast element.
-    if (obj->HasDictionaryElements()) {
-      NumberDictionary* dictionary = obj->element_dictionary();
-      int entry = dictionary->FindEntry(index);
-      PropertyDetails details = dictionary->DetailsAt(entry);
-      elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-      elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
-      elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
-      elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
-      elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
-      return *desc;
-    } else {
-      // Elements that are stored as array elements always has:
-      // writable: true, configurable: true, enumerable: true.
-      elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-      elms->set(VALUE_INDEX, obj->GetElement(index));
-      elms->set(WRITABLE_INDEX, Heap::true_value());
-      elms->set(ENUMERABLE_INDEX, Heap::true_value());
-      elms->set(CONFIGURABLE_INDEX, Heap::true_value());
-      return *desc;
+    switch (obj->HasLocalElement(index)) {
+      case JSObject::UNDEFINED_ELEMENT:
+        return Heap::undefined_value();
+
+      case JSObject::STRING_CHARACTER_ELEMENT: {
+        // Special handling of string objects according to ECMAScript 5
+        // 15.5.5.2. Note that this might be a string object with elements
+        // other than the actual string value. This is covered by the
+        // subsequent cases.
+        Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
+        Handle<String> str(String::cast(js_value->value()));
+        Handle<String> substr = SubString(str, index, index+1, NOT_TENURED);
+        elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+        elms->set(VALUE_INDEX, *substr);
+        elms->set(WRITABLE_INDEX, Heap::false_value());
+        elms->set(ENUMERABLE_INDEX, Heap::false_value());
+        elms->set(CONFIGURABLE_INDEX, Heap::false_value());
+        return *desc;
+      }
+
+      case JSObject::INTERCEPTED_ELEMENT:
+      case JSObject::FAST_ELEMENT: {
+        elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+        Handle<Object> element = GetElement(Handle<Object>(obj), index);
+        elms->set(VALUE_INDEX, *element);
+        elms->set(WRITABLE_INDEX, Heap::true_value());
+        elms->set(ENUMERABLE_INDEX, Heap::true_value());
+        elms->set(CONFIGURABLE_INDEX, Heap::true_value());
+        return *desc;
+      }
+
+      case JSObject::DICTIONARY_ELEMENT: {
+        NumberDictionary* dictionary = obj->element_dictionary();
+        int entry = dictionary->FindEntry(index);
+        ASSERT(entry != NumberDictionary::kNotFound);
+        PropertyDetails details = dictionary->DetailsAt(entry);
+        switch (details.type()) {
+          case CALLBACKS: {
+            // This is an accessor property with getter and/or setter.
+            FixedArray* callbacks =
+                FixedArray::cast(dictionary->ValueAt(entry));
+            elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+            elms->set(GETTER_INDEX, callbacks->get(0));
+            elms->set(SETTER_INDEX, callbacks->get(1));
+            break;
+          }
+          case NORMAL:
+            // This is a data property.
+            elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+            elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
+            elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+        elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
+        elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
+        return *desc;
+      }
     }
   }

   // Use recursive implementation to also traverse hidden prototypes
-  GetOwnPropertyImplementation(obj, name, &result);
+  GetOwnPropertyImplementation(*obj, *name, &result);

   if (!result.IsProperty()) {
     return Heap::undefined_value();
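Runtime_GetOwnProperty is the native half of Object.getOwnPropertyDescriptor, and the rewritten element handling above gives each element kind its own, handle-safe descriptor path. A sketch of the observable ES5 behavior each case backs (illustrative, not exhaustive):

    // DICTIONARY_ELEMENT with CALLBACKS: an accessor installed at an array index.
    var a = [];
    Object.defineProperty(a, 0, { get: function() { return 42; } });
    typeof Object.getOwnPropertyDescriptor(a, 0).get;    // "function"

    // FAST_ELEMENT: ordinary elements report all attributes as true.
    Object.getOwnPropertyDescriptor([7], 0);
    // { value: 7, writable: true, enumerable: true, configurable: true }

    // STRING_CHARACTER_ELEMENT: a character of a String wrapper is read-only.
    Object.getOwnPropertyDescriptor(new String("ab"), 0).writable;    // false

The switch to CONVERT_ARG_CHECKED and to Handle<>-based substrings also removes raw Object* values that a GC triggered from SubString or GetElement could have invalidated.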
@@ -698,7 +720,8 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
     // Property that is internally implemented as a callback or
     // an API defined callback.
     Object* value = obj->GetPropertyWithCallback(
-        obj, structure, name, result.holder());
+        *obj, structure, *name, result.holder());
+    if (value->IsFailure()) return value;
     elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
     elms->set(VALUE_INDEX, value);
     elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));

@@ -946,7 +969,7 @@ static Object* Runtime_DeclareContextSlot(Arguments args) {
   Handle<String> name(String::cast(args[1]));
   PropertyAttributes mode =
       static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
-  ASSERT(mode == READ_ONLY || mode == NONE);
+  RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
   Handle<Object> initial_value(args[3]);

   // Declarations are always done in the function context.

@@ -2601,15 +2624,15 @@ int Runtime::StringMatch(Handle<String> sub,
   if (seq_pat->IsAsciiRepresentation()) {
     Vector<const char> pat_vector = seq_pat->ToAsciiVector();
     if (seq_sub->IsAsciiRepresentation()) {
-      return StringSearch(seq_sub->ToAsciiVector(), pat_vector, start_index);
+      return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
     }
-    return StringSearch(seq_sub->ToUC16Vector(), pat_vector, start_index);
+    return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
   }
   Vector<const uc16> pat_vector = seq_pat->ToUC16Vector();
   if (seq_sub->IsAsciiRepresentation()) {
-    return StringSearch(seq_sub->ToAsciiVector(), pat_vector, start_index);
+    return SearchString(seq_sub->ToAsciiVector(), pat_vector, start_index);
   }
-  return StringSearch(seq_sub->ToUC16Vector(), pat_vector, start_index);
+  return SearchString(seq_sub->ToUC16Vector(), pat_vector, start_index);
 }

@@ -2837,7 +2860,8 @@ static Object* Runtime_StringMatch(Arguments args) {
   for (int i = 0; i < matches ; i++) {
     int from = offsets.at(i * 2);
     int to = offsets.at(i * 2 + 1);
-    elements->set(i, *Factory::NewSubString(subject, from, to));
+    Handle<String> match = Factory::NewSubString(subject, from, to);
+    elements->set(i, *match);
   }
   Handle<JSArray> result = Factory::NewJSArrayWithElements(elements);
   result->set_length(Smi::FromInt(matches));
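The hunk above sits in Runtime_StringMatch, which backs String.prototype.match for global regexps; splitting Factory::NewSubString out of the elements->set() call only keeps the allocating call from running after the FixedArray handle has been dereferenced to a raw pointer. The loop builds the usual array of whole-match substrings (a sketch):

    "abcab".match(/ab/g);    // ["ab", "ab"] -- one NewSubString per match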
@@ -2865,22 +2889,17 @@ static void SetLastMatchInfoNoCaptures(Handle<String> subject,
 }

-template <typename schar, typename pchar>
-static bool SearchStringMultiple(Vector<schar> subject,
-                                 String* pattern,
-                                 Vector<pchar> pattern_string,
+template <typename SubjectChar, typename PatternChar>
+static bool SearchStringMultiple(Vector<const SubjectChar> subject,
+                                 Vector<const PatternChar> pattern,
+                                 String* pattern_string,
                                  FixedArrayBuilder* builder,
                                  int* match_pos) {
   int pos = *match_pos;
   int subject_length = subject.length();
-  int pattern_length = pattern_string.length();
+  int pattern_length = pattern.length();
   int max_search_start = subject_length - pattern_length;
-  bool is_ascii = (sizeof(schar) == 1);
-  StringSearchStrategy strategy =
-      InitializeStringSearch(pattern_string, is_ascii);
-  switch (strategy) {
-    case SEARCH_FAIL: break;
-    case SEARCH_SHORT:
+  StringSearch<PatternChar, SubjectChar> search(pattern);
   while (pos <= max_search_start) {
     if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
       *match_pos = pos;

@@ -2888,7 +2907,7 @@ static bool SearchStringMultiple(Vector<schar> subject,
     }
     // Position of end of previous match.
     int match_end = pos + pattern_length;
-    int new_pos = SimpleIndexOf(subject, pattern_string, match_end);
+    int new_pos = search.Search(subject, match_end);
     if (new_pos >= 0) {
       // A match.
       if (new_pos > match_end) {

@@ -2897,35 +2916,12 @@ static bool SearchStringMultiple(Vector<schar> subject,
                                                   new_pos);
       }
       pos = new_pos;
-      builder->Add(pattern);
+      builder->Add(pattern_string);
     } else {
       break;
     }
   }
-      break;
-    case SEARCH_LONG:
-      while (pos <= max_search_start) {
-        if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
-          *match_pos = pos;
-          return false;
-        }
-        int match_end = pos + pattern_length;
-        int new_pos = ComplexIndexOf(subject, pattern_string, match_end);
-        if (new_pos >= 0) {
-          // A match has been found.
-          if (new_pos > match_end) {
-            ReplacementStringBuilder::AddSubjectSlice(builder,
-                                                      match_end,
-                                                      new_pos);
-          }
-          pos = new_pos;
-          builder->Add(pattern);
-        } else {
-          break;
-        }
-      }
-      break;
-  }
   if (pos < max_search_start) {
     ReplacementStringBuilder::AddSubjectSlice(builder,
                                               pos + pattern_length,

@@ -2953,14 +2949,14 @@ static bool SearchStringMultiple(Handle<String> subject,
     Vector<const char> subject_vector = subject->ToAsciiVector();
     if (pattern->IsAsciiRepresentation()) {
       if (SearchStringMultiple(subject_vector,
-                               *pattern,
                                pattern->ToAsciiVector(),
+                               *pattern,
                                builder,
                                &match_pos)) break;
     } else {
       if (SearchStringMultiple(subject_vector,
-                               *pattern,
                                pattern->ToUC16Vector(),
+                               *pattern,
                                builder,
                                &match_pos)) break;
     }

@@ -2968,14 +2964,14 @@ static bool SearchStringMultiple(Handle<String> subject,
     Vector<const uc16> subject_vector = subject->ToUC16Vector();
     if (pattern->IsAsciiRepresentation()) {
       if (SearchStringMultiple(subject_vector,
-                               *pattern,
                                pattern->ToAsciiVector(),
+                               *pattern,
                                builder,
                                &match_pos)) break;
     } else {
       if (SearchStringMultiple(subject_vector,
-                               *pattern,
                                pattern->ToUC16Vector(),
+                               *pattern,
                                builder,
                                &match_pos)) break;
     }

@@ -3105,9 +3101,10 @@ static RegExpImpl::IrregexpResult SearchRegExpMultiple(
         // Arguments array to replace function is match, captures, index and
         // subject, i.e., 3 + capture count in total.
         Handle<FixedArray> elements = Factory::NewFixedArray(3 + capture_count);
-        elements->set(0, *Factory::NewSubString(subject,
-                                                match_start,
-                                                match_end));
+        Handle<String> match = Factory::NewSubString(subject,
+                                                     match_start,
+                                                     match_end);
+        elements->set(0, *match);
         for (int i = 1; i <= capture_count; i++) {
           int start = register_vector[i * 2];
           if (start >= 0) {

@@ -4756,52 +4753,24 @@ static Object* Runtime_StringTrim(Arguments args) {
 }

-// Define storage for buffers declared in header file.
-// TODO(lrn): Remove these when rewriting search code.
-int BMBuffers::bad_char_occurrence[kBMAlphabetSize];
-BMGoodSuffixBuffers BMBuffers::bmgs_buffers;
-
-template <typename schar, typename pchar>
-void FindStringIndices(Vector<const schar> subject,
-                       Vector<const pchar> pattern,
+template <typename SubjectChar, typename PatternChar>
+void FindStringIndices(Vector<const SubjectChar> subject,
+                       Vector<const PatternChar> pattern,
                        ZoneList<int>* indices,
                        unsigned int limit) {
   ASSERT(limit > 0);
   // Collect indices of pattern in subject, and the end-of-string index.
   // Stop after finding at most limit values.
-  StringSearchStrategy strategy =
-      InitializeStringSearch(pattern, sizeof(schar) == 1);
-  switch (strategy) {
-    case SEARCH_FAIL: return;
-    case SEARCH_SHORT: {
-      int pattern_length = pattern.length();
-      int index = 0;
-      while (limit > 0) {
-        index = SimpleIndexOf(subject, pattern, index);
-        if (index < 0) return;
-        indices->Add(index);
-        index += pattern_length;
-        limit--;
-      }
-      return;
-    }
-    case SEARCH_LONG: {
-      int pattern_length = pattern.length();
-      int index = 0;
-      while (limit > 0) {
-        index = ComplexIndexOf(subject, pattern, index);
-        if (index < 0) return;
-        indices->Add(index);
-        index += pattern_length;
-        limit--;
-      }
-      return;
-    }
-    default:
-      UNREACHABLE();
-      return;
-  }
+  StringSearch<PatternChar, SubjectChar> search(pattern);
+  int pattern_length = pattern.length();
+  int index = 0;
+  while (limit > 0) {
+    index = search.Search(subject, index);
+    if (index < 0) return;
+    indices->Add(index);
+    index += pattern_length;
+    limit--;
+  }
 }
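With the shared Boyer-Moore buffers deleted, FindStringIndices collapses into one loop over the templated StringSearch<PatternChar, SubjectChar> class, which now picks its search strategy per pattern internally. Its limit argument is the same cap a JavaScript caller sees as the optional limit of String.prototype.split with a string separator (that routing is an inference from this file, not shown in the hunk):

    "a,b,c,d".split(",");       // ["a", "b", "c", "d"] -- every separator index
    "a,b,c,d".split(",", 2);    // ["a", "b"] -- the index search stops early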
@@ -4953,12 +4922,14 @@ static Object* Runtime_StringToArray(Arguments args) {
                                                      length);
     for (int i = num_copied_from_cache; i < length; ++i) {
-      elements->set(i, *LookupSingleCharacterStringFromCode(chars[i]));
+      Handle<Object> str = LookupSingleCharacterStringFromCode(chars[i]);
+      elements->set(i, *str);
     }
   } else {
     elements = Factory::NewFixedArray(length);
     for (int i = 0; i < length; ++i) {
-      elements->set(i, *LookupSingleCharacterStringFromCode(s->Get(i)));
+      Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
+      elements->set(i, *str);
     }
   }

@@ -6279,7 +6250,7 @@ static Object* Runtime_NewObjectFromBound(Arguments args) {
 }

-static Code* ComputeConstructStub(Handle<JSFunction> function) {
+static void TrySettingInlineConstructStub(Handle<JSFunction> function) {
   Handle<Object> prototype = Factory::null_value();
   if (function->has_instance_prototype()) {
     prototype = Handle<Object>(function->instance_prototype());

@@ -6287,13 +6258,10 @@ static Code* ComputeConstructStub(Handle<JSFunction> function) {
   if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
     ConstructStubCompiler compiler;
     Object* code = compiler.CompileConstructStub(function->shared());
-    if (code->IsFailure()) {
-      return Builtins::builtin(Builtins::JSConstructStubGeneric);
+    if (!code->IsFailure()) {
+      function->shared()->set_construct_stub(Code::cast(code));
     }
-    return Code::cast(code);
   }
-
-  return function->shared()->construct_stub();
 }

@@ -6350,12 +6318,20 @@ static Object* Runtime_NewObject(Arguments args) {
   Handle<SharedFunctionInfo> shared(function->shared());
   EnsureCompiled(shared, CLEAR_EXCEPTION);

-  bool first_allocation = !function->has_initial_map();
+  if (!function->has_initial_map() &&
+      shared->IsInobjectSlackTrackingInProgress()) {
+    // The tracking is already in progress for another function. We can only
+    // track one initial_map at a time, so we force the completion before the
+    // function is called as a constructor for the first time.
+    shared->CompleteInobjectSlackTracking();
+    TrySettingInlineConstructStub(function);
+  }
+
+  bool first_allocation = !shared->live_objects_may_exist();
   Handle<JSObject> result = Factory::NewJSObject(function);
-  if (first_allocation) {
-    Handle<Code> stub = Handle<Code>(
-        ComputeConstructStub(Handle<JSFunction>(function)));
-    shared->set_construct_stub(*stub);
+  // Delay setting the stub if inobject slack tracking is in progress.
+  if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
+    TrySettingInlineConstructStub(function);
   }

   Counters::constructed_objects.Increment();

@@ -6365,6 +6341,18 @@ static Object* Runtime_NewObject(Arguments args) {
 }

+static Object* Runtime_FinalizeInstanceSize(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  function->shared()->CompleteInobjectSlackTracking();
+  TrySettingInlineConstructStub(function);
+
+  return Heap::undefined_value();
+}
+
 static Object* Runtime_LazyCompile(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 1);

@@ -6386,7 +6374,7 @@ static Object* Runtime_LazyCompile(Arguments args) {
   // this means that things called through constructors are never known to
   // be in loops. We compile them as if they are in loops here just in case.
   ASSERT(!function->is_compiled());
-  if (!CompileLazyInLoop(function, Handle<Object>::null(), KEEP_EXCEPTION)) {
+  if (!CompileLazyInLoop(function, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
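Taken together, the Runtime_NewObject rework and the new Runtime_FinalizeInstanceSize are the runtime side of in-object slack tracking: a constructor's instances start out with generously sized in-object property space, the heap tracks how much of it real objects use, and CompleteInobjectSlackTracking() later shrinks the instance size before TrySettingInlineConstructStub() installs the specialized stub. A conceptual sketch (slot counts and timing are illustrative, not the real heuristics):

    function Point(x, y) {
      this.x = x;    // only two in-object slots are ever used...
      this.y = y;
    }
    new Point(1, 2);    // ...early instances still reserve extra slack
    // Once tracking completes, %FinalizeInstanceSize(Point) trims the unused
    // slack from Point's initial map and the inline construct stub is set.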
@@ -6757,7 +6745,7 @@ static void PrintObject(Object* obj) {
   } else if (obj->IsFalse()) {
     PrintF("<false>");
   } else {
-    PrintF("%p", obj);
+    PrintF("%p", reinterpret_cast<void*>(obj));
   }
 }

@@ -7209,15 +7197,15 @@ static uint32_t IterateExternalArrayElements(Handle<JSObject> receiver,
           Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
           visitor->visit(j, e);
         } else {
-          Handle<Object> e(
-              Heap::AllocateHeapNumber(static_cast<ElementType>(val)));
+          Handle<Object> e =
+              Factory::NewNumber(static_cast<ElementType>(val));
           visitor->visit(j, e);
         }
       }
     }
   } else {
     for (uint32_t j = 0; j < len; j++) {
-      Handle<Object> e(Heap::AllocateHeapNumber(array->get(j)));
+      Handle<Object> e = Factory::NewNumber(array->get(j));
       visitor->visit(j, e);
     }
   }

@@ -7498,14 +7486,18 @@ static Object* Runtime_ArrayConcat(Arguments args) {
     // The backing storage array must have non-existing elements to
     // preserve holes across concat operations.
     storage = Factory::NewFixedArrayWithHoles(result_length);
-    result->set_map(*Factory::GetFastElementsMap(Handle<Map>(result->map())));
+    Handle<Map> fast_map =
+        Factory::GetFastElementsMap(Handle<Map>(result->map()));
+    result->set_map(*fast_map);
   } else {
     // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
     uint32_t at_least_space_for = estimate_nof_elements +
                                   (estimate_nof_elements >> 2);
     storage = Handle<FixedArray>::cast(
         Factory::NewNumberDictionary(at_least_space_for));
-    result->set_map(*Factory::GetSlowElementsMap(Handle<Map>(result->map())));
+    Handle<Map> slow_map =
+        Factory::GetSlowElementsMap(Handle<Map>(result->map()));
+    result->set_map(*slow_map);
   }

   Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
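NewFixedArrayWithHoles matters here because Array.prototype.concat must keep missing elements missing rather than materializing them as undefined; the surrounding change only hoists each new Handle<Map> into a named local so the allocating call finishes before result->set_map() consumes a raw pointer. The holes are observable from JavaScript:

    var a = [1, , 3].concat([4]);
    a.length;    // 4
    1 in a;      // false -- the hole survives the concat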
@@ -7826,7 +7818,8 @@ static Object* Runtime_DebugGetPropertyDetails(Arguments args) {
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
     Handle<FixedArray> details = Factory::NewFixedArray(2);
-    details->set(0, Runtime::GetElementOrCharAt(obj, index));
+    Object* element_or_char = Runtime::GetElementOrCharAt(obj, index);
+    details->set(0, element_or_char);
     details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
     return *Factory::NewJSArrayWithElements(details);
   }

@@ -8628,7 +8621,8 @@ static Object* Runtime_GetScopeDetails(Arguments args) {
   // Fill in scope details.
   details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
-  details->set(kScopeDetailsObjectIndex, *it.ScopeObject());
+  Handle<JSObject> scope_object = it.ScopeObject();
+  details->set(kScopeDetailsObjectIndex, *scope_object);

   return *Factory::NewJSArrayWithElements(details);
 }

@@ -8673,10 +8667,10 @@ static Object* Runtime_GetCFrames(Arguments args) {
   Handle<FixedArray> frames_array = Factory::NewFixedArray(frames_count);
   for (int i = 0; i < frames_count; i++) {
     Handle<JSObject> frame_value = Factory::NewJSObject(Top::object_function());
-    frame_value->SetProperty(
-        *address_str,
-        *Factory::NewNumberFromInt(reinterpret_cast<int>(frames[i].address)),
-        NONE);
+    Handle<Object> frame_address =
+        Factory::NewNumberFromInt(reinterpret_cast<int>(frames[i].address));
+
+    frame_value->SetProperty(*address_str, *frame_address, NONE);

     // Get the stack walk text for this frame.
     Handle<String> frame_text;

@@ -8944,24 +8938,39 @@ static Object* Runtime_ClearBreakPoint(Arguments args) {
 }

-// Change the state of break on exceptions
-// args[0]: boolean indicating uncaught exceptions
-// args[1]: boolean indicating on/off
+// Change the state of break on exceptions.
+// args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
+// args[1]: Boolean indicating on/off.
 static Object* Runtime_ChangeBreakOnException(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 2);
-  ASSERT(args[0]->IsNumber());
-  ASSERT(args[1]->IsBoolean());
+  RUNTIME_ASSERT(args[0]->IsNumber());
+  CONVERT_BOOLEAN_CHECKED(enable, args[1]);

-  // Update break point state
+  // If the number doesn't match an enum value, the ChangeBreakOnException
+  // function will default to affecting caught exceptions.
   ExceptionBreakType type =
       static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
-  bool enable = args[1]->ToBoolean()->IsTrue();
+  // Update break point state.
   Debug::ChangeBreakOnException(type, enable);
   return Heap::undefined_value();
 }

+// Returns the state of break on exceptions
+// args[0]: boolean indicating uncaught exceptions
+static Object* Runtime_IsBreakOnException(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  RUNTIME_ASSERT(args[0]->IsNumber());
+
+  ExceptionBreakType type =
+      static_cast<ExceptionBreakType>(NumberToUint32(args[0]));
+  bool result = Debug::IsBreakOnException(type);
+  return Smi::FromInt(result);
+}
+
 // Prepare for stepping
 // args[0]: break id for checking execution state
 // args[1]: step action from the enumeration StepAction
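Runtime_IsBreakOnException lets the debugger read back the per-type break-on-exception flags that Runtime_ChangeBreakOnException sets; note it returns a Smi, not a Boolean. A direct sketch against the two runtime functions (requires --allow-natives-syntax; mapping 0/1 to the caught/uncaught members of ExceptionBreakType is an assumption about the enum ordering in debug.h):

    %ChangeBreakOnException(1, true);    // uncaught-exception breaks: on
    %IsBreakOnException(1);              // 1
    %IsBreakOnException(0);              // 0 -- caught-exception breaks unchanged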
@@ -9023,8 +9032,8 @@ static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
   // Recursively copy the with contexts.
   Handle<Context> previous(context_chain->previous());
   Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
-  return Factory::NewWithContext(
-      CopyWithContextChain(function_context, previous),
+  Handle<Context> context = CopyWithContextChain(function_context, previous);
+  return Factory::NewWithContext(context,
                                  extension,
                                  context_chain->IsCatchContext());
 }

2
deps/v8/src/runtime.h

@@ -263,6 +263,7 @@ namespace internal {
   F(NewClosure, 2, 1) \
   F(NewObject, 1, 1) \
   F(NewObjectFromBound, 2, 1) \
+  F(FinalizeInstanceSize, 1, 1) \
   F(Throw, 1, 1) \
   F(ReThrow, 1, 1) \
   F(ThrowReferenceError, 1, 1) \

@@ -332,6 +333,7 @@ namespace internal {
   F(SetScriptBreakPoint, 3, 1) \
   F(ClearBreakPoint, 1, 1) \
   F(ChangeBreakOnException, 2, 1) \
+  F(IsBreakOnException, 1, 1) \
   F(PrepareStep, 3, 1) \
   F(ClearStepping, 0, 1) \
   F(DebugEvaluate, 4, 1) \

9
deps/v8/src/scanner.cc

@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

@@ -342,8 +342,11 @@ void Scanner::LiteralScope::Complete() {
 // ----------------------------------------------------------------------------
 // Scanner

-Scanner::Scanner(ParserMode pre)
-    : is_pre_parsing_(pre == PREPARSE), stack_overflow_(false) { }
+Scanner::Scanner()
+    : has_line_terminator_before_next_(false),
+      is_parsing_json_(false),
+      source_(NULL),
+      stack_overflow_(false) {}

 void Scanner::Initialize(Handle<String> source,

6
deps/v8/src/scanner.h

@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

@@ -281,8 +281,7 @@ class Scanner {
     bool complete_;
   };

-  // Construction
-  explicit Scanner(ParserMode parse_mode);
+  Scanner();

   // Initialize the Scanner to scan source.
   void Initialize(Handle<String> source,

@@ -488,7 +487,6 @@ class Scanner {
   TokenDesc current_;  // desc for current token (as returned by Next())
   TokenDesc next_;     // desc for next token (one token look-ahead)
   bool has_line_terminator_before_next_;
-  bool is_pre_parsing_;
   bool is_parsing_json_;

   // Different UTF16 buffers used to pull characters from. Based on input one of

16
deps/v8/src/scopeinfo.cc

@@ -37,8 +37,8 @@ namespace internal {

 static int CompareLocal(Variable* const* v, Variable* const* w) {
-  Slot* s = (*v)->slot();
-  Slot* t = (*w)->slot();
+  Slot* s = (*v)->AsSlot();
+  Slot* t = (*w)->AsSlot();
   // We may have rewritten parameters (that are in the arguments object)
   // and which may have a NULL slot... - find a better solution...
   int x = (s != NULL ? s->index() : 0);

@@ -83,7 +83,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
   for (int i = 0; i < locals.length(); i++) {
     Variable* var = locals[i];
     if (var->is_used()) {
-      Slot* slot = var->slot();
+      Slot* slot = var->AsSlot();
       if (slot != NULL) {
         switch (slot->type()) {
           case Slot::PARAMETER:

@@ -112,9 +112,9 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
   if (scope->num_heap_slots() > 0) {
     // Add user-defined slots.
     for (int i = 0; i < heap_locals.length(); i++) {
-      ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
              context_slots_.length());
-      ASSERT(heap_locals[i]->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(heap_locals[i]->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
             context_modes_.length());
       context_slots_.Add(heap_locals[i]->name());
       context_modes_.Add(heap_locals[i]->mode());

@@ -131,15 +131,15 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
     Variable* var = scope->function();
     if (var != NULL &&
         var->is_used() &&
-        var->slot()->type() == Slot::CONTEXT) {
+        var->AsSlot()->type() == Slot::CONTEXT) {
       function_name_ = var->name();
       // Note that we must not find the function name in the context slot
       // list - instead it must be handled separately in the
       // Contexts::Lookup() function. Thus record an empty symbol here so we
       // get the correct number of context slots.
-      ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
              context_slots_.length());
-      ASSERT(var->slot()->index() - Context::MIN_CONTEXT_SLOTS ==
+      ASSERT(var->AsSlot()->index() - Context::MIN_CONTEXT_SLOTS ==
             context_modes_.length());
       context_slots_.Add(Factory::empty_symbol());
       context_modes_.Add(Variable::INTERNAL);

32
deps/v8/src/scopes.cc

@@ -201,7 +201,6 @@ void Scope::Initialize(bool inside_with) {
 }

-
 Variable* Scope::LocalLookup(Handle<String> name) {
   return variables_.Lookup(name);
 }

@@ -810,8 +809,7 @@ void Scope::AllocateParameterLocals() {
     // We are using 'arguments'. Tell the code generator that is needs to
     // allocate the arguments object by setting 'arguments_'.
-    arguments_ = new VariableProxy(Factory::arguments_symbol(), false, false);
-    arguments_->BindTo(arguments);
+    arguments_ = arguments;

     // We also need the '.arguments' shadow variable. Declare it and create
     // and bind the corresponding proxy. It's ok to declare it only now

@@ -822,13 +820,13 @@ void Scope::AllocateParameterLocals() {
     // NewTemporary() because the mode needs to be INTERNAL since this
     // variable may be allocated in the heap-allocated context (temporaries
     // are never allocated in the context).
-    Variable* arguments_shadow =
-        new Variable(this, Factory::arguments_shadow_symbol(),
-                     Variable::INTERNAL, true, Variable::ARGUMENTS);
-    arguments_shadow_ =
-        new VariableProxy(Factory::arguments_shadow_symbol(), false, false);
-    arguments_shadow_->BindTo(arguments_shadow);
-    temps_.Add(arguments_shadow);
+    arguments_shadow_ = new Variable(this,
+                                     Factory::arguments_shadow_symbol(),
+                                     Variable::INTERNAL,
+                                     true,
+                                     Variable::ARGUMENTS);
+    arguments_shadow_->set_is_used(true);
+    temps_.Add(arguments_shadow_);

     // Allocate the parameters by rewriting them into '.arguments[i]' accesses.
     for (int i = 0; i < params_.length(); i++) {

@@ -839,14 +837,13 @@ void Scope::AllocateParameterLocals() {
           // It is ok to set this only now, because arguments is a local
           // variable that is allocated after the parameters have been
           // allocated.
-          arguments_shadow->is_accessed_from_inner_scope_ = true;
+          arguments_shadow_->is_accessed_from_inner_scope_ = true;
         }
         var->rewrite_ =
-            new Property(arguments_shadow_,
+            new Property(new VariableProxy(arguments_shadow_),
                          new Literal(Handle<Object>(Smi::FromInt(i))),
                          RelocInfo::kNoPosition,
                          Property::SYNTHETIC);
-        if (var->is_used()) arguments_shadow->set_is_used(true);
       }
     }

@@ -862,7 +859,8 @@ void Scope::AllocateParameterLocals() {
       if (MustAllocate(var)) {
        if (MustAllocateInContext(var)) {
          ASSERT(var->rewrite_ == NULL ||
-                 (var->slot() != NULL && var->slot()->type() == Slot::CONTEXT));
+                 (var->AsSlot() != NULL &&
+                  var->AsSlot()->type() == Slot::CONTEXT));
           if (var->rewrite_ == NULL) {
             // Only set the heap allocation if the parameter has not
             // been allocated yet.

@@ -870,8 +868,8 @@ void Scope::AllocateParameterLocals() {
           }
         } else {
           ASSERT(var->rewrite_ == NULL ||
-                 (var->slot() != NULL &&
-                  var->slot()->type() == Slot::PARAMETER));
+                 (var->AsSlot() != NULL &&
+                  var->AsSlot()->type() == Slot::PARAMETER));
           // Set the parameter index always, even if the parameter
           // was seen before! (We need to access the actual parameter
           // supplied for the last occurrence of a multiply declared

@@ -888,7 +886,7 @@ void Scope::AllocateNonParameterLocal(Variable* var) {
   ASSERT(var->scope() == this);
   ASSERT(var->rewrite_ == NULL ||
          (!var->IsVariable(Factory::result_symbol())) ||
-         (var->slot() == NULL || var->slot()->type() != Slot::LOCAL));
+         (var->AsSlot() == NULL || var->AsSlot()->type() != Slot::LOCAL));
   if (var->rewrite_ == NULL && MustAllocate(var)) {
     if (MustAllocateInContext(var)) {
       AllocateHeapSlot(var);
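The Property(new VariableProxy(arguments_shadow_), ...) rewrite above is what turns every parameter of a function that mentions arguments into a '.arguments[i]' access; that shared backing store is exactly what keeps sloppy-mode parameter aliasing observable:

    function f(x) {
      arguments[0] = 42;    // writes through the shared arguments object...
      return x;             // ...and the parameter sees the new value
    }
    f(1);    // 42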

58
deps/v8/src/scopes.h

@@ -34,7 +34,6 @@
 namespace v8 {
 namespace internal {

-
 // A hash map to support fast variable declaration and lookup.
 class VariableMap: public HashMap {
  public:

@@ -100,8 +99,12 @@ class Scope: public ZoneObject {
   // The scope name is only used for printing/debugging.
   void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }

-  void Initialize(bool inside_with);
+  virtual void Initialize(bool inside_with);
+
+  // Called just before leaving a scope.
+  virtual void Leave() {
+    // No cleanup or fixup necessary.
+  }

   // ---------------------------------------------------------------------------
   // Declarations

@@ -233,11 +236,11 @@ class Scope: public ZoneObject {

   // The local variable 'arguments' if we need to allocate it; NULL otherwise.
   // If arguments() exist, arguments_shadow() exists, too.
-  VariableProxy* arguments() const { return arguments_; }
+  Variable* arguments() const { return arguments_; }

   // The '.arguments' shadow variable if we need to allocate it; NULL otherwise.
   // If arguments_shadow() exist, arguments() exists, too.
-  VariableProxy* arguments_shadow() const { return arguments_shadow_; }
+  Variable* arguments_shadow() const { return arguments_shadow_; }

   // Declarations list.
   ZoneList<Declaration*>* declarations() { return &decls_; }

@@ -272,7 +275,7 @@ class Scope: public ZoneObject {
   bool AllowsLazyCompilation() const;

   // True if the outer context of this scope is always the global context.
-  bool HasTrivialOuterContext() const;
+  virtual bool HasTrivialOuterContext() const;

   // The number of contexts between this and scope; zero if this == scope.
   int ContextChainLength(Scope* scope);

@@ -322,9 +325,9 @@ class Scope: public ZoneObject {
   // Function variable, if any; function scopes only.
   Variable* function_;
   // Convenience variable; function scopes only.
-  VariableProxy* arguments_;
+  Variable* arguments_;
   // Convenience variable; function scopes only.
-  VariableProxy* arguments_shadow_;
+  Variable* arguments_shadow_;

   // Illegal redeclaration.
   Expression* illegal_redecl_;

@@ -378,20 +381,53 @@ class Scope: public ZoneObject {
 };

+// Scope used during pre-parsing.
 class DummyScope : public Scope {
  public:
-  DummyScope() : Scope(GLOBAL_SCOPE) {
+  DummyScope()
+      : Scope(GLOBAL_SCOPE),
+        nesting_level_(1),  // Allows us to Leave the initial scope.
+        inside_with_level_(kNotInsideWith) {
     outer_scope_ = this;
+    scope_inside_with_ = false;
   }

-  virtual Variable* Lookup(Handle<String> name) { return NULL; }
-  virtual Variable* Declare(Handle<String> name, Variable::Mode mode) {
-    return NULL;
+  virtual void Initialize(bool inside_with) {
+    nesting_level_++;
+    if (inside_with && inside_with_level_ == kNotInsideWith) {
+      inside_with_level_ = nesting_level_;
+    }
+    ASSERT(inside_with_level_ <= nesting_level_);
   }

+  virtual void Leave() {
+    nesting_level_--;
+    ASSERT(nesting_level_ >= 0);
+    if (nesting_level_ < inside_with_level_) {
+      inside_with_level_ = kNotInsideWith;
+    }
+    ASSERT(inside_with_level_ <= nesting_level_);
+  }
+
+  virtual Variable* Lookup(Handle<String> name) { return NULL; }
+
   virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with) {
     return NULL;
   }

   virtual VariableProxy* NewTemporary(Handle<String> name) { return NULL; }

+  virtual bool HasTrivialOuterContext() const {
+    return (nesting_level_ == 0 || inside_with_level_ <= 0);
+  }
+
+ private:
+  static const int kNotInsideWith = -1;
+  // Number of surrounding scopes of the current scope.
+  int nesting_level_;
+  // Nesting level of outermost scope that is contained in a with statement,
+  // or kNotInsideWith if there are no with's around the current scope.
+  int inside_with_level_;
 };
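DummyScope is the preparser's stand-in scope, and the new nesting/with bookkeeping exists so that HasTrivialOuterContext() stays accurate while only a dummy scope chain is maintained: any function nested anywhere inside a with statement cannot resolve free variables statically. A behavioral sketch:

    function outer(o) {
      with (o) {
        // inner's outer context is not trivial: `x` may resolve to a
        // property of `o`, so its lookups must stay dynamic.
        return function inner() { return x; };
      }
    }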

44
deps/v8/src/spaces.cc

@@ -270,9 +270,9 @@ void CodeRange::TearDown() {
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
-int MemoryAllocator::capacity_ = 0;
-int MemoryAllocator::size_ = 0;
-int MemoryAllocator::size_executable_ = 0;
+intptr_t MemoryAllocator::capacity_ = 0;
+intptr_t MemoryAllocator::size_ = 0;
+intptr_t MemoryAllocator::size_executable_ = 0;

 List<MemoryAllocator::MemoryAllocationCallbackRegistration>
   MemoryAllocator::memory_allocation_callbacks_;

@@ -302,7 +302,7 @@ int MemoryAllocator::Pop() {
 }

-bool MemoryAllocator::Setup(int capacity) {
+bool MemoryAllocator::Setup(intptr_t capacity) {
   capacity_ = RoundUp(capacity, Page::kPageSize);

   // Over-estimate the size of chunks_ array. It assumes the expansion of old

@@ -314,7 +314,8 @@ bool MemoryAllocator::Setup(int capacity) {
   //
   // Reserve two chunk ids for semispaces, one for map space, one for old
   // space, and one for code space.
-  max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
+  max_nof_chunks_ =
+      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
   if (max_nof_chunks_ > kMaxNofChunks) return false;

   size_ = 0;

@@ -691,7 +692,9 @@ Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
   float pct = static_cast<float>(capacity_ - size_) / capacity_;
-  PrintF("  capacity: %d, used: %d, available: %%%d\n\n",
+  PrintF("  capacity: %" V8_PTR_PREFIX "d"
+             ", used: %" V8_PTR_PREFIX "d"
+             ", available: %%%d\n\n",
          capacity_, size_, static_cast<int>(pct*100));
 }
 #endif

@@ -769,7 +772,7 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
 // -----------------------------------------------------------------------------
 // PagedSpace implementation

-PagedSpace::PagedSpace(int max_capacity,
+PagedSpace::PagedSpace(intptr_t max_capacity,
                        AllocationSpace id,
                        Executability executable)
     : Space(id, executable) {

@@ -797,8 +800,9 @@ bool PagedSpace::Setup(Address start, size_t size) {
                                        Page::kPageSize * pages_in_chunk,
                                        this, &num_pages);
   } else {
-    int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
-                              max_capacity_ / Page::kObjectAreaSize);
+    int requested_pages =
+        Min(MemoryAllocator::kPagesPerChunk,
+            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
     first_page_ =
         MemoryAllocator::AllocatePages(requested_pages, &num_pages, this);
     if (!first_page_->is_valid()) return false;

@@ -984,7 +988,8 @@ bool PagedSpace::Expand(Page* last_page) {
   // Last page must be valid and its next page is invalid.
   ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());

-  int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
+  int available_pages =
+      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
   if (available_pages <= 0) return false;

   int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);

@@ -1264,7 +1269,7 @@ void NewSpace::Grow() {

 void NewSpace::Shrink() {
-  int new_capacity = Max(InitialCapacity(), 2 * Size());
+  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
   int rounded_new_capacity =
       RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
   if (rounded_new_capacity < Capacity() &&

@@ -1643,7 +1648,8 @@ void NewSpace::ReportStatistics() {
 #ifdef DEBUG
   if (FLAG_heap_stats) {
     float pct = static_cast<float>(Available()) / Capacity();
-    PrintF("  capacity: %d, available: %d, %%%d\n",
+    PrintF("  capacity: %" V8_PTR_PREFIX "d"
+               ", available: %" V8_PTR_PREFIX "d, %%%d\n",
            Capacity(), Available(), static_cast<int>(pct*100));
     PrintF("\n  Object Histogram:\n");
     for (int i = 0; i <= LAST_TYPE; i++) {

@@ -2401,8 +2407,10 @@ void PagedSpace::CollectCodeStatistics() {

 void OldSpace::ReportStatistics() {
-  int pct = Available() * 100 / Capacity();
-  PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
+  int pct = static_cast<int>(Available() * 100 / Capacity());
+  PrintF("  capacity: %" V8_PTR_PREFIX "d"
+             ", waste: %" V8_PTR_PREFIX "d"
+             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);

   ClearHistograms();

@@ -2558,8 +2566,10 @@ void FixedSpace::DeallocateBlock(Address start,
 #ifdef DEBUG
 void FixedSpace::ReportStatistics() {
-  int pct = Available() * 100 / Capacity();
-  PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
+  int pct = static_cast<int>(Available() * 100 / Capacity());
+  PrintF("  capacity: %" V8_PTR_PREFIX "d"
+             ", waste: %" V8_PTR_PREFIX "d"
+             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);

   ClearHistograms();

@@ -3011,7 +3021,7 @@ void LargeObjectSpace::Print() {

 void LargeObjectSpace::ReportStatistics() {
-  PrintF("  size: %d\n", size_);
+  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
   int num_objects = 0;
   ClearHistograms();
   LargeObjectIterator it(this);

102
deps/v8/src/spaces.h

@@ -243,8 +243,10 @@ class Page {
   static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
     kIntSize + kPointerSize;

-  // The start offset of the object area in a page.
-  static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);
+  // The start offset of the object area in a page. Aligned to both maps and
+  // code alignment to be suitable for both.
+  static const int kObjectStartOffset =
+      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));

   // Object area size in bytes.
   static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

@@ -369,7 +371,7 @@ class Space : public Malloced {
   // Identity used in error reporting.
   AllocationSpace identity() { return id_; }

-  virtual int Size() = 0;
+  virtual intptr_t Size() = 0;

 #ifdef ENABLE_HEAP_PROTECTION
   // Protect/unprotect the space by marking it read-only/writable.

@@ -489,7 +491,7 @@ class MemoryAllocator : public AllStatic {
  public:
   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space.
-  static bool Setup(int max_capacity);
+  static bool Setup(intptr_t max_capacity);

   // Deletes valid chunks.
   static void TearDown();

@@ -580,16 +582,18 @@ class MemoryAllocator : public AllStatic {
                                           MemoryAllocationCallback callback);

   // Returns the maximum available bytes of heaps.
-  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+  static intptr_t Available() {
+    return capacity_ < size_ ? 0 : capacity_ - size_;
+  }

   // Returns allocated spaces in bytes.
-  static int Size() { return size_; }
+  static intptr_t Size() { return size_; }

   // Returns allocated executable spaces in bytes.
-  static int SizeExecutable() { return size_executable_; }
+  static intptr_t SizeExecutable() { return size_executable_; }

   // Returns maximum available bytes that the old space can have.
-  static int MaxAvailable() {
+  static intptr_t MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
   }

@@ -647,12 +651,12 @@ class MemoryAllocator : public AllStatic {
  private:
   // Maximum space size in bytes.
-  static int capacity_;
+  static intptr_t capacity_;
   // Allocated space size in bytes.
-  static int size_;
+  static intptr_t size_;
   // Allocated executable space size in bytes.
-  static int size_executable_;
+  static intptr_t size_executable_;

   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,

@@ -925,10 +929,10 @@ class AllocationStats BASE_EMBEDDED {
   }

   // Accessors for the allocation statistics.
-  int Capacity() { return capacity_; }
-  int Available() { return available_; }
-  int Size() { return size_; }
-  int Waste() { return waste_; }
+  intptr_t Capacity() { return capacity_; }
+  intptr_t Available() { return available_; }
+  intptr_t Size() { return size_; }
+  intptr_t Waste() { return waste_; }

   // Grow the space by adding available bytes.
   void ExpandSpace(int size_in_bytes) {

@@ -943,13 +947,13 @@ class AllocationStats BASE_EMBEDDED {
   }

   // Allocate from available bytes (available -> size).
-  void AllocateBytes(int size_in_bytes) {
+  void AllocateBytes(intptr_t size_in_bytes) {
     available_ -= size_in_bytes;
     size_ += size_in_bytes;
   }

   // Free allocated bytes, making them available (size -> available).
-  void DeallocateBytes(int size_in_bytes) {
+  void DeallocateBytes(intptr_t size_in_bytes) {
     size_ -= size_in_bytes;
     available_ += size_in_bytes;
   }

@@ -962,23 +966,25 @@ class AllocationStats BASE_EMBEDDED {
   // Consider the wasted bytes to be allocated, as they contain filler
   // objects (waste -> size).
-  void FillWastedBytes(int size_in_bytes) {
+  void FillWastedBytes(intptr_t size_in_bytes) {
     waste_ -= size_in_bytes;
     size_ += size_in_bytes;
   }

  private:
-  int capacity_;
-  int available_;
-  int size_;
-  int waste_;
+  intptr_t capacity_;
+  intptr_t available_;
+  intptr_t size_;
+  intptr_t waste_;
 };

 class PagedSpace : public Space {
  public:
   // Creates a space with a maximum capacity, and an id.
-  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
+  PagedSpace(intptr_t max_capacity,
+             AllocationSpace id,
+             Executability executable);

   virtual ~PagedSpace() {}

@@ -1029,21 +1035,21 @@ class PagedSpace : public Space {
   }

   // Current capacity without growing (Size() + Available() + Waste()).
-  int Capacity() { return accounting_stats_.Capacity(); }
+  intptr_t Capacity() { return accounting_stats_.Capacity(); }

   // Total amount of memory committed for this space. For paged
   // spaces this equals the capacity.
-  int CommittedMemory() { return Capacity(); }
+  intptr_t CommittedMemory() { return Capacity(); }

   // Available bytes without growing.
-  int Available() { return accounting_stats_.Available(); }
+  intptr_t Available() { return accounting_stats_.Available(); }

   // Allocated bytes in this space.
-  virtual int Size() { return accounting_stats_.Size(); }
+  virtual intptr_t Size() { return accounting_stats_.Size(); }

   // Wasted bytes due to fragmentation and not recoverable until the
   // next GC of this space.
-  int Waste() { return accounting_stats_.Waste(); }
+  intptr_t Waste() { return accounting_stats_.Waste(); }

   // Returns the address of the first object in this space.
   Address bottom() { return first_page_->ObjectAreaStart(); }

@@ -1135,7 +1141,7 @@ class PagedSpace : public Space {
  protected:
   // Maximum capacity of this space.
-  int max_capacity_;
+  intptr_t max_capacity_;

   // Accounting information for this space.
   AllocationStats accounting_stats_;

@@ -1326,7 +1332,7 @@ class SemiSpace : public Space {
   // If we don't have these here then SemiSpace will be abstract. However
   // they should never be called.
-  virtual int Size() {
+  virtual intptr_t Size() {
     UNREACHABLE();
     return 0;
   }

@@ -1469,22 +1475,26 @@ class NewSpace : public Space {
   }

   // Return the allocated bytes in the active semispace.
-  virtual int Size() { return static_cast<int>(top() - bottom()); }
+  virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
+
+  // The same, but returning an int. We have to have the one that returns
+  // intptr_t because it is inherited, but if we know we are dealing with the
+  // new space, which can't get as big as the other spaces then this is useful:
+  int SizeAsInt() { return static_cast<int>(Size()); }

   // Return the current capacity of a semispace.
-  int Capacity() {
+  intptr_t Capacity() {
     ASSERT(to_space_.Capacity() == from_space_.Capacity());
     return to_space_.Capacity();
   }

   // Return the total amount of memory committed for new space.
-  int CommittedMemory() {
+  intptr_t CommittedMemory() {
     if (from_space_.is_committed()) return 2 * Capacity();
     return Capacity();
   }

   // Return the available bytes without growing in the active semispace.
-  int Available() { return Capacity() - Size(); }
+  intptr_t Available() { return Capacity() - Size(); }

   // Return the maximum capacity of a semispace.
int MaximumCapacity() { int MaximumCapacity() {
@ -1679,7 +1689,7 @@ class OldSpaceFreeList BASE_EMBEDDED {
void Reset(); void Reset();
// Return the number of bytes available on the free list. // Return the number of bytes available on the free list.
int available() { return available_; } intptr_t available() { return available_; }
// Place a node on the free list. The block of size 'size_in_bytes' // Place a node on the free list. The block of size 'size_in_bytes'
// starting at 'start' is placed on the free list. The return value is the // starting at 'start' is placed on the free list. The return value is the
@ -1781,7 +1791,7 @@ class FixedSizeFreeList BASE_EMBEDDED {
void Reset(); void Reset();
// Return the number of bytes available on the free list. // Return the number of bytes available on the free list.
int available() { return available_; } intptr_t available() { return available_; }
// Place a node on the free list. The block starting at 'start' (assumed to // Place a node on the free list. The block starting at 'start' (assumed to
// have size object_size_) is placed on the free list. Bookkeeping // have size object_size_) is placed on the free list. Bookkeeping
@ -1795,7 +1805,7 @@ class FixedSizeFreeList BASE_EMBEDDED {
private: private:
// Available bytes on the free list. // Available bytes on the free list.
int available_; intptr_t available_;
// The head of the free list. // The head of the free list.
Address head_; Address head_;
@ -1821,7 +1831,7 @@ class OldSpace : public PagedSpace {
public: public:
// Creates an old space object with a given maximum capacity. // Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS. // The constructor does not allocate pages from OS.
explicit OldSpace(int max_capacity, explicit OldSpace(intptr_t max_capacity,
AllocationSpace id, AllocationSpace id,
Executability executable) Executability executable)
: PagedSpace(max_capacity, id, executable), free_list_(id) { : PagedSpace(max_capacity, id, executable), free_list_(id) {
@ -1830,7 +1840,7 @@ class OldSpace : public PagedSpace {
// The bytes available on the free list (ie, not above the linear allocation // The bytes available on the free list (ie, not above the linear allocation
// pointer). // pointer).
int AvailableFree() { return free_list_.available(); } intptr_t AvailableFree() { return free_list_.available(); }
// The limit of allocation for a page in this space. // The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) { virtual Address PageAllocationLimit(Page* page) {
@ -1891,7 +1901,7 @@ class OldSpace : public PagedSpace {
class FixedSpace : public PagedSpace { class FixedSpace : public PagedSpace {
public: public:
FixedSpace(int max_capacity, FixedSpace(intptr_t max_capacity,
AllocationSpace id, AllocationSpace id,
int object_size_in_bytes, int object_size_in_bytes,
const char* name) const char* name)
@ -1966,7 +1976,7 @@ class FixedSpace : public PagedSpace {
class MapSpace : public FixedSpace { class MapSpace : public FixedSpace {
public: public:
// Creates a map space object with a maximum capacity. // Creates a map space object with a maximum capacity.
MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id) MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id)
: FixedSpace(max_capacity, id, Map::kSize, "map"), : FixedSpace(max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(max_map_space_pages) { max_map_space_pages_(max_map_space_pages) {
ASSERT(max_map_space_pages < kMaxMapPageIndex); ASSERT(max_map_space_pages < kMaxMapPageIndex);
@ -2071,7 +2081,7 @@ class MapSpace : public FixedSpace {
class CellSpace : public FixedSpace { class CellSpace : public FixedSpace {
public: public:
// Creates a property cell space object with a maximum capacity. // Creates a property cell space object with a maximum capacity.
CellSpace(int max_capacity, AllocationSpace id) CellSpace(intptr_t max_capacity, AllocationSpace id)
: FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {} : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
protected: protected:
@ -2127,7 +2137,7 @@ class LargeObjectChunk {
// Given a chunk size, returns the object size it can accommodate. Used by // Given a chunk size, returns the object size it can accommodate. Used by
// LargeObjectSpace::Available. // LargeObjectSpace::Available.
static int ObjectSizeFor(int chunk_size) { static intptr_t ObjectSizeFor(intptr_t chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset; return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
} }
@ -2163,11 +2173,11 @@ class LargeObjectSpace : public Space {
Object* AllocateRawFixedArray(int size_in_bytes); Object* AllocateRawFixedArray(int size_in_bytes);
// Available bytes for objects in this space. // Available bytes for objects in this space.
int Available() { intptr_t Available() {
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available()); return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
} }
virtual int Size() { virtual intptr_t Size() {
return size_; return size_;
} }
@ -2221,7 +2231,7 @@ class LargeObjectSpace : public Space {
private: private:
// The head of the linked list of large object chunks. // The head of the linked list of large object chunks.
LargeObjectChunk* first_chunk_; LargeObjectChunk* first_chunk_;
int size_; // allocated bytes intptr_t size_; // allocated bytes
int page_count_; // number of chunks int page_count_; // number of chunks
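The int-to-intptr_t widening above is an overflow fix: on 64-bit targets the heap can legitimately hold more bytes than a 32-bit int can count. A minimal stand-alone illustration of the limit being crossed (ours, not part of the commit):

#include <climits>
#include <cstdint>
#include <cstdio>

int main() {
  const long long three_gb = 3LL * 1024 * 1024 * 1024;
  std::printf("INT_MAX          = %d\n", INT_MAX);     // 2147483647
  std::printf("3 GB             = %lld\n", three_gb);  // 3221225472, > INT_MAX
  // intptr_t is 8 bytes on x64 builds, so byte counters stop wrapping there;
  // 32-bit builds keep the old 2 GB ceiling since intptr_t is 4 bytes.
  std::printf("sizeof(intptr_t) = %zu\n", sizeof(intptr_t));
  return 0;
}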

40
deps/v8/src/string-search.cc

@@ -0,0 +1,40 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "string-search.h"
namespace v8 {
namespace internal {
// Storage for constants used by string-search.
int StringSearchBase::kBadCharShiftTable[kUC16AlphabetSize];
int StringSearchBase::kGoodSuffixShiftTable[kBMMaxShift + 1];
int StringSearchBase::kSuffixTable[kBMMaxShift + 1];
}} // namespace v8::internal
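This new .cc file exists only to give the StringSearchBase tables a single definition that every StringSearch<PatternChar, SubjectChar> instantiation shares. A minimal sketch of the same pattern, with hypothetical names:

// header (hypothetical names): a non-template base owns the scratch tables,
// so Search<char>, Search<uc16>, ... all share one copy per process.
struct SearchBase {
 protected:
  static int shared_table[256];
};

template <typename Char>
struct Search : SearchBase {
  // each instantiation reuses SearchBase::shared_table as scratch space
};

// exactly one .cc file provides the storage, as string-search.cc does above:
int SearchBase::shared_table[256];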

699
deps/v8/src/string-search.h

@@ -32,11 +32,20 @@ namespace v8 {
 namespace internal {

+//---------------------------------------------------------------------
+// String Search object.
+//---------------------------------------------------------------------
+
+// Class holding constants and methods that apply to all string search variants,
+// independently of subject and pattern char size.
+class StringSearchBase {
+ protected:
 // Cap on the maximal shift in the Boyer-Moore implementation. By setting a
 // limit, we can fix the size of tables. For a needle longer than this limit,
-// search will not be optimal, since we only build tables for a smaller suffix
-// of the string, which is a safe approximation.
+// search will not be optimal, since we only build tables for a suffix
+// of the string, but it is a safe approximation.
 static const int kBMMaxShift = 250;

 // Reduce alphabet to this size.
 // One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
 // proportional to the input alphabet. We reduce the alphabet size by

@@ -44,266 +53,463 @@ static const int kBMMaxShift = 250;
 // a potentially less efficient searching, but is a safe approximation.
 // For needles using only characters in the same Unicode 256-code point page,
 // there is no search speed degradation.
-static const int kBMAlphabetSize = 256;
+static const int kAsciiAlphabetSize = 128;
+static const int kUC16AlphabetSize = 256;

+// Bad-char shift table stored in the state. It's length is the alphabet size.
 // For patterns below this length, the skip length of Boyer-Moore is too short
 // to compensate for the algorithmic overhead compared to simple brute force.
 static const int kBMMinPatternLength = 7;
-// Holds the two buffers used by Boyer-Moore string search's Good Suffix
-// shift. Only allows the last kBMMaxShift characters of the needle
-// to be indexed.
-class BMGoodSuffixBuffers {
- public:
-  BMGoodSuffixBuffers() {}
-  inline void Initialize(int needle_length) {
-    ASSERT(needle_length > 1);
-    int start = needle_length < kBMMaxShift ? 0 : needle_length - kBMMaxShift;
-    int len = needle_length - start;
-    biased_suffixes_ = suffixes_ - start;
-    biased_good_suffix_shift_ = good_suffix_shift_ - start;
-    for (int i = 0; i <= len; i++) {
-      good_suffix_shift_[i] = len;
-    }
-  }
-  inline int& suffix(int index) {
-    ASSERT(biased_suffixes_ + index >= suffixes_);
-    return biased_suffixes_[index];
-  }
-  inline int& shift(int index) {
-    ASSERT(biased_good_suffix_shift_ + index >= good_suffix_shift_);
-    return biased_good_suffix_shift_[index];
-  }
- private:
-  int suffixes_[kBMMaxShift + 1];
-  int good_suffix_shift_[kBMMaxShift + 1];
-  int* biased_suffixes_;
-  int* biased_good_suffix_shift_;
-  DISALLOW_COPY_AND_ASSIGN(BMGoodSuffixBuffers);
-};
+static inline bool IsAsciiString(Vector<const char>) {
+  return true;
+}
+
+static inline bool IsAsciiString(Vector<const uc16> string) {
+  for (int i = 0, n = string.length(); i < n; i++) {
+    if (static_cast<unsigned>(string[i]) > String::kMaxAsciiCharCodeU) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// The following tables are shared by all searches.
+// TODO(lrn): Introduce a way for a pattern to keep its tables
+// between searches (e.g., for an Atom RegExp).
+
+// Store for the BoyerMoore(Horspool) bad char shift table.
+static int kBadCharShiftTable[kUC16AlphabetSize];
+// Store for the BoyerMoore good suffix shift table.
+static int kGoodSuffixShiftTable[kBMMaxShift + 1];
+// Table used temporarily while building the BoyerMoore good suffix
+// shift table.
+static int kSuffixTable[kBMMaxShift + 1];
+};
-// buffers reused by BoyerMoore
-struct BMBuffers {
- public:
-  static int bad_char_occurrence[kBMAlphabetSize];
-  static BMGoodSuffixBuffers bmgs_buffers;
-};
-
-// State of the string match tables.
-// SIMPLE: No usable content in the buffers.
-// BOYER_MOORE_HORSPOOL: The bad_char_occurence table has been populated.
-// BOYER_MOORE: The bmgs_buffers tables have also been populated.
-// Whenever starting with a new needle, one should call InitializeStringSearch
-// to determine which search strategy to use, and in the case of a long-needle
-// strategy, the call also initializes the algorithm to SIMPLE.
-enum StringSearchAlgorithm { SIMPLE_SEARCH, BOYER_MOORE_HORSPOOL, BOYER_MOORE };
-static StringSearchAlgorithm algorithm;
+template <typename PatternChar, typename SubjectChar>
+class StringSearch : private StringSearchBase {
+ public:
+  explicit StringSearch(Vector<const PatternChar> pattern)
+      : pattern_(pattern),
+        start_(Max(0, pattern.length() - kBMMaxShift)) {
+    if (sizeof(PatternChar) > sizeof(SubjectChar)) {
+      if (!IsAsciiString(pattern_)) {
+        strategy_ = &FailSearch;
+        return;
+      }
+    }
+    int pattern_length = pattern_.length();
+    if (pattern_length < kBMMinPatternLength) {
+      if (pattern_length == 1) {
+        strategy_ = &SingleCharSearch;
+        return;
+      }
+      strategy_ = &LinearSearch;
+      return;
+    }
+    strategy_ = &InitialSearch;
+  }
+
+  int Search(Vector<const SubjectChar> subject, int index) {
+    return strategy_(this, subject, index);
+  }
+
+  static inline int AlphabetSize() {
+    if (sizeof(PatternChar) == 1) {
+      // ASCII needle.
+      return kAsciiAlphabetSize;
+    } else {
+      ASSERT(sizeof(PatternChar) == 2);
+      // UC16 needle.
+      return kUC16AlphabetSize;
+    }
+  }
+
+ private:
+  typedef int (*SearchFunction)(  // NOLINT - it's not a cast!
+      StringSearch<PatternChar, SubjectChar>*,
+      Vector<const SubjectChar>,
+      int);
+
+  static int FailSearch(StringSearch<PatternChar, SubjectChar>*,
+                        Vector<const SubjectChar>,
+                        int) {
+    return -1;
+  }
+
+  static int SingleCharSearch(StringSearch<PatternChar, SubjectChar>* search,
+                              Vector<const SubjectChar> subject,
+                              int start_index);
+
+  static int LinearSearch(StringSearch<PatternChar, SubjectChar>* search,
+                          Vector<const SubjectChar> subject,
+                          int start_index);
+
+  static int InitialSearch(StringSearch<PatternChar, SubjectChar>* search,
+                           Vector<const SubjectChar> subject,
+                           int start_index);
+
+  static int BoyerMooreHorspoolSearch(
+      StringSearch<PatternChar, SubjectChar>* search,
+      Vector<const SubjectChar> subject,
+      int start_index);
+
+  static int BoyerMooreSearch(StringSearch<PatternChar, SubjectChar>* search,
+                              Vector<const SubjectChar> subject,
+                              int start_index);
+
+  void PopulateBoyerMooreHorspoolTable();
+
+  void PopulateBoyerMooreTable();
+
+  static inline int CharOccurrence(int* bad_char_occurrence,
+                                   SubjectChar char_code) {
+    if (sizeof(SubjectChar) == 1) {
+      return bad_char_occurrence[static_cast<int>(char_code)];
+    }
+    if (sizeof(PatternChar) == 1) {
+      if (static_cast<unsigned int>(char_code) > String::kMaxAsciiCharCodeU) {
+        return -1;
+      }
+      return bad_char_occurrence[static_cast<unsigned int>(char_code)];
+    }
+    // Both pattern and subject are UC16. Reduce character to equivalence class.
+    int equiv_class = char_code % kUC16AlphabetSize;
+    return bad_char_occurrence[equiv_class];
+  }
+
+  // Return a table covering the last kBMMaxShift+1 positions of
+  // pattern.
+  int* bad_char_table() {
+    return kBadCharShiftTable;
+  }
+
+  int* good_suffix_shift_table() {
+    // Return biased pointer that maps the range [start_..pattern_.length()
+    // to the kGoodSuffixShiftTable array.
+    return kGoodSuffixShiftTable - start_;
+  }
+
+  int* suffix_table() {
+    // Return biased pointer that maps the range [start_..pattern_.length()
+    // to the kSuffixTable array.
+    return kSuffixTable - start_;
+  }
+
+  // The pattern to search for.
+  Vector<const PatternChar> pattern_;
+  // Pointer to implementation of the search.
+  SearchFunction strategy_;
+  // Cache value of Max(0, pattern_length() - kBMMaxShift)
+  int start_;
+};
+
+
+//---------------------------------------------------------------------
+// Single Character Pattern Search Strategy
+//---------------------------------------------------------------------
-// Compute the bad-char table for Boyer-Moore in the static buffer.
-template <typename PatternChar>
-static void BoyerMoorePopulateBadCharTable(Vector<const PatternChar> pattern) {
-  // Only preprocess at most kBMMaxShift last characters of pattern.
-  int start = Max(pattern.length() - kBMMaxShift, 0);
-  // Run forwards to populate bad_char_table, so that *last* instance
-  // of character equivalence class is the one registered.
-  // Notice: Doesn't include the last character.
-  int table_size = (sizeof(PatternChar) == 1) ? String::kMaxAsciiCharCode + 1
-                                              : kBMAlphabetSize;
-  if (start == 0) {  // All patterns less than kBMMaxShift in length.
-    memset(BMBuffers::bad_char_occurrence,
-           -1,
-           table_size * sizeof(*BMBuffers::bad_char_occurrence));
-  } else {
-    for (int i = 0; i < table_size; i++) {
-      BMBuffers::bad_char_occurrence[i] = start - 1;
-    }
-  }
-  for (int i = start; i < pattern.length() - 1; i++) {
-    PatternChar c = pattern[i];
-    int bucket = (sizeof(PatternChar) == 1) ? c : c % kBMAlphabetSize;
-    BMBuffers::bad_char_occurrence[bucket] = i;
-  }
-}
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
+    StringSearch<PatternChar, SubjectChar>* search,
+    Vector<const SubjectChar> subject,
+    int index) {
+  ASSERT_EQ(1, search->pattern_.length());
+  PatternChar pattern_first_char = search->pattern_[0];
+  int i = index;
+  if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
+    const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
+        memchr(subject.start() + i,
+               pattern_first_char,
+               subject.length() - i));
+    if (pos == NULL) return -1;
+    return static_cast<int>(pos - subject.start());
+  } else {
+    if (sizeof(PatternChar) > sizeof(SubjectChar)) {
+      if (static_cast<uc16>(pattern_first_char) > String::kMaxAsciiCharCodeU) {
+        return -1;
+      }
+    }
+    SubjectChar search_char = static_cast<SubjectChar>(pattern_first_char);
+    int n = subject.length();
+    while (i < n) {
+      if (subject[i++] == search_char) return i - 1;
+    }
+    return -1;
+  }
+}
+
+//---------------------------------------------------------------------
+// Linear Search Strategy
+//---------------------------------------------------------------------
+
+template <typename PatternChar, typename SubjectChar>
+static inline bool CharCompare(const PatternChar* pattern,
+                               const SubjectChar* subject,
+                               int length) {
+  ASSERT(length > 0);
+  int pos = 0;
+  do {
+    if (pattern[pos] != subject[pos]) {
+      return false;
+    }
+    pos++;
+  } while (pos < length);
+  return true;
+}
+
+// Simple linear search for short patterns. Never bails out.
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::LinearSearch(
+    StringSearch<PatternChar, SubjectChar>* search,
+    Vector<const SubjectChar> subject,
+    int index) {
+  Vector<const PatternChar> pattern = search->pattern_;
+  ASSERT(pattern.length() > 1);
+  int pattern_length = pattern.length();
+  PatternChar pattern_first_char = pattern[0];
+  int i = index;
+  int n = subject.length() - pattern_length;
+  while (i <= n) {
+    if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
+      const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
+          memchr(subject.start() + i,
+                 pattern_first_char,
+                 n - i + 1));
+      if (pos == NULL) return -1;
+      i = static_cast<int>(pos - subject.start()) + 1;
+    } else {
+      if (subject[i++] != pattern_first_char) continue;
+    }
+    // Loop extracted to separate function to allow using return to do
+    // a deeper break.
+    if (CharCompare(pattern.start() + 1,
+                    subject.start() + i,
+                    pattern_length - 1)) {
+      return i - 1;
+    }
+  }
+  return -1;
+}
-template <typename PatternChar>
-static void BoyerMoorePopulateGoodSuffixTable(
-    Vector<const PatternChar> pattern) {
-  int m = pattern.length();
-  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
-  int len = m - start;
-  // Compute Good Suffix tables.
-  BMBuffers::bmgs_buffers.Initialize(m);
-
-  BMBuffers::bmgs_buffers.shift(m-1) = 1;
-  BMBuffers::bmgs_buffers.suffix(m) = m + 1;
-  PatternChar last_char = pattern[m - 1];
-  int suffix = m + 1;
-  {
-    int i = m;
-    while (i > start) {
-      PatternChar c = pattern[i - 1];
-      while (suffix <= m && c != pattern[suffix - 1]) {
-        if (BMBuffers::bmgs_buffers.shift(suffix) == len) {
-          BMBuffers::bmgs_buffers.shift(suffix) = suffix - i;
-        }
-        suffix = BMBuffers::bmgs_buffers.suffix(suffix);
-      }
-      BMBuffers::bmgs_buffers.suffix(--i) = --suffix;
-      if (suffix == m) {
-        // No suffix to extend, so we check against last_char only.
-        while ((i > start) && (pattern[i - 1] != last_char)) {
-          if (BMBuffers::bmgs_buffers.shift(m) == len) {
-            BMBuffers::bmgs_buffers.shift(m) = m - i;
-          }
-          BMBuffers::bmgs_buffers.suffix(--i) = m;
-        }
-        if (i > start) {
-          BMBuffers::bmgs_buffers.suffix(--i) = --suffix;
-        }
-      }
-    }
-  }
-  if (suffix < m) {
-    for (int i = start; i <= m; i++) {
-      if (BMBuffers::bmgs_buffers.shift(i) == len) {
-        BMBuffers::bmgs_buffers.shift(i) = suffix - start;
-      }
-      if (i == suffix) {
-        suffix = BMBuffers::bmgs_buffers.suffix(suffix);
-      }
-    }
-  }
-}
+//---------------------------------------------------------------------
+// Boyer-Moore string search
+//---------------------------------------------------------------------
+
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::BoyerMooreSearch(
+    StringSearch<PatternChar, SubjectChar>* search,
+    Vector<const SubjectChar> subject,
+    int start_index) {
+  Vector<const PatternChar> pattern = search->pattern_;
+  int subject_length = subject.length();
+  int pattern_length = pattern.length();
+  // Only preprocess at most kBMMaxShift last characters of pattern.
+  int start = search->start_;
+
+  int* bad_char_occurence = search->bad_char_table();
+  int* good_suffix_shift = search->good_suffix_shift_table();
+
+  PatternChar last_char = pattern[pattern_length - 1];
+  int index = start_index;
+  // Continue search from i.
+  while (index <= subject_length - pattern_length) {
+    int j = pattern_length - 1;
+    int c;
+    while (last_char != (c = subject[index + j])) {
+      int shift =
+          j - CharOccurrence(bad_char_occurence, c);
+      index += shift;
+      if (index > subject_length - pattern_length) {
+        return -1;
+      }
+    }
+    while (j >= 0 && pattern[j] == (c = subject[index + j])) j--;
+    if (j < 0) {
+      return index;
+    } else if (j < start) {
+      // we have matched more than our tables allow us to be smart about.
+      // Fall back on BMH shift.
+      index += pattern_length - 1
+          - CharOccurrence(bad_char_occurence,
+                           static_cast<SubjectChar>(last_char));
+    } else {
+      int gs_shift = good_suffix_shift[j + 1];
+      int bc_occ =
+          CharOccurrence(bad_char_occurence, c);
+      int shift = j - bc_occ;
+      if (gs_shift > shift) {
+        shift = gs_shift;
+      }
+      index += shift;
+    }
+  }
+
+  return -1;
+}
+
+template <typename PatternChar, typename SubjectChar>
+void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreTable() {
+  int pattern_length = pattern_.length();
+  const PatternChar* pattern = pattern_.start();
+  // Only look at the last kBMMaxShift characters of pattern (from start_
+  // to pattern_length).
+  int start = start_;
+  int length = pattern_length - start;
+
+  // Biased tables so that we can use pattern indices as table indices,
+  // even if we only cover the part of the pattern from offset start.
+  int* shift_table = good_suffix_shift_table();
+  int* suffix_table = this->suffix_table();
+
+  // Initialize table.
+  for (int i = start; i < pattern_length; i++) {
+    shift_table[i] = length;
+  }
+  shift_table[pattern_length] = 1;
+  suffix_table[pattern_length] = pattern_length + 1;
+
+  // Find suffixes.
+  PatternChar last_char = pattern[pattern_length - 1];
+  int suffix = pattern_length + 1;
+  {
+    int i = pattern_length;
+    while (i > start) {
+      PatternChar c = pattern[i - 1];
+      while (suffix <= pattern_length && c != pattern[suffix - 1]) {
+        if (shift_table[suffix] == length) {
+          shift_table[suffix] = suffix - i;
+        }
+        suffix = suffix_table[suffix];
+      }
+      suffix_table[--i] = --suffix;
+      if (suffix == pattern_length) {
+        // No suffix to extend, so we check against last_char only.
+        while ((i > start) && (pattern[i - 1] != last_char)) {
+          if (shift_table[pattern_length] == length) {
+            shift_table[pattern_length] = pattern_length - i;
+          }
+          suffix_table[--i] = pattern_length;
+        }
+        if (i > start) {
+          suffix_table[--i] = --suffix;
+        }
+      }
+    }
+  }
+  // Build shift table using suffixes.
+  if (suffix < pattern_length) {
+    for (int i = start; i <= pattern_length; i++) {
+      if (shift_table[i] == length) {
+        shift_table[i] = suffix - start;
+      }
+      if (i == suffix) {
+        suffix = suffix_table[suffix];
+      }
+    }
+  }
+}
-template <typename SubjectChar, typename PatternChar>
-static inline int CharOccurrence(int char_code) {
-  if (sizeof(SubjectChar) == 1) {
-    return BMBuffers::bad_char_occurrence[char_code];
-  }
-  if (sizeof(PatternChar) == 1) {
-    if (char_code > String::kMaxAsciiCharCode) {
-      return -1;
-    }
-    return BMBuffers::bad_char_occurrence[char_code];
-  }
-  return BMBuffers::bad_char_occurrence[char_code % kBMAlphabetSize];
-}
-
-// Restricted simplified Boyer-Moore string matching.
-// Uses only the bad-shift table of Boyer-Moore and only uses it
-// for the character compared to the last character of the needle.
-template <typename SubjectChar, typename PatternChar>
-static int BoyerMooreHorspool(Vector<const SubjectChar> subject,
-                              Vector<const PatternChar> pattern,
-                              int start_index,
-                              bool* complete) {
-  ASSERT(algorithm <= BOYER_MOORE_HORSPOOL);
-  int n = subject.length();
-  int m = pattern.length();
-
-  int badness = -m;
-
-  // How bad we are doing without a good-suffix table.
-  int idx;  // No matches found prior to this index.
-  PatternChar last_char = pattern[m - 1];
-  int last_char_shift =
-      m - 1 - CharOccurrence<SubjectChar, PatternChar>(last_char);
-  // Perform search
-  for (idx = start_index; idx <= n - m;) {
-    int j = m - 1;
-    int c;
-    while (last_char != (c = subject[idx + j])) {
-      int bc_occ = CharOccurrence<SubjectChar, PatternChar>(c);
-      int shift = j - bc_occ;
-      idx += shift;
-      badness += 1 - shift;  // at most zero, so badness cannot increase.
-      if (idx > n - m) {
-        *complete = true;
-        return -1;
-      }
-    }
-    j--;
-    while (j >= 0 && pattern[j] == (subject[idx + j])) j--;
-    if (j < 0) {
-      *complete = true;
-      return idx;
-    } else {
-      idx += last_char_shift;
-      // Badness increases by the number of characters we have
-      // checked, and decreases by the number of characters we
-      // can skip by shifting. It's a measure of how we are doing
-      // compared to reading each character exactly once.
-      badness += (m - j) - last_char_shift;
-      if (badness > 0) {
-        *complete = false;
-        return idx;
-      }
-    }
-  }
-  *complete = true;
-  return -1;
-}
+//---------------------------------------------------------------------
+// Boyer-Moore-Horspool string search.
+//---------------------------------------------------------------------
+
+template <typename PatternChar, typename SubjectChar>
+int StringSearch<PatternChar, SubjectChar>::BoyerMooreHorspoolSearch(
+    StringSearch<PatternChar, SubjectChar>* search,
+    Vector<const SubjectChar> subject,
+    int start_index) {
+  Vector<const PatternChar> pattern = search->pattern_;
+  int subject_length = subject.length();
+  int pattern_length = pattern.length();
+  int* char_occurrences = search->bad_char_table();
+  int badness = -pattern_length;
+
+  // How bad we are doing without a good-suffix table.
+  PatternChar last_char = pattern[pattern_length - 1];
+  int last_char_shift = pattern_length - 1 -
+      CharOccurrence(char_occurrences, static_cast<SubjectChar>(last_char));
+  // Perform search
+  int index = start_index;  // No matches found prior to this index.
+  while (index <= subject_length - pattern_length) {
+    int j = pattern_length - 1;
+    int subject_char;
+    while (last_char != (subject_char = subject[index + j])) {
+      int bc_occ = CharOccurrence(char_occurrences, subject_char);
+      int shift = j - bc_occ;
+      index += shift;
+      badness += 1 - shift;  // at most zero, so badness cannot increase.
+      if (index > subject_length - pattern_length) {
+        return -1;
+      }
+    }
+    j--;
+    while (j >= 0 && pattern[j] == (subject[index + j])) j--;
+    if (j < 0) {
+      return index;
+    } else {
+      index += last_char_shift;
+      // Badness increases by the number of characters we have
+      // checked, and decreases by the number of characters we
+      // can skip by shifting. It's a measure of how we are doing
+      // compared to reading each character exactly once.
+      badness += (pattern_length - j) - last_char_shift;
+      if (badness > 0) {
+        search->PopulateBoyerMooreTable();
+        search->strategy_ = &BoyerMooreSearch;
+        return BoyerMooreSearch(search, subject, index);
+      }
+    }
+  }
+  return -1;
+}
-template <typename SubjectChar, typename PatternChar>
-static int BoyerMooreIndexOf(Vector<const SubjectChar> subject,
-                             Vector<const PatternChar> pattern,
-                             int idx) {
-  ASSERT(algorithm <= BOYER_MOORE);
-  int n = subject.length();
-  int m = pattern.length();
-  // Only preprocess at most kBMMaxShift last characters of pattern.
-  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
-
-  PatternChar last_char = pattern[m - 1];
-  // Continue search from i.
-  while (idx <= n - m) {
-    int j = m - 1;
-    SubjectChar c;
-    while (last_char != (c = subject[idx + j])) {
-      int shift = j - CharOccurrence<SubjectChar, PatternChar>(c);
-      idx += shift;
-      if (idx > n - m) {
-        return -1;
-      }
-    }
-    while (j >= 0 && pattern[j] == (c = subject[idx + j])) j--;
-    if (j < 0) {
-      return idx;
-    } else if (j < start) {
-      // we have matched more than our tables allow us to be smart about.
-      // Fall back on BMH shift.
-      idx += m - 1 - CharOccurrence<SubjectChar, PatternChar>(last_char);
-    } else {
-      int gs_shift = BMBuffers::bmgs_buffers.shift(j + 1);
-      int bc_occ = CharOccurrence<SubjectChar, PatternChar>(c);
-      int shift = j - bc_occ;
-      if (gs_shift > shift) {
-        shift = gs_shift;
-      }
-      idx += shift;
-    }
-  }
-  return -1;
-}
+template <typename PatternChar, typename SubjectChar>
+void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreHorspoolTable() {
+  int pattern_length = pattern_.length();
+
+  int* bad_char_occurrence = bad_char_table();
+
+  // Only preprocess at most kBMMaxShift last characters of pattern.
+  int start = start_;
+  // Run forwards to populate bad_char_table, so that *last* instance
+  // of character equivalence class is the one registered.
+  // Notice: Doesn't include the last character.
+  int table_size = AlphabetSize();
+  if (start == 0) {  // All patterns less than kBMMaxShift in length.
+    memset(bad_char_occurrence,
+           -1,
+           table_size * sizeof(*bad_char_occurrence));
+  } else {
+    for (int i = 0; i < table_size; i++) {
+      bad_char_occurrence[i] = start - 1;
+    }
+  }
+  for (int i = start; i < pattern_length - 1; i++) {
+    PatternChar c = pattern_[i];
+    int bucket = (sizeof(PatternChar) == 1) ? c : c % AlphabetSize();
+    bad_char_occurrence[bucket] = i;
+  }
+}
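PopulateBoyerMooreHorspoolTable above records, for each character class, the last position at which it occurs in the pattern, excluding the final character. A stand-alone sketch of the same table for an 8-bit pattern with full preprocessing (start == 0); names are ours, not from the commit:

#include <cstdio>

static const int kAlphabetSize = 128;

// Last occurrence of each character in pattern[0 .. m-2]; -1 if absent.
void BuildBadCharTable(const char* pattern, int m, int table[kAlphabetSize]) {
  for (int i = 0; i < kAlphabetSize; i++) table[i] = -1;
  for (int i = 0; i < m - 1; i++) {  // the final character is excluded
    table[static_cast<unsigned char>(pattern[i])] = i;
  }
}

int main() {
  int table[kAlphabetSize];
  BuildBadCharTable("example", 7, table);
  // Scanning compares subject[index + 6] against the pattern's last char;
  // a subject character absent from the pattern allows the maximal shift.
  int j = 6;  // position of the last pattern character
  std::printf("shift on 'z': %d\n", j - table[static_cast<unsigned char>('z')]);  // 7
  std::printf("shift on 'x': %d\n", j - table[static_cast<unsigned char>('x')]);  // 5
  return 0;
}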
+//---------------------------------------------------------------------
+// Linear string search with bailout to BMH.
+//---------------------------------------------------------------------
+
-// Trivial string search for shorter strings.
-// On return, if "complete" is set to true, the return value is the
-// final result of searching for the pattern in the subject.
-// If "complete" is set to false, the return value is the index where
-// further checking should start, i.e., it's guaranteed that the pattern
-// does not occur at a position prior to the returned index.
+// Simple linear search for short patterns, which bails out if the string
+// isn't found very early in the subject. Upgrades to BoyerMooreHorspool.
 template <typename PatternChar, typename SubjectChar>
-static int SimpleIndexOf(Vector<const SubjectChar> subject,
-                         Vector<const PatternChar> pattern,
-                         int idx,
-                         bool* complete) {
-  ASSERT(pattern.length() > 1);
+int StringSearch<PatternChar, SubjectChar>::InitialSearch(
+    StringSearch<PatternChar, SubjectChar>* search,
+    Vector<const SubjectChar> subject,
+    int index) {
+  Vector<const PatternChar> pattern = search->pattern_;
   int pattern_length = pattern.length();
   // Badness is a count of how much work we have done.  When we have
   // done enough work we decide it's probably worth switching to a better

@@ -313,19 +519,15 @@ static int SimpleIndexOf(Vector<const SubjectChar> subject,
   // We know our pattern is at least 2 characters, we cache the first so
   // the common case of the first character not matching is faster.
   PatternChar pattern_first_char = pattern[0];
-  for (int i = idx, n = subject.length() - pattern_length; i <= n; i++) {
+  for (int i = index, n = subject.length() - pattern_length; i <= n; i++) {
     badness++;
-    if (badness > 0) {
-      *complete = false;
-      return i;
-    }
+    if (badness <= 0) {
     if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
       const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
           memchr(subject.start() + i,
                  pattern_first_char,
                  n - i + 1));
       if (pos == NULL) {
-        *complete = true;
         return -1;
       }
       i = static_cast<int>(pos - subject.start());

@@ -340,122 +542,29 @@ static int SimpleIndexOf(Vector<const SubjectChar> subject,
       j++;
     } while (j < pattern_length);
     if (j == pattern_length) {
-      *complete = true;
       return i;
     }
     badness += j;
-  }
-  *complete = true;
-  return -1;
-}
-
-// Simple indexOf that never bails out. For short patterns only.
-template <typename PatternChar, typename SubjectChar>
-static int SimpleIndexOf(Vector<const SubjectChar> subject,
-                         Vector<const PatternChar> pattern,
-                         int idx) {
-  int pattern_length = pattern.length();
-  PatternChar pattern_first_char = pattern[0];
-  for (int i = idx, n = subject.length() - pattern_length; i <= n; i++) {
-    if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
-      const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
-          memchr(subject.start() + i,
-                 pattern_first_char,
-                 n - i + 1));
-      if (pos == NULL) return -1;
-      i = static_cast<int>(pos - subject.start());
-    } else {
-      if (subject[i] != pattern_first_char) continue;
-    }
-    int j = 1;
-    while (j < pattern_length) {
-      if (pattern[j] != subject[i+j]) {
-        break;
-      }
-      j++;
-    }
-    if (j == pattern_length) {
-      return i;
-    }
-  }
-  return -1;
-}
+    } else {
+      search->PopulateBoyerMooreHorspoolTable();
+      search->strategy_ = &BoyerMooreHorspoolSearch;
+      return BoyerMooreHorspoolSearch(search, subject, i);
+    }
+  }
+  return -1;
+}
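InitialSearch's badness counter is a work budget: it starts at -10 - 4 * pattern_length and grows with every character examined, so a subject that keeps almost-matching soon exhausts the budget and triggers the upgrade to Boyer-Moore-Horspool. A rough stand-alone illustration of that arithmetic (ours, not from the commit):

#include <cstdio>

int main() {
  const int m = 8;                 // pattern length
  int badness = -10 - (m << 2);    // -42: the "free" work allowance
  // e.g. repeated outer iterations, each with a partial match of 2 chars:
  for (int it = 0; it < 30 && badness <= 0; it++) {
    badness += 1 + 2;              // 1 per iteration, plus matched prefix
  }
  std::printf("badness after simulated work: %d\n", badness);  // > 0 => upgrade
  return 0;
}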
-// Strategy for searching for a string in another string.
-enum StringSearchStrategy { SEARCH_FAIL, SEARCH_SHORT, SEARCH_LONG };
-
-template <typename PatternChar>
-static inline StringSearchStrategy InitializeStringSearch(
-    Vector<const PatternChar> pat, bool ascii_subject) {
-  // We have an ASCII haystack and a non-ASCII needle. Check if there
-  // really is a non-ASCII character in the needle and bail out if there
-  // is.
-  if (ascii_subject && sizeof(PatternChar) > 1) {
-    for (int i = 0; i < pat.length(); i++) {
-      uc16 c = pat[i];
-      if (c > String::kMaxAsciiCharCode) {
-        return SEARCH_FAIL;
-      }
-    }
-  }
-  if (pat.length() < kBMMinPatternLength) {
-    return SEARCH_SHORT;
-  }
-  algorithm = SIMPLE_SEARCH;
-  return SEARCH_LONG;
-}
-
-// Dispatch long needle searches to different algorithms.
-template <typename SubjectChar, typename PatternChar>
-static int ComplexIndexOf(Vector<const SubjectChar> sub,
-                          Vector<const PatternChar> pat,
-                          int start_index) {
-  ASSERT(pat.length() >= kBMMinPatternLength);
-  // Try algorithms in order of increasing setup cost and expected performance.
-  bool complete;
-  int idx = start_index;
-  switch (algorithm) {
-    case SIMPLE_SEARCH:
-      idx = SimpleIndexOf(sub, pat, idx, &complete);
-      if (complete) return idx;
-      BoyerMoorePopulateBadCharTable(pat);
-      algorithm = BOYER_MOORE_HORSPOOL;
-      // FALLTHROUGH.
-    case BOYER_MOORE_HORSPOOL:
-      idx = BoyerMooreHorspool(sub, pat, idx, &complete);
-      if (complete) return idx;
-      // Build the Good Suffix table and continue searching.
-      BoyerMoorePopulateGoodSuffixTable(pat);
-      algorithm = BOYER_MOORE;
-      // FALLTHROUGH.
-    case BOYER_MOORE:
-      return BoyerMooreIndexOf(sub, pat, idx);
-  }
-  UNREACHABLE();
-  return -1;
-}
-
-// Dispatch to different search strategies for a single search.
-// If searching multiple times on the same needle, the search
-// strategy should only be computed once and then dispatch to different
-// loops.
+// Perform a single stand-alone search.
+// If searching multiple times for the same pattern, a search
+// object should be constructed once and the Search function then called
+// for each search.
 template <typename SubjectChar, typename PatternChar>
-static int StringSearch(Vector<const SubjectChar> sub,
-                        Vector<const PatternChar> pat,
-                        int start_index) {
-  bool ascii_subject = (sizeof(SubjectChar) == 1);
-  StringSearchStrategy strategy = InitializeStringSearch(pat, ascii_subject);
-  switch (strategy) {
-    case SEARCH_FAIL: return -1;
-    case SEARCH_SHORT: return SimpleIndexOf(sub, pat, start_index);
-    case SEARCH_LONG: return ComplexIndexOf(sub, pat, start_index);
-  }
-  UNREACHABLE();
-  return -1;
-}
+static int SearchString(Vector<const SubjectChar> subject,
+                        Vector<const PatternChar> pattern,
+                        int start_index) {
+  StringSearch<PatternChar, SubjectChar> search(pattern);
+  return search.Search(subject, start_index);
+}

 }}  // namespace v8::internal
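The construct-once, search-many design that replaces the old global-state dispatch can be mirrored with the standard library. A self-contained C++17 analogue using std::boyer_moore_searcher rather than V8's internal classes:

#include <algorithm>
#include <functional>
#include <iostream>
#include <string>

int main() {
  std::string subject = "we search, and search again, for a pattern";
  std::string pattern = "search";
  // Preprocessing (table building) happens once, at construction...
  std::boyer_moore_searcher searcher(pattern.begin(), pattern.end());
  // ...and each call reuses the tables, like StringSearch::Search above.
  auto it = subject.begin();
  while ((it = std::search(it, subject.end(), searcher)) != subject.end()) {
    std::cout << "match at " << (it - subject.begin()) << '\n';
    ++it;
  }
  return 0;
}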

29
deps/v8/src/stub-cache.cc

@@ -988,6 +988,7 @@ Object* StoreInterceptorProperty(Arguments args) {
 Object* KeyedLoadPropertyWithInterceptor(Arguments args) {
   JSObject* receiver = JSObject::cast(args[0]);
+  ASSERT(Smi::cast(args[1])->value() >= 0);
   uint32_t index = Smi::cast(args[1])->value();
   return receiver->GetElementWithInterceptor(receiver, index);
 }

@@ -1186,25 +1187,43 @@ void StubCompiler::LookupPostInterceptor(JSObject* holder,
 Object* LoadStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
-  return GetCodeWithFlags(flags, name);
+  Object* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(result), name));
+  }
+  return result;
 }

 Object* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, type);
-  return GetCodeWithFlags(flags, name);
+  Object* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(
+        CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(result), name));
+  }
+  return result;
 }

 Object* StoreStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type);
-  return GetCodeWithFlags(flags, name);
+  Object* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(result), name));
+  }
+  return result;
 }

 Object* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
-  return GetCodeWithFlags(flags, name);
+  Object* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(
+        CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(result), name));
+  }
+  return result;
 }

@@ -1227,7 +1246,7 @@ Object* CallStubCompiler::CompileCustomCall(int generator_id,
                                             String* fname) {
   ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
   switch (generator_id) {
-#define CALL_GENERATOR_CASE(ignored1, ignored2, ignored3, name) \
+#define CALL_GENERATOR_CASE(ignored1, ignored2, name)           \
     case k##name##CallGenerator:                                 \
       return CallStubCompiler::Compile##name##Call(object,       \
                                                    holder,       \

45
deps/v8/src/stub-cache.h

@@ -370,13 +370,15 @@ class StubCompiler BASE_EMBEDDED {
                                           Register prototype);

   // Generates prototype loading code that uses the objects from the
-  // context we were in when this function was called. This ties the
-  // generated code to a particular context and so must not be used in
-  // cases where the generated code is not allowed to have references
-  // to objects from a context.
+  // context we were in when this function was called. If the context
+  // has changed, a jump to miss is performed. This ties the generated
+  // code to a particular context and so must not be used in cases
+  // where the generated code is not allowed to have references to
+  // objects from a context.
   static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                         int index,
-                                                        Register prototype);
+                                                        Register prototype,
+                                                        Label* miss);

   static void GenerateFastPropertyLoad(MacroAssembler* masm,
                                        Register dst, Register src,

@@ -612,29 +614,26 @@ class KeyedStoreStubCompiler: public StubCompiler {
 // Installation of custom call generators for the selected builtins is
 // handled by the bootstrapper.
 //
-// Each entry has a name of a global function (lowercased), a flag
-// controlling whether the generator is set on the function itself or
-// on its instance prototype, a name of a builtin function on the
-// function or its instance prototype (the one the generator is set
-// for), and a name of a generator itself (used to build ids and
-// generator function names).
+// Each entry has a name of a global object property holding an object
+// optionally followed by ".prototype" (this controls whether the
+// generator is set on the object itself or, in case it's a function,
+// on its instance prototype), a name of a builtin function on the
+// object (the one the generator is set for), and a name of the
+// generator (used to build ids and generator function names).
 #define CUSTOM_CALL_IC_GENERATORS(V)                            \
-  V(array, INSTANCE_PROTOTYPE, push, ArrayPush)                 \
-  V(array, INSTANCE_PROTOTYPE, pop, ArrayPop)                   \
-  V(string, INSTANCE_PROTOTYPE, charCodeAt, StringCharCodeAt)   \
-  V(string, INSTANCE_PROTOTYPE, charAt, StringCharAt)           \
-  V(string, FUNCTION, fromCharCode, StringFromCharCode)
+  V(Array.prototype, push, ArrayPush)                           \
+  V(Array.prototype, pop, ArrayPop)                             \
+  V(String.prototype, charCodeAt, StringCharCodeAt)             \
+  V(String.prototype, charAt, StringCharAt)                     \
+  V(String, fromCharCode, StringFromCharCode)                   \
+  V(Math, floor, MathFloor)                                     \
+  V(Math, abs, MathAbs)


 class CallStubCompiler: public StubCompiler {
  public:
-  enum CustomGeneratorOwner {
-    FUNCTION,
-    INSTANCE_PROTOTYPE
-  };
-
   enum {
-#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, ignored3, name) \
+#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, name) \
     k##name##CallGenerator,
     CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID)
 #undef DECLARE_CALL_GENERATOR_ID

@@ -673,7 +672,7 @@ class CallStubCompiler: public StubCompiler {
                            JSFunction* function,
                            String* name);

-#define DECLARE_CALL_GENERATOR(ignored1, ignored2, ignored3, name) \
+#define DECLARE_CALL_GENERATOR(ignored1, ignored2, name)  \
   Object* Compile##name##Call(Object* object,             \
                               JSObject* holder,           \
                               JSGlobalPropertyCell* cell,  \
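The rewritten CUSTOM_CALL_IC_GENERATORS list is a classic X-macro: one entry list expanded under different per-entry macros to generate ids and declarations. A minimal stand-alone sketch of the pattern (hypothetical entries, not the real list):

#include <cstdio>

#define CUSTOM_GENERATORS(V)              \
  V(Array.prototype, push, ArrayPush)     \
  V(Math, floor, MathFloor)

// Expansion: build enum ids; unused arguments are simply ignored,
// and "Array.prototype" is fine as a macro argument (it is never pasted).
enum {
#define DECLARE_ID(ignored1, ignored2, name) k##name##Generator,
  CUSTOM_GENERATORS(DECLARE_ID)
#undef DECLARE_ID
  kNumGenerators
};

int main() {
  std::printf("%d generators\n", static_cast<int>(kNumGenerators));  // 2
  return 0;
}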

20
deps/v8/src/utils.h

@@ -222,11 +222,21 @@ uint32_t ComputeIntegerHash(uint32_t key);
 // ----------------------------------------------------------------------------
 // I/O support.

-// Our version of printf(). Avoids compilation errors that we get
-// with standard printf when attempting to print pointers, etc.
-// (the errors are due to the extra compilation flags, which we
-// want elsewhere).
-void PrintF(const char* format, ...);
+#if __GNUC__ >= 4
+// On gcc we can ask the compiler to check the types of %d-style format
+// specifiers and their associated arguments.  TODO(erikcorry) fix this
+// so it works on MacOSX.
+#if defined(__MACH__) && defined(__APPLE__)
+#define PRINTF_CHECKING
+#else  // MacOsX.
+#define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
+#endif
+#else
+#define PRINTF_CHECKING
+#endif
+
+// Our version of printf().
+void PRINTF_CHECKING PrintF(const char* format, ...);

 // Our version of fflush.
 void Flush();
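PRINTF_CHECKING hands gcc the information it needs to type-check format strings against their arguments at compile time. A stand-alone illustration (our own; the attribute is gcc/clang-specific):

#include <cstdarg>
#include <cstdio>

__attribute__((format(printf, 1, 2)))
void MyPrintF(const char* format, ...) {
  va_list args;
  va_start(args, format);
  std::vprintf(format, args);
  va_end(args);
}

int main() {
  MyPrintF("ok: %d\n", 42);
  // MyPrintF("bug: %d\n", "not an int");  // gcc/clang emit a -Wformat warning
  return 0;
}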

962
deps/v8/src/utils.h.orig

@@ -1,962 +0,0 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_UTILS_H_
#define V8_UTILS_H_
#include <stdlib.h>
#include <string.h>
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// General helper functions
#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)
// Returns true iff x is a power of 2 (or zero). Cannot be used with the
// maximally negative value of the type T (the -1 overflows).
template <typename T>
static inline bool IsPowerOf2(T x) {
return IS_POWER_OF_TWO(x);
}
// X must be a power of 2. Returns the number of trailing zeros.
template <typename T>
static inline int WhichPowerOf2(T x) {
ASSERT(IsPowerOf2(x));
ASSERT(x != 0);
if (x < 0) return 31;
int bits = 0;
#ifdef DEBUG
int original_x = x;
#endif
if (x >= 0x10000) {
bits += 16;
x >>= 16;
}
if (x >= 0x100) {
bits += 8;
x >>= 8;
}
if (x >= 0x10) {
bits += 4;
x >>= 4;
}
switch (x) {
default: UNREACHABLE();
case 8: bits++; // Fall through.
case 4: bits++; // Fall through.
case 2: bits++; // Fall through.
case 1: break;
}
ASSERT_EQ(1 << bits, original_x);
return bits;
return 0;
}
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
static inline int ArithmeticShiftRight(int x, int s) {
return x >> s;
}
// Compute the 0-relative offset of some absolute value x of type T.
// This allows conversion of Addresses and integral types into
// 0-relative int offsets.
template <typename T>
static inline intptr_t OffsetFrom(T x) {
return x - static_cast<T>(0);
}
// Compute the absolute value of type T for some 0-relative offset x.
// This allows conversion of 0-relative int offsets into Addresses and
// integral types.
template <typename T>
static inline T AddressFrom(intptr_t x) {
return static_cast<T>(static_cast<T>(0) + x);
}
// Return the largest multiple of m which is <= x.
template <typename T>
static inline T RoundDown(T x, int m) {
ASSERT(IsPowerOf2(m));
return AddressFrom<T>(OffsetFrom(x) & -m);
}
// Return the smallest multiple of m which is >= x.
template <typename T>
static inline T RoundUp(T x, int m) {
return RoundDown(x + m - 1, m);
}
template <typename T>
static int Compare(const T& a, const T& b) {
if (a == b)
return 0;
else if (a < b)
return -1;
else
return 1;
}
template <typename T>
static int PointerValueCompare(const T* a, const T* b) {
return Compare<T>(*a, *b);
}
// Returns the smallest power of two which is >= x. If you pass in a
// number that is already a power of two, it is returned as is.
uint32_t RoundUpToPowerOf2(uint32_t x);
template <typename T>
static inline bool IsAligned(T value, T alignment) {
ASSERT(IsPowerOf2(alignment));
return (value & (alignment - 1)) == 0;
}
// Returns true if (addr + offset) is aligned.
static inline bool IsAddressAligned(Address addr,
intptr_t alignment,
int offset) {
intptr_t offs = OffsetFrom(addr + offset);
return IsAligned(offs, alignment);
}
// Returns the maximum of the two parameters.
template <typename T>
static T Max(T a, T b) {
return a < b ? b : a;
}
// Returns the minimum of the two parameters.
template <typename T>
static T Min(T a, T b) {
return a < b ? a : b;
}
inline int StrLength(const char* string) {
size_t length = strlen(string);
ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
return static_cast<int>(length);
}
// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
template<class T, int shift, int size>
class BitField {
public:
// Tells whether the provided value fits into the bit field.
static bool is_valid(T value) {
return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
}
// Returns a uint32_t mask of bit field.
static uint32_t mask() {
// To use all bits of a uint32 in a bitfield without compiler warnings we
// have to compute 2^32 without using a shift count of 32.
return ((1U << shift) << size) - (1U << shift);
}
// Returns a uint32_t with the bit field value encoded.
static uint32_t encode(T value) {
ASSERT(is_valid(value));
return static_cast<uint32_t>(value) << shift;
}
// Extracts the bit field from the value.
static T decode(uint32_t value) {
return static_cast<T>((value & mask()) >> shift);
}
};
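For reference, a usage sketch for this (removed) helper; the field layout is our own, hypothetical example:

// Pack a 3-bit kind (bits 0..2) and a 5-bit count (bits 3..7) into one word.
typedef BitField<unsigned, 0, 3> KindField;
typedef BitField<unsigned, 3, 5> CountField;

// uint32_t packed = KindField::encode(5) | CountField::encode(17);  // == 141
// KindField::decode(packed)   -> 5
// CountField::decode(packed)  -> 17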
// ----------------------------------------------------------------------------
// Hash function.
uint32_t ComputeIntegerHash(uint32_t key);
// ----------------------------------------------------------------------------
// I/O support.
// Our version of printf(). Avoids compilation errors that we get
// with standard printf when attempting to print pointers, etc.
// (the errors are due to the extra compilation flags, which we
// want elsewhere).
void PrintF(const char* format, ...);
// Our version of fflush.
void Flush();
// Read a line of characters after printing the prompt to stdout. The resulting
// char* needs to be disposed off with DeleteArray by the caller.
char* ReadLine(const char* prompt);
// Read and return the raw bytes in a file. the size of the buffer is returned
// in size.
// The returned buffer must be freed by the caller.
byte* ReadBytes(const char* filename, int* size, bool verbose = true);
// Write size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int WriteChars(const char* filename,
const char* str,
int size,
bool verbose = true);
// Write size bytes to the file given by filename.
// The file is overwritten. Returns the number of bytes written.
int WriteBytes(const char* filename,
const byte* bytes,
int size,
bool verbose = true);
// Write the C code
// const char* <varname> = "<str>";
// const int <varname>_len = <len>;
// to the file given by filename. Only the first len chars are written.
int WriteAsCFile(const char* filename, const char* varname,
const char* str, int size, bool verbose = true);
// ----------------------------------------------------------------------------
// Miscellaneous
// A static resource holds a static instance that can be reserved in
// a local scope using an instance of Access. Attempts to re-reserve
// the instance will cause an error.
template <typename T>
class StaticResource {
public:
StaticResource() : is_reserved_(false) {}
private:
template <typename S> friend class Access;
T instance_;
bool is_reserved_;
};
// Locally scoped access to a static resource.
template <typename T>
class Access {
public:
explicit Access(StaticResource<T>* resource)
: resource_(resource)
, instance_(&resource->instance_) {
ASSERT(!resource->is_reserved_);
resource->is_reserved_ = true;
}
~Access() {
resource_->is_reserved_ = false;
resource_ = NULL;
instance_ = NULL;
}
T* value() { return instance_; }
T* operator -> () { return instance_; }
private:
StaticResource<T>* resource_;
T* instance_;
};
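// Illustrative usage (editor's sketch, not part of the original header):
//
//   struct Scratch { char buffer[256]; };
//   static StaticResource<Scratch> scratch_resource;
//
//   void UseScratch() {
//     Access<Scratch> scratch(&scratch_resource);  // Reserves the instance;
//     scratch->buffer[0] = '\0';                   // operator-> forwards to it.
//   }                                              // Released on destruction.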
template <typename T>
class Vector {
public:
Vector() : start_(NULL), length_(0) {}
Vector(T* data, int length) : start_(data), length_(length) {
ASSERT(length == 0 || (length > 0 && data != NULL));
}
static Vector<T> New(int length) {
return Vector<T>(NewArray<T>(length), length);
}
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
Vector<T> SubVector(int from, int to) {
ASSERT(to <= length_);
ASSERT(from < to);
ASSERT(0 <= from);
return Vector<T>(start() + from, to - from);
}
// Returns the length of the vector.
int length() const { return length_; }
// Returns whether or not the vector is empty.
bool is_empty() const { return length_ == 0; }
// Returns the pointer to the start of the data in the vector.
T* start() const { return start_; }
// Access individual vector elements - checks bounds in debug mode.
T& operator[](int index) const {
ASSERT(0 <= index && index < length_);
return start_[index];
}
T& first() { return start_[0]; }
T& last() { return start_[length_ - 1]; }
// Returns a clone of this vector with a new backing store.
Vector<T> Clone() const {
T* result = NewArray<T>(length_);
for (int i = 0; i < length_; i++) result[i] = start_[i];
return Vector<T>(result, length_);
}
void Sort(int (*cmp)(const T*, const T*)) {
typedef int (*RawComparer)(const void*, const void*);
qsort(start(),
length(),
sizeof(T),
reinterpret_cast<RawComparer>(cmp));
}
void Sort() {
Sort(PointerValueCompare<T>);
}
void Truncate(int length) {
ASSERT(length <= length_);
length_ = length;
}
// Releases the array underlying this vector. Once disposed the
// vector is empty.
void Dispose() {
DeleteArray(start_);
start_ = NULL;
length_ = 0;
}
inline Vector<T> operator+(int offset) {
ASSERT(offset < length_);
return Vector<T>(start_ + offset, length_ - offset);
}
// Factory method for creating empty vectors.
static Vector<T> empty() { return Vector<T>(NULL, 0); }
template<typename S>
static Vector<T> cast(Vector<S> input) {
return Vector<T>(reinterpret_cast<T*>(input.start()),
input.length() * sizeof(S) / sizeof(T));
}
protected:
void set_start(T* start) { start_ = start; }
private:
T* start_;
int length_;
};
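// Illustrative usage (editor's sketch, not part of the original header):
//
//   Vector<int> v = Vector<int>::New(4);   // Heap-backed; caller disposes.
//   for (int i = 0; i < v.length(); i++) v[i] = i * i;
//   Vector<int> mid = v.SubVector(1, 3);   // Shares v's backing store.
//   int last = v.last();                   // last == 9
//   v.Dispose();                           // Also invalidates 'mid'.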
// A temporary assignment sets a (non-local) variable to a value on
// construction and resets it to its original value on destruction.
template <typename T>
class TempAssign {
public:
TempAssign(T* var, T value): var_(var), old_value_(*var) {
*var = value;
}
~TempAssign() { *var_ = old_value_; }
private:
T* var_;
T old_value_;
};
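// Illustrative usage (editor's sketch, not part of the original header):
//
//   static bool tracing_enabled = false;
//
//   void TraceSection() {
//     TempAssign<bool> enable(&tracing_enabled, true);
//     // ... tracing_enabled is true here ...
//   }  // Restored to false when 'enable' goes out of scope.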
template <typename T, int kSize>
class EmbeddedVector : public Vector<T> {
public:
EmbeddedVector() : Vector<T>(buffer_, kSize) { }
// When copying, make the underlying Vector reference our buffer.
EmbeddedVector(const EmbeddedVector& rhs)
: Vector<T>(rhs) {
memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
set_start(buffer_);
}
EmbeddedVector& operator=(const EmbeddedVector& rhs) {
if (this == &rhs) return *this;
Vector<T>::operator=(rhs);
memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
this->set_start(buffer_);
return *this;
}
private:
T buffer_[kSize];
};
template <typename T>
class ScopedVector : public Vector<T> {
public:
explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
~ScopedVector() {
DeleteArray(this->start());
}
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
};
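// Illustrative usage (editor's sketch, not part of the original header):
// EmbeddedVector keeps its storage inline, so it needs no heap allocation,
// while ScopedVector allocates on the heap and frees itself on scope exit.
//
//   EmbeddedVector<char, 128> stack_buffer;  // Storage lives in the object.
//   ScopedVector<char> heap_buffer(1024);    // DeleteArray'd automatically.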
inline Vector<const char> CStrVector(const char* data) {
return Vector<const char>(data, StrLength(data));
}
inline Vector<char> MutableCStrVector(char* data) {
return Vector<char>(data, StrLength(data));
}
inline Vector<char> MutableCStrVector(char* data, int max) {
int length = StrLength(data);
return Vector<char>(data, (length < max) ? length : max);
}
template <typename T>
inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
int length) {
return Vector< Handle<Object> >(
reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
}
/*
* A class that collects values into a backing store.
* Specialized versions of the class can allow access to the backing store
* in different ways.
* There is no guarantee that the backing store is contiguous (and, as a
* consequence, no guarantees that consecutively added elements are adjacent
* in memory). The collector may move elements unless it has guaranteed not
* to.
*/
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class Collector {
public:
explicit Collector(int initial_capacity = kMinCapacity)
: index_(0), size_(0) {
if (initial_capacity < kMinCapacity) {
initial_capacity = kMinCapacity;
}
current_chunk_ = Vector<T>::New(initial_capacity);
}
virtual ~Collector() {
// Free backing store (in reverse allocation order).
current_chunk_.Dispose();
for (int i = chunks_.length() - 1; i >= 0; i--) {
chunks_.at(i).Dispose();
}
}
// Add a single element.
inline void Add(T value) {
if (index_ >= current_chunk_.length()) {
Grow(1);
}
current_chunk_[index_] = value;
index_++;
size_++;
}
// Add a block of contiguous elements and return a Vector backed by the
// memory area.
// A basic Collector will keep this vector valid as long as the Collector
// is alive.
inline Vector<T> AddBlock(int size, T initial_value) {
ASSERT(size > 0);
if (size > current_chunk_.length() - index_) {
Grow(size);
}
T* position = current_chunk_.start() + index_;
index_ += size;
size_ += size;
for (int i = 0; i < size; i++) {
position[i] = initial_value;
}
return Vector<T>(position, size);
}
// Write the contents of the collector into the provided vector.
void WriteTo(Vector<T> destination) {
ASSERT(size_ <= destination.length());
int position = 0;
for (int i = 0; i < chunks_.length(); i++) {
Vector<T> chunk = chunks_.at(i);
for (int j = 0; j < chunk.length(); j++) {
destination[position] = chunk[j];
position++;
}
}
for (int i = 0; i < index_; i++) {
destination[position] = current_chunk_[i];
position++;
}
}
// Allocate a single contiguous vector, copy all the collected
// elements to the vector, and return it.
// The caller is responsible for freeing the memory of the returned
// vector (e.g., using Vector::Dispose).
Vector<T> ToVector() {
Vector<T> new_store = Vector<T>::New(size_);
WriteTo(new_store);
return new_store;
}
// Resets the collector to be empty.
virtual void Reset() {
for (int i = chunks_.length() - 1; i >= 0; i--) {
chunks_.at(i).Dispose();
}
chunks_.Rewind(0);
index_ = 0;
size_ = 0;
}
// Total number of elements added to collector so far.
inline int size() { return size_; }
protected:
static const int kMinCapacity = 16;
List<Vector<T> > chunks_;
Vector<T> current_chunk_; // Block of memory currently being written into.
int index_; // Current index in current chunk.
int size_; // Total number of elements in collector.
// Creates a new current chunk, and stores the old chunk in the chunks_ list.
void Grow(int min_capacity) {
ASSERT(growth_factor > 1);
int growth = current_chunk_.length() * (growth_factor - 1);
if (growth > max_growth) {
growth = max_growth;
}
int new_capacity = current_chunk_.length() + growth;
if (new_capacity < min_capacity) {
new_capacity = min_capacity + growth;
}
Vector<T> new_chunk = Vector<T>::New(new_capacity);
int new_index = PrepareGrow(new_chunk);
if (index_ > 0) {
chunks_.Add(current_chunk_.SubVector(0, index_));
} else {
// Can happen if the call to PrepareGrow moves everything into
// the new chunk.
current_chunk_.Dispose();
}
current_chunk_ = new_chunk;
index_ = new_index;
ASSERT(index_ + min_capacity <= current_chunk_.length());
}
// Before replacing the current chunk, give a subclass the option to move
// some of the current data into the new chunk. The function may update
// the current index_ value to represent data no longer in the current chunk.
// Returns the initial index of the new chunk (after copied data).
virtual int PrepareGrow(Vector<T> new_chunk) {
return 0;
}
};
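// Illustrative usage (editor's sketch, not part of the original header):
//
//   Collector<int> collector;
//   for (int i = 0; i < 1000; i++) collector.Add(i);
//   Vector<int> all = collector.ToVector();  // Contiguous copy of all 1000.
//   // ... use 'all' ...
//   all.Dispose();                           // Caller frees the copy.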
/*
* A collector that allows sequences of values to be guaranteed to
* stay consecutive.
* If the backing store grows while a sequence is active, the current
* sequence might be moved, but after the sequence is ended, it will
* not move again.
 * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
 * as well if they were allocated inside an active sequence and another
 * element is later added.
*/
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class SequenceCollector : public Collector<T, growth_factor, max_growth> {
public:
explicit SequenceCollector(int initial_capacity)
: Collector<T, growth_factor, max_growth>(initial_capacity),
sequence_start_(kNoSequence) { }
virtual ~SequenceCollector() {}
void StartSequence() {
ASSERT(sequence_start_ == kNoSequence);
sequence_start_ = this->index_;
}
Vector<T> EndSequence() {
ASSERT(sequence_start_ != kNoSequence);
int sequence_start = sequence_start_;
sequence_start_ = kNoSequence;
if (sequence_start == this->index_) return Vector<T>();
return this->current_chunk_.SubVector(sequence_start, this->index_);
}
// Drops the currently added sequence, and all collected elements in it.
void DropSequence() {
ASSERT(sequence_start_ != kNoSequence);
int sequence_length = this->index_ - sequence_start_;
this->index_ = sequence_start_;
this->size_ -= sequence_length;
sequence_start_ = kNoSequence;
}
virtual void Reset() {
sequence_start_ = kNoSequence;
this->Collector<T, growth_factor, max_growth>::Reset();
}
private:
static const int kNoSequence = -1;
int sequence_start_;
// Move the currently active sequence to the new chunk.
virtual int PrepareGrow(Vector<T> new_chunk) {
if (sequence_start_ != kNoSequence) {
int sequence_length = this->index_ - sequence_start_;
// The new chunk is always larger than the current chunk, so there
// is room for the copy.
ASSERT(sequence_length < new_chunk.length());
for (int i = 0; i < sequence_length; i++) {
new_chunk[i] = this->current_chunk_[sequence_start_ + i];
}
this->index_ = sequence_start_;
sequence_start_ = 0;
return sequence_length;
}
return 0;
}
};
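// Illustrative usage (editor's sketch, not part of the original header):
//
//   SequenceCollector<char> collector(64);
//   collector.StartSequence();
//   collector.Add('f');
//   collector.Add('o');
//   collector.Add('o');
//   Vector<char> word = collector.EndSequence();  // "foo", contiguous, and
//                                                 // guaranteed not to move.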
// Simple support to read a file into a 0-terminated C-string.
// The returned buffer must be freed by the caller.
// On return, *exists tells whether the file existed.
Vector<const char> ReadFile(const char* filename,
bool* exists,
bool verbose = true);
// Simple wrapper that allows an ExternalString to refer to a
// Vector<const char>. Doesn't assume ownership of the data.
class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
public:
explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
virtual const char* data() const { return data_.start(); }
virtual size_t length() const { return data_.length(); }
private:
Vector<const char> data_;
};
// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that check the buffer
// bounds on every operation in debug mode.
class StringBuilder {
public:
// Create a string builder with a buffer of the given size. The
// buffer is allocated through NewArray<char> and must be
// deallocated by the caller of Finalize().
explicit StringBuilder(int size);
StringBuilder(char* buffer, int size)
: buffer_(buffer, size), position_(0) { }
~StringBuilder() { if (!is_finalized()) Finalize(); }
int size() const { return buffer_.length(); }
// Get the current position in the builder.
int position() const {
ASSERT(!is_finalized());
return position_;
}
// Reset the position.
void Reset() { position_ = 0; }
// Add a single character to the builder. It is not allowed to add
// 0-characters; use the Finalize() method to terminate the string
// instead.
void AddCharacter(char c) {
ASSERT(c != '\0');
ASSERT(!is_finalized() && position_ < buffer_.length());
buffer_[position_++] = c;
}
// Add an entire string to the builder. Uses strlen() internally to
// compute the length of the input string.
void AddString(const char* s);
// Add the first 'n' characters of the given string 's' to the
// builder. The input string must have enough characters.
void AddSubstring(const char* s, int n);
// Add formatted contents to the builder just like printf().
void AddFormatted(const char* format, ...);
// Add character padding to the builder. If count is non-positive,
// nothing is added to the builder.
void AddPadding(char c, int count);
// Finalize the string by 0-terminating it and returning the buffer.
char* Finalize();
private:
Vector<char> buffer_;
int position_;
bool is_finalized() const { return position_ < 0; }
DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
};
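// Illustrative usage (editor's sketch, not part of the original header):
//
//   char buffer[64];
//   StringBuilder builder(buffer, sizeof(buffer));
//   builder.AddString("x = ");
//   builder.AddFormatted("%d", 42);
//   char* result = builder.Finalize();  // result == buffer, 0-terminated.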
// Custom memcpy implementation for platforms where the standard version
// may not be good enough.
// TODO(lrn): Check whether some IA32 platforms should be excluded.
#if defined(V8_TARGET_ARCH_IA32)
// TODO(lrn): Extend to other platforms as needed.
typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
// Implemented in codegen-<arch>.cc.
MemCopyFunction CreateMemCopyFunction();
// Copy memory area to disjoint memory area.
static inline void MemCopy(void* dest, const void* src, size_t size) {
static MemCopyFunction memcopy = CreateMemCopyFunction();
(*memcopy)(dest, src, size);
#ifdef DEBUG
CHECK_EQ(0, memcmp(dest, src, size));
#endif
}
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
// TODO(lrn): Try to find a more precise value.
static const int kMinComplexMemCopy = 64;
#else // V8_TARGET_ARCH_IA32
static inline void MemCopy(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
static const int kMinComplexMemCopy = 256;
#endif // V8_TARGET_ARCH_IA32
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
MemCopy(dest, src, chars * sizeof(*dest));
return;
}
// Number of characters in a uintptr_t.
static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
while (dest <= limit - kStepSize) {
*reinterpret_cast<uintptr_t*>(dest) =
*reinterpret_cast<const uintptr_t*>(src);
dest += kStepSize;
src += kStepSize;
}
}
#endif
while (dest < limit) {
*dest++ = static_cast<sinkchar>(*src++);
}
}
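// Illustrative usage (editor's sketch, not part of the original header;
// uint16_t stands in here for V8's 16-bit character type):
//
//   const char ascii[] = "hello";
//   uint16_t wide[5];
//   CopyChars(wide, ascii, 5);  // Widens each 8-bit char to 16 bits.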
// Compare ASCII/16bit chars to ASCII/16bit chars.
template <typename lchar, typename rchar>
static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
const lchar* limit = lhs + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*lhs) == sizeof(*rhs)) {
// Number of characters in a uintptr_t.
static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs); // NOLINT
while (lhs <= limit - kStepSize) {
if (*reinterpret_cast<const uintptr_t*>(lhs) !=
*reinterpret_cast<const uintptr_t*>(rhs)) {
break;
}
lhs += kStepSize;
rhs += kStepSize;
}
}
#endif
while (lhs < limit) {
int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
if (r != 0) return r;
++lhs;
++rhs;
}
return 0;
}
template <typename T>
static inline void MemsetPointer(T** dest, T* value, int counter) {
#if defined(V8_HOST_ARCH_IA32)
#define STOS "stosl"
#elif defined(V8_HOST_ARCH_X64)
#define STOS "stosq"
#endif
#if defined(__GNUC__) && defined(STOS)
asm volatile(
"cld;"
"rep ; " STOS
: "+&c" (counter), "+&D" (dest)
: "a" (value)
: "memory", "cc");
#else
for (int i = 0; i < counter; i++) {
dest[i] = value;
}
#endif
#undef STOS
}
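// Illustrative usage (editor's sketch, not part of the original header):
//
//   int sentinel = 0;
//   int* slots[32];
//   MemsetPointer(slots, &sentinel, 32);  // Every slot now points at sentinel.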
// Copies data from |src| to |dst|. The data spans MUST not overlap.
inline void CopyWords(Object** dst, Object** src, int num_words) {
ASSERT(Min(dst, src) + num_words <= Max(dst, src));
ASSERT(num_words > 0);
// Use block copying memcpy if the segment we're copying is
// enough to justify the extra call/setup overhead.
static const int kBlockCopyLimit = 16;
if (num_words >= kBlockCopyLimit) {
memcpy(dst, src, num_words * kPointerSize);
} else {
int remaining = num_words;
do {
remaining--;
*dst++ = *src++;
} while (remaining > 0);
}
}
// Calculate 10^exponent.
int TenToThe(int exponent);
// The type-based aliasing rule allows the compiler to assume that pointers of
// different types (for some definition of different) never alias each other.
// Thus the following code does not work:
//
// float f = foo();
// int fbits = *(int*)(&f);
//
// The compiler 'knows' that the int pointer can't refer to f since the types
// don't match, so the compiler may cache f in a register, leaving random data
// in fbits. Using C++ style casts makes no difference, however a pointer to
// char data is assumed to alias any other pointer. This is the 'memcpy
// exception'.
//
// BitCast uses the memcpy exception to move the bits from a variable of one
// type to a variable of another type. Of course the end result is likely to
// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
// will completely optimize BitCast away.
//
// There is an additional use for BitCast.
// Recent gccs will warn when they see casts that may result in breakage due to
// the type-based aliasing rule. If you have checked that there is no breakage
// you can use BitCast to cast one pointer type to another. This confuses gcc
// enough that it can no longer see that you have cast one pointer type to
// another thus avoiding the warning.
template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
// Compile time assertion: sizeof(Dest) == sizeof(Source)
// A compile error here means your Dest and Source have different sizes.
typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
Dest dest;
memcpy(&dest, &source, sizeof(dest));
return dest;
}
template <class Dest, class Source>
inline Dest BitCast(Source* source) {
return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
}
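// Illustrative usage (editor's sketch, not part of the original header):
//
//   float f = 1.0f;
//   uint32_t bits = BitCast<uint32_t>(f);  // bits == 0x3F800000 (IEEE 754).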
} } // namespace v8::internal
#endif // V8_UTILS_H_

2
deps/v8/src/v8-counters.h

@@ -161,6 +161,8 @@ namespace internal {
 SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
 SC(named_load_global_inline, V8.NamedLoadGlobalInline) \
 SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \
+SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
+SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss) \
 SC(keyed_store_field, V8.KeyedStoreField) \
 SC(keyed_store_inline, V8.KeyedStoreInline) \
 SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \

