Merge branch 'v0.4'

Conflicts:
	src/node_version.h
Ryan Dahl
commit 247d880113
 ChangeLog | 44
 LICENSE | 93
 cmake/configure.cmake | 2
 cmake/node_build.cmake | 1
 deps/v8/ChangeLog | 27
 deps/v8/SConstruct | 1
 deps/v8/src/api.cc | 6
 deps/v8/src/arm/assembler-arm.h | 1
 deps/v8/src/arm/code-stubs-arm.cc | 712
 deps/v8/src/arm/code-stubs-arm.h | 2
 deps/v8/src/arm/codegen-arm.cc | 47
 deps/v8/src/arm/constants-arm.h | 8
 deps/v8/src/arm/full-codegen-arm.cc | 766
 deps/v8/src/arm/ic-arm.cc | 27
 deps/v8/src/arm/lithium-arm.cc | 3
 deps/v8/src/arm/lithium-codegen-arm.cc | 74
 deps/v8/src/arm/lithium-codegen-arm.h | 5
 deps/v8/src/arm/macro-assembler-arm.cc | 125
 deps/v8/src/arm/macro-assembler-arm.h | 51
 deps/v8/src/arm/simulator-arm.cc | 65
 deps/v8/src/arm/stub-cache-arm.cc | 62
 deps/v8/src/arm/virtual-frame-arm.cc | 10
 deps/v8/src/arm/virtual-frame-arm.h | 2
 deps/v8/src/array.js | 9
 deps/v8/src/assembler.cc | 2
 deps/v8/src/assembler.h | 31
 deps/v8/src/builtins.cc | 20
 deps/v8/src/builtins.h | 15
 deps/v8/src/compiler.cc | 7
 deps/v8/src/d8.cc | 8
 deps/v8/src/d8.js | 618
 deps/v8/src/debug-debugger.js | 121
 deps/v8/src/debug.cc | 3
 deps/v8/src/flag-definitions.h | 14
 deps/v8/src/frame-element.h | 4
 deps/v8/src/full-codegen.cc | 16
 deps/v8/src/full-codegen.h | 48
 deps/v8/src/handles-inl.h | 4
 deps/v8/src/handles.cc | 24
 deps/v8/src/handles.h | 63
 deps/v8/src/heap-profiler.cc | 45
 deps/v8/src/heap-profiler.h | 4
 deps/v8/src/heap.cc | 15
 deps/v8/src/hydrogen.cc | 401
 deps/v8/src/hydrogen.h | 53
 deps/v8/src/ia32/code-stubs-ia32.cc | 5
 deps/v8/src/ia32/codegen-ia32.cc | 39
 deps/v8/src/ia32/full-codegen-ia32.cc | 378
 deps/v8/src/ia32/ic-ia32.cc | 27
 deps/v8/src/ia32/lithium-codegen-ia32.cc | 20
 deps/v8/src/ia32/lithium-ia32.cc | 11
 deps/v8/src/ia32/stub-cache-ia32.cc | 10
 deps/v8/src/ia32/virtual-frame-ia32.cc | 25
 deps/v8/src/ia32/virtual-frame-ia32.h | 6
 deps/v8/src/ic-inl.h | 9
 deps/v8/src/ic.cc | 101
 deps/v8/src/ic.h | 45
 deps/v8/src/liveobjectlist-inl.h | 90
 deps/v8/src/liveobjectlist.cc | 2476
 deps/v8/src/liveobjectlist.h | 260
 deps/v8/src/mark-compact.cc | 7
 deps/v8/src/messages.js | 8
 deps/v8/src/objects-inl.h | 10
 deps/v8/src/objects.cc | 59
 deps/v8/src/objects.h | 12
 deps/v8/src/parser.cc | 72
 deps/v8/src/platform-solaris.cc | 132
 deps/v8/src/profile-generator-inl.h | 28
 deps/v8/src/profile-generator.cc | 747
 deps/v8/src/profile-generator.h | 166
 deps/v8/src/runtime-profiler.cc | 71
 deps/v8/src/runtime-profiler.h | 5
 deps/v8/src/runtime.cc | 932
 deps/v8/src/runtime.h | 27
 deps/v8/src/spaces.h | 6
 deps/v8/src/stub-cache.cc | 78
 deps/v8/src/stub-cache.h | 33
 deps/v8/src/version.cc | 2
 deps/v8/src/virtual-frame-heavy-inl.h | 6
 deps/v8/src/x64/assembler-x64-inl.h | 2
 deps/v8/src/x64/assembler-x64.cc | 24
 deps/v8/src/x64/assembler-x64.h | 4
 deps/v8/src/x64/builtins-x64.cc | 2
 deps/v8/src/x64/code-stubs-x64.cc | 321
 deps/v8/src/x64/code-stubs-x64.h | 18
 deps/v8/src/x64/codegen-x64-inl.h | 2
 deps/v8/src/x64/codegen-x64.cc | 38
 deps/v8/src/x64/codegen-x64.h | 2
 deps/v8/src/x64/cpu-x64.cc | 2
 deps/v8/src/x64/debug-x64.cc | 2
 deps/v8/src/x64/deoptimizer-x64.cc | 146
 deps/v8/src/x64/disasm-x64.cc | 12
 deps/v8/src/x64/frames-x64.cc | 2
 deps/v8/src/x64/frames-x64.h | 2
 deps/v8/src/x64/full-codegen-x64.cc | 387
 deps/v8/src/x64/ic-x64.cc | 27
 deps/v8/src/x64/jump-target-x64.cc | 2
 deps/v8/src/x64/lithium-codegen-x64.cc | 241
 deps/v8/src/x64/lithium-codegen-x64.h | 12
 deps/v8/src/x64/lithium-x64.cc | 34

ChangeLog | 44

@@ -1,4 +1,46 @@
+2011.03.02, Version 0.4.2 (stable)
+
+* Improve docs.
+
+* Fix process.on edge case with signal event (Alexis Sellier)
+
+* Pragma HTTP header comma separation
+
+* In addition to 'aborted' emit 'close' from incoming requests
+  (Felix Geisendörfer)
+
+* Fix memleak in vm.runInNewContext
+
+* Do not cache modules that throw exceptions (Felix Geisendörfer)
+
+* Build system changes for libnode (Aria Stewart)
+
+* Read up the prototype of the 'env' object. (Nathan Rajlich)
+
+* Add 'close' and 'aborted' events to Agent responses
+
+* http: fix missing 'drain' events (Russell Haering)
+
+* Fix process.stdout.end() throws ENOTSOCK error. (Koichi Kobayashi)
+
+* REPL bug fixes (isaacs)
+
+* node_modules folders should be highest priority (isaacs)
+
+* URL parse more safely (isaacs)
+
+* Expose errno with a string for dns/cares (Felix Geisendörfer)
+
+* Fix tty.setWindowSize
+
+* spawn: setuid after chdir (isaacs)
+
+* SIGUSR1 should break the VM without delay
+
+* Upgrade V8 to 3.1.8.
+
 2011.02.19, Version 0.4.1 (stable), e8aef84191bc2c1ba2bcaa54f30aabde7f03769b
 
 * Fixed field merging with progressive fields on writeHead()
   (TJ Holowaychuk)

LICENSE | 93

@@ -1,49 +1,8 @@
-This license applies to all parts of Node that are not externally
-maintained libraries. The externally maintained libraries used by Node
-are:
-
-- v8, located under deps/v8, which is copyrighted by the Google, Inc.
-  v8 has a BSD license.
-
-- libev, located under deps/libev, and libeio, located at deps/libeio.
-  This code is copyrighted by Marc Alexander Lehmann. Both are dually
-  licensed under MIT and GPL2.
-
-- WAF build system, located at tools/waf. Copyrighted Thomas Nagy.
-  Released under an MIT license.
-
-- The SCONS build system, located at tools/scons. Copyrighted by the SCONS
-  Foundation. Released under an MIT license.
-
-- C-Ares, an asynchronous DNS client, located at deps/c-ares. Copyright by
-  the Massachusetts Institute of Technology; authored by Greg Hudson,
-  Daniel Stenberg and others. Released under an MIT license.
-
-- Node, optionally, dynmaically links to OpenSSL, cryptographic software
-  written by Eric Young (eay@cryptsoft.com) to provide SSL/TLS encryption.
-  OpenSSL is copyrighted by The OpenSSL Project. OpenSSL has a simple
-  Apache-style license. OpenSSL is not included in the Node distribution.
-  See http://openssl.org/ for more information.
-
-- tools/doctool/markdown.js is Released under MIT license and
-  Copyright 2009-2010 Dominic Baggott and Ash Berli
-
-- HTTP Parser, located at deps/http_parser, is a small C library
-  copyrighted by Ryan Lienhart Dahl and has a MIT license.
-
-- src/platform_darwin_proctitle.cc, has code taken from the Chromium
-  project copyright Google Inc. and released with the BSD license.
-
-- tools/closure_linter is copyrighted by The Closure Linter Authors and
-  Google Inc and is released under the Apache license.
-
-- tools/cpplint.py is copyrighted by Google Inc and is released under the
-  BSD license.
-
 Node's license follows:
-Copyright 2009, 2010 Ryan Lienhart Dahl. All rights reserved.
+
+====
+
+Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to
 deal in the Software without restriction, including without limitation the
@@ -61,3 +20,49 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 IN THE SOFTWARE.
+
+====
+
+This license applies to all parts of Node that are not externally
+maintained libraries.
+
+The externally maintained libraries used by Node are:
+
+- v8, located at deps/v8. v8 is copyright Google, Inc, and released
+  under a BSD license.
+
+- libev, located at deps/libev, and libeio, located at deps/libeio. libev
+  and libeio are copyright Marc Alexander Lehmann, and dual-licensed
+  under the MIT license and GPL2.
+
+- WAF build system, located at tools/waf. WAF is copyright Thomas Nagy,
+  and released under the MIT license.
+
+- The SCONS build system, located at tools/scons. SCONS is copyright
+  the SCONS Foundation and released under the MIT license.
+
+- C-Ares, an asynchronous DNS client, located at deps/c-ares. C-Ares is
+  copyright the Massachusetts Institute of Technology, authored by
+  Greg Hudson, Daniel Stenberg and others, and released under the MIT
+  license.
+
+- Node, optionally, dynamically links to OpenSSL, cryptographic software
+  written by Eric Young (eay@cryptsoft.com) to provide SSL/TLS encryption.
+  OpenSSL is copyright The OpenSSL Project and released under the OpenSSL
+  license. OpenSSL is not included in the Node distribution.
+  See http://openssl.org/ for more information.
+
+- tools/doctool/markdown.js is copyright 2009-2010 Dominic Baggott and Ash
+  Berli and released under the MIT license.
+
+- HTTP Parser, located at deps/http_parser, is a small C library
+  copyright Ryan Lienhart Dahl and released under the MIT license.
+
+- src/platform_darwin_proctitle.cc, has code taken from the Chromium
+  project copyright Google Inc. and released under a BSD license.
+
+- tools/closure_linter is copyright The Closure Linter Authors and
+  Google Inc. and released under the Apache License, version 2.0.
+
+- tools/cpplint.py is copyright Google Inc. and released under a
+  BSD license.

cmake/configure.cmake | 2

@@ -62,6 +62,8 @@ endif()
 if(${node_platform} MATCHES darwin)
   set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -framework Carbon")
+  # explicitly set this so that we don't check again when building libeio
+  set(HAVE_FDATASYNC 0)
 else()
   # OSX fdatasync() check wrong: http://public.kitware.com/Bug/view.php?id=10044
   check_function_exists(fdatasync HAVE_FDATASYNC)

cmake/node_build.cmake | 1

@@ -62,6 +62,7 @@ set(node_sources
   src/node_script.cc
   src/node_os.cc
   src/node_dtrace.cc
+  src/node_string.cc
   src/node_natives.h
   ${node_extra_src})

deps/v8/ChangeLog | 27

@@ -1,3 +1,30 @@
+2011-03-02: Version 3.1.8
+
+        Fixed a number of crash bugs.
+
+        Improved Crankshaft for x64 and ARM.
+
+        Implemented more of EcmaScript 5 strict mode.
+
+        Fixed issue with unaligned reads and writes on ARM.
+
+        Improved heap profiler support.
+
+
+2011-02-28: Version 3.1.7
+
+        Fixed a number of crash bugs.
+
+        Improved Crankshaft for x64 and ARM.
+
+        Fixed implementation of indexOf/lastIndexOf for sparse
+        arrays (http://crbug.com/73940).
+
+        Fixed bug in map space compaction (http://crbug.com/59688).
+
+        Added support for direct getter accessors calls on ARM.
+
+
 2011-02-24: Version 3.1.6
 
         Fixed a number of crash bugs.

deps/v8/SConstruct | 1

@@ -306,7 +306,6 @@ V8_EXTRA_FLAGS = {
   'gcc': {
     'all': {
       'WARNINGFLAGS': ['-Wall',
-                       '-Werror',
                        '-W',
                        '-Wno-unused-parameter',
                        '-Wnon-virtual-dtor']

deps/v8/src/api.cc | 6

@@ -2286,7 +2286,8 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
       self,
       key_obj,
       value_obj,
-      static_cast<PropertyAttributes>(attribs));
+      static_cast<PropertyAttributes>(attribs),
+      i::kNonStrictMode);
   has_pending_exception = obj.is_null();
   EXCEPTION_BAILOUT_CHECK(false);
   return true;
@@ -2711,7 +2712,8 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
       hidden_props,
       key_obj,
       value_obj,
-      static_cast<PropertyAttributes>(None));
+      static_cast<PropertyAttributes>(None),
+      i::kNonStrictMode);
   has_pending_exception = obj.is_null();
   EXCEPTION_BAILOUT_CHECK(false);
   return true;

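Both hunks thread an explicit strict-mode flag into the internal SetProperty, and the public embedder API picks i::kNonStrictMode. As a rough JavaScript analogy of what that default means (standard ES5 semantics, not code from this commit): API writes behave like sloppy-mode assignments, which fail silently where strict-mode assignments throw.

    var obj = Object.freeze({ x: 1 });

    obj.x = 2;          // sloppy mode: the failed write is silently ignored
    obj.x;              // still 1

    (function () {
      'use strict';
      obj.x = 2;        // strict mode: throws TypeError
    })();
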
deps/v8/src/arm/assembler-arm.h | 1

@@ -284,6 +284,7 @@ const SwVfpRegister s29 = { 29 };
 const SwVfpRegister s30 = { 30 };
 const SwVfpRegister s31 = { 31 };
 
+const DwVfpRegister no_dreg = { -1 };
 const DwVfpRegister d0 = { 0 };
 const DwVfpRegister d1 = { 1 };
 const DwVfpRegister d2 = { 2 };

deps/v8/src/arm/code-stubs-arm.cc | 712

@@ -398,8 +398,11 @@ class FloatingPointHelper : public AllStatic {
                                    Label* not_number);
 
   // Loads the number from object into dst as a 32-bit integer if possible. If
-  // the object is not a 32-bit integer control continues at the label
-  // not_int32. If VFP is supported double_scratch is used but not scratch2.
+  // the object cannot be converted to a 32-bit integer control continues at
+  // the label not_int32. If VFP is supported double_scratch is used
+  // but not scratch2.
+  // Floating point values in the 32-bit integer range will be rounded
+  // to an integer.
   static void LoadNumberAsInteger(MacroAssembler* masm,
                                   Register object,
                                   Register dst,
@@ -409,6 +412,76 @@ class FloatingPointHelper : public AllStatic {
                                   DwVfpRegister double_scratch,
                                   Label* not_int32);
 
+  // Load the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+  static void LoadNumberAsInt32Double(MacroAssembler* masm,
+                                      Register object,
+                                      Destination destination,
+                                      DwVfpRegister double_dst,
+                                      Register dst1,
+                                      Register dst2,
+                                      Register heap_number_map,
+                                      Register scratch1,
+                                      Register scratch2,
+                                      SwVfpRegister single_scratch,
+                                      Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+  // scratch3 is not used when VFP3 is supported.
+  static void LoadNumberAsInt32(MacroAssembler* masm,
+                                Register object,
+                                Register dst,
+                                Register heap_number_map,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                DwVfpRegister double_scratch,
+                                Label* not_int32);
+
+  // Generate non VFP3 code to check if a double can be exactly represented by
+  // a 32-bit integer. This does not check for 0 or -0, which need
+  // to be checked for separately.
+  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
+  // through otherwise.
+  // src1 and src2 will be clobbered.
+  //
+  // Expected input:
+  // - src1: higher (exponent) part of the double value.
+  // - src2: lower (mantissa) part of the double value.
+  // Output status:
+  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
+  // - src2: contains 1.
+  // - other registers are clobbered.
+  static void DoubleIs32BitInteger(MacroAssembler* masm,
+                                   Register src1,
+                                   Register src2,
+                                   Register dst,
+                                   Register scratch,
+                                   Label* not_int32);
+
+  // Generates code to call a C function to do a double operation using core
+  // registers. (Used when VFP3 is not supported.)
+  // This code never falls through, but returns with a heap number containing
+  // the result in r0.
+  // Register heapnumber_result must be a heap number in which the
+  // result of the operation will be stored.
+  // Requires the following layout on entry:
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+                                          Token::Value op,
+                                          Register heap_number_result,
+                                          Register scratch);
+
 private:
   static void LoadNumber(MacroAssembler* masm,
                          FloatingPointHelper::Destination destination,
@@ -560,6 +633,319 @@ void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
 }
 
+void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
+                                                  Register object,
+                                                  Destination destination,
+                                                  DwVfpRegister double_dst,
+                                                  Register dst1,
+                                                  Register dst2,
+                                                  Register heap_number_map,
+                                                  Register scratch1,
+                                                  Register scratch2,
+                                                  SwVfpRegister single_scratch,
+                                                  Label* not_int32) {
+  ASSERT(!scratch1.is(object) && !scratch2.is(object));
+  ASSERT(!scratch1.is(scratch2));
+  ASSERT(!heap_number_map.is(object) &&
+         !heap_number_map.is(scratch1) &&
+         !heap_number_map.is(scratch2));
+
+  Label done, obj_is_not_smi;
+
+  __ JumpIfNotSmi(object, &obj_is_not_smi);
+  __ SmiUntag(scratch1, object);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(single_scratch, scratch1);
+    __ vcvt_f64_s32(double_dst, single_scratch);
+    if (destination == kCoreRegisters) {
+      __ vmov(dst1, dst2, double_dst);
+    }
+  } else {
+    Label fewer_than_20_useful_bits;
+    // Expected output:
+    // |       dst1        |       dst2        |
+    // | s |   exp   |        mantissa         |
+
+    // Check for zero.
+    __ cmp(scratch1, Operand(0));
+    __ mov(dst1, scratch1);
+    __ mov(dst2, scratch1);
+    __ b(eq, &done);
+
+    // Preload the sign of the value.
+    __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
+    // Get the absolute value of the object (as an unsigned integer).
+    __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
+
+    // Get mantissa[51:20].
+
+    // Get the position of the first set bit.
+    __ CountLeadingZeros(dst2, scratch1, scratch2);
+    __ rsb(dst2, dst2, Operand(31));
+
+    // Set the exponent.
+    __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
+    __ Bfi(dst1, scratch2, scratch2,
+           HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+
+    // Clear the first non null bit.
+    __ mov(scratch2, Operand(1));
+    __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
+
+    __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+    // Get the number of bits to set in the lower part of the mantissa.
+    __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+    __ b(mi, &fewer_than_20_useful_bits);
+    // Set the higher 20 bits of the mantissa.
+    __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
+    __ rsb(scratch2, scratch2, Operand(32));
+    __ mov(dst2, Operand(scratch1, LSL, scratch2));
+    __ b(&done);
+
+    __ bind(&fewer_than_20_useful_bits);
+    __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+    __ mov(scratch2, Operand(scratch1, LSL, scratch2));
+    __ orr(dst1, dst1, scratch2);
+    // Set dst2 to 0.
+    __ mov(dst2, Operand(0));
+  }
+
+  __ b(&done);
+
+  __ bind(&obj_is_not_smi);
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+  // Load the number.
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    // Load the double value.
+    __ sub(scratch1, object, Operand(kHeapObjectTag));
+    __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
+
+    __ EmitVFPTruncate(kRoundToZero,
+                       single_scratch,
+                       double_dst,
+                       scratch1,
+                       scratch2,
+                       kCheckForInexactConversion);
+
+    // Jump to not_int32 if the operation did not succeed.
+    __ b(ne, not_int32);
+
+    if (destination == kCoreRegisters) {
+      __ vmov(dst1, dst2, double_dst);
+    }
+  } else {
+    ASSERT(!scratch1.is(object) && !scratch2.is(object));
+    // Load the double value in the destination registers.
+    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+    // Check for 0 and -0.
+    __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
+    __ orr(scratch1, scratch1, Operand(dst2));
+    __ cmp(scratch1, Operand(0));
+    __ b(eq, &done);
+
+    // Check that the value can be exactly represented by a 32-bit integer.
+    // Jump to not_int32 if that's not the case.
+    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+
+    // dst1 and dst2 were trashed. Reload the double value.
+    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+  }
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
+                                            Register object,
+                                            Register dst,
+                                            Register heap_number_map,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            DwVfpRegister double_scratch,
+                                            Label* not_int32) {
+  ASSERT(!dst.is(object));
+  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
+  ASSERT(!scratch1.is(scratch2) &&
+         !scratch1.is(scratch3) &&
+         !scratch2.is(scratch3));
+
+  Label done;
+
+  // Untag the object into the destination register.
+  __ SmiUntag(dst, object);
+  // Just return if the object is a smi.
+  __ JumpIfSmi(object, &done);
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotRootValue(heap_number_map,
+                           Heap::kHeapNumberMapRootIndex,
+                           "HeapNumberMap register clobbered.");
+  }
+  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+  // Object is a heap number.
+  // Convert the floating point value to a 32-bit integer.
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    SwVfpRegister single_scratch = double_scratch.low();
+    // Load the double value.
+    __ sub(scratch1, object, Operand(kHeapObjectTag));
+    __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
+
+    __ EmitVFPTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       scratch2,
+                       kCheckForInexactConversion);
+
+    // Jump to not_int32 if the operation did not succeed.
+    __ b(ne, not_int32);
+    // Get the result in the destination register.
+    __ vmov(dst, single_scratch);
+  } else {
+    // Load the double value in the destination registers.
+    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+
+    // Check for 0 and -0.
+    __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
+    __ orr(dst, scratch2, Operand(dst));
+    __ cmp(dst, Operand(0));
+    __ b(eq, &done);
+
+    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
+
+    // Registers state after DoubleIs32BitInteger.
+    // dst: mantissa[51:20].
+    // scratch2: 1
+
+    // Shift back the higher bits of the mantissa.
+    __ mov(dst, Operand(dst, LSR, scratch3));
+    // Set the implicit first bit.
+    __ rsb(scratch3, scratch3, Operand(32));
+    __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
+    // Set the sign.
+    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    __ tst(scratch1, Operand(HeapNumber::kSignMask));
+    __ rsb(dst, dst, Operand(0), LeaveCC, mi);
+  }
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
+                                               Register src1,
+                                               Register src2,
+                                               Register dst,
+                                               Register scratch,
+                                               Label* not_int32) {
+  // Get exponent alone in scratch.
+  __ Ubfx(scratch,
+          src1,
+          HeapNumber::kExponentShift,
+          HeapNumber::kExponentBits);
+
+  // Subtract the bias from the exponent.
+  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
+
+  // src1: higher (exponent) part of the double value.
+  // src2: lower (mantissa) part of the double value.
+  // scratch: unbiased exponent.
+
+  // Fast cases. Check for obvious non 32-bit integer values.
+  // Negative exponent cannot yield 32-bit integers.
+  __ b(mi, not_int32);
+  // Exponent greater than 31 cannot yield 32-bit integers.
+  // Also, a positive value with an exponent equal to 31 is outside of the
+  // signed 32-bit integer range.
+  __ tst(src1, Operand(HeapNumber::kSignMask));
+  __ cmp(scratch, Operand(30), eq);  // Executed for positive. If exponent is 30
+                                     // the gt condition will be "correct" and
+                                     // the next instruction will be skipped.
+  __ cmp(scratch, Operand(31), ne);  // Executed for negative and positive where
+                                     // exponent is not 30.
+  __ b(gt, not_int32);
+  // - Bits [21:0] in the mantissa are not null.
+  __ tst(src2, Operand(0x3fffff));
+  __ b(ne, not_int32);
+
+  // Otherwise the exponent needs to be big enough to shift left all the
+  // non zero bits left. So we need the (30 - exponent) last bits of the
+  // 31 higher bits of the mantissa to be null.
+  // Because bits [21:0] are null, we can check instead that the
+  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
+
+  // Get the 32 higher bits of the mantissa in dst.
+  __ Ubfx(dst,
+          src2,
+          HeapNumber::kMantissaBitsInTopWord,
+          32 - HeapNumber::kMantissaBitsInTopWord);
+  __ orr(dst,
+         dst,
+         Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+
+  // Create the mask and test the lower bits (of the higher bits).
+  __ rsb(scratch, scratch, Operand(32));
+  __ mov(src2, Operand(1));
+  __ mov(src1, Operand(src2, LSL, scratch));
+  __ sub(src1, src1, Operand(1));
+  __ tst(dst, src1);
+  __ b(ne, not_int32);
+}
+
+
+void FloatingPointHelper::CallCCodeForDoubleOperation(
+    MacroAssembler* masm,
+    Token::Value op,
+    Register heap_number_result,
+    Register scratch) {
+  // Using core registers:
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+
+  // Assert that heap_number_result is callee-saved.
+  // We currently always use r5 to pass it.
+  ASSERT(heap_number_result.is(r5));
+
+  // Push the current return address before the C call. Return will be
+  // through pop(pc) below.
+  __ push(lr);
+  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
+  // Call C routine that may not cause GC or other trouble.
+  __ CallCFunction(ExternalReference::double_fp_operation(op), 4);
+  // Store answer in the overwritable heap number.
+#if !defined(USE_ARM_EABI)
+  // Double returned in fp coprocessor register 0 and 1, encoded as
+  // register cr8. Offsets must be divisible by 4 for coprocessor so we
+  // need to subtract the tag from heap_number_result.
+  __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
+  __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
+#else
+  // Double returned in registers 0 and 1.
+  __ Strd(r0, r1, FieldMemOperand(heap_number_result,
+                                  HeapNumber::kValueOffset));
+#endif
+  // Place heap_number_result in r0 and return to the pushed return address.
+  __ mov(r0, Operand(heap_number_result));
+  __ pop(pc);
+}
+
 
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
@@ -1296,6 +1682,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
 // This stub does not handle the inlined cases (Smis, Booleans, undefined).
 // The stub returns zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub uses VFP3 instructions.
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+
   Label false_result;
   Label not_heap_number;
   Register scratch = r9.is(tos_) ? r7 : r9;
@@ -2704,33 +3093,11 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
         __ add(r0, r0, Operand(kHeapObjectTag));
         __ Ret();
       } else {
-        // Using core registers:
-        // r0: Left value (least significant part of mantissa).
-        // r1: Left value (sign, exponent, top of mantissa).
-        // r2: Right value (least significant part of mantissa).
-        // r3: Right value (sign, exponent, top of mantissa).
-
-        // Push the current return address before the C call. Return will be
-        // through pop(pc) below.
-        __ push(lr);
-        __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
-        // Call C routine that may not cause GC or other trouble. r5 is callee
-        // save.
-        __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
-        // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-        // Double returned in fp coprocessor register 0 and 1, encoded as
-        // register cr8. Offsets must be divisible by 4 for coprocessor so we
-        // need to substract the tag from r5.
-        __ sub(scratch1, result, Operand(kHeapObjectTag));
-        __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
-#else
-        // Double returned in registers 0 and 1.
-        __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
-#endif
-        // Plase result in r0 and return to the pushed return address.
-        __ mov(r0, Operand(result));
-        __ pop(pc);
+        // Call the C function to handle the double operation.
+        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+                                                         op_,
+                                                         result,
+                                                         scratch1);
       }
       break;
     }
@@ -2776,7 +3143,6 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
         break;
       case Token::SAR:
         // Use only the 5 least significant bits of the shift count.
-        __ and_(r2, r2, Operand(0x1f));
         __ GetLeastBitsFromInt32(r2, r2, 5);
         __ mov(r2, Operand(r3, ASR, r2));
         break;
@@ -2921,9 +3287,290 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   ASSERT(operands_type_ == TRBinaryOpIC::INT32);
 
+  Register left = r1;
+  Register right = r0;
+  Register scratch1 = r7;
+  Register scratch2 = r9;
+  DwVfpRegister double_scratch = d0;
+  SwVfpRegister single_scratch = s3;
+
+  Register heap_number_result = no_reg;
+  Register heap_number_map = r6;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  Label call_runtime;
+  // Labels for type transition, used for wrong input or output types.
+  // Both labels are currently actually bound to the same position. We use two
+  // different labels to differentiate the cause leading to type transition.
+  Label transition;
+
+  // Smi-smi fast case.
+  Label skip;
+  __ orr(scratch1, left, right);
+  __ JumpIfNotSmi(scratch1, &skip);
+  GenerateSmiSmiOperation(masm);
+  // Fall through if the result is not a smi.
+  __ bind(&skip);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Load both operands and check that they are 32-bit integer.
+      // Jump to type transition if they are not. The registers r0 and r1 (right
+      // and left) are preserved for the runtime call.
+      FloatingPointHelper::Destination destination =
+          CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
+          FloatingPointHelper::kVFPRegisters :
+          FloatingPointHelper::kCoreRegisters;
+
+      FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                   right,
+                                                   destination,
+                                                   d7,
+                                                   r2,
+                                                   r3,
+                                                   heap_number_map,
+                                                   scratch1,
+                                                   scratch2,
+                                                   s0,
+                                                   &transition);
+      FloatingPointHelper::LoadNumberAsInt32Double(masm,
+                                                   left,
+                                                   destination,
+                                                   d6,
+                                                   r4,
+                                                   r5,
+                                                   heap_number_map,
+                                                   scratch1,
+                                                   scratch2,
+                                                   s0,
+                                                   &transition);
+
+      if (destination == FloatingPointHelper::kVFPRegisters) {
+        CpuFeatures::Scope scope(VFP3);
+        Label return_heap_number;
+        switch (op_) {
+          case Token::ADD:
+            __ vadd(d5, d6, d7);
+            break;
+          case Token::SUB:
+            __ vsub(d5, d6, d7);
+            break;
+          case Token::MUL:
+            __ vmul(d5, d6, d7);
+            break;
+          case Token::DIV:
+            __ vdiv(d5, d6, d7);
+            break;
+          default:
+            UNREACHABLE();
+        }
+
+        if (op_ != Token::DIV) {
+          // These operations produce an integer result.
+          // Try to return a smi if we can.
+          // Otherwise return a heap number if allowed, or jump to type
+          // transition.
+          __ EmitVFPTruncate(kRoundToZero,
+                             single_scratch,
+                             d5,
+                             scratch1,
+                             scratch2);
+
+          if (result_type_ <= TRBinaryOpIC::INT32) {
+            // If the ne condition is set, result does
+            // not fit in a 32-bit integer.
+            __ b(ne, &transition);
+          }
+
+          // Check if the result fits in a smi.
+          __ vmov(scratch1, single_scratch);
+          __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
+          // If not try to return a heap number.
+          __ b(mi, &return_heap_number);
+          // Tag the result and return.
+          __ SmiTag(r0, scratch1);
+          __ Ret();
+        }
+
+        if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
+                                                : TRBinaryOpIC::INT32) {
+          __ bind(&return_heap_number);
+          // We are using vfp registers so r5 is available.
+          heap_number_result = r5;
+          GenerateHeapResultAllocation(masm,
+                                       heap_number_result,
+                                       heap_number_map,
+                                       scratch1,
+                                       scratch2,
+                                       &call_runtime);
+          __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+          __ vstr(d5, r0, HeapNumber::kValueOffset);
+          __ mov(r0, heap_number_result);
+          __ Ret();
+        }
+
+        // A DIV operation expecting an integer result falls through
+        // to type transition.
+
+      } else {
+        // We preserved r0 and r1 to be able to call runtime.
+        // Save the left value on the stack.
+        __ Push(r5, r4);
+
+        // Allocate a heap number to store the result.
+        heap_number_result = r5;
+        GenerateHeapResultAllocation(masm,
+                                     heap_number_result,
+                                     heap_number_map,
+                                     scratch1,
+                                     scratch2,
+                                     &call_runtime);
+
+        // Load the left value from the value saved on the stack.
+        __ Pop(r1, r0);
+
+        // Call the C function to handle the double operation.
+        FloatingPointHelper::CallCCodeForDoubleOperation(
+            masm, op_, heap_number_result, scratch1);
+      }
+
+      break;
+    }
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      Label return_heap_number;
+      Register scratch3 = r5;
+      // Convert operands to 32-bit integers. Right in r2 and left in r3. The
+      // registers r0 and r1 (right and left) are preserved for the runtime
+      // call.
+      FloatingPointHelper::LoadNumberAsInt32(masm,
+                                             left,
+                                             r3,
+                                             heap_number_map,
+                                             scratch1,
+                                             scratch2,
+                                             scratch3,
+                                             d0,
+                                             &transition);
+      FloatingPointHelper::LoadNumberAsInt32(masm,
+                                             right,
+                                             r2,
+                                             heap_number_map,
+                                             scratch1,
+                                             scratch2,
+                                             scratch3,
+                                             d0,
+                                             &transition);
+
+      // The ECMA-262 standard specifies that, for shift operations, only the
+      // 5 least significant bits of the shift value should be used.
+      switch (op_) {
+        case Token::BIT_OR:
+          __ orr(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_XOR:
+          __ eor(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_AND:
+          __ and_(r2, r3, Operand(r2));
+          break;
+        case Token::SAR:
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r2, Operand(r3, ASR, r2));
+          break;
+        case Token::SHR:
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r2, Operand(r3, LSR, r2), SetCC);
+          // SHR is special because it is required to produce a positive answer.
+          // We only get a negative result if the shift value (r2) is 0.
+          // This result cannot be represented as a signed 32-bit integer, try
+          // to return a heap number if we can.
+          // The non vfp3 code does not support this special case, so jump to
+          // runtime if we don't support it.
+          if (CpuFeatures::IsSupported(VFP3)) {
+            __ b(mi,
+                 (result_type_ <= TRBinaryOpIC::INT32) ? &transition
+                                                       : &return_heap_number);
+          } else {
+            __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
+                                                           : &call_runtime);
+          }
+          break;
+        case Token::SHL:
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r2, Operand(r3, LSL, r2));
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      // Check if the result fits in a smi.
+      __ add(scratch1, r2, Operand(0x40000000), SetCC);
+      // If not try to return a heap number. (We know the result is an int32.)
+      __ b(mi, &return_heap_number);
+      // Tag the result and return.
+      __ SmiTag(r0, r2);
+      __ Ret();
+
+      __ bind(&return_heap_number);
+      if (CpuFeatures::IsSupported(VFP3)) {
+        CpuFeatures::Scope scope(VFP3);
+        heap_number_result = r5;
+        GenerateHeapResultAllocation(masm,
+                                     heap_number_result,
+                                     heap_number_map,
+                                     scratch1,
+                                     scratch2,
+                                     &call_runtime);
+
+        if (op_ != Token::SHR) {
+          // Convert the result to a floating point value.
+          __ vmov(double_scratch.low(), r2);
+          __ vcvt_f64_s32(double_scratch, double_scratch.low());
+        } else {
+          // The result must be interpreted as an unsigned 32-bit integer.
+          __ vmov(double_scratch.low(), r2);
+          __ vcvt_f64_u32(double_scratch, double_scratch.low());
+        }
+
+        // Store the result.
+        __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+        __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
+        __ mov(r0, heap_number_result);
+        __ Ret();
+      } else {
+        // Tail call that writes the int32 in r2 to the heap number in r0, using
+        // r3 as scratch. r0 is preserved and returned.
+        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+        __ TailCallStub(&stub);
+      }
+
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+
+  if (transition.is_linked()) {
+    __ bind(&transition);
     GenerateTypeTransition(masm);
   }
+
+  __ bind(&call_runtime);
+  GenerateCallRuntime(masm);
+}
 
 
 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
   Label not_numbers, call_runtime;
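The SHR special case above exists because ECMAScript's unsigned shift can produce values outside the signed 32-bit range, which the int32 fast path cannot tag as a smi or reinterpret as signed. A plain JavaScript illustration (standard language behavior, not part of this diff):

    -1 >>> 0;   // 4294967295: a uint32 that does not fit in an int32,
                // so the stub must box it as a heap number
    -1 >> 0;    // -1: arithmetic shift stays within the int32 range
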
@@ -5957,11 +6604,10 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
 
 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
-                                    ApiFunction *function) {
+                                    ExternalReference function) {
   __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
                      RelocInfo::CODE_TARGET));
-  __ mov(r2,
-         Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+  __ mov(r2, Operand(function));
   // Push return address (accessible to GC through exit frame pc).
   __ str(pc, MemOperand(sp, 0));
   __ Jump(r2);  // Call the api function.

deps/v8/src/arm/code-stubs-arm.h | 2

@@ -592,7 +592,7 @@ class DirectCEntryStub: public CodeStub {
  public:
   DirectCEntryStub() {}
   void Generate(MacroAssembler* masm);
-  void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+  void GenerateCall(MacroAssembler* masm, ExternalReference function);
   void GenerateCall(MacroAssembler* masm, Register target);
 
  private:

deps/v8/src/arm/codegen-arm.cc | 47

@@ -1938,8 +1938,9 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   frame_->EmitPush(cp);
   frame_->EmitPush(Operand(pairs));
   frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+  frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
 
-  frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+  frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
   // The result is discarded.
 }
@@ -3287,7 +3288,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
       // context slot followed by initialization.
       frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     } else {
-      frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+      frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+      frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
     }
     // Storing a variable must keep the (new) value on the expression
     // stack. This is necessary for compiling assignment expressions.
@@ -3637,7 +3639,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
         Load(key);
         Load(value);
         if (property->emit_store()) {
-          frame_->CallRuntime(Runtime::kSetProperty, 3);
+          frame_->EmitPush(Operand(Smi::FromInt(NONE)));  // PropertyAttributes
+          frame_->CallRuntime(Runtime::kSetProperty, 4);
         } else {
           frame_->Drop(3);
         }
@@ -5170,11 +5173,11 @@ class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
     // Set the bit in the map to indicate that it has been checked safe for
     // default valueOf and set true result.
-    __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+    __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
     __ orr(scratch1_,
            scratch1_,
            Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
-    __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+    __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
     __ mov(map_result_, Operand(1));
     __ jmp(exit_label());
     __ bind(&false_result);
@@ -6674,8 +6677,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
  public:
   DeferredReferenceSetKeyedValue(Register value,
                                  Register key,
-                                 Register receiver)
-      : value_(value), key_(key), receiver_(receiver) {
+                                 Register receiver,
+                                 StrictModeFlag strict_mode)
+      : value_(value),
+        key_(key),
+        receiver_(receiver),
+        strict_mode_(strict_mode) {
     set_comment("[ DeferredReferenceSetKeyedValue");
   }
@@ -6685,6 +6692,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
   Register value_;
   Register key_;
   Register receiver_;
+  StrictModeFlag strict_mode_;
 };
@@ -6706,7 +6714,9 @@ void DeferredReferenceSetKeyedValue::Generate() {
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     // Call keyed store IC. It has the arguments value, key and receiver in r0,
     // r1 and r2.
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+    Handle<Code> ic(Builtins::builtin(
+        (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+                                      : Builtins::KeyedStoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // keyed store has been inlined.
@@ -6724,8 +6734,12 @@ class DeferredReferenceSetNamedValue: public DeferredCode {
  public:
   DeferredReferenceSetNamedValue(Register value,
                                  Register receiver,
-                                 Handle<String> name)
-      : value_(value), receiver_(receiver), name_(name) {
+                                 Handle<String> name,
+                                 StrictModeFlag strict_mode)
+      : value_(value),
+        receiver_(receiver),
+        name_(name),
+        strict_mode_(strict_mode) {
     set_comment("[ DeferredReferenceSetNamedValue");
   }
@@ -6735,6 +6749,7 @@ class DeferredReferenceSetNamedValue: public DeferredCode {
   Register value_;
   Register receiver_;
   Handle<String> name_;
+  StrictModeFlag strict_mode_;
 };
@@ -6754,7 +6769,9 @@ void DeferredReferenceSetNamedValue::Generate() {
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     // Call keyed store IC. It has the arguments value, key and receiver in r0,
     // r1 and r2.
-    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    Handle<Code> ic(Builtins::builtin(
+        (strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
+                                      : Builtins::StoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
     // named store has been inlined.
@@ -6943,7 +6960,8 @@ void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
     Register receiver = r1;
 
     DeferredReferenceSetNamedValue* deferred =
-        new DeferredReferenceSetNamedValue(value, receiver, name);
+        new DeferredReferenceSetNamedValue(
+            value, receiver, name, strict_mode_flag());
 
     // Check that the receiver is a heap object.
     __ tst(receiver, Operand(kSmiTagMask));
@@ -7129,7 +7147,8 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type,
     // The deferred code expects value, key and receiver in registers.
     DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue(value, key, receiver);
+        new DeferredReferenceSetKeyedValue(
+            value, key, receiver, strict_mode_flag());
 
     // Check that the value is a smi. As this inlined code does not set the
     // write barrier it is only possible to store smi values.
@@ -7214,7 +7233,7 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type,
     deferred->BindExit();
   } else {
-    frame()->CallKeyedStoreIC();
+    frame()->CallKeyedStoreIC(strict_mode_flag());
   }
 }

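StoreToSlot and DeclareGlobals now forward the function's strict-mode flag to the runtime because contextual stores behave differently under strict mode. An illustration in plain JavaScript (standard ES5 semantics, not code from this commit):

    function sloppy() { undeclared1 = 1; }  // sloppy: assignment creates a global
    sloppy();
    typeof undeclared1;                     // 'number'

    (function () {
      'use strict';
      undeclared2 = 1;                      // strict: throws ReferenceError
    })();
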
deps/v8/src/arm/constants-arm.h | 8

@@ -385,7 +385,10 @@ enum VFPConversionMode {
   kDefaultRoundToZero = 1
 };
 
+// This mask does not include the "inexact" or "input denormal" cumulative
+// exception flags, because we usually don't want to check for them.
 static const uint32_t kVFPExceptionMask = 0xf;
+static const uint32_t kVFPInexactExceptionBit = 1 << 4;
 static const uint32_t kVFPFlushToZeroMask = 1 << 24;
 static const uint32_t kVFPInvalidExceptionBit = 1;
 
@@ -411,6 +414,11 @@ enum VFPRoundingMode {
 
 static const uint32_t kVFPRoundingModeMask = 3 << 22;
 
+enum CheckForInexactConversion {
+  kCheckForInexactConversion,
+  kDontCheckForInexactConversion
+};
+
 // -----------------------------------------------------------------------------
 // Hints.

deps/v8/src/arm/full-codegen-arm.cc | 766

File diff suppressed because it is too large

deps/v8/src/arm/ic-arm.cc | 27

@@ -1400,7 +1400,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
 }
 
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                              StrictModeFlag strict_mode) {
   // ---------- S t a t e --------------
   //  -- r0    : value
   //  -- r1    : key
@@ -1411,11 +1412,16 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
 
   // Push receiver, key and value for runtime call.
   __ Push(r2, r1, r0);
 
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ mov(r1, Operand(Smi::FromInt(NONE)));         // PropertyAttributes
+  __ mov(r0, Operand(Smi::FromInt(strict_mode)));  // Strict mode.
+  __ Push(r1, r0);
+
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
 
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictModeFlag strict_mode) {
   // ---------- S t a t e --------------
   //  -- r0    : value
   //  -- r1    : key
@@ -1470,7 +1476,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // r0: value.
   // r1: key.
   // r2: receiver.
-  GenerateRuntimeSetProperty(masm);
+  GenerateRuntimeSetProperty(masm, strict_mode);
 
   // Check whether the elements is a pixel array.
   // r4: elements map.
@@ -1540,7 +1546,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                  Code::ExtraICState extra_ic_state) {
+                                  StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -1552,7 +1558,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
   Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC,
-                                         extra_ic_state);
+                                         strict_mode);
   StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
 
   // Cache miss: Jump to runtime.
@@ -1646,7 +1652,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
 }
 
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+                                  StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -1656,8 +1663,12 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
 
   __ Push(r1, r2, r0);
 
+  __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
+  __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+  __ Push(r1, r0);
+
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }

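The keyed and global-proxy store paths now pass two extra arguments to Runtime::kSetProperty: the property attributes (NONE) and the strict-mode flag. At the language level the flag decides whether a failed keyed store is silent or throws (standard ES5 behavior, not part of this diff):

    var arr = Object.freeze([1, 2, 3]);

    arr[0] = 9;         // sloppy mode: ignored
    arr[0];             // still 1

    (function () {
      'use strict';
      arr[0] = 9;       // strict mode: throws TypeError
    })();
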
deps/v8/src/arm/lithium-arm.cc | 3

@@ -1154,8 +1154,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
       new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
-  MarkAsSaveDoubles(result);
-  return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0)));
+  return MarkAsCall(DefineFixed(result, r0), instr);
 }

74
deps/v8/src/arm/lithium-codegen-arm.cc

@ -573,7 +573,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationInputData> data = Handle<DeoptimizationInputData> data =
Factory::NewDeoptimizationInputData(length, TENURED); Factory::NewDeoptimizationInputData(length, TENURED);
data->SetTranslationByteArray(*translations_.CreateByteArray()); Handle<ByteArray> translations = translations_.CreateByteArray();
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
Handle<FixedArray> literals = Handle<FixedArray> literals =
@ -1985,11 +1986,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ BlockConstPoolFor(kAdditionalDelta); __ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize)); __ mov(temp, Operand(delta * kPointerSize));
__ StoreToSafepointRegisterSlot(temp, temp); __ StoreToSafepointRegisterSlot(temp, temp);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
ASSERT_EQ(kAdditionalDelta,
masm_->InstructionsGeneratedSince(&before_push_delta));
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
// Put the result value into the result register slot and // Put the result value into the result register slot and
// restore all registers. // restore all registers.
__ StoreToSafepointRegisterSlot(result, result); __ StoreToSafepointRegisterSlot(result, result);
@ -2586,41 +2583,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
} }
// Truncates a double using a specific rounding mode.
// Clears the z flag (ne condition) if an overflow occurs.
void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
SwVfpRegister result,
DwVfpRegister double_input,
Register scratch1,
Register scratch2) {
Register prev_fpscr = scratch1;
Register scratch = scratch2;
// Set custom FPCSR:
// - Set rounding mode.
// - Clear vfp cumulative exception flags.
// - Make sure Flush-to-zero mode control bit is unset.
__ vmrs(prev_fpscr);
__ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
kVFPRoundingModeMask |
kVFPFlushToZeroMask));
__ orr(scratch, scratch, Operand(rounding_mode));
__ vmsr(scratch);
// Convert the argument to an integer.
__ vcvt_s32_f64(result,
double_input,
kFPSCRRounding);
// Retrieve FPSCR.
__ vmrs(scratch);
// Restore FPSCR.
__ vmsr(prev_fpscr);
// Check for vfp exceptions.
__ tst(scratch, Operand(kVFPExceptionMask));
}
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
@@ -2628,7 +2590,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
-  EmitVFPTruncate(kRoundToMinusInf,
  __ EmitVFPTruncate(kRoundToMinusInf,
                     single_scratch,
                     input,
                     scratch1,
@@ -2654,7 +2616,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = result;
-  EmitVFPTruncate(kRoundToNearest,
  __ EmitVFPTruncate(kRoundToNearest,
                     double_scratch0().low(),
                     input,
                     scratch1,
@@ -2863,8 +2825,8 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic(Builtins::builtin(info_->is_strict()
-      ? Builtins::StoreIC_Initialize_Strict
-      : Builtins::StoreIC_Initialize));
  Handle<Code> ic(Builtins::builtin(
      info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
                         : Builtins::StoreIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2907,7 +2869,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->key()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
  Handle<Code> ic(Builtins::builtin(
      info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
                         : Builtins::KeyedStoreIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3371,21 +3335,26 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
-  VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
-                                                      : kRoundToNearest;
-  EmitVFPTruncate(rounding_mode,
  __ EmitVFPTruncate(kRoundToZero,
                     single_scratch,
                     double_input,
                     scratch1,
                     scratch2);
  // Deoptimize if we had a vfp invalid exception.
  DeoptimizeIf(ne, instr->environment());
  // Retrieve the result.
  __ vmov(result_reg, single_scratch);
-  if (instr->truncating() &&
-      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
  if (!instr->truncating()) {
    // Convert result back to double and compare with input
    // to check if the conversion was exact.
    __ vmov(single_scratch, result_reg);
    __ vcvt_f64_s32(double_scratch0(), single_scratch);
    __ VFPCompareAndSetFlags(double_scratch0(), double_input);
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand(0));
      __ b(ne, &done);
@@ -3397,6 +3366,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
      __ bind(&done);
    }
  }
}
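The rewritten DoDoubleToI always converts with round-to-zero and, when the instruction is not truncating, deoptimizes unless the conversion round-trips exactly (plus the usual minus-zero bailout). The shape of that check in plain scalar C++, as an illustrative sketch (DoubleToI32Exact is a made-up helper, not a V8 API):

    #include <cmath>
    #include <cstdint>

    // Mirrors the convert / convert-back / compare idea above: returns false
    // where the generated code would deoptimize. (Illustrative only.)
    bool DoubleToI32Exact(double input, int32_t* result) {
      double truncated = std::trunc(input);  // kRoundToZero
      if (truncated < INT32_MIN || truncated > INT32_MAX) return false;  // invalid op
      int32_t as_int = static_cast<int32_t>(truncated);
      // Convert back to double and compare with the input; any difference
      // means the conversion was inexact, so a non-truncating op must bail.
      if (static_cast<double>(as_int) != input) return false;
      // A zero result still fails on -0.0 when kBailoutOnMinusZero is set.
      if (as_int == 0 && std::signbit(input)) return false;
      *result = as_int;
      return true;
    }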
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {

deps/v8/src/arm/lithium-codegen-arm.h (5 changes)

@@ -206,11 +206,6 @@ class LCodeGen BASE_EMBEDDED {
  // Specific math operations - used from DoUnaryMathOperation.
  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
  void DoMathAbs(LUnaryMathOperation* instr);
-  void EmitVFPTruncate(VFPRoundingMode rounding_mode,
-                       SwVfpRegister result,
-                       DwVfpRegister double_input,
-                       Register scratch1,
-                       Register scratch2);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathRound(LUnaryMathOperation* instr);
  void DoMathSqrt(LUnaryMathOperation* instr);

deps/v8/src/arm/macro-assembler-arm.cc (125 changes)

@@ -271,6 +271,29 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
}
void MacroAssembler::Bfi(Register dst,
Register src,
Register scratch,
int lsb,
int width,
Condition cond) {
ASSERT(0 <= lsb && lsb < 32);
ASSERT(0 <= width && width < 32);
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
if (width == 0) return;
if (!CpuFeatures::IsSupported(ARMv7)) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
mov(scratch, Operand(scratch, LSL, lsb));
orr(dst, dst, scratch);
} else {
bfi(dst, src, lsb, width, cond);
}
}
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {
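On pre-ARMv7 cores the new Bfi above is synthesized from bic/and/lsl/orr mask arithmetic. The same math as a standalone C++ sketch (BitfieldInsert is a hypothetical helper, not a V8 function):

    #include <cstdint>

    // Insert the low 'width' bits of src into dst at bit position 'lsb',
    // following the mask sequence emitted in the fallback path above.
    // Assumes 0 <= lsb, 0 < width, and lsb + width < 32, as the ASSERTs require.
    uint32_t BitfieldInsert(uint32_t dst, uint32_t src, int lsb, int width) {
      uint32_t low_mask = (1u << width) - 1;     // 'width' ones
      dst &= ~(low_mask << lsb);                 // bic: clear the target field
      uint32_t field = (src & low_mask) << lsb;  // and + lsl: position the bits
      return dst | field;                        // orr: merge
    }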
@@ -1618,7 +1641,7 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-    ApiFunction* function, int stack_space) {
    ExternalReference function, int stack_space) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address();
  const int kNextOffset = 0;
@@ -1883,6 +1906,52 @@ void MacroAssembler::ConvertToInt32(Register source,
}
void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
SwVfpRegister result,
DwVfpRegister double_input,
Register scratch1,
Register scratch2,
CheckForInexactConversion check_inexact) {
ASSERT(CpuFeatures::IsSupported(VFP3));
CpuFeatures::Scope scope(VFP3);
Register prev_fpscr = scratch1;
Register scratch = scratch2;
int32_t check_inexact_conversion =
(check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0;
// Set custom FPCSR:
// - Set rounding mode.
// - Clear vfp cumulative exception flags.
// - Make sure Flush-to-zero mode control bit is unset.
vmrs(prev_fpscr);
bic(scratch,
prev_fpscr,
Operand(kVFPExceptionMask |
check_inexact_conversion |
kVFPRoundingModeMask |
kVFPFlushToZeroMask));
// 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
if (rounding_mode != kRoundToNearest) {
orr(scratch, scratch, Operand(rounding_mode));
}
vmsr(scratch);
// Convert the argument to an integer.
vcvt_s32_f64(result,
double_input,
(rounding_mode == kRoundToZero) ? kDefaultRoundToZero
: kFPSCRRounding);
// Retrieve FPSCR.
vmrs(scratch);
// Restore FPSCR.
vmsr(prev_fpscr);
// Check for vfp exceptions.
tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
}
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                         Register src,
                                         int num_least_bits) {
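The new EmitVFPTruncate saves FPSCR, clears the cumulative exception (and optionally inexact) flags plus the rounding-mode and flush-to-zero bits, sets the requested mode, converts, then tests the exception bits it reads back. Portable C++ expresses the same idea with <cfenv>; this is only an analogy for the FPSCR handling, not what V8 itself does:

    #include <cfenv>
    #include <cmath>
    #include <cstdint>

    // Convert under an explicit rounding mode (e.g. FE_TOWARDZERO) and report
    // success iff no invalid-operation (and, optionally, inexact) flag was
    // raised, matching the final tst above leaving the z flag set.
    bool TruncateWithMode(double input, int rounding_mode, bool check_inexact,
                          int32_t* out) {
      int saved_mode = std::fegetround();              // like vmrs(prev_fpscr)
      std::fesetround(rounding_mode);
      std::feclearexcept(FE_ALL_EXCEPT);               // clear cumulative flags
      *out = static_cast<int32_t>(std::lrint(input));  // like vcvt_s32_f64
      int raised = std::fetestexcept(
          FE_INVALID | (check_inexact ? FE_INEXACT : 0));
      std::fesetround(saved_mode);                     // like vmsr(prev_fpscr)
      return raised == 0;
    }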
@@ -2389,6 +2458,60 @@ void MacroAssembler::CopyFields(Register dst,
}
void MacroAssembler::CopyBytes(Register src,
Register dst,
Register length,
Register scratch) {
Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
// Align src before copying in word size chunks.
bind(&align_loop);
cmp(length, Operand(0));
b(eq, &done);
bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
b(eq, &word_loop);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
b(ne, &byte_loop_1);
// Copy bytes in word size chunks.
bind(&word_loop);
if (FLAG_debug_code) {
tst(src, Operand(kPointerSize - 1));
Assert(eq, "Expecting alignment for CopyBytes");
}
cmp(length, Operand(kPointerSize));
b(lt, &byte_loop);
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
#if CAN_USE_UNALIGNED_ACCESSES
str(scratch, MemOperand(dst, kPointerSize, PostIndex));
#else
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
mov(scratch, Operand(scratch, LSR, 8));
strb(scratch, MemOperand(dst, 1, PostIndex));
#endif
sub(length, length, Operand(kPointerSize));
b(&word_loop);
// Copy the last bytes if any left.
bind(&byte_loop);
cmp(length, Operand(0));
b(eq, &done);
bind(&byte_loop_1);
ldrb(scratch, MemOperand(src, 1, PostIndex));
strb(scratch, MemOperand(dst, 1, PostIndex));
sub(length, length, Operand(1), SetCC);
b(ne, &byte_loop_1);
bind(&done);
}
void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                       Register source,  // Input.
                                       Register scratch) {
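CopyBytes above runs three loops: byte copies until src is word aligned, then word-at-a-time copies (with a byte-store fallback where unaligned stores are not allowed), then a byte loop for the tail. The same structure in plain C++ (a sketch; CopyBytesSketch is not a V8 function):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CopyBytesSketch(const uint8_t* src, uint8_t* dst, size_t length) {
      const size_t kWord = sizeof(uintptr_t);
      // align_loop: copy single bytes until src lies on a word boundary.
      while (length > 0 &&
             (reinterpret_cast<uintptr_t>(src) & (kWord - 1)) != 0) {
        *dst++ = *src++;
        length--;
      }
      // word_loop: src is now aligned; move whole words while any remain.
      while (length >= kWord) {
        uintptr_t word;
        std::memcpy(&word, src, kWord);  // aligned load (ldr)
        std::memcpy(dst, &word, kWord);  // dst may be unaligned (str, or strb x4)
        src += kWord;
        dst += kWord;
        length -= kWord;
      }
      // byte_loop: copy the remaining tail bytes.
      while (length-- > 0) *dst++ = *src++;
    }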

deps/v8/src/arm/macro-assembler-arm.h (51 changes)

@@ -121,6 +121,15 @@ class MacroAssembler: public Assembler {
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
// The scratch register is not used for ARMv7.
// scratch can be the same register as src (in which case it is trashed), but
// not the same as dst.
void Bfi(Register dst,
Register src,
Register scratch,
int lsb,
int width,
Condition cond = al);
  void Bfc(Register dst, int lsb, int width, Condition cond = al);
  void Usat(Register dst, int satpos, const Operand& src,
            Condition cond = al);
@@ -234,6 +243,17 @@ class MacroAssembler: public Assembler {
    }
  }
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
if (src1.code() > src2.code()) {
ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
} else {
ldr(src2, MemOperand(sp, 4, PostIndex), cond);
ldr(src1, MemOperand(sp, 4, PostIndex), cond);
}
}
  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
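The single-ldm fast path in the new Pop is only valid one way around because ldm always loads the lowest-numbered register from the lowest address. Since Pop(src1, src2) must give src2 the value at the top of the stack (the lower address), the ldm form is usable only when src1.code() > src2.code(); otherwise two post-indexed loads preserve the order. A toy model of that pairing (illustrative C++, not simulator code):

    #include <cstdint>

    // 'ldm ia' semantics: ascending register numbers take ascending addresses,
    // so the lower-numbered register always receives the stack top.
    void LdmPopTwo(uint32_t regs[16], const uint32_t* sp, int ra, int rb) {
      int lo = ra < rb ? ra : rb;
      int hi = ra < rb ? rb : ra;
      regs[lo] = sp[0];  // lower address: popped first
      regs[hi] = sp[1];  // higher address: popped second
    }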
@@ -497,6 +517,14 @@ class MacroAssembler: public Assembler {
  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst, Register src, RegList temps, int field_count);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
// read or written and length will be zero.
void CopyBytes(Register src,
Register dst,
Register length,
Register scratch);
  // ---------------------------------------------------------------------------
  // Support functions.
@@ -613,6 +641,19 @@ class MacroAssembler: public Assembler {
                            DwVfpRegister double_scratch,
                            Label *not_int32);
// Truncates a double using a specific rounding mode.
// Clears the z flag (ne condition) if an overflow occurs.
// If exact_conversion is true, the z flag is also cleared if the conversion
// was inexact, ie. if the double value could not be converted exactly
// to a 32bit integer.
void EmitVFPTruncate(VFPRoundingMode rounding_mode,
SwVfpRegister result,
DwVfpRegister double_input,
Register scratch1,
Register scratch2,
CheckForInexactConversion check
= kDontCheckForInexactConversion);
  // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
  // instruction. On pre-ARM5 hardware this routine gives the wrong answer
  // for 0 (31 instead of 32). Source and scratch can be the same in which case
@@ -690,7 +731,7 @@ class MacroAssembler: public Assembler {
  // from handle and propagates exceptions. Restores context.
  // stack_space - space to be unwound on exit (includes the call js
  // arguments space and the additional space allocated for the fast call).
-  MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
                                           int stack_space);
  // Jump to a runtime routine.
@@ -777,11 +818,11 @@ class MacroAssembler: public Assembler {
    mov(reg, scratch);
  }
-  void SmiUntag(Register reg) {
-    mov(reg, Operand(reg, ASR, kSmiTagSize));
  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand(reg, ASR, kSmiTagSize), s);
  }
-  void SmiUntag(Register dst, Register src) {
-    mov(dst, Operand(src, ASR, kSmiTagSize));
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand(src, ASR, kSmiTagSize), s);
  }
  // Jump if the register contains a smi.
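Smis on the 32-bit ports carry a one-bit tag in the low bit, so untagging is a single arithmetic shift; threading an SBit through lets callers get the condition flags set by that same mov. The tag arithmetic in isolation (illustrative C++):

    #include <cstdint>

    const int kSmiTagSize = 1;  // low bit 0 marks a smi on 32-bit targets

    int32_t SmiTag(int32_t value) {
      // Shift the payload up; done on the unsigned bit pattern to avoid UB.
      return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
    }

    int32_t SmiUntag(int32_t smi) {
      return smi >> kSmiTagSize;  // arithmetic shift keeps the sign, like ASR
    }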

deps/v8/src/arm/simulator-arm.cc (65 changes)

@@ -1005,7 +1005,9 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
    return *ptr;
  }
-  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
  PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
         addr,
         reinterpret_cast<intptr_t>(instr));
  UNIMPLEMENTED();
  return 0;
#endif
@@ -1023,7 +1025,9 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
    *ptr = value;
    return;
  }
-  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
  PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
         addr,
         reinterpret_cast<intptr_t>(instr));
  UNIMPLEMENTED();
#endif
}
@@ -1038,7 +1042,9 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
    return *ptr;
  }
-  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
         addr,
         reinterpret_cast<intptr_t>(instr));
  UNIMPLEMENTED();
  return 0;
#endif
@@ -1072,7 +1078,9 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
    *ptr = value;
    return;
  }
-  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
         addr,
         reinterpret_cast<intptr_t>(instr));
  UNIMPLEMENTED();
#endif
}
@@ -1089,7 +1097,9 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
    *ptr = value;
    return;
  }
-  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
  PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
         addr,
         reinterpret_cast<intptr_t>(instr));
  UNIMPLEMENTED();
#endif
}
@@ -1531,7 +1541,11 @@ typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeApiCall)(int32_t arg0);
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);

// This signature supports direct call to accessor getter callback.
typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
                                                                  int32_t arg1);

// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1572,14 +1586,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
        CHECK(stack_aligned);
        double result = target(arg0, arg1, arg2, arg3);
        SetFpResult(result);
-      } else if (redirection->type() == ExternalReference::DIRECT_CALL) {
-        SimulatorRuntimeApiCall target =
-            reinterpret_cast<SimulatorRuntimeApiCall>(external);
      } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
        SimulatorRuntimeDirectApiCall target =
            reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
-          PrintF(
-              "Call to host function at %p args %08x",
-              FUNCTION_ADDR(target),
-              arg0);
          PrintF("Call to host function at %p args %08x",
                 FUNCTION_ADDR(target), arg0);
          if (!stack_aligned) {
            PrintF(" with unaligned stack %08x\n", get_register(sp));
          }
@@ -1591,6 +1603,23 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
          PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
        }
        set_register(r0, (int32_t) *result);
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
FUNCTION_ADDR(target), arg0, arg1);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
v8::Handle<v8::Value> result = target(arg0, arg1);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
set_register(r0, (int32_t) *result);
      } else {
        // builtin call.
        ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@@ -2535,6 +2564,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
        double dn_value = get_double_from_d_register(vn);
        double dm_value = get_double_from_d_register(vm);
        double dd_value = dn_value / dm_value;
        div_zero_vfp_flag_ = (dm_value == 0);
        set_d_register_from_double(vd, dd_value);
      } else {
        UNIMPLEMENTED();  // Not used by V8.
@@ -2769,14 +2799,17 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
    inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);

    double abs_diff =
        unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
                         : fabs(val - temp);
    inexact_vfp_flag_ = (abs_diff != 0);

    if (inv_op_vfp_flag_) {
      temp = VFPConversionSaturate(val, unsigned_integer);
    } else {
      switch (mode) {
        case RN: {
-          double abs_diff =
-              unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
-                               : fabs(val - temp);
          int val_sign = (val > 0) ? 1 : -1;
          if (abs_diff > 0.5) {
            temp += val_sign;
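Hoisting abs_diff out of the round-to-nearest case lets the simulator set the new inexact_vfp_flag_ for every rounding mode: a conversion is inexact exactly when converting back leaves a nonzero difference. In isolation (illustrative C++):

    #include <cmath>
    #include <cstdint>

    // The signed flavor of the check above; the unsigned_integer branch
    // compares against static_cast<uint32_t>(temp) instead.
    bool ConversionWasInexact(double val, int32_t temp) {
      return std::fabs(val - temp) != 0.0;
    }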

deps/v8/src/arm/stub-cache-arm.cc (62 changes)

@@ -655,12 +655,10 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
  // already generated).  Do not allow the assembler to perform a
  // garbage collection but instead return the allocation failure
  // object.
-  MaybeObject* result = masm->TryCallApiFunctionAndReturn(
-      &fun, argc + kFastApiCallArguments + 1);
-  if (result->IsFailure()) {
-    return result;
-  }
-  return Heap::undefined_value();
  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
  ExternalReference ref =
      ExternalReference(&fun, ExternalReference::DIRECT_API_CALL);
  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}

class CallInterceptorCompiler BASE_EMBEDDED {
@@ -1245,18 +1243,38 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
  CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
                  name, miss);
-  // Push the arguments on the JS stack of the caller.
-  __ push(receiver);  // Receiver.
-  __ mov(scratch3, Operand(Handle<AccessorInfo>(callback)));  // callback data
-  __ ldr(ip, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
-  __ Push(reg, ip, scratch3, name_reg);
  // Build AccessorInfo::args_ list on the stack and push property name below
  // the exit frame to make GC aware of them and store pointers to them.
  __ push(receiver);
  __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
  Handle<AccessorInfo> callback_handle(callback);
  if (Heap::InNewSpace(callback_handle->data())) {
    __ Move(scratch3, callback_handle);
    __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
  } else {
    __ Move(scratch3, Handle<Object>(callback_handle->data()));
  }
  __ Push(reg, scratch3, name_reg);
  __ mov(r0, sp);  // r0 = Handle<String>

-  // Do tail-call to the runtime system.
-  ExternalReference load_callback_property =
-      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-  __ TailCallExternalReference(load_callback_property, 5, 1);
-  return Heap::undefined_value();  // Success.
  Address getter_address = v8::ToCData<Address>(callback->getter());
  ApiFunction fun(getter_address);

  const int kApiStackSpace = 1;
  __ EnterExitFrame(false, kApiStackSpace);

  // Create AccessorInfo instance on the stack above the exit frame with
  // scratch2 (internal::Object **args_) as the data.
  __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&

  // Emitting a stub call may try to allocate (if the code is not
  // already generated).  Do not allow the assembler to perform a
  // garbage collection but instead return the allocation failure
  // object.
  const int kStackUnwindSpace = 4;
  ExternalReference ref =
      ExternalReference(&fun, ExternalReference::DIRECT_GETTER_CALL);
  return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
}
@@ -2653,10 +2671,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
  __ Push(r1, r2, r0);  // Receiver, name, value.

  __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
  __ push(r0);  // strict mode

  // Do tail-call to the runtime system.
  ExternalReference store_ic_property =
      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
-  __ TailCallExternalReference(store_ic_property, 3, 1);
  __ TailCallExternalReference(store_ic_property, 4, 1);

  // Handle store cache miss.
  __ bind(&miss);
@@ -4038,7 +4059,12 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
  __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
  __ mov(r0, Operand(Smi::FromInt(
      Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
  __ Push(r1, r0);

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);

  return GetCode(flags);
}

deps/v8/src/arm/virtual-frame-arm.cc (10 changes)

@@ -332,8 +332,8 @@ void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
void VirtualFrame::CallStoreIC(Handle<String> name,
                               bool is_contextual,
                               StrictModeFlag strict_mode) {
-  Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
-      ? Builtins::StoreIC_Initialize_Strict
-      : Builtins::StoreIC_Initialize));
  Handle<Code> ic(Builtins::builtin(
      (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
                                   : Builtins::StoreIC_Initialize));
  PopToR0();
  RelocInfo::Mode mode;
@@ -359,8 +359,10 @@ void VirtualFrame::CallKeyedLoadIC() {
}

-void VirtualFrame::CallKeyedStoreIC() {
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
  Handle<Code> ic(Builtins::builtin(
      (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
                                   : Builtins::KeyedStoreIC_Initialize));
  PopToR1R0();
  SpillAll();
  EmitPop(r2);

deps/v8/src/arm/virtual-frame-arm.h (2 changes)

@@ -303,7 +303,7 @@ class VirtualFrame : public ZoneObject {
  // Call keyed store IC.  Value, key and receiver are on the stack.  All three
  // are consumed.  Result is returned in r0.
-  void CallKeyedStoreIC();
  void CallKeyedStoreIC(StrictModeFlag strict_mode);

  // Call into an IC stub given the number of arguments it removes
  // from the stack.  Register arguments to the IC stub are implicit,

deps/v8/src/array.js (9 changes)

@@ -418,7 +418,6 @@ function ArrayPush() {
function ArrayConcat(arg1) {  // length == 1
-  // TODO: can we just use arguments?
  var arg_count = %_ArgumentsLength();
  var arrays = new $Array(1 + arg_count);
  arrays[0] = this;
@@ -1018,13 +1017,13 @@ function ArrayIndexOf(element, index) {
  }
  var min = index;
  var max = length;
-  if (UseSparseVariant(this, length, true)) {
  if (UseSparseVariant(this, length, IS_ARRAY(this))) {
    var intervals = %GetArrayKeys(this, length);
    if (intervals.length == 2 && intervals[0] < 0) {
      // A single interval.
      var intervalMin = -(intervals[0] + 1);
      var intervalMax = intervalMin + intervals[1];
-      min = MAX(min, intervalMin);
      if (min < intervalMin) min = intervalMin;
      max = intervalMax;  // Capped by length already.
      // Fall through to loop below.
    } else {
@@ -1074,13 +1073,13 @@ function ArrayLastIndexOf(element, index) {
  }
  var min = 0;
  var max = index;
-  if (UseSparseVariant(this, length, true)) {
  if (UseSparseVariant(this, length, IS_ARRAY(this))) {
    var intervals = %GetArrayKeys(this, index + 1);
    if (intervals.length == 2 && intervals[0] < 0) {
      // A single interval.
      var intervalMin = -(intervals[0] + 1);
      var intervalMax = intervalMin + intervals[1];
-      min = MAX(min, intervalMin);
      if (min < intervalMin) min = intervalMin;
      max = intervalMax;  // Capped by index already.
      // Fall through to loop below.
    } else {
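Both hunks lean on the compact result of %GetArrayKeys: a two-element result whose first entry is negative describes one dense interval, with the start encoded as -(first + 1) and the second entry giving the length. Decoding that, sketched in C++ for consistency with the other examples (names are illustrative):

    #include <cstdint>

    // Decode the single-interval form; returns false for the key-list form.
    bool DecodeSingleInterval(int32_t first, int32_t second,
                              int32_t* interval_min, int32_t* interval_max) {
      if (first >= 0) return false;            // not the single-interval encoding
      *interval_min = -(first + 1);            // intervalMin above
      *interval_max = *interval_min + second;  // intervalMax, capped by caller
      return true;
    }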

deps/v8/src/assembler.cc (2 changes)

@@ -252,7 +252,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
      WriteExtraTaggedPC(pc_delta, kPCJumpTag);
      WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
      last_data_ = rinfo->data();
-      ASSERT(begin_pos - pos_ == RelocInfo::kRelocCommentSize);
      ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
    } else {
      // For all other modes we simply use the mode as the extra tag.
      // None of these modes need a data component.

deps/v8/src/assembler.h (31 changes)

@@ -184,10 +184,10 @@ class RelocInfo BASE_EMBEDDED {
  // we do not normally record relocation info.
  static const char* kFillerCommentString;

-  // The size of a comment is equal to tree bytes for the extra tagged pc +
-  // the tag for the data, and kPointerSize for the actual pointer to the
-  // comment.
-  static const int kRelocCommentSize = 3 + kPointerSize;
  // The minimum size of a comment is equal to three bytes for the extra tagged
  // pc + the tag for the data, and kPointerSize for the actual pointer to the
  // comment.
  static const int kMinRelocCommentSize = 3 + kPointerSize;

  // The maximum size for a call instruction including pc-jump.
  static const int kMaxCallSize = 6;
@@ -481,21 +481,22 @@ class Debug_Address;
class ExternalReference BASE_EMBEDDED {
 public:
  // Used in the simulator to support different native api calls.
-  //
-  // BUILTIN_CALL - builtin call.
-  // MaybeObject* f(v8::internal::Arguments).
-  //
-  // FP_RETURN_CALL - builtin call that returns floating point.
-  // double f(double, double).
-  //
-  // DIRECT_CALL - direct call to API function native callback
-  // from generated code.
-  // Handle<Value> f(v8::Arguments&)
-  //
  enum Type {
    // Builtin call.
    // MaybeObject* f(v8::internal::Arguments).
    BUILTIN_CALL,  // default

    // Builtin call that returns floating point.
    // double f(double, double).
    FP_RETURN_CALL,

-    DIRECT_CALL
    // Direct call to API function callback.
    // Handle<Value> f(v8::Arguments&)
    DIRECT_API_CALL,

    // Direct call to accessor getter callback.
    // Handle<value> f(Local<String> property, AccessorInfo& info)
    DIRECT_GETTER_CALL
  };

  typedef void* ExternalReferenceRedirector(void* original, Type type);
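This Type value is what the simulator's redirection logic switches on (see the SoftwareInterrupt hunk earlier): each enumerator fixes the C signature used to invoke the redirected host function. A stripped-down model of that dispatch (illustrative C++; the real signatures appear in the enum comments above):

    #include <cstdint>

    enum Type { BUILTIN_CALL, FP_RETURN_CALL, DIRECT_API_CALL, DIRECT_GETTER_CALL };

    typedef void* (*DirectApiCall)(int32_t arg0);
    typedef void* (*DirectGetterCall)(int32_t arg0, int32_t arg1);

    // Choose the call signature from the recorded redirection type before
    // invoking the host function, as Simulator::SoftwareInterrupt does.
    void* Dispatch(Type type, void* external, int32_t arg0, int32_t arg1) {
      switch (type) {
        case DIRECT_API_CALL:     // InvocationCallback: one argument
          return reinterpret_cast<DirectApiCall>(external)(arg0);
        case DIRECT_GETTER_CALL:  // getter: property name plus AccessorInfo
          return reinterpret_cast<DirectGetterCall>(external)(arg0, arg1);
        default:
          return nullptr;         // builtin and FP calls use other signatures
      }
    }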

deps/v8/src/builtins.cc (20 changes)

@@ -1328,12 +1328,12 @@ static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
-  StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICNonStrict);
  StoreIC::GenerateMegamorphic(masm, kNonStrictMode);
}

static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICStrict);
  StoreIC::GenerateMegamorphic(masm, kStrictMode);
}
@@ -1348,17 +1348,22 @@ static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) {
static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
-  StoreIC::GenerateGlobalProxy(masm);
  StoreIC::GenerateGlobalProxy(masm, kNonStrictMode);
}

static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateGlobalProxy(masm);
  StoreIC::GenerateGlobalProxy(masm, kStrictMode);
}

static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateGeneric(masm);
  KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
}

static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
  KeyedStoreIC::GenerateGeneric(masm, kStrictMode);
}
@@ -1372,6 +1377,11 @@ static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
}

static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
  KeyedStoreIC::GenerateInitialize(masm);
}

#ifdef ENABLE_DEBUGGER_SUPPORT
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
  Debug::GenerateLoadICDebugBreak(masm);

deps/v8/src/builtins.h (15 changes)

@@ -136,21 +136,26 @@ enum BuiltinExtraArguments {
  V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC,                     \
                                    Code::kNoExtraICState)          \
  V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED,             \
-                                   StoreIC::kStoreICStrict)        \
                                    kStrictMode)                    \
  V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC,              \
-                                   StoreIC::kStoreICStrict)        \
                                    kStrictMode)                    \
  V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC,                   \
-                                   StoreIC::kStoreICStrict)        \
                                    kStrictMode)                    \
  V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC,              \
-                                   StoreIC::kStoreICStrict)        \
                                    kStrictMode)                    \
  V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC,              \
-                                   StoreIC::kStoreICStrict)        \
                                    kStrictMode)                    \
                                                                    \
  V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED,         \
                                    Code::kNoExtraICState)          \
  V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC,              \
                                    Code::kNoExtraICState)          \
                                                                    \
  V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED,  \
                                    kStrictMode)                    \
  V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC,       \
                                    kStrictMode)                    \
                                                                    \
  /* Uses KeyedLoadIC_Initialize; must be after in list. */         \
  V(FunctionCall, BUILTIN, UNINITIALIZED,                           \
                                    Code::kNoExtraICState)          \

deps/v8/src/compiler.cc (7 changes)

@@ -221,11 +221,12 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
  // or perform on-stack replacement for function with too many
  // stack-allocated local variables.
  //
-  // The encoding is as a signed value, with parameters using the negative
-  // indices and locals the non-negative ones.
  // The encoding is as a signed value, with parameters and receiver using
  // the negative indices and locals the non-negative ones.
  const int limit = LUnallocated::kMaxFixedIndices / 2;
  Scope* scope = info->scope();
-  if (scope->num_parameters() > limit || scope->num_stack_slots() > limit) {
  if ((scope->num_parameters() + 1) > limit ||
      scope->num_stack_slots() > limit) {
    AbortAndDisable(info);
    // True indicates the compilation pipeline is still going, not
    // necessarily that we optimized the code.
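The limit exists because parameter and local indices share one signed space: the receiver and parameters count down through the negative indices while locals count up through the non-negative ones, so each side gets half of kMaxFixedIndices, and counting the receiver is what turns the test into num_parameters() + 1. As a sketch (the kMaxFixedIndices value here is illustrative, not V8's actual constant):

    // Each half of the signed index space can hold at most 'limit' entries.
    const int kMaxFixedIndices = 128;  // illustrative value

    bool FitsIndexEncoding(int num_parameters, int num_stack_slots) {
      const int limit = kMaxFixedIndices / 2;
      return (num_parameters + 1) <= limit &&  // +1 for the receiver
             num_stack_slots <= limit;
    }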

deps/v8/src/d8.cc (8 changes)

@@ -405,7 +405,7 @@ void Shell::AddHistogramSample(void* histogram, int sample) {
void Shell::Initialize() {
  Shell::counter_map_ = new CounterMap();
  // Set up counters
-  if (i::FLAG_map_counters != NULL)
  if (i::StrLength(i::FLAG_map_counters) != 0)
    MapCounters(i::FLAG_map_counters);
  if (i::FLAG_dump_counters) {
    V8::SetCounterFunction(LookupCounter);
@@ -425,6 +425,12 @@ void Shell::Initialize() {
  global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
  global_template->Set(String::New("version"), FunctionTemplate::New(Version));

#ifdef LIVE_OBJECT_LIST
  global_template->Set(String::New("lol_is_enabled"), Boolean::New(true));
#else
  global_template->Set(String::New("lol_is_enabled"), Boolean::New(false));
#endif

  Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
  AddOSMethods(os_templ);
  global_template->Set(String::New("os"), os_templ);

deps/v8/src/d8.js (618 changes)

@@ -117,6 +117,10 @@ Debug.State = {
var trace_compile = false;  // Tracing all compile events?
var trace_debug_json = false;  // Tracing all debug json packets?
var last_cmd_line = '';
//var lol_is_enabled;  // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
var lol_next_dump_index = 0;
const kDefaultLolLinesToPrintAtATime = 10;
const kMaxLolLinesToPrintAtATime = 1000;
var repeat_cmd_line = '';
var is_running = true;
@@ -495,6 +499,13 @@ function DebugRequest(cmd_line) {
      this.request_ = void 0;
      break;

    case 'liveobjectlist':
    case 'lol':
      if (lol_is_enabled) {
        this.request_ = this.lolToJSONRequest_(args, is_repeating);
        break;
      }

    default:
      throw new Error('Unknown command "' + cmd + '"');
  }
@@ -539,10 +550,54 @@ DebugRequest.prototype.createRequest = function(command) {
};
// Note: we use detected command repetition as a signal for continuation here.
DebugRequest.prototype.createLOLRequest = function(command,
start_index,
lines_to_dump,
is_continuation) {
if (is_continuation) {
start_index = lol_next_dump_index;
}
if (lines_to_dump) {
lines_to_dump = parseInt(lines_to_dump);
} else {
lines_to_dump = kDefaultLolLinesToPrintAtATime;
}
if (lines_to_dump > kMaxLolLinesToPrintAtATime) {
lines_to_dump = kMaxLolLinesToPrintAtATime;
}
// Save the next start_index to dump from:
lol_next_dump_index = start_index + lines_to_dump;
var request = this.createRequest(command);
request.arguments = {};
request.arguments.start = start_index;
request.arguments.count = lines_to_dump;
return request;
};
// Create a JSON request for the evaluation command.
DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
  // Global variable used to store whether a handle was requested.
  lookup_handle = null;
if (lol_is_enabled) {
// Check if the expression is a obj id in the form @<obj id>.
var obj_id_match = expression.match(/^@([0-9]+)$/);
if (obj_id_match) {
var obj_id = parseInt(obj_id_match[1]);
// Build a dump request.
var request = this.createRequest('getobj');
request.arguments = {};
request.arguments.obj_id = obj_id;
return request.toJSONProtocol();
}
}
  // Check if the expression is a handle id in the form #<handle>#.
  var handle_match = expression.match(/^#([0-9]*)#$/);
  if (handle_match) {
@@ -1103,6 +1158,10 @@ DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) {
    // Build an evaluate request from the text command.
    request = this.createRequest('frame');
    last_cmd = 'info args';
  } else if (lol_is_enabled &&
             args && (args == 'liveobjectlist' || args == 'lol')) {
    // Build an evaluate request from the text command.
    return this.liveObjectListToJSONRequest_(null);
  } else {
    throw new Error('Invalid info arguments.');
  }
@@ -1153,6 +1212,262 @@ DebugRequest.prototype.gcToJSONRequest_ = function(args) {
};
// Args: [v[erbose]] [<N>] [i[ndex] <i>] [t[ype] <type>] [sp[ace] <space>]
DebugRequest.prototype.lolMakeListRequest =
function(cmd, args, first_arg_index, is_repeating) {
var request;
var start_index = 0;
var dump_limit = void 0;
var type_filter = void 0;
var space_filter = void 0;
var prop_filter = void 0;
var is_verbose = false;
var i;
for (i = first_arg_index; i < args.length; i++) {
var arg = args[i];
// Check for [v[erbose]]:
if (arg === 'verbose' || arg === 'v') {
// Nothing to do. This is already implied by args.length > 3.
is_verbose = true;
// Check for [<N>]:
} else if (arg.match(/^[0-9]+$/)) {
dump_limit = arg;
is_verbose = true;
// Check for i[ndex] <i>:
} else if (arg === 'index' || arg === 'i') {
i++;
if (args.length < i) {
throw new Error('Missing index after ' + arg + '.');
}
start_index = parseInt(args[i]);
// The user input start index starts at 1:
if (start_index <= 0) {
throw new Error('Invalid index ' + args[i] + '.');
}
start_index -= 1;
is_verbose = true;
// Check for t[ype] <type>:
} else if (arg === 'type' || arg === 't') {
i++;
if (args.length < i) {
throw new Error('Missing type after ' + arg + '.');
}
type_filter = args[i];
// Check for space <heap space name>:
} else if (arg === 'space' || arg === 'sp') {
i++;
if (args.length < i) {
throw new Error('Missing space name after ' + arg + '.');
}
space_filter = args[i];
// Check for property <prop name>:
} else if (arg === 'property' || arg === 'prop') {
i++;
if (args.length < i) {
throw new Error('Missing property name after ' + arg + '.');
}
prop_filter = args[i];
} else {
throw new Error('Unknown args at ' + arg + '.');
}
}
// Build the verbose request:
if (is_verbose) {
request = this.createLOLRequest('lol-'+cmd,
start_index,
dump_limit,
is_repeating);
request.arguments.verbose = true;
} else {
request = this.createRequest('lol-'+cmd);
request.arguments = {};
}
request.arguments.filter = {};
if (type_filter) {
request.arguments.filter.type = type_filter;
}
if (space_filter) {
request.arguments.filter.space = space_filter;
}
if (prop_filter) {
request.arguments.filter.prop = prop_filter;
}
return request;
}
function extractObjId(args) {
var id = args;
id = id.match(/^@([0-9]+)$/);
if (id) {
id = id[1];
} else {
throw new Error('Invalid obj id ' + args + '.');
}
return parseInt(id);
}
DebugRequest.prototype.lolToJSONRequest_ = function(args, is_repeating) {
var request;
// Use default command if one is not specified:
if (!args) {
args = 'info';
}
var orig_args = args;
var first_arg_index;
var arg, i;
var args = args.split(/\s+/g);
var cmd = args[0];
var id;
// Command: <id> [v[erbose]] ...
if (cmd.match(/^[0-9]+$/)) {
// Convert to the padded list command:
// Command: l[ist] <dummy> <id> [v[erbose]] ...
// Insert the implicit 'list' in front and process as normal:
cmd = 'list';
args.unshift(cmd);
}
switch(cmd) {
// Command: c[apture]
case 'capture':
case 'c':
request = this.createRequest('lol-capture');
break;
// Command: clear|d[elete] <id>|all
case 'clear':
case 'delete':
case 'del': {
if (args.length < 2) {
throw new Error('Missing argument after ' + cmd + '.');
} else if (args.length > 2) {
throw new Error('Too many arguments after ' + cmd + '.');
}
id = args[1];
if (id.match(/^[0-9]+$/)) {
// Delete a specific lol record:
request = this.createRequest('lol-delete');
request.arguments = {};
request.arguments.id = parseInt(id);
} else if (id === 'all') {
// Delete all:
request = this.createRequest('lol-reset');
} else {
throw new Error('Invalid argument after ' + cmd + '.');
}
break;
}
// Command: diff <id1> <id2> [<dump options>]
case 'diff':
first_arg_index = 3;
// Command: list <dummy> <id> [<dump options>]
case 'list':
// Command: ret[ainers] <obj id> [<dump options>]
case 'retainers':
case 'ret':
case 'retaining-paths':
case 'rp': {
if (cmd === 'ret') cmd = 'retainers';
else if (cmd === 'rp') cmd = 'retaining-paths';
if (!first_arg_index) first_arg_index = 2;
if (args.length < first_arg_index) {
throw new Error('Too few arguments after ' + cmd + '.');
}
var request_cmd = (cmd === 'list') ? 'diff':cmd;
request = this.lolMakeListRequest(request_cmd,
args,
first_arg_index,
is_repeating);
if (cmd === 'diff') {
request.arguments.id1 = parseInt(args[1]);
request.arguments.id2 = parseInt(args[2]);
} else if (cmd == 'list') {
request.arguments.id1 = 0;
request.arguments.id2 = parseInt(args[1]);
} else {
request.arguments.id = extractObjId(args[1]);
}
break;
}
// Command: getid
case 'getid': {
request = this.createRequest('lol-getid');
request.arguments = {};
request.arguments.address = args[1];
break;
}
// Command: inf[o] [<N>]
case 'info':
case 'inf': {
if (args.length > 2) {
throw new Error('Too many arguments after ' + cmd + '.');
}
// Built the info request:
request = this.createLOLRequest('lol-info', 0, args[1], is_repeating);
break;
}
// Command: path <obj id 1> <obj id 2>
case 'path': {
request = this.createRequest('lol-path');
request.arguments = {};
if (args.length > 2) {
request.arguments.id1 = extractObjId(args[1]);
request.arguments.id2 = extractObjId(args[2]);
} else {
request.arguments.id1 = 0;
request.arguments.id2 = extractObjId(args[1]);
}
break;
}
// Command: print
case 'print': {
request = this.createRequest('lol-print');
request.arguments = {};
request.arguments.id = extractObjId(args[1]);
break;
}
// Command: reset
case 'reset': {
request = this.createRequest('lol-reset');
break;
}
default:
throw new Error('Invalid arguments.');
}
return request.toJSONProtocol();
};
// Create a JSON request for the threads command.
DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
  // Build a threads request from the text command.
@@ -1239,6 +1554,49 @@ DebugRequest.prototype.helpCommand_ = function(args) {
  print('');
  print('gc - runs the garbage collector');
  print('');
if (lol_is_enabled) {
print('liveobjectlist|lol <command> - live object list tracking.');
print(' where <command> can be:');
print(' c[apture] - captures a LOL list.');
print(' clear|del[ete] <id>|all - clears LOL of id <id>.');
print(' If \'all\' is unspecified instead, will clear all.');
print(' diff <id1> <id2> [<dump options>]');
print(' - prints the diff between LOLs id1 and id2.');
print(' - also see <dump options> below.');
print(' getid <address> - gets the obj id for the specified address if available.');
print(' The address must be in hex form prefixed with 0x.');
print(' inf[o] [<N>] - lists summary info of all LOL lists.');
print(' If N is specified, will print N items at a time.');
print(' [l[ist]] <id> [<dump options>]');
print(' - prints the listing of objects in LOL id.');
print(' - also see <dump options> below.');
print(' reset - clears all LOL lists.');
print(' ret[ainers] <id> [<dump options>]');
print(' - prints the list of retainers of obj id.');
print(' - also see <dump options> below.');
print(' path <id1> <id2> - prints the retaining path from obj id1 to id2.');
print(' If only one id is specified, will print the path from');
print(' roots to the specified object if available.');
print(' print <id> - prints the obj for the specified obj id if available.');
print('');
print(' <dump options> includes:');
print(' [v[erbose]] - do verbose dump.');
print(' [<N>] - dump N items at a time. Implies verbose dump.');
print(' If unspecified, N will default to '+
kDefaultLolLinesToPrintAtATime+'. Max N is '+
kMaxLolLinesToPrintAtATime+'.');
print(' [i[ndex] <i>] - start dump from index i. Implies verbose dump.');
print(' [t[ype] <type>] - filter by type.');
print(' [sp[ace] <space name>] - filter by heap space where <space name> is one of');
print(' { cell, code, lo, map, new, old-data, old-pointer }.');
print('');
print(' If the verbose option, or an option that implies a verbose dump');
print(' is specified, then a verbose dump will requested. Else, a summary dump');
print(' will be requested.');
print('');
}
  print('trace compile');
  // hidden command: trace debug json - toggles tracing of debug json packets
  print('');
@@ -1339,6 +1697,237 @@ function refObjectToString_(protocolPackage, handle) {
}
function decodeLolCaptureResponse(body) {
var result;
result = 'Captured live object list '+ body.id +
': count '+ body.count + ' size ' + body.size;
return result;
}
function decodeLolDeleteResponse(body) {
var result;
result = 'Deleted live object list '+ body.id;
return result;
}
function digitsIn(value) {
var digits = 0;
if (value === 0) value = 1;
while (value >= 1) {
digits++;
value /= 10;
}
return digits;
}
function padding(value, max_digits) {
var padding_digits = max_digits - digitsIn(value);
var padding = '';
while (padding_digits > 0) {
padding += ' ';
padding_digits--;
}
return padding;
}
function decodeLolInfoResponse(body) {
var result;
var lists = body.lists;
var length = lists.length;
var first_index = body.first_index + 1;
var has_more = ((first_index + length) <= body.count);
result = 'captured live object lists';
if (has_more || (first_index != 1)) {
result += ' ['+ length +' of '+ body.count +
': starting from '+ first_index +']';
}
result += ':\n';
var max_digits = digitsIn(body.count);
var last_count = 0;
var last_size = 0;
for (var i = 0; i < length; i++) {
var entry = lists[i];
var count = entry.count;
var size = entry.size;
var index = first_index + i;
result += ' [' + padding(index, max_digits) + index + '] id '+ entry.id +
': count '+ count;
if (last_count > 0) {
result += '(+' + (count - last_count) + ')';
}
result += ' size '+ size;
if (last_size > 0) {
result += '(+' + (size - last_size) + ')';
}
result += '\n';
last_count = count;
last_size = size;
}
result += ' total: '+length+' lists\n';
if (has_more) {
result += ' -- press <enter> for more --\n';
} else {
repeat_cmd_line = '';
}
if (length === 0) result += ' none\n';
return result;
}
function decodeLolListResponse(body, title) {
var result;
var total_count = body.count;
var total_size = body.size;
var length;
var max_digits;
var i;
var entry;
var index;
var max_count_digits = digitsIn(total_count);
var max_size_digits;
var summary = body.summary;
if (summary) {
var roots_count = 0;
var found_root = body.found_root || 0;
var found_weak_root = body.found_weak_root || 0;
// Print the summary result:
result = 'summary of objects:\n';
length = summary.length;
if (found_root !== 0) {
roots_count++;
}
if (found_weak_root !== 0) {
roots_count++;
}
max_digits = digitsIn(length + roots_count);
max_size_digits = digitsIn(total_size);
index = 1;
if (found_root !== 0) {
result += ' [' + padding(index, max_digits) + index + '] ' +
' count '+ 1 + padding(0, max_count_digits) +
' '+ padding(0, max_size_digits+1) +
' : <root>\n';
index++;
}
if (found_weak_root !== 0) {
result += ' [' + padding(index, max_digits) + index + '] ' +
' count '+ 1 + padding(0, max_count_digits) +
' '+ padding(0, max_size_digits+1) +
' : <weak root>\n';
index++;
}
for (i = 0; i < length; i++) {
entry = summary[i];
var count = entry.count;
var size = entry.size;
result += ' [' + padding(index, max_digits) + index + '] ' +
' count '+ count + padding(count, max_count_digits) +
' size '+ size + padding(size, max_size_digits) +
' : <' + entry.desc + '>\n';
index++;
}
result += '\n total count: '+(total_count+roots_count)+'\n';
if (body.size) {
result += ' total size: '+body.size+'\n';
}
} else {
// Print the full dump result:
var first_index = body.first_index + 1;
var elements = body.elements;
length = elements.length;
var has_more = ((first_index + length) <= total_count);
result = title;
if (has_more || (first_index != 1)) {
result += ' ['+ length +' of '+ total_count +
': starting from '+ first_index +']';
}
result += ':\n';
if (length === 0) result += ' none\n';
max_digits = digitsIn(length);
var max_id = 0;
var max_size = 0;
for (i = 0; i < length; i++) {
entry = elements[i];
if (entry.id > max_id) max_id = entry.id;
if (entry.size > max_size) max_size = entry.size;
}
var max_id_digits = digitsIn(max_id);
max_size_digits = digitsIn(max_size);
for (i = 0; i < length; i++) {
entry = elements[i];
index = first_index + i;
result += ' ['+ padding(index, max_digits) + index +']';
if (entry.id !== 0) {
result += ' @' + entry.id + padding(entry.id, max_id_digits) +
': size ' + entry.size + ', ' +
padding(entry.size, max_size_digits) + entry.desc + '\n';
} else {
// Must be a root or weak root:
result += ' ' + entry.desc + '\n';
}
}
if (has_more) {
result += ' -- press <enter> for more --\n';
} else {
repeat_cmd_line = '';
}
if (length === 0) result += ' none\n';
}
return result;
}
function decodeLolDiffResponse(body) {
var title = 'objects';
return decodeLolListResponse(body, title);
}
function decodeLolRetainersResponse(body) {
var title = 'retainers for @' + body.id;
return decodeLolListResponse(body, title);
}
function decodeLolPathResponse(body) {
return body.path;
}
function decodeLolResetResponse(body) {
return 'Reset all live object lists.';
}
function decodeLolGetIdResponse(body) {
if (body.id == 0) {
return 'Address is invalid, or object has been moved or collected';
}
return 'obj id is @' + body.id;
}
function decodeLolPrintResponse(body) {
return body.dump;
}
// Rounds number 'num' to 'length' decimal places.
function roundNumber(num, length) {
  var factor = Math.pow(10, length);
@@ -1510,6 +2099,7 @@ function DebugResponseDetails(response) {
    case 'evaluate':
    case 'lookup':
    case 'getobj':
      if (last_cmd == 'p' || last_cmd == 'print') {
        result = body.text;
      } else {
@@ -1671,6 +2261,34 @@ function DebugResponseDetails(response) {
      }
      break;
case 'lol-capture':
details.text = decodeLolCaptureResponse(body);
break;
case 'lol-delete':
details.text = decodeLolDeleteResponse(body);
break;
case 'lol-diff':
details.text = decodeLolDiffResponse(body);
break;
case 'lol-getid':
details.text = decodeLolGetIdResponse(body);
break;
case 'lol-info':
details.text = decodeLolInfoResponse(body);
break;
case 'lol-print':
details.text = decodeLolPrintResponse(body);
break;
case 'lol-reset':
details.text = decodeLolResetResponse(body);
break;
case 'lol-retainers':
details.text = decodeLolRetainersResponse(body);
break;
case 'lol-path':
details.text = decodeLolPathResponse(body);
break;
    default:
      details.text =
          'Response for unknown command \'' + response.command() + '\'' +

deps/v8/src/debug-debugger.js (121 changes)

@@ -109,6 +109,7 @@ var debugger_flags = {
    }
  },
};

var lol_is_enabled = %HasLOLEnabled();

// Create a new break point object and add it to the list of break points.
@@ -1391,6 +1392,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
    this.scopeRequest_(request, response);
  } else if (request.command == 'evaluate') {
    this.evaluateRequest_(request, response);
  } else if (lol_is_enabled && request.command == 'getobj') {
    this.getobjRequest_(request, response);
  } else if (request.command == 'lookup') {
    this.lookupRequest_(request, response);
  } else if (request.command == 'references') {
@@ -1418,6 +1421,28 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
  } else if (request.command == 'gc') {
    this.gcRequest_(request, response);
// LiveObjectList tools:
} else if (lol_is_enabled && request.command == 'lol-capture') {
this.lolCaptureRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-delete') {
this.lolDeleteRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-diff') {
this.lolDiffRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-getid') {
this.lolGetIdRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-info') {
this.lolInfoRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-reset') {
this.lolResetRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-retainers') {
this.lolRetainersRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-path') {
this.lolPathRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-print') {
this.lolPrintRequest_(request, response);
} else if (lol_is_enabled && request.command == 'lol-stats') {
this.lolStatsRequest_(request, response);
  } else {
    throw new Error('Unknown command "' + request.command + '" in request');
  }
@@ -2011,6 +2036,24 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
};
DebugCommandProcessor.prototype.getobjRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
}
// Pull out arguments.
var obj_id = request.arguments.obj_id;
// Check for legal arguments.
if (IS_UNDEFINED(obj_id)) {
return response.failed('Argument "obj_id" missing');
}
// Dump the object.
response.body = MakeMirror(%GetLOLObj(obj_id));
};
DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
  if (!request.arguments) {
    return response.failed('Missing arguments');
@@ -2341,6 +2384,84 @@ DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
};
DebugCommandProcessor.prototype.lolCaptureRequest_ =
function(request, response) {
response.body = %CaptureLOL();
};
DebugCommandProcessor.prototype.lolDeleteRequest_ =
function(request, response) {
var id = request.arguments.id;
var result = %DeleteLOL(id);
if (result) {
response.body = { id: id };
} else {
response.failed('Failed to delete: live object list ' + id + ' not found.');
}
};
DebugCommandProcessor.prototype.lolDiffRequest_ = function(request, response) {
var id1 = request.arguments.id1;
var id2 = request.arguments.id2;
var verbose = request.arguments.verbose;
var filter = request.arguments.filter;
if (verbose === true) {
var start = request.arguments.start;
var count = request.arguments.count;
response.body = %DumpLOL(id1, id2, start, count, filter);
} else {
response.body = %SummarizeLOL(id1, id2, filter);
}
};
DebugCommandProcessor.prototype.lolGetIdRequest_ = function(request, response) {
var address = request.arguments.address;
response.body = {};
response.body.id = %GetLOLObjId(address);
};
DebugCommandProcessor.prototype.lolInfoRequest_ = function(request, response) {
var start = request.arguments.start;
var count = request.arguments.count;
response.body = %InfoLOL(start, count);
};
DebugCommandProcessor.prototype.lolResetRequest_ = function(request, response) {
%ResetLOL();
};
DebugCommandProcessor.prototype.lolRetainersRequest_ =
function(request, response) {
var id = request.arguments.id;
var verbose = request.arguments.verbose;
var start = request.arguments.start;
var count = request.arguments.count;
var filter = request.arguments.filter;
response.body = %GetLOLObjRetainers(id, Mirror.prototype, verbose,
start, count, filter);
};
DebugCommandProcessor.prototype.lolPathRequest_ = function(request, response) {
var id1 = request.arguments.id1;
var id2 = request.arguments.id2;
response.body = {};
response.body.path = %GetLOLPath(id1, id2, Mirror.prototype);
};
DebugCommandProcessor.prototype.lolPrintRequest_ = function(request, response) {
var id = request.arguments.id;
response.body = {};
response.body.dump = %PrintLOLObj(id);
};
// Check whether the previously processed command caused the VM to become // Check whether the previously processed command caused the VM to become
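The lol-* handlers above plug into the standard V8 debugger JSON protocol, so any client that can already talk to the debugger agent can drive them. A minimal sketch of what a 'lol-diff' request could look like on the wire, assuming the agent's usual Content-Length framing and the --debugger_port default of 5858 (illustrative only, not part of this patch):

#include <cstdio>
#include <string>

int main() {
  // Hypothetical client-side request for a verbose diff of live object
  // lists 1 and 2; the field names mirror lolDiffRequest_ above.
  std::string body =
      "{\"seq\":1,\"type\":\"request\",\"command\":\"lol-diff\","
      "\"arguments\":{\"id1\":1,\"id2\":2,\"verbose\":true,"
      "\"start\":0,\"count\":10}}";
  std::printf("Content-Length: %d\r\n\r\n%s\n",
              static_cast<int>(body.size()), body.c_str());
  return 0;
}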

3
deps/v8/src/debug.cc

@@ -836,7 +836,8 @@ bool Debug::Load() {
   Handle<String> key = Factory::LookupAsciiSymbol("builtins");
   Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
   RETURN_IF_EMPTY_HANDLE_VALUE(
-      SetProperty(global, key, Handle<Object>(global->builtins()), NONE),
+      SetProperty(global, key, Handle<Object>(global->builtins()),
+                  NONE, kNonStrictMode),
       false);

   // Compile the JavaScript for the debugger in the debugger context.

14
deps/v8/src/flag-definitions.h

@@ -110,7 +110,6 @@ DEFINE_bool(use_lithium, true, "use lithium code generator")
 DEFINE_bool(use_range, true, "use hydrogen range analysis")
 DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
 DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
-DEFINE_bool(use_peeling, false, "use loop peeling")
 DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
 DEFINE_bool(use_inlining, true, "use function inlining")
 DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
@@ -135,11 +134,8 @@ DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
 DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
 DEFINE_bool(aggressive_loop_invariant_motion, true,
             "aggressive motion of instructions out of loops")
-#ifdef V8_TARGET_ARCH_X64
-DEFINE_bool(use_osr, false, "use on-stack replacement")
-#else
 DEFINE_bool(use_osr, true, "use on-stack replacement")
-#endif
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
 DEFINE_bool(optimize_closures, true, "optimize closures")
@@ -270,6 +266,12 @@ DEFINE_bool(use_idle_notification, true,
 // ic.cc
 DEFINE_bool(use_ic, true, "use inline caching")

+#ifdef LIVE_OBJECT_LIST
+// liveobjectlist.cc
+DEFINE_string(lol_workdir, NULL, "path for lol temp files")
+DEFINE_bool(verify_lol, false, "perform debugging verification for lol")
+#endif
+
 // macro-assembler-ia32.cc
 DEFINE_bool(native_code_counters, false,
             "generate extra code for manipulating stats counters")
@@ -358,7 +360,7 @@ DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
                                     "debugger agent in another process")
 DEFINE_bool(debugger_agent, false, "Enable debugger agent")
 DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-DEFINE_string(map_counters, NULL, "Map counters to a file")
+DEFINE_string(map_counters, "", "Map counters to a file")
 DEFINE_args(js_arguments, JSArguments(),
             "Pass all remaining arguments to the script. Alias for \"--\".")

4
deps/v8/src/frame-element.h

@@ -113,6 +113,10 @@ class FrameElement BASE_EMBEDDED {
   static ZoneObjectList* ConstantList();

+  static bool ConstantPoolOverflowed() {
+    return !DataField::is_valid(ConstantList()->length());
+  }
+
   // Clear the constants indirection table.
   static void ClearConstantList() {
     ConstantList()->Clear();

16
deps/v8/src/full-codegen.cc

@@ -739,25 +739,13 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
     case Token::SHL:
     case Token::SHR:
     case Token::SAR: {
-      // Figure out if either of the operands is a constant.
-      ConstantOperand constant = ShouldInlineSmiCase(op)
-          ? GetConstantOperand(op, left, right)
-          : kNoConstants;
-
-      // Load only the operands that we need to materialize.
-      if (constant == kNoConstants) {
+      // Load both operands.
       VisitForStackValue(left);
       VisitForAccumulatorValue(right);
-      } else if (constant == kRightConstant) {
-        VisitForAccumulatorValue(left);
-      } else {
-        ASSERT(constant == kLeftConstant);
-        VisitForAccumulatorValue(right);
-      }

       SetSourcePosition(expr->position());
       if (ShouldInlineSmiCase(op)) {
-        EmitInlineSmiBinaryOp(expr, op, mode, left, right, constant);
+        EmitInlineSmiBinaryOp(expr, op, mode, left, right);
       } else {
         EmitBinaryOp(op, mode);
       }

48
deps/v8/src/full-codegen.h

@@ -274,12 +274,6 @@ class FullCodeGenerator: public AstVisitor {
     ForwardBailoutStack* const parent_;
   };

-  enum ConstantOperand {
-    kNoConstants,
-    kLeftConstant,
-    kRightConstant
-  };
-
   // Type of a member function that generates inline code for a native function.
   typedef void (FullCodeGenerator::*InlineFunctionGenerator)
       (ZoneList<Expression*>*);
@@ -298,11 +292,6 @@ class FullCodeGenerator: public AstVisitor {
   // operation.
   bool ShouldInlineSmiCase(Token::Value op);

-  // Compute which (if any) of the operands is a compile-time constant.
-  ConstantOperand GetConstantOperand(Token::Value op,
-                                     Expression* left,
-                                     Expression* right);
-
   // Helper function to convert a pure value into a test context.  The value
   // is expected on the stack or the accumulator, depending on the platform.
   // See the platform-specific implementation for details.
@@ -432,6 +421,14 @@ class FullCodeGenerator: public AstVisitor {
                          Label* done);
   void EmitVariableLoad(Variable* expr);

+  enum ResolveEvalFlag {
+    SKIP_CONTEXT_LOOKUP,
+    PERFORM_CONTEXT_LOOKUP
+  };
+
+  // Expects the arguments and the function already pushed.
+  void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count);
+
   // Platform-specific support for allocating a new closure based on
   // the given function info.
   void EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure);
@@ -457,34 +454,7 @@ class FullCodeGenerator: public AstVisitor {
                              Token::Value op,
                              OverwriteMode mode,
                              Expression* left,
-                             Expression* right,
-                             ConstantOperand constant);
-
-  void EmitConstantSmiBinaryOp(Expression* expr,
-                               Token::Value op,
-                               OverwriteMode mode,
-                               bool left_is_constant_smi,
-                               Smi* value);
-
-  void EmitConstantSmiBitOp(Expression* expr,
-                            Token::Value op,
-                            OverwriteMode mode,
-                            Smi* value);
-
-  void EmitConstantSmiShiftOp(Expression* expr,
-                              Token::Value op,
-                              OverwriteMode mode,
-                              Smi* value);
-
-  void EmitConstantSmiAdd(Expression* expr,
-                          OverwriteMode mode,
-                          bool left_is_constant_smi,
-                          Smi* value);
-
-  void EmitConstantSmiSub(Expression* expr,
-                          OverwriteMode mode,
-                          bool left_is_constant_smi,
-                          Smi* value);
+                             Expression* right);

   // Assign to the given expression as if via '='.  The right-hand-side value
   // is expected in the accumulator.

4
deps/v8/src/handles-inl.h

@@ -36,14 +36,14 @@
 namespace v8 {
 namespace internal {

-template<class T>
+template<typename T>
 Handle<T>::Handle(T* obj) {
   ASSERT(!obj->IsFailure());
   location_ = HandleScope::CreateHandle(obj);
 }

-template <class T>
+template <typename T>
 inline T* Handle<T>::operator*() const {
   ASSERT(location_ != NULL);
   ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);

24
deps/v8/src/handles.cc

@@ -242,17 +242,21 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
 Handle<Object> SetProperty(Handle<JSObject> object,
                            Handle<String> key,
                            Handle<Object> value,
-                           PropertyAttributes attributes) {
-  CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes), Object);
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict) {
+  CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes, strict),
+                     Object);
 }

 Handle<Object> SetProperty(Handle<Object> object,
                            Handle<Object> key,
                            Handle<Object> value,
-                           PropertyAttributes attributes) {
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict) {
   CALL_HEAP_FUNCTION(
-      Runtime::SetObjectProperty(object, key, value, attributes), Object);
+      Runtime::SetObjectProperty(object, key, value, attributes, strict),
+      Object);
 }
@@ -304,10 +308,12 @@ void SetLocalPropertyNoThrow(Handle<JSObject> object,
 Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
                                           Handle<String> key,
                                           Handle<Object> value,
-                                          PropertyAttributes attributes) {
+                                          PropertyAttributes attributes,
+                                          StrictModeFlag strict) {
   CALL_HEAP_FUNCTION(object->SetPropertyWithInterceptor(*key,
                                                         *value,
-                                                        attributes),
+                                                        attributes,
+                                                        strict),
                      Object);
 }
@@ -863,10 +869,12 @@ bool CompileLazyInLoop(Handle<JSFunction> function,
 }

-bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) {
+bool CompileOptimized(Handle<JSFunction> function,
+                      int osr_ast_id,
+                      ClearExceptionFlag flag) {
   CompilationInfo info(function);
   info.SetOptimizing(osr_ast_id);
-  return CompileLazyHelper(&info, KEEP_EXCEPTION);
+  return CompileLazyHelper(&info, flag);
 }
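Every SetProperty-style entry point now threads a StrictModeFlag through to the runtime; the debug.cc hunk earlier shows one call site updated to kNonStrictMode. The new call shape, as an illustrative fragment (assumes the v8::internal declarations above; object, key and value are placeholder handles, not from the patch):

// Attributes and language mode are now separate, explicit arguments:
Handle<Object> result =
    SetProperty(object, key, value, NONE, kNonStrictMode);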

63
deps/v8/src/handles.h

@@ -39,7 +39,7 @@ namespace internal {
 // Handles are only valid within a HandleScope.
 // When a handle is created for an object a cell is allocated in the heap.

-template<class T>
+template<typename T>
 class Handle {
  public:
   INLINE(explicit Handle(T** location)) { location_ = location; }
@@ -112,15 +112,7 @@ class HandleScope {
   }

   ~HandleScope() {
-    current_.next = prev_next_;
-    current_.level--;
-    if (current_.limit != prev_limit_) {
-      current_.limit = prev_limit_;
-      DeleteExtensions();
-    }
-#ifdef DEBUG
-    ZapRange(prev_next_, prev_limit_);
-#endif
+    CloseScope();
   }

   // Counts the number of allocated handles.
@@ -148,6 +140,26 @@ class HandleScope {
   static Address current_limit_address();
   static Address current_level_address();

+  // Closes the HandleScope (invalidating all handles
+  // created in the scope of the HandleScope) and returns
+  // a Handle backed by the parent scope holding the
+  // value of the argument handle.
+  template <typename T>
+  Handle<T> CloseAndEscape(Handle<T> handle_value) {
+    T* value = *handle_value;
+    // Throw away all handles in the current scope.
+    CloseScope();
+    // Allocate one handle in the parent scope.
+    ASSERT(current_.level > 0);
+    Handle<T> result(CreateHandle<T>(value));
+    // Reinitialize the current scope (so that it's ready
+    // to be used or closed again).
+    prev_next_ = current_.next;
+    prev_limit_ = current_.limit;
+    current_.level++;
+    return result;
+  }
+
  private:
   // Prevent heap allocation or illegal handle scopes.
   HandleScope(const HandleScope&);
@@ -155,9 +167,23 @@ class HandleScope {
   void* operator new(size_t size);
   void operator delete(void* size_t);

+  inline void CloseScope() {
+    current_.next = prev_next_;
+    current_.level--;
+    if (current_.limit != prev_limit_) {
+      current_.limit = prev_limit_;
+      DeleteExtensions();
+    }
+#ifdef DEBUG
+    ZapRange(prev_next_, prev_limit_);
+#endif
+  }
+
   static v8::ImplementationUtilities::HandleScopeData current_;
-  Object** const prev_next_;
-  Object** const prev_limit_;
+  // Holds values on entry. The prev_next_ value is never NULL
+  // on_entry, but is set to NULL when this scope is closed.
+  Object** prev_next_;
+  Object** prev_limit_;

   // Extend the handle scope making room for more handles.
   static internal::Object** Extend();
@@ -197,12 +223,14 @@ Handle<String> FlattenGetString(Handle<String> str);
 Handle<Object> SetProperty(Handle<JSObject> object,
                            Handle<String> key,
                            Handle<Object> value,
-                           PropertyAttributes attributes);
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict);

 Handle<Object> SetProperty(Handle<Object> object,
                            Handle<Object> key,
                            Handle<Object> value,
-                           PropertyAttributes attributes);
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict);

 Handle<Object> ForceSetProperty(Handle<JSObject> object,
                                 Handle<Object> key,
@@ -233,7 +261,8 @@ void SetLocalPropertyNoThrow(Handle<JSObject> object,
 Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
                                           Handle<String> key,
                                           Handle<Object> value,
-                                          PropertyAttributes attributes);
+                                          PropertyAttributes attributes,
+                                          StrictModeFlag strict);

 Handle<Object> SetElement(Handle<JSObject> object,
                           uint32_t index,
@@ -354,7 +383,9 @@ bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
 bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);

-bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id);
+bool CompileOptimized(Handle<JSFunction> function,
+                      int osr_ast_id,
+                      ClearExceptionFlag flag);

 class NoHandleAllocation BASE_EMBEDDED {
  public:
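The new CloseAndEscape covers a common pattern: allocate freely in an inner scope, then let exactly one handle outlive it. An illustrative fragment of that usage (MakeArray is a hypothetical helper written against the declarations above, not V8 code):

// Hypothetical helper: builds an array with scratch handles, then
// promotes only 'array' into the caller's HandleScope.
Handle<FixedArray> MakeArray(int length) {
  HandleScope inner;
  Handle<FixedArray> array = Factory::NewFixedArray(length);
  // ... any number of temporary handles may be created here ...
  return inner.CloseAndEscape(array);  // temporaries die, 'array' survives
}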

45
deps/v8/src/heap-profiler.cc

@@ -911,22 +911,27 @@ static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) {
 class CountingRetainersIterator {
  public:
   CountingRetainersIterator(const JSObjectsCluster& child_cluster,
+                            HeapEntriesAllocator* allocator,
                             HeapEntriesMap* map)
-      : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
+      : child_(ClusterAsHeapObject(child_cluster)),
+        allocator_(allocator),
+        map_(map) {
     if (map_->Map(child_) == NULL)
-      map_->Pair(child_, HeapEntriesMap::kHeapEntryPlaceholder);
+      map_->Pair(child_, allocator_, HeapEntriesMap::kHeapEntryPlaceholder);
   }

   void Call(const JSObjectsCluster& cluster,
             const NumberAndSizeInfo& number_and_size) {
     if (map_->Map(ClusterAsHeapObject(cluster)) == NULL)
       map_->Pair(ClusterAsHeapObject(cluster),
+                 allocator_,
                  HeapEntriesMap::kHeapEntryPlaceholder);
     map_->CountReference(ClusterAsHeapObject(cluster), child_);
   }

  private:
   HeapObject* child_;
+  HeapEntriesAllocator* allocator_;
   HeapEntriesMap* map_;
 };
@@ -934,6 +939,7 @@ class CountingRetainersIterator {
 class AllocatingRetainersIterator {
  public:
   AllocatingRetainersIterator(const JSObjectsCluster& child_cluster,
+                              HeapEntriesAllocator*,
                               HeapEntriesMap* map)
       : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
     child_entry_ = map_->Map(child_);
@@ -966,8 +972,9 @@ template<class RetainersIterator>
 class AggregatingRetainerTreeIterator {
  public:
   explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser,
+                                           HeapEntriesAllocator* allocator,
                                            HeapEntriesMap* map)
-      : coarser_(coarser), map_(map) {
+      : coarser_(coarser), allocator_(allocator), map_(map) {
   }

   void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) {
@@ -981,25 +988,28 @@ class AggregatingRetainerTreeIterator {
       tree->ForEach(&retainers_aggregator);
       tree_to_iterate = &dest_tree_;
     }
-    RetainersIterator iterator(cluster, map_);
+    RetainersIterator iterator(cluster, allocator_, map_);
     tree_to_iterate->ForEach(&iterator);
   }

  private:
   ClustersCoarser* coarser_;
+  HeapEntriesAllocator* allocator_;
   HeapEntriesMap* map_;
 };

-class AggregatedRetainerTreeAllocator {
+class AggregatedRetainerTreeAllocator : public HeapEntriesAllocator {
  public:
   AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot,
                                   int* root_child_index)
       : snapshot_(snapshot), root_child_index_(root_child_index) {
   }
+  ~AggregatedRetainerTreeAllocator() { }

-  HeapEntry* GetEntry(
-      HeapObject* obj, int children_count, int retainers_count) {
+  HeapEntry* AllocateEntry(
+      HeapThing ptr, int children_count, int retainers_count) {
+    HeapObject* obj = reinterpret_cast<HeapObject*>(ptr);
     JSObjectsCluster cluster = HeapObjectAsCluster(obj);
     const char* name = cluster.GetSpecialCaseName();
     if (name == NULL) {
@@ -1018,12 +1028,13 @@ class AggregatedRetainerTreeAllocator {
 template<class Iterator>
 void AggregatedHeapSnapshotGenerator::IterateRetainers(
-    HeapEntriesMap* entries_map) {
+    HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map) {
   RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile();
   AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1(
-      p->coarser(), entries_map);
+      p->coarser(), allocator, entries_map);
   p->retainers_tree()->ForEach(&agg_ret_iter_1);
-  AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(NULL, entries_map);
+  AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(
+      NULL, allocator, entries_map);
   p->aggregator()->output_tree().ForEach(&agg_ret_iter_2);
 }
@@ -1042,7 +1053,9 @@ void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
   agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter);
   histogram_entities_count += counting_cons_iter.entities_count();
   HeapEntriesMap entries_map;
-  IterateRetainers<CountingRetainersIterator>(&entries_map);
+  int root_child_index = 0;
+  AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
+  IterateRetainers<CountingRetainersIterator>(&allocator, &entries_map);
   histogram_entities_count += entries_map.entries_count();
   histogram_children_count += entries_map.total_children_count();
   histogram_retainers_count += entries_map.total_retainers_count();
@@ -1056,10 +1069,7 @@ void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
   snapshot->AllocateEntries(histogram_entities_count,
                             histogram_children_count,
                             histogram_retainers_count);
-  snapshot->AddEntry(HeapSnapshot::kInternalRootObject,
-                     root_children_count,
-                     0);
-  int root_child_index = 0;
+  snapshot->AddRootEntry(root_children_count);
   for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
     if (agg_snapshot_->info()[i].bytes() > 0) {
       AddEntryFromAggregatedSnapshot(snapshot,
@@ -1075,11 +1085,10 @@ void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
   AllocatingConstructorHeapProfileIterator alloc_cons_iter(
       snapshot, &root_child_index);
   agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter);
-  AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
-  entries_map.UpdateEntries(&allocator);
+  entries_map.AllocateEntries();

   // Fill up references.
-  IterateRetainers<AllocatingRetainersIterator>(&entries_map);
+  IterateRetainers<AllocatingRetainersIterator>(&allocator, &entries_map);

   snapshot->SetDominatorsToSelf();
 }

4
deps/v8/src/heap-profiler.h

@@ -340,6 +340,7 @@ class AggregatedHeapSnapshot {
 class HeapEntriesMap;
+class HeapEntriesAllocator;
 class HeapSnapshot;

 class AggregatedHeapSnapshotGenerator {
@@ -354,7 +355,8 @@ class AggregatedHeapSnapshotGenerator {
   void CalculateStringsStats();
   void CollectStats(HeapObject* obj);
   template<class Iterator>
-  void IterateRetainers(HeapEntriesMap* entries_map);
+  void IterateRetainers(
+      HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map);
   AggregatedHeapSnapshot* agg_snapshot_;
 };

15
deps/v8/src/heap.cc

@@ -844,8 +844,6 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
   ContextSlotCache::Clear();
   DescriptorLookupCache::Clear();

-  RuntimeProfiler::MarkCompactPrologue(is_compacting);
-
   CompilationCache::MarkCompactPrologue();

   CompletelyClearInstanceofCache();
@@ -1056,20 +1054,13 @@ void Heap::Scavenge() {
   // Scavenge object reachable from the global contexts list directly.
   scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

-  // Scavenge objects reachable from the runtime-profiler sampler
-  // window directly.
-  Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress();
-  int sampler_window_size = RuntimeProfiler::SamplerWindowSize();
-  scavenge_visitor.VisitPointers(
-      sampler_window_address,
-      sampler_window_address + sampler_window_size);
-
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);

   LiveObjectList::UpdateReferencesForScavengeGC();
-  RuntimeProfiler::UpdateSamplesAfterScavenge();

   ASSERT(new_space_front == new_space_.top());
@@ -5336,7 +5327,11 @@ void PathTracer::ProcessResults() {
   for (int i = 0; i < object_stack_.length(); i++) {
     if (i > 0) PrintF("\n |\n |\n V\n\n");
     Object* obj = object_stack_[i];
+#ifdef OBJECT_PRINT
     obj->Print();
+#else
+    obj->ShortPrint();
+#endif
   }
   PrintF("=====================================\n");
 }

401
deps/v8/src/hydrogen.cc

@@ -482,128 +482,75 @@ HConstant* HGraph::GetConstantFalse() {
 }

-void HSubgraph::AppendJoin(HBasicBlock* first,
+HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
                            HBasicBlock* second,
                            int join_id) {
   if (first == NULL) {
-    exit_block_ = second;
+    return second;
   } else if (second == NULL) {
-    exit_block_ = first;
+    return first;
   } else {
     HBasicBlock* join_block = graph_->CreateBasicBlock();
     first->Goto(join_block);
     second->Goto(join_block);
     join_block->SetJoinId(join_id);
-    exit_block_ = join_block;
+    return join_block;
   }
 }

-void HSubgraph::ResolveContinue(IterationStatement* statement,
+HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
+                                         HBasicBlock* exit_block,
                                 HBasicBlock* continue_block) {
   if (continue_block != NULL) {
     continue_block->SetJoinId(statement->ContinueId());
   }
-  exit_block_ =
-      JoinBlocks(exit_block(), continue_block, statement->ContinueId());
-}
-
-HBasicBlock* HSubgraph::JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id) {
-  if (a == NULL) return b;
-  if (b == NULL) return a;
-  HBasicBlock* target = graph_->CreateBasicBlock();
-  a->Goto(target);
-  b->Goto(target);
-  target->SetJoinId(id);
-  return target;
+  return CreateJoin(exit_block, continue_block, statement->ContinueId());
 }

-void HSubgraph::AppendEndless(IterationStatement* statement,
+HBasicBlock* HGraphBuilder::CreateEndless(IterationStatement* statement,
                               HBasicBlock* body_entry,
                               HBasicBlock* body_exit,
                               HBasicBlock* break_block) {
-  if (exit_block() != NULL) {
-    exit_block()->Goto(body_entry, false);
-  }
-  if (body_exit != NULL) {
-    body_exit->Goto(body_entry, true);
-  }
+  if (body_exit != NULL) body_exit->Goto(body_entry, true);
   if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
-  exit_block_ = break_block;
   body_entry->PostProcessLoopHeader(statement);
+  return break_block;
 }

-void HSubgraph::AppendDoWhile(IterationStatement* statement,
+HBasicBlock* HGraphBuilder::CreateDoWhile(IterationStatement* statement,
                               HBasicBlock* body_entry,
                               HBasicBlock* go_back,
                               HBasicBlock* exit_block,
                               HBasicBlock* break_block) {
-  if (this->exit_block() != NULL) {
-    this->exit_block()->Goto(body_entry, false);
-  }
-  if (go_back != NULL) {
-    go_back->Goto(body_entry, true);
-  }
+  if (go_back != NULL) go_back->Goto(body_entry, true);
   if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
-  exit_block_ =
-      JoinBlocks(exit_block, break_block, statement->ExitId());
+  HBasicBlock* new_exit =
+      CreateJoin(exit_block, break_block, statement->ExitId());
   body_entry->PostProcessLoopHeader(statement);
+  return new_exit;
 }

-void HSubgraph::AppendWhile(IterationStatement* statement,
-                            HBasicBlock* condition_entry,
-                            HBasicBlock* exit_block,
-                            HBasicBlock* body_exit,
-                            HBasicBlock* break_block,
-                            HBasicBlock* loop_entry,
-                            HBasicBlock* loop_exit) {
-  if (this->exit_block() != NULL) {
-    this->exit_block()->Goto(condition_entry, false);
-  }
-  if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
-  exit_block_ =
-      JoinBlocks(exit_block, break_block, statement->ExitId());
-  if (loop_entry != NULL) {
-    if (body_exit != NULL) {
-      body_exit->Goto(loop_entry, true);
-    }
-    loop_entry->SetJoinId(statement->EntryId());
-    exit_block_ = JoinBlocks(exit_block_, loop_exit, statement->ExitId());
-  } else {
-    if (body_exit != NULL) {
-      body_exit->Goto(condition_entry, true);
-    }
-  }
-  condition_entry->PostProcessLoopHeader(statement);
-}
-
-void HSubgraph::Append(BreakableStatement* stmt,
-                       HBasicBlock* entry_block,
-                       HBasicBlock* exit_block,
-                       HBasicBlock* break_block) {
-  exit_block_->Goto(entry_block);
-  exit_block_ = exit_block;
-  if (stmt != NULL) {
-    entry_block->SetJoinId(stmt->EntryId());
-    if (break_block != NULL) break_block->SetJoinId(stmt->EntryId());
-    exit_block_ = JoinBlocks(exit_block, break_block, stmt->ExitId());
-  }
-}
+HBasicBlock* HGraphBuilder::CreateWhile(IterationStatement* statement,
+                                        HBasicBlock* loop_entry,
+                                        HBasicBlock* cond_false,
+                                        HBasicBlock* body_exit,
+                                        HBasicBlock* break_block) {
+  if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
+  HBasicBlock* new_exit =
+      CreateJoin(cond_false, break_block, statement->ExitId());
+  if (body_exit != NULL) body_exit->Goto(loop_entry, true);
+  loop_entry->PostProcessLoopHeader(statement);
+  return new_exit;
 }

-void HSubgraph::FinishExit(HControlInstruction* instruction) {
-  ASSERT(exit_block() != NULL);
-  exit_block_->Finish(instruction);
-  exit_block_->ClearEnvironment();
-  exit_block_ = NULL;
+void HBasicBlock::FinishExit(HControlInstruction* instruction) {
+  Finish(instruction);
+  ClearEnvironment();
 }
@@ -2165,16 +2112,16 @@ HGraph* HGraphBuilder::CreateGraph(CompilationInfo* info) {
     ZoneList<Statement*>* stmts = info->function()->body();
     HSubgraph* body = CreateGotoSubgraph(environment());
+    current_block()->Goto(body->entry_block());
     AddToSubgraph(body, stmts);
     if (HasStackOverflow()) return NULL;
-    current_subgraph_->Append(NULL,
-                              body->entry_block(),
-                              body->exit_block(),
-                              NULL);
     body->entry_block()->SetJoinId(info->function()->id());
+    set_current_block(body->exit_block());
     if (graph()->exit_block() != NULL) {
-      graph_->FinishExit(new HReturn(graph_->GetConstantUndefined()));
+      HReturn* instr = new HReturn(graph()->GetConstantUndefined());
+      graph()->exit_block()->FinishExit(instr);
+      graph()->set_exit_block(NULL);
     }
   }
@@ -2361,28 +2308,29 @@ HSubgraph* HGraphBuilder::CreateBranchSubgraph(HEnvironment* env) {
 }

-HSubgraph* HGraphBuilder::CreateLoopHeaderSubgraph(HEnvironment* env) {
-  HSubgraph* subgraph = new HSubgraph(graph());
-  HBasicBlock* block = graph()->CreateBasicBlock();
-  HEnvironment* new_env = env->CopyAsLoopHeader(block);
-  block->SetInitialEnvironment(new_env);
-  subgraph->Initialize(block);
-  subgraph->entry_block()->AttachLoopInformation();
-  return subgraph;
+HBasicBlock* HGraphBuilder::CreateLoopHeader() {
+  HBasicBlock* header = graph()->CreateBasicBlock();
+  HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
+  header->SetInitialEnvironment(entry_env);
+  header->AttachLoopInformation();
+  return header;
 }

 void HGraphBuilder::VisitBlock(Block* stmt) {
   if (stmt->labels() != NULL) {
     HSubgraph* block_graph = CreateGotoSubgraph(environment());
+    current_block()->Goto(block_graph->entry_block());
+    block_graph->entry_block()->SetJoinId(stmt->EntryId());
     BreakAndContinueInfo break_info(stmt);
     { BreakAndContinueScope push(&break_info, this);
       ADD_TO_SUBGRAPH(block_graph, stmt->statements());
     }
-    subgraph()->Append(stmt,
-                       block_graph->entry_block(),
-                       block_graph->exit_block(),
-                       break_info.break_block());
+    HBasicBlock* break_block = break_info.break_block();
+    if (break_block != NULL) break_block->SetJoinId(stmt->EntryId());
+    set_current_block(CreateJoin(block_graph->exit_block(),
+                                 break_block,
+                                 stmt->ExitId()));
   } else {
     VisitStatements(stmt->statements());
   }
@@ -2418,9 +2366,9 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
     else_graph->entry_block()->SetJoinId(stmt->ElseId());
     ADD_TO_SUBGRAPH(else_graph, stmt->else_statement());
-    current_subgraph_->AppendJoin(then_graph->exit_block(),
-                                  else_graph->exit_block(),
-                                  stmt->id());
+    set_current_block(CreateJoin(then_graph->exit_block(),
+                                 else_graph->exit_block(),
+                                 stmt->id()));
   }
 }
@@ -2476,7 +2424,8 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
     // Not an inlined return, so an actual one.
     VISIT_FOR_VALUE(stmt->expression());
     HValue* result = environment()->Pop();
-    subgraph()->FinishExit(new HReturn(result));
+    current_block()->FinishExit(new HReturn(result));
+    set_current_block(NULL);
   } else {
     // Return from an inlined function, visit the subexpression in the
     // expression context of the call.
@@ -2685,145 +2634,116 @@ bool HGraph::HasOsrEntryAt(IterationStatement* statement) {
 }

-void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) {
+void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
   if (!graph()->HasOsrEntryAt(statement)) return;

   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
   HBasicBlock* osr_entry = graph()->CreateBasicBlock();
   HValue* true_value = graph()->GetConstantTrue();
   HTest* test = new HTest(true_value, non_osr_entry, osr_entry);
-  exit_block()->Finish(test);
+  current_block()->Finish(test);

   HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
   non_osr_entry->Goto(loop_predecessor);

+  set_current_block(osr_entry);
   int osr_entry_id = statement->OsrEntryId();
   // We want the correct environment at the OsrEntry instruction.  Build
   // it explicitly.  The expression stack should be empty.
-  int count = osr_entry->last_environment()->length();
-  ASSERT(count == (osr_entry->last_environment()->parameter_count() +
-                   osr_entry->last_environment()->local_count()));
+  int count = environment()->length();
+  ASSERT(count ==
+         (environment()->parameter_count() + environment()->local_count()));
   for (int i = 0; i < count; ++i) {
     HUnknownOSRValue* unknown = new HUnknownOSRValue;
-    osr_entry->AddInstruction(unknown);
-    osr_entry->last_environment()->Bind(i, unknown);
+    AddInstruction(unknown);
+    environment()->Bind(i, unknown);
   }

-  osr_entry->AddSimulate(osr_entry_id);
-  osr_entry->AddInstruction(new HOsrEntry(osr_entry_id));
-  osr_entry->Goto(loop_predecessor);
+  AddSimulate(osr_entry_id);
+  AddInstruction(new HOsrEntry(osr_entry_id));
+  current_block()->Goto(loop_predecessor);
   loop_predecessor->SetJoinId(statement->EntryId());
-  set_exit_block(loop_predecessor);
+  set_current_block(loop_predecessor);
 }

 void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
   ASSERT(current_block() != NULL);
-  subgraph()->PreProcessOsrEntry(stmt);
-
-  HSubgraph* body_graph = CreateLoopHeaderSubgraph(environment());
+  PreProcessOsrEntry(stmt);
+  HBasicBlock* loop_entry = CreateLoopHeader();
+  current_block()->Goto(loop_entry, false);
+  set_current_block(loop_entry);
+
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+    Visit(stmt->body());
+    CHECK_BAILOUT;
   }
-  body_graph->ResolveContinue(stmt, break_info.continue_block());
-
-  if (body_graph->exit_block() == NULL || stmt->cond()->ToBooleanIsTrue()) {
-    subgraph()->AppendEndless(stmt,
-                              body_graph->entry_block(),
-                              body_graph->exit_block(),
-                              break_info.break_block());
-  } else {
-    HSubgraph* go_back = CreateEmptySubgraph();
-    HSubgraph* exit = CreateEmptySubgraph();
-    {
-      SubgraphScope scope(this, body_graph);
-      VISIT_FOR_CONTROL(stmt->cond(),
-                        go_back->entry_block(),
-                        exit->entry_block());
-      go_back->entry_block()->SetJoinId(stmt->BackEdgeId());
-      exit->entry_block()->SetJoinId(stmt->ExitId());
-    }
-    subgraph()->AppendDoWhile(stmt,
-                              body_graph->entry_block(),
-                              go_back->exit_block(),
-                              exit->exit_block(),
-                              break_info.break_block());
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+  HBasicBlock* loop_exit = NULL;
+  if (body_exit == NULL || stmt->cond()->ToBooleanIsTrue()) {
+    loop_exit = CreateEndless(stmt,
+                              loop_entry,
+                              body_exit,
+                              break_info.break_block());
+  } else {
+    set_current_block(body_exit);
+    HBasicBlock* cond_true = graph()->CreateBasicBlock();
+    HBasicBlock* cond_false = graph()->CreateBasicBlock();
+    VISIT_FOR_CONTROL(stmt->cond(), cond_true, cond_false);
+    cond_true->SetJoinId(stmt->BackEdgeId());
+    cond_false->SetJoinId(stmt->ExitId());
+    loop_exit = CreateDoWhile(stmt,
+                              loop_entry,
+                              cond_true,
+                              cond_false,
+                              break_info.break_block());
   }
+  set_current_block(loop_exit);
 }

 void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
   ASSERT(current_block() != NULL);
-  subgraph()->PreProcessOsrEntry(stmt);
-
-  HSubgraph* cond_graph = NULL;
-  HSubgraph* body_graph = NULL;
-  HSubgraph* exit_graph = NULL;
-
-  // If the condition is constant true, do not generate a condition subgraph.
-  if (stmt->cond()->ToBooleanIsTrue()) {
-    body_graph = CreateLoopHeaderSubgraph(environment());
-  } else {
-    cond_graph = CreateLoopHeaderSubgraph(environment());
-    body_graph = CreateEmptySubgraph();
-    exit_graph = CreateEmptySubgraph();
-    {
-      SubgraphScope scope(this, cond_graph);
-      VISIT_FOR_CONTROL(stmt->cond(),
-                        body_graph->entry_block(),
-                        exit_graph->entry_block());
-      body_graph->entry_block()->SetJoinId(stmt->BodyId());
-      exit_graph->entry_block()->SetJoinId(stmt->ExitId());
-    }
-  }
+  PreProcessOsrEntry(stmt);
+  HBasicBlock* loop_entry = CreateLoopHeader();
+  current_block()->Goto(loop_entry, false);
+  set_current_block(loop_entry);
+
+  // If the condition is constant true, do not generate a branch.
+  HBasicBlock* cond_false = NULL;
+  if (!stmt->cond()->ToBooleanIsTrue()) {
+    HBasicBlock* cond_true = graph()->CreateBasicBlock();
+    cond_false = graph()->CreateBasicBlock();
+    VISIT_FOR_CONTROL(stmt->cond(), cond_true, cond_false);
+    cond_true->SetJoinId(stmt->BodyId());
+    cond_false->SetJoinId(stmt->ExitId());
+    set_current_block(cond_true);
+  }

   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+    Visit(stmt->body());
+    CHECK_BAILOUT;
   }
-  body_graph->ResolveContinue(stmt, break_info.continue_block());
-
-  if (cond_graph != NULL) {
-    AppendPeeledWhile(stmt,
-                      cond_graph->entry_block(),
-                      exit_graph->exit_block(),
-                      body_graph->exit_block(),
-                      break_info.break_block());
-  } else {
-    // TODO(fschneider): Implement peeling for endless loops as well.
-    subgraph()->AppendEndless(stmt,
-                              body_graph->entry_block(),
-                              body_graph->exit_block(),
-                              break_info.break_block());
-  }
-}
-
-void HGraphBuilder::AppendPeeledWhile(IterationStatement* stmt,
-                                      HBasicBlock* condition_entry,
-                                      HBasicBlock* exit_block,
-                                      HBasicBlock* body_exit,
-                                      HBasicBlock* break_block) {
-  HBasicBlock* loop_entry = NULL;
-  HBasicBlock* loop_exit = NULL;
-  if (FLAG_use_peeling && body_exit != NULL && stmt != peeled_statement_) {
-    // Save the last peeled iteration statement to prevent infinite recursion.
-    IterationStatement* outer_peeled_statement = peeled_statement_;
-    peeled_statement_ = stmt;
-    HSubgraph* loop = CreateGotoSubgraph(body_exit->last_environment());
-    ADD_TO_SUBGRAPH(loop, stmt);
-    peeled_statement_ = outer_peeled_statement;
-    loop_entry = loop->entry_block();
-    loop_exit = loop->exit_block();
-  }
-  subgraph()->AppendWhile(stmt,
-                          condition_entry,
-                          exit_block,
-                          body_exit,
-                          break_block,
-                          loop_entry,
-                          loop_exit);
-}
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+  HBasicBlock* loop_exit = NULL;
+  if (stmt->cond()->ToBooleanIsTrue()) {
+    // TODO(fschneider): Implement peeling for endless loops as well.
+    loop_exit = CreateEndless(stmt,
+                              loop_entry,
+                              body_exit,
+                              break_info.break_block());
+  } else {
+    loop_exit = CreateWhile(stmt,
+                            loop_entry,
+                            cond_false,
+                            body_exit,
+                            break_info.break_block());
+  }
+  set_current_block(loop_exit);
+}
@@ -2834,57 +2754,50 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
     CHECK_BAILOUT;
   }
   ASSERT(current_block() != NULL);
-  subgraph()->PreProcessOsrEntry(stmt);
-
-  HSubgraph* cond_graph = NULL;
-  HSubgraph* body_graph = NULL;
-  HSubgraph* exit_graph = NULL;
-  if (stmt->cond() != NULL) {
-    cond_graph = CreateLoopHeaderSubgraph(environment());
-    body_graph = CreateEmptySubgraph();
-    exit_graph = CreateEmptySubgraph();
-    {
-      SubgraphScope scope(this, cond_graph);
-      VISIT_FOR_CONTROL(stmt->cond(),
-                        body_graph->entry_block(),
-                        exit_graph->entry_block());
-      body_graph->entry_block()->SetJoinId(stmt->BodyId());
-      exit_graph->entry_block()->SetJoinId(stmt->ExitId());
-    }
-  } else {
-    body_graph = CreateLoopHeaderSubgraph(environment());
-  }
+  PreProcessOsrEntry(stmt);
+  HBasicBlock* loop_entry = CreateLoopHeader();
+  current_block()->Goto(loop_entry, false);
+  set_current_block(loop_entry);
+
+  HBasicBlock* cond_false = NULL;
+  if (stmt->cond() != NULL) {
+    HBasicBlock* cond_true = graph()->CreateBasicBlock();
+    cond_false = graph()->CreateBasicBlock();
+    VISIT_FOR_CONTROL(stmt->cond(), cond_true, cond_false);
+    cond_true->SetJoinId(stmt->BodyId());
+    cond_false->SetJoinId(stmt->ExitId());
+    set_current_block(cond_true);
+  }

   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+    Visit(stmt->body());
+    CHECK_BAILOUT;
   }
-
-  HSubgraph* next_graph = NULL;
-  body_graph->ResolveContinue(stmt, break_info.continue_block());
-
-  if (stmt->next() != NULL && body_graph->exit_block() != NULL) {
-    next_graph =
-        CreateGotoSubgraph(body_graph->exit_block()->last_environment());
-    ADD_TO_SUBGRAPH(next_graph, stmt->next());
-    body_graph->Append(NULL,
-                       next_graph->entry_block(),
-                       next_graph->exit_block(),
-                       NULL);
-    next_graph->entry_block()->SetJoinId(stmt->ContinueId());
-  }
-
-  if (cond_graph != NULL) {
-    AppendPeeledWhile(stmt,
-                      cond_graph->entry_block(),
-                      exit_graph->exit_block(),
-                      body_graph->exit_block(),
-                      break_info.break_block());
-  } else {
-    subgraph()->AppendEndless(stmt,
-                              body_graph->entry_block(),
-                              body_graph->exit_block(),
-                              break_info.break_block());
-  }
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+
+  if (stmt->next() != NULL && body_exit != NULL) {
+    set_current_block(body_exit);
+    Visit(stmt->next());
+    CHECK_BAILOUT;
+    body_exit = current_block();
+  }
+
+  HBasicBlock* loop_exit = NULL;
+  if (stmt->cond() == NULL) {
+    loop_exit = CreateEndless(stmt,
+                              loop_entry,
+                              body_exit,
+                              break_info.break_block());
+  } else {
+    loop_exit = CreateWhile(stmt,
+                            loop_entry,
+                            cond_false,
+                            body_exit,
+                            break_info.break_block());
+  }
+  set_current_block(loop_exit);
 }
@@ -2937,9 +2850,9 @@ void HGraphBuilder::VisitConditional(Conditional* expr) {
   else_graph->entry_block()->SetJoinId(expr->ElseId());
   ADD_TO_SUBGRAPH(else_graph, expr->else_expression());
-  current_subgraph_->AppendJoin(then_graph->exit_block(),
-                                else_graph->exit_block(),
-                                expr->id());
+  set_current_block(CreateJoin(then_graph->exit_block(),
+                               else_graph->exit_block(),
+                               expr->id()));
   ast_context()->ReturnValue(Pop());
 }
@@ -3317,7 +3230,8 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
   HSubgraph* default_graph = CreateBranchSubgraph(environment());
   { SubgraphScope scope(this, default_graph);
     if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
-      default_graph->FinishExit(new HDeoptimize());
+      default_graph->exit_block()->FinishExit(new HDeoptimize());
+      default_graph->set_exit_block(NULL);
     } else {
       HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
       Push(value);
@@ -3604,7 +3518,8 @@ void HGraphBuilder::VisitThrow(Throw* expr) {
   instr->set_position(expr->position());
   AddInstruction(instr);
   AddSimulate(expr->id());
-  current_subgraph_->FinishExit(new HAbnormalExit);
+  current_block()->FinishExit(new HAbnormalExit);
+  set_current_block(NULL);
 }
@@ -3652,7 +3567,8 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
   HSubgraph* default_graph = CreateBranchSubgraph(environment());
   { SubgraphScope scope(this, default_graph);
     if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
-      default_graph->FinishExit(new HDeoptimize());
+      default_graph->exit_block()->FinishExit(new HDeoptimize());
+      default_graph->set_exit_block(NULL);
     } else {
       HInstruction* instr = BuildLoadNamedGeneric(object, expr);
       instr->set_position(expr->position());
@@ -3853,9 +3769,11 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
     HInstruction* elements = AddInstruction(new HArgumentsElements);
     result = new HArgumentsLength(elements);
   } else {
+    Push(graph()->GetArgumentsObject());
     VisitForValue(expr->key());
     if (HasStackOverflow()) return false;
     HValue* key = Pop();
+    Drop(1);  // Arguments object.
     HInstruction* elements = AddInstruction(new HArgumentsElements);
     HInstruction* length = AddInstruction(new HArgumentsLength(elements));
     AddInstruction(new HBoundsCheck(key, length));
@@ -4010,7 +3928,8 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
   HSubgraph* default_graph = CreateBranchSubgraph(environment());
   { SubgraphScope scope(this, default_graph);
     if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
-      default_graph->FinishExit(new HDeoptimize());
+      default_graph->exit_block()->FinishExit(new HDeoptimize());
+      default_graph->set_exit_block(NULL);
     } else {
       HContext* context = new HContext;
       AddInstruction(context);
@@ -4091,6 +4010,8 @@ bool HGraphBuilder::TryInline(Call* expr) {
       !Scope::Analyze(&inner_info)) {
     if (Top::has_pending_exception()) {
       SetStackOverflow();
+      // Stop trying to optimize and inline this function.
+      target->shared()->set_optimization_disabled(true);
     }
     return false;
   }
@@ -4730,9 +4651,9 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
     false_graph->exit_block()->last_environment()->Push(
         graph_->GetConstantFalse());
-    current_subgraph_->AppendJoin(true_graph->exit_block(),
-                                  false_graph->exit_block(),
-                                  expr->id());
+    set_current_block(CreateJoin(true_graph->exit_block(),
+                                 false_graph->exit_block(),
+                                 expr->id()));
     ast_context()->ReturnValue(Pop());
   } else {
     ASSERT(ast_context()->IsEffect());
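The CreateJoin helper introduced at the top of this file has a simple contract: NULL stands for an unreachable predecessor, joining with an unreachable side yields the other side unchanged, and only two live predecessors get a fresh join block. A standalone toy model of just that contract (not V8 code):

#include <cstdio>

struct Block { int id; };

// Toy version of the NULL-handling in HGraphBuilder::CreateJoin.
Block* CreateJoin(Block* first, Block* second, Block* fresh_join) {
  if (first == nullptr) return second;   // only 'second' is reachable
  if (second == nullptr) return first;   // only 'first' is reachable
  return fresh_join;                     // both reachable: real join block
}

int main() {
  Block a{1}, b{2}, join{3};
  std::printf("%d\n", CreateJoin(nullptr, &a, &join)->id);  // prints 1
  std::printf("%d\n", CreateJoin(&a, &b, &join)->id);       // prints 3
  return 0;
}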

53
deps/v8/src/hydrogen.h

@@ -117,6 +117,7 @@ class HBasicBlock: public ZoneObject {
   void SetJoinId(int id);

   void Finish(HControlInstruction* last);
+  void FinishExit(HControlInstruction* instruction);
   void Goto(HBasicBlock* block, bool include_stack_check = false);

   int PredecessorIndexOf(HBasicBlock* predecessor) const;
@@ -206,34 +207,6 @@ class HSubgraph: public ZoneObject {
     exit_block_ = block;
   }

-  void PreProcessOsrEntry(IterationStatement* statement);
-
-  void AppendJoin(HBasicBlock* first, HBasicBlock* second, int join_id);
-  void AppendWhile(IterationStatement* statement,
-                   HBasicBlock* condition_entry,
-                   HBasicBlock* exit_block,
-                   HBasicBlock* body_exit,
-                   HBasicBlock* break_block,
-                   HBasicBlock* loop_entry,
-                   HBasicBlock* loop_exit);
-  void AppendDoWhile(IterationStatement* statement,
-                     HBasicBlock* body_entry,
-                     HBasicBlock* go_back,
-                     HBasicBlock* exit_block,
-                     HBasicBlock* break_block);
-  void AppendEndless(IterationStatement* statement,
-                     HBasicBlock* body_entry,
-                     HBasicBlock* body_exit,
-                     HBasicBlock* break_block);
-  void Append(BreakableStatement* stmt,
-              HBasicBlock* entry_block,
-              HBasicBlock* exit_block,
-              HBasicBlock* break_block);
-  void ResolveContinue(IterationStatement* statement,
-                       HBasicBlock* continue_block);
-  HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id);
-  void FinishExit(HControlInstruction* instruction);
-
   void Initialize(HBasicBlock* block) {
     ASSERT(entry_block_ == NULL);
     entry_block_ = block;
@@ -698,11 +671,29 @@ class HGraphBuilder: public AstVisitor {
   void Bailout(const char* reason);

-  void AppendPeeledWhile(IterationStatement* stmt,
-                         HBasicBlock* condition_entry,
-                         HBasicBlock* exit_block,
-                         HBasicBlock* body_exit,
-                         HBasicBlock* break_block);
+  void PreProcessOsrEntry(IterationStatement* statement);
+
+  HBasicBlock* CreateJoin(HBasicBlock* first,
+                          HBasicBlock* second,
+                          int join_id);
+  HBasicBlock* CreateWhile(IterationStatement* statement,
+                           HBasicBlock* loop_entry,
+                           HBasicBlock* cond_false,
+                           HBasicBlock* body_exit,
+                           HBasicBlock* break_block);
+  HBasicBlock* CreateDoWhile(IterationStatement* statement,
+                             HBasicBlock* body_entry,
+                             HBasicBlock* go_back,
+                             HBasicBlock* exit_block,
+                             HBasicBlock* break_block);
+  HBasicBlock* CreateEndless(IterationStatement* statement,
+                             HBasicBlock* body_entry,
+                             HBasicBlock* body_exit,
+                             HBasicBlock* break_block);
+  HBasicBlock* JoinContinue(IterationStatement* statement,
+                            HBasicBlock* exit_block,
+                            HBasicBlock* continue_block);

   void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
   void AddToSubgraph(HSubgraph* graph, Statement* stmt);
@@ -748,7 +739,7 @@ class HGraphBuilder: public AstVisitor {
   HSubgraph* CreateEmptySubgraph();
   HSubgraph* CreateGotoSubgraph(HEnvironment* env);
   HSubgraph* CreateBranchSubgraph(HEnvironment* env);
-  HSubgraph* CreateLoopHeaderSubgraph(HEnvironment* env);
+  HBasicBlock* CreateLoopHeader();
   HSubgraph* CreateInlinedSubgraph(HEnvironment* outer,
                                    Handle<JSFunction> target,
                                    FunctionLiteral* function);

5
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -3399,7 +3399,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ test(edx, Immediate(kSmiTagMask));
   __ j(not_zero, &base_nonsmi);

-  // Optimized version when both exponent and base is a smi.
+  // Optimized version when both exponent and base are smis.
   Label powi;
   __ SmiUntag(edx);
   __ cvtsi2sd(xmm0, Operand(edx));
@@ -3438,7 +3438,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ j(not_carry, &no_multiply);
   __ mulsd(xmm1, xmm0);
   __ bind(&no_multiply);
-  __ test(eax, Operand(eax));
   __ mulsd(xmm0, xmm0);
   __ j(not_zero, &while_true);
@@ -3525,7 +3524,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
   __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
   __ mov(eax, ecx);
-  __ ret(2);
+  __ ret(2 * kPointerSize);

   __ bind(&call_runtime);
   __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

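Note: two details in the MathPowStub hunks are worth spelling out. The `ret(2)` fix matters because the ia32 `ret n` instruction pops n bytes, not n arguments, so returning past two stack slots must pop `2 * kPointerSize`. The deleted `test(eax, eax)` was redundant: `mulsd` does not touch EFLAGS, so the `j(not_zero)` still observes the zero flag set by the earlier `shr(eax, 1)`. The loop itself is exponentiation by squaring; a plain C++ sketch of the same computation, mirroring the stub's xmm0/xmm1 roles (this is an illustration, not the stub code):

    // acc plays the role of xmm1, square the role of xmm0, exponent of eax.
    double powi(double base, unsigned exponent) {
      double acc = 1.0;
      double square = base;
      while (exponent != 0) {
        if (exponent & 1) acc *= square;  // the "carry set" multiply
        square *= square;                 // mulsd(xmm0, xmm0)
        exponent >>= 1;                   // shr(eax, 1), which also sets flags
      }
      return acc;
    }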
39
deps/v8/src/ia32/codegen-ia32.cc

@@ -3526,7 +3526,8 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  frame_->EmitPush(esi);  // The context is the first argument.
  frame_->EmitPush(Immediate(pairs));
  frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
-  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+  frame_->EmitPush(Immediate(Smi::FromInt(strict_mode_flag())));
+  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
  // Return value is ignored.
}
@@ -5259,7 +5260,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
      // by initialization.
      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
    } else {
-      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+      frame_->Push(Smi::FromInt(strict_mode_flag()));
+      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
    }
    // Storing a variable must keep the (new) value on the expression
    // stack. This is necessary for compiling chained assignment
@@ -5360,12 +5362,22 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
void CodeGenerator::VisitLiteral(Literal* node) {
  Comment cmnt(masm_, "[ Literal");
+  if (frame_->ConstantPoolOverflowed()) {
+    Result temp = allocator_->Allocate();
+    ASSERT(temp.is_valid());
+    if (in_safe_int32_mode()) {
+      temp.set_untagged_int32(true);
+    }
+    __ Set(temp.reg(), Immediate(node->handle()));
+    frame_->Push(&temp);
+  } else {
    if (in_safe_int32_mode()) {
      frame_->PushUntaggedElement(node->handle());
    } else {
      frame_->Push(node->handle());
    }
+  }
}

void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
@@ -5608,8 +5620,9 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
        Load(property->key());
        Load(property->value());
        if (property->emit_store()) {
+          frame_->Push(Smi::FromInt(NONE));  // PropertyAttributes
          // Ignore the result.
-          Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
+          Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
        } else {
          frame_->Drop(3);
        }
@@ -8300,6 +8313,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
    switch (op) {
      case Token::SUB: {
        __ neg(value.reg());
+        frame_->Push(&value);
        if (node->no_negative_zero()) {
          // -MIN_INT is MIN_INT with the overflow flag set.
          unsafe_bailout_->Branch(overflow);
@@ -8312,17 +8326,18 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
      }
      case Token::BIT_NOT: {
        __ not_(value.reg());
+        frame_->Push(&value);
        break;
      }
      case Token::ADD: {
        // Unary plus has no effect on int32 values.
+        frame_->Push(&value);
        break;
      }
      default:
        UNREACHABLE();
        break;
    }
-    frame_->Push(&value);
  } else {
    Load(node->expression());
    bool can_overwrite = node->expression()->ResultOverwriteAllowed();
@@ -9458,11 +9473,13 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
  DeferredReferenceSetKeyedValue(Register value,
                                 Register key,
                                 Register receiver,
-                                Register scratch)
+                                Register scratch,
+                                StrictModeFlag strict_mode)
      : value_(value),
        key_(key),
        receiver_(receiver),
-        scratch_(scratch) {
+        scratch_(scratch),
+        strict_mode_(strict_mode) {
    set_comment("[ DeferredReferenceSetKeyedValue");
  }
@@ -9476,6 +9493,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
  Register receiver_;
  Register scratch_;
  Label patch_site_;
+  StrictModeFlag strict_mode_;
};
@@ -9534,7 +9552,9 @@ void DeferredReferenceSetKeyedValue::Generate() {
  }

  // Call the IC stub.
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+                                    : Builtins::KeyedStoreIC_Initialize));
  __ call(ic, RelocInfo::CODE_TARGET);
  // The delta from the start of the map-compare instruction to the
  // test instruction. We use masm_-> directly here instead of the
@@ -9896,7 +9916,8 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
        new DeferredReferenceSetKeyedValue(result.reg(),
                                           key.reg(),
                                           receiver.reg(),
-                                          tmp.reg());
+                                          tmp.reg(),
+                                          strict_mode_flag());

    // Check that the receiver is not a smi.
    __ test(receiver.reg(), Immediate(kSmiTagMask));
@@ -9951,7 +9972,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
    deferred->BindExit();
  } else {
-    result = frame()->CallKeyedStoreIC();
+    result = frame()->CallKeyedStoreIC(strict_mode_flag());
    // Make sure that we do not have a test instruction after the
    // call. A test instruction after the call is used to
    // indicate that we have generated an inline version of the

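Note: the pattern running through this file (and the rest of the commit) is mechanical: every store site now either selects the strict variant of an IC stub or pushes the language mode as an extra Smi argument before a runtime call. A distilled C++ sketch of that selection, with invented enum values standing in for V8's builtins table:

    // Not V8's API; the names merely echo the identifiers in the diff.
    enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };
    enum StubId { kStoreIC_Initialize, kStoreIC_Initialize_Strict };

    StubId SelectStoreStub(StrictModeFlag mode) {
      return (mode == kStrictMode) ? kStoreIC_Initialize_Strict
                                   : kStoreIC_Initialize;
    }

    // When the flag travels to the runtime it is encoded as a smi, i.e. the
    // integer shifted past the tag bit on 32-bit ports.
    int EncodeAsSmi(StrictModeFlag mode) {
      return static_cast<int>(mode) << 1;
    }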
378
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -322,23 +322,6 @@ void FullCodeGenerator::EmitReturnSequence() {
}

-FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
-    Token::Value op, Expression* left, Expression* right) {
-  ASSERT(ShouldInlineSmiCase(op));
-  if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
-    // We never generate inlined constant smi operations for these.
-    return kNoConstants;
-  } else if (right->IsSmiLiteral()) {
-    return kRightConstant;
-  } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
-    // Don't inline shifts with constant left hand side.
-    return kLeftConstant;
-  } else {
-    return kNoConstants;
-  }
-}
-
void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
}

@@ -548,7 +531,7 @@ void FullCodeGenerator::DoTest(Label* if_true,
  __ j(equal, if_true);
  __ cmp(result_register(), Factory::false_value());
  __ j(equal, if_false);
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
  __ test(result_register(), Operand(result_register()));
  __ j(zero, if_false);
  __ test(result_register(), Immediate(kSmiTagMask));
@@ -655,6 +638,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
  ASSERT(variable != NULL);  // Must have been resolved.
  Slot* slot = variable->AsSlot();
  Property* prop = variable->AsProperty();
+
  if (slot != NULL) {
    switch (slot->type()) {
      case Slot::PARAMETER:
@@ -740,7 +724,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
             prop->key()->AsLiteral()->handle()->IsSmi());
      __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
-      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(is_strict()
+          ? Builtins::KeyedStoreIC_Initialize_Strict
+          : Builtins::KeyedStoreIC_Initialize));
      EmitCallIC(ic, RelocInfo::CODE_TARGET);
    }
  }
@@ -757,7 +743,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  __ push(esi);  // The context is the first argument.
  __ push(Immediate(pairs));
  __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
-  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+  __ CallRuntime(Runtime::kDeclareGlobals, 4);
  // Return value is ignored.
}
@@ -814,7 +801,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
    SetSourcePosition(clause->position());
    Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
    EmitCallIC(ic, &patch_site);
-
    __ test(eax, Operand(eax));
    __ j(not_equal, &next_test);
    __ Drop(1);  // Switch value is no longer needed.
@@ -895,7 +881,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
  __ cmp(edx, Factory::empty_descriptor_array());
  __ j(equal, &call_runtime);
-  // Check that there in an enum cache in the non-empty instance
+  // Check that there is an enum cache in the non-empty instance
  // descriptors (edx). This is the case if the next enumeration
  // index field does not contain a smi.
  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
@@ -1380,7 +1366,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
          VisitForAccumulatorValue(value);
          __ mov(ecx, Immediate(key->handle()));
          __ mov(edx, Operand(esp, 0));
-          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+          Handle<Code> ic(Builtins::builtin(
+              is_strict() ? Builtins::StoreIC_Initialize_Strict
+                          : Builtins::StoreIC_Initialize));
          EmitCallIC(ic, RelocInfo::CODE_TARGET);
          PrepareForBailoutForId(key->id(), NO_REGISTERS);
        } else {
@@ -1394,7 +1382,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
        VisitForStackValue(key);
        VisitForStackValue(value);
        if (property->emit_store()) {
-          __ CallRuntime(Runtime::kSetProperty, 3);
+          __ push(Immediate(Smi::FromInt(NONE)));  // PropertyAttributes
+          __ CallRuntime(Runtime::kSetProperty, 4);
        } else {
          __ Drop(3);
        }
@@ -1572,14 +1561,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
  }

  Token::Value op = expr->binary_op();
-    ConstantOperand constant = ShouldInlineSmiCase(op)
-        ? GetConstantOperand(op, expr->target(), expr->value())
-        : kNoConstants;
-    ASSERT(constant == kRightConstant || constant == kNoConstants);
-    if (constant == kNoConstants) {
      __ push(eax);  // Left operand goes on the stack.
      VisitForAccumulatorValue(expr->value());
-    }

  OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
      ? OVERWRITE_RIGHT
@@ -1591,8 +1574,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
                            op,
                            mode,
                            expr->target(),
-                           expr->value(),
-                           constant);
+                           expr->value());
  } else {
    EmitBinaryOp(op, mode);
  }
@@ -1640,220 +1622,11 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}

-void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
-                                           OverwriteMode mode,
-                                           bool left_is_constant_smi,
-                                           Smi* value) {
-  NearLabel call_stub, done;
-  // Optimistically add smi value with unknown object. If result overflows or is
-  // not a smi then we had either a smi overflow or added a smi with a tagged
-  // pointer.
-  __ add(Operand(eax), Immediate(value));
-  __ j(overflow, &call_stub);
-  JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &done);
-
-  // Undo the optimistic add operation and call the shared stub.
-  __ bind(&call_stub);
-  __ sub(Operand(eax), Immediate(value));
-  TypeRecordingBinaryOpStub stub(Token::ADD, mode);
-  if (left_is_constant_smi) {
-    __ mov(edx, Immediate(value));
-  } else {
-    __ mov(edx, eax);
-    __ mov(eax, Immediate(value));
-  }
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
-                                           OverwriteMode mode,
-                                           bool left_is_constant_smi,
-                                           Smi* value) {
-  NearLabel call_stub, done;
-  // Optimistically subtract smi value with unknown object. If result overflows
-  // or is not a smi then we had either a smi overflow or added a smi with a
-  // tagged pointer.
-  if (left_is_constant_smi) {
-    __ mov(ecx, eax);
-    __ mov(eax, Immediate(value));
-    __ sub(Operand(eax), ecx);
-  } else {
-    __ sub(Operand(eax), Immediate(value));
-  }
-  __ j(overflow, &call_stub);
-  JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &done);
-
-  __ bind(&call_stub);
-  if (left_is_constant_smi) {
-    __ mov(edx, Immediate(value));
-    __ mov(eax, ecx);
-  } else {
-    __ add(Operand(eax), Immediate(value));  // Undo the subtraction.
-    __ mov(edx, eax);
-    __ mov(eax, Immediate(value));
-  }
-  TypeRecordingBinaryOpStub stub(Token::SUB, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
-                                               Token::Value op,
-                                               OverwriteMode mode,
-                                               Smi* value) {
-  NearLabel call_stub, smi_case, done;
-  int shift_value = value->value() & 0x1f;
-
-  JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &smi_case);
-
-  // Call stub.
-  __ bind(&call_stub);
-  __ mov(edx, eax);
-  __ mov(eax, Immediate(value));
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ jmp(&done);
-
-  // Smi case.
-  __ bind(&smi_case);
-  switch (op) {
-    case Token::SHL:
-      if (shift_value != 0) {
-        __ mov(edx, eax);
-        if (shift_value > 1) {
-          __ shl(edx, shift_value - 1);
-        }
-        // Convert int result to smi, checking that it is in int range.
-        STATIC_ASSERT(kSmiTagSize == 1);  // Adjust code if not the case.
-        __ add(edx, Operand(edx));
-        __ j(overflow, &call_stub);
-        __ mov(eax, edx);  // Put result back into eax.
-      }
-      break;
-    case Token::SAR:
-      if (shift_value != 0) {
-        __ sar(eax, shift_value);
-        __ and_(eax, ~kSmiTagMask);
-      }
-      break;
-    case Token::SHR:
-      // SHR must return a positive value. When shifting by 0 or 1 we need to
-      // check that smi tagging the result will not create a negative value.
-      if (shift_value < 2) {
-        __ mov(edx, eax);
-        __ SmiUntag(edx);
-        __ shr(edx, shift_value);
-        __ test(edx, Immediate(0xc0000000));
-        __ j(not_zero, &call_stub);
-        __ SmiTag(edx);
-        __ mov(eax, edx);  // Put result back into eax.
-      } else {
-        __ SmiUntag(eax);
-        __ shr(eax, shift_value);
-        __ SmiTag(eax);
-      }
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
-                                             Token::Value op,
-                                             OverwriteMode mode,
-                                             Smi* value) {
-  NearLabel smi_case, done;
-
-  JumpPatchSite patch_site(masm_);
-  patch_site.EmitJumpIfSmi(eax, &smi_case);
-
-  // The order of the arguments does not matter for bit-ops with a
-  // constant operand.
-  __ mov(edx, Immediate(value));
-  TypeRecordingBinaryOpStub stub(op, mode);
-  EmitCallIC(stub.GetCode(), &patch_site);
-  __ jmp(&done);
-
-  // Smi case.
-  __ bind(&smi_case);
-  switch (op) {
-    case Token::BIT_OR:
-      __ or_(Operand(eax), Immediate(value));
-      break;
-    case Token::BIT_XOR:
-      __ xor_(Operand(eax), Immediate(value));
-      break;
-    case Token::BIT_AND:
-      __ and_(Operand(eax), Immediate(value));
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  __ bind(&done);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
-                                                Token::Value op,
-                                                OverwriteMode mode,
-                                                bool left_is_constant_smi,
-                                                Smi* value) {
-  switch (op) {
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      EmitConstantSmiBitOp(expr, op, mode, value);
-      break;
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-      ASSERT(!left_is_constant_smi);
-      EmitConstantSmiShiftOp(expr, op, mode, value);
-      break;
-    case Token::ADD:
-      EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
-      break;
-    case Token::SUB:
-      EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
                                              Token::Value op,
                                              OverwriteMode mode,
                                              Expression* left,
-                                              Expression* right,
-                                              ConstantOperand constant) {
-  if (constant == kRightConstant) {
-    Smi* value = Smi::cast(*right->AsLiteral()->handle());
-    EmitConstantSmiBinaryOp(expr, op, mode, false, value);
-    return;
-  } else if (constant == kLeftConstant) {
-    Smi* value = Smi::cast(*left->AsLiteral()->handle());
-    EmitConstantSmiBinaryOp(expr, op, mode, true, value);
-    return;
-  }
+                                              Expression* right) {
  // Do combined smi check of the operands. Left operand is on the
  // stack. Right operand is in eax.
  NearLabel done, smi_case, stub_call;
@@ -1985,7 +1758,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
      __ mov(edx, eax);
      __ pop(eax);  // Restore value.
      __ mov(ecx, prop->key()->AsLiteral()->handle());
-      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(
+          is_strict() ? Builtins::StoreIC_Initialize_Strict
+                      : Builtins::StoreIC_Initialize));
      EmitCallIC(ic, RelocInfo::CODE_TARGET);
      break;
    }
@@ -2006,7 +1781,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
        __ pop(edx);
      }
      __ pop(eax);  // Restore value.
-      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(
+          is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+                      : Builtins::KeyedStoreIC_Initialize));
      EmitCallIC(ic, RelocInfo::CODE_TARGET);
      break;
    }
@@ -2101,7 +1878,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
      __ push(eax);  // Value.
      __ push(esi);  // Context.
      __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kStoreContextSlot, 3);
+      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+      __ CallRuntime(Runtime::kStoreContextSlot, 4);
      break;
    }
  }
@@ -2132,7 +1910,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
  } else {
    __ pop(edx);
  }
-  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      is_strict() ? Builtins::StoreIC_Initialize_Strict
+                  : Builtins::StoreIC_Initialize));
  EmitCallIC(ic, RelocInfo::CODE_TARGET);

  // If the assignment ends an initialization block, revert to fast case.
@@ -2170,7 +1950,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
  }
  // Record source code position before IC call.
  SetSourcePosition(expr->position());
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+                  : Builtins::KeyedStoreIC_Initialize));
  EmitCallIC(ic, RelocInfo::CODE_TARGET);

  // If the assignment ends an initialization block, revert to fast case.
@@ -2283,6 +2065,27 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
}

+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+                                                      int arg_count) {
+  // Push copy of the first argument or undefined if it doesn't exist.
+  if (arg_count > 0) {
+    __ push(Operand(esp, arg_count * kPointerSize));
+  } else {
+    __ push(Immediate(Factory::undefined_value()));
+  }
+
+  // Push the receiver of the enclosing function.
+  __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
+
+  // Push the strict mode flag.
+  __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+
+  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
+                 : Runtime::kResolvePossiblyDirectEval, 4);
+}
+
+
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
  // We want to verify that RecordJSReturnSite gets called on all paths
@@ -2311,21 +2114,30 @@ void FullCodeGenerator::VisitCall(Call* expr) {
        VisitForStackValue(args->at(i));
      }

-      // Push copy of the function - found below the arguments.
-      __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-
-      // Push copy of the first argument or undefined if it doesn't exist.
-      if (arg_count > 0) {
-        __ push(Operand(esp, arg_count * kPointerSize));
-      } else {
-        __ push(Immediate(Factory::undefined_value()));
-      }
-
-      // Push the receiver of the enclosing function and do runtime call.
-      __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
-      // Push the strict mode flag.
-      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
-      __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
+      // If we know that eval can only be shadowed by eval-introduced
+      // variables we attempt to load the global eval function directly
+      // in generated code. If we succeed, there is no need to perform a
+      // context lookup in the runtime system.
+      Label done;
+      if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+        Label slow;
+        EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+                                          NOT_INSIDE_TYPEOF,
+                                          &slow);
+        // Push the function and resolve eval.
+        __ push(eax);
+        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+        __ jmp(&done);
+        __ bind(&slow);
+      }
+
+      // Push copy of the function (found below the arguments) and
+      // resolve eval.
+      __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+      if (done.is_linked()) {
+        __ bind(&done);
+      }

      // The runtime call returns a pair of values in eax (function) and
      // edx (receiver). Touch up the stack with the right values.
@@ -2390,7 +2202,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
    Literal* key = prop->key()->AsLiteral();
    if (key != NULL && key->handle()->IsSymbol()) {
      // Call to a named property, use call IC.
+      { PreservePositionScope scope(masm()->positions_recorder());
        VisitForStackValue(prop->obj());
+      }
      EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
    } else {
      // Call to a keyed property.
@@ -3401,7 +3215,6 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
  ASSERT(args->length() == 1);
  VisitForAccumulatorValue(args->at(0));
-
  if (FLAG_debug_code) {
@@ -3417,7 +3230,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop, loop_condition,
+      non_trivial_array, not_size_one_array, loop,
      loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;

  ASSERT(args->length() == 2);
@@ -3459,7 +3272,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
  // If the array has length zero, return the empty string.
  __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
-  __ sar(array_length, 1);
+  __ SmiUntag(array_length);
  __ j(not_zero, &non_trivial_array);
  __ mov(result_operand, Factory::empty_string());
  __ jmp(&done);
@@ -3482,12 +3295,13 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
  // Loop condition: while (index < length).
  // Live loop registers: index, array_length, string,
  //                      scratch, string_length, elements.
-  __ jmp(&loop_condition);
-  __ bind(&loop);
-  __ cmp(index, Operand(array_length));
-  __ j(greater_equal, &done);
-
-  __ mov(string, FieldOperand(elements, index,
+  if (FLAG_debug_code) {
+    __ cmp(index, Operand(array_length));
+    __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
+  }
+  __ bind(&loop);
+  __ mov(string, FieldOperand(elements,
+                              index,
                              times_pointer_size,
                              FixedArray::kHeaderSize));
  __ test(string, Immediate(kSmiTagMask));
@@ -3502,7 +3316,6 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
         FieldOperand(string, SeqAsciiString::kLengthOffset));
  __ j(overflow, &bailout);
  __ add(Operand(index), Immediate(1));
-  __ bind(&loop_condition);
  __ cmp(index, Operand(array_length));
  __ j(less, &loop);
@@ -3531,7 +3344,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
  __ and_(scratch, Immediate(
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
+  __ cmp(scratch, ASCII_STRING_TYPE);
  __ j(not_equal, &bailout);
@@ -3791,6 +3604,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
      Label* if_true = NULL;
      Label* if_false = NULL;
      Label* fall_through = NULL;
+      // Notice that the labels are swapped.
      context()->PrepareTest(&materialize_true, &materialize_false,
                             &if_false, &if_true, &fall_through);
@@ -4023,7 +3837,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
    case NAMED_PROPERTY: {
      __ mov(ecx, prop->key()->AsLiteral()->handle());
      __ pop(edx);
-      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(
+          is_strict() ? Builtins::StoreIC_Initialize_Strict
+                      : Builtins::StoreIC_Initialize));
      EmitCallIC(ic, RelocInfo::CODE_TARGET);
      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
      if (expr->is_postfix()) {
@@ -4038,7 +3854,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
    case KEYED_PROPERTY: {
      __ pop(ecx);
      __ pop(edx);
-      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(
+          is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+                      : Builtins::KeyedStoreIC_Initialize));
      EmitCallIC(ic, RelocInfo::CODE_TARGET);
      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
      if (expr->is_postfix()) {
@@ -4386,6 +4204,22 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+      __ IncrementCounter(&Counters::named_load_full, 1);
+      break;
+    case Code::KEYED_LOAD_IC:
+      __ IncrementCounter(&Counters::keyed_load_full, 1);
+      break;
+    case Code::STORE_IC:
+      __ IncrementCounter(&Counters::named_store_full, 1);
+      break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(&Counters::keyed_store_full, 1);
+    default:
+      break;
+  }
  __ call(ic, RelocInfo::CODE_TARGET);
  if (patch_site != NULL && patch_site->is_bound()) {
    patch_site->EmitPatchInfo();

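Note: most of this file's delta is the removal of the EmitConstantSmi* fast paths in favor of the shared TypeRecordingBinaryOpStub. The subtlest piece of the deleted code is the SHR case: ia32 smis carry 31-bit payloads, so an unsigned right shift by 0 or 1 can produce a value that no longer fits once re-tagged, which is exactly what the `test(edx, Immediate(0xc0000000))` bailout guarded. A standalone C++ sketch of that check, assuming 32-bit words and a single low tag bit (an illustration, not the generated code):

    #include <stdint.h>

    const int kSmiTagSize = 1;

    // Returns false where the stub branched to the generic call path.
    bool ShrSmi(int32_t smi_in, int shift, int32_t* smi_out) {
      uint32_t value = static_cast<uint32_t>(smi_in >> kSmiTagSize);  // untag
      uint32_t result = value >> (shift & 0x1f);
      // Re-tagging doubles the value, so if either of the top two bits is set
      // the result cannot be represented as a smi.
      if (result & 0xc0000000u) return false;
      *smi_out = static_cast<int32_t>(result << kSmiTagSize);  // tag
      return true;
    }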
27
deps/v8/src/ia32/ic-ia32.cc

@@ -761,7 +761,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
}

-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- eax : value
  //  -- ecx : key
@@ -801,7 +802,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
  // Slow case: call runtime.
  __ bind(&slow);
-  GenerateRuntimeSetProperty(masm);
+  GenerateRuntimeSetProperty(masm, strict_mode);

  // Check whether the elements is a pixel array.
  __ bind(&check_pixel_array);
@@ -1488,7 +1489,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                  Code::ExtraICState extra_ic_state) {
+                                  StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- eax : value
  //  -- ecx : name
@@ -1499,7 +1500,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                         NOT_IN_LOOP,
                                         MONOMORPHIC,
-                                         extra_ic_state);
+                                         strict_mode);
  StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);

  // Cache miss: Jump to runtime.
@@ -1617,7 +1618,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}

-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+                                  StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- eax : value
  //  -- ecx : name
@@ -1628,14 +1630,17 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
  __ push(edx);
  __ push(ecx);
  __ push(eax);
-  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(NONE)));  // PropertyAttributes
+  __ push(Immediate(Smi::FromInt(strict_mode)));
+  __ push(ebx);  // return address

  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}

-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                              StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- eax : value
  //  -- ecx : key
@@ -1647,10 +1652,12 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
  __ push(edx);
  __ push(ecx);
  __ push(eax);
-  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(NONE)));  // PropertyAttributes
+  __ push(Immediate(Smi::FromInt(strict_mode)));  // Strict mode.
+  __ push(ebx);  // return address

  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}

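Note: these hunks grow Runtime::kSetProperty from three arguments (receiver, name, value) to five, adding a PropertyAttributes word and the StrictModeFlag. The flag changes observable behavior, not just bookkeeping: the same write to a read-only property throws in strict mode and is silently dropped otherwise. A toy C++ model of that distinction, with invented types rather than V8's object model:

    #include <map>
    #include <stdexcept>
    #include <string>

    enum StrictModeFlag { kNonStrictMode, kStrictMode };

    struct ToyObject {
      std::map<std::string, double> props;
      std::map<std::string, bool> read_only;

      void SetProperty(const std::string& name, double value,
                       StrictModeFlag mode) {
        if (read_only[name]) {
          if (mode == kStrictMode)
            throw std::runtime_error("strict_read_only_property");
          return;  // non-strict: the write is silently ignored
        }
        props[name] = value;
      }
    };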
20
deps/v8/src/ia32/lithium-codegen-ia32.cc

@@ -127,7 +127,7 @@ bool LCodeGen::GenerateRelocPadding() {
  int reloc_size = masm()->relocation_writer_size();
  while (reloc_size < deoptimization_reloc_size.min_size) {
    __ RecordComment(RelocInfo::kFillerCommentString, true);
-    reloc_size += RelocInfo::kRelocCommentSize;
+    reloc_size += RelocInfo::kMinRelocCommentSize;
  }
  return !is_aborted();
}
@@ -588,7 +588,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  Handle<DeoptimizationInputData> data =
      Factory::NewDeoptimizationInputData(length, TENURED);

-  data->SetTranslationByteArray(*translations_.CreateByteArray());
+  Handle<ByteArray> translations = translations_.CreateByteArray();
+  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
@@ -1912,12 +1913,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
  __ bind(&before_push_delta);
  __ mov(temp, Immediate(delta));
  __ StoreToSafepointRegisterSlot(temp, temp);
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
-  ASSERT_EQ(kAdditionalDelta,
-            masm_->SizeOfCodeGeneratedSince(&before_push_delta));
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
  // Put the result value into the eax slot and restore all registers.
  __ StoreToSafepointRegisterSlot(eax, eax);
  __ PopSafepointRegisters();
@@ -2786,7 +2782,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
-  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
+                         : Builtins::StoreIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2854,7 +2852,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->key()).is(ecx));
  ASSERT(ToRegister(instr->value()).is(eax));

-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+                         : Builtins::KeyedStoreIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

11
deps/v8/src/ia32/lithium-ia32.cc

@@ -870,11 +870,19 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
  ASSERT(instr->representation().IsDouble());
  ASSERT(instr->left()->representation().IsDouble());
  ASSERT(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), xmm2);
+    LOperand* right = UseFixedDouble(instr->right(), xmm1);
+    LArithmeticD* result = new LArithmeticD(op, left, right);
+    return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+  } else {
    LOperand* left = UseRegisterAtStart(instr->left());
    LOperand* right = UseRegisterAtStart(instr->right());
    LArithmeticD* result = new LArithmeticD(op, left, right);
    return DefineSameAsFirst(result);
+  }
}

LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
@@ -1165,8 +1173,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
      new LInstanceOfKnownGlobal(
          UseFixed(instr->value(), InstanceofStub::left()),
          FixedTemp(edi));
-  MarkAsSaveDoubles(result);
-  return AssignEnvironment(AssignPointerMap(DefineFixed(result, eax)));
+  return MarkAsCall(DefineFixed(result, eax), instr);
}

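Note: the Token::MOD special case above exists because SSE2 has no instruction for floating-point remainder, so Crankshaft must compile `a % b` on doubles as a call into C code. That call boundary is why the operands are pinned to fixed xmm registers and the instruction is marked as a call. The callee is, in effect, the standard remainder function:

    #include <cmath>

    // What the generated call computes for double MOD; a sketch of the
    // semantics, not V8's actual runtime entry point.
    double DoubleMod(double dividend, double divisor) {
      return std::fmod(dividend, divisor);
    }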
10
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -2552,12 +2552,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
  __ push(edx);  // receiver
  __ push(ecx);  // name
  __ push(eax);  // value
+  __ push(Immediate(Smi::FromInt(strict_mode_)));
  __ push(ebx);  // restore return address

  // Do tail-call to the runtime system.
  ExternalReference store_ic_property =
      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
-  __ TailCallExternalReference(store_ic_property, 3, 1);
+  __ TailCallExternalReference(store_ic_property, 4, 1);

  // Handle store cache miss.
  __ bind(&miss);
@@ -3712,10 +3713,13 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
  __ push(edx);
  __ push(ecx);
  __ push(eax);
-  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(NONE)));  // PropertyAttributes
+  __ push(Immediate(Smi::FromInt(
+      Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
+  __ push(ebx);  // return address

  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);

  return GetCode(flags);
}

25
deps/v8/src/ia32/virtual-frame-ia32.cc

@@ -1038,8 +1038,8 @@ Result VirtualFrame::CallStoreIC(Handle<String> name,
                                 StrictModeFlag strict_mode) {
  // Value and (if not contextual) receiver are on top of the frame.
  // The IC expects name in ecx, value in eax, and receiver in edx.
-  Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
-      ? Builtins::StoreIC_Initialize_Strict
-      : Builtins::StoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
+                                   : Builtins::StoreIC_Initialize));
  Result value = Pop();
@@ -1061,7 +1061,7 @@ Result VirtualFrame::CallStoreIC(Handle<String> name,
}

-Result VirtualFrame::CallKeyedStoreIC() {
+Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
  // Value, key, and receiver are on the top of the frame. The IC
  // expects value in eax, key in ecx, and receiver in edx.
  Result value = Pop();
@@ -1105,7 +1105,9 @@ Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
    receiver.Unuse();
  }

-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+                                   : Builtins::KeyedStoreIC_Initialize));
  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}
@@ -1306,6 +1308,7 @@ void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
+  ASSERT(!ConstantPoolOverflowed());
  elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
  elements_[element_count() - 1].set_untagged_int32(true);
}
@@ -1336,6 +1339,20 @@ void VirtualFrame::Push(Expression* expr) {
}

+void VirtualFrame::Push(Handle<Object> value) {
+  if (ConstantPoolOverflowed()) {
+    Result temp = cgen()->allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ Set(temp.reg(), Immediate(value));
+    Push(&temp);
+  } else {
+    FrameElement element =
+        FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
+    elements_.Add(element);
+  }
+}
+
+
#undef __

} }  // namespace v8::internal

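Note: the new VirtualFrame::Push(Handle&lt;Object&gt;) deals with constant-pool overflow. A frame element that merely refers to a constant only works while the frame's constant table has room, so once it overflows the value is materialized into an allocated register immediately. A toy C++ sketch of that strategy (all names and the limit are invented):

    #include <vector>

    struct Constant { int id; };

    class ToyFrame {
     public:
      bool ConstantPoolOverflowed() const { return constants_.size() >= kMax; }
      void Push(Constant c) {
        if (ConstantPoolOverflowed()) {
          EmitMove(c);               // materialize into a register right away
          PushRegisterElement();
        } else {
          constants_.push_back(c);   // stays virtual until it must be synced
        }
      }
     private:
      static const size_t kMax = 64;  // invented limit
      std::vector<Constant> constants_;
      void EmitMove(Constant) {}
      void PushRegisterElement() {}
    };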
6
deps/v8/src/ia32/virtual-frame-ia32.h

@@ -370,7 +370,7 @@ class VirtualFrame: public ZoneObject {
  // Call keyed store IC. Value, key, and receiver are found on top
  // of the frame. All three are dropped.
-  Result CallKeyedStoreIC();
+  Result CallKeyedStoreIC(StrictModeFlag strict_mode);

  // Call call IC. Function name, arguments, and receiver are found on top
  // of the frame and dropped by the call. The argument count does not
@@ -419,9 +419,11 @@ class VirtualFrame: public ZoneObject {
  void EmitPush(Immediate immediate,
                TypeInfo info = TypeInfo::Unknown());

+  inline bool ConstantPoolOverflowed();
+
  // Push an element on the virtual frame.
+  void Push(Handle<Object> value);
  inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
-  inline void Push(Handle<Object> value);
  inline void Push(Smi* value);

  void PushUntaggedElement(Handle<Object> value);

9
deps/v8/src/ic-inl.h

@@ -76,6 +76,15 @@ Code* IC::GetTargetAtAddress(Address address) {
void IC::SetTargetAtAddress(Address address, Code* target) {
  ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
+#ifdef DEBUG
+  // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
+  // ICs as strict mode. The strict-ness of the IC must be preserved.
+  Code* old_target = GetTargetAtAddress(address);
+  if (old_target->kind() == Code::STORE_IC ||
+      old_target->kind() == Code::KEYED_STORE_IC) {
+    ASSERT(old_target->extra_ic_state() == target->extra_ic_state());
+  }
+#endif
  Assembler::set_target_address_at(address, target->instruction_start());
}

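Note: the new DEBUG block in IC::SetTargetAtAddress pins down an invariant the rest of the commit relies on: a store site compiled for strict mode must never be re-patched with a non-strict target, because strictness lives in Code::extra_ic_state(). A simplified C++ restatement of the check, with stand-in types rather than V8's Code objects:

    #include <assert.h>

    enum Kind { STORE_IC, KEYED_STORE_IC, OTHER };
    struct ToyCode { Kind kind; int extra_ic_state; };

    void SetTarget(ToyCode* old_target, ToyCode* new_target) {
      if (old_target->kind == STORE_IC || old_target->kind == KEYED_STORE_IC) {
        // Strictness must survive every IC state transition at this site.
        assert(old_target->extra_ic_state == new_target->extra_ic_state);
      }
      // ... patch the call site to point at new_target ...
    }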
101
deps/v8/src/ic.cc

@ -343,7 +343,7 @@ void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return; if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address); ClearInlinedVersion(address);
SetTargetAtAddress(address, SetTargetAtAddress(address,
target->extra_ic_state() == kStoreICStrict (target->extra_ic_state() == kStrictMode)
? initialize_stub_strict() ? initialize_stub_strict()
: initialize_stub()); : initialize_stub());
} }
@ -366,7 +366,10 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
void KeyedStoreIC::Clear(Address address, Code* target) { void KeyedStoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return; if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub()); SetTargetAtAddress(address,
(target->extra_ic_state() == kStrictMode)
? initialize_stub_strict()
: initialize_stub());
} }
@ -804,6 +807,7 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
HandleScope scope; HandleScope scope;
Handle<Object> result = GetProperty(object, key); Handle<Object> result = GetProperty(object, key);
RETURN_IF_EMPTY_HANDLE(result);
// Make receiver an object if the callee requires it. Strict mode or builtin // Make receiver an object if the callee requires it. Strict mode or builtin
// functions do not wrap the receiver, non-strict functions and objects // functions do not wrap the receiver, non-strict functions and objects
@ -1226,7 +1230,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
if (receiver->HasExternalArrayElements()) { if (receiver->HasExternalArrayElements()) {
MaybeObject* probe = MaybeObject* probe =
StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver,
false); false,
kNonStrictMode);
stub = probe->IsFailure() ? stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked()); NULL : Code::cast(probe->ToObjectUnchecked());
} else if (receiver->HasIndexedInterceptor()) { } else if (receiver->HasIndexedInterceptor()) {
@ -1382,7 +1387,7 @@ static bool LookupForWrite(JSObject* object,
MaybeObject* StoreIC::Store(State state, MaybeObject* StoreIC::Store(State state,
Code::ExtraICState extra_ic_state, StrictModeFlag strict_mode,
Handle<Object> object, Handle<Object> object,
Handle<String> name, Handle<String> name,
Handle<Object> value) { Handle<Object> value) {
@ -1412,11 +1417,11 @@ MaybeObject* StoreIC::Store(State state,
#ifdef DEBUG #ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n"); if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif #endif
Builtins::Name target = (extra_ic_state == kStoreICStrict) Builtins::Name target = (strict_mode == kStrictMode)
? Builtins::StoreIC_ArrayLength_Strict ? Builtins::StoreIC_ArrayLength_Strict
: Builtins::StoreIC_ArrayLength; : Builtins::StoreIC_ArrayLength;
set_target(Builtins::builtin(target)); set_target(Builtins::builtin(target));
return receiver->SetProperty(*name, *value, NONE); return receiver->SetProperty(*name, *value, NONE, strict_mode);
} }
// Lookup the property locally in the receiver. // Lookup the property locally in the receiver.
@ -1440,13 +1445,15 @@ MaybeObject* StoreIC::Store(State state,
// Index is an offset from the end of the object. // Index is an offset from the end of the object.
int offset = map->instance_size() + (index * kPointerSize); int offset = map->instance_size() + (index * kPointerSize);
if (PatchInlinedStore(address(), map, offset)) { if (PatchInlinedStore(address(), map, offset)) {
set_target(megamorphic_stub()); set_target((strict_mode == kStrictMode)
? megamorphic_stub_strict()
: megamorphic_stub());
#ifdef DEBUG #ifdef DEBUG
if (FLAG_trace_ic) { if (FLAG_trace_ic) {
PrintF("[StoreIC : inline patch %s]\n", *name->ToCString()); PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
} }
#endif #endif
return receiver->SetProperty(*name, *value, NONE); return receiver->SetProperty(*name, *value, NONE, strict_mode);
#ifdef DEBUG #ifdef DEBUG
} else { } else {
@ -1473,19 +1480,24 @@ MaybeObject* StoreIC::Store(State state,
// If no inlined store ic was patched, generate a stub for this // If no inlined store ic was patched, generate a stub for this
// store. // store.
UpdateCaches(&lookup, state, extra_ic_state, receiver, name, value); UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
} else { } else {
// Strict mode doesn't allow setting non-existent global property. // Strict mode doesn't allow setting non-existent global property
if (extra_ic_state == kStoreICStrict && IsContextual(object)) { // or an assignment to a read only property.
if (strict_mode == kStrictMode) {
if (lookup.IsFound() && lookup.IsReadOnly()) {
return TypeError("strict_read_only_property", object, name);
} else if (IsContextual(object)) {
return ReferenceError("not_defined", name); return ReferenceError("not_defined", name);
} }
} }
} }
}
if (receiver->IsJSGlobalProxy()) { if (receiver->IsJSGlobalProxy()) {
// Generate a generic stub that goes to the runtime when we see a global // Generate a generic stub that goes to the runtime when we see a global
// proxy as receiver. // proxy as receiver.
Code* stub = (extra_ic_state == kStoreICStrict) Code* stub = (strict_mode == kStrictMode)
? global_proxy_stub_strict() ? global_proxy_stub_strict()
: global_proxy_stub(); : global_proxy_stub();
if (target() != stub) { if (target() != stub) {
@ -1497,13 +1509,13 @@ MaybeObject* StoreIC::Store(State state,
} }
// Set the property. // Set the property.
return receiver->SetProperty(*name, *value, NONE); return receiver->SetProperty(*name, *value, NONE, strict_mode);
} }
void StoreIC::UpdateCaches(LookupResult* lookup, void StoreIC::UpdateCaches(LookupResult* lookup,
State state, State state,
Code::ExtraICState extra_ic_state, StrictModeFlag strict_mode,
Handle<JSObject> receiver, Handle<JSObject> receiver,
Handle<String> name, Handle<String> name,
Handle<Object> value) { Handle<Object> value) {
@ -1525,7 +1537,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
switch (type) { switch (type) {
case FIELD: { case FIELD: {
maybe_code = StubCache::ComputeStoreField( maybe_code = StubCache::ComputeStoreField(
*name, *receiver, lookup->GetFieldIndex(), NULL, extra_ic_state); *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
break; break;
} }
case MAP_TRANSITION: { case MAP_TRANSITION: {
@ -1535,7 +1547,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Map> transition(lookup->GetTransitionMap()); Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name); int index = transition->PropertyIndexFor(*name);
maybe_code = StubCache::ComputeStoreField( maybe_code = StubCache::ComputeStoreField(
*name, *receiver, index, *transition, extra_ic_state); *name, *receiver, index, *transition, strict_mode);
break; break;
} }
case NORMAL: { case NORMAL: {
@ -1547,10 +1559,10 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
JSGlobalPropertyCell* cell = JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup)); JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
maybe_code = StubCache::ComputeStoreGlobal( maybe_code = StubCache::ComputeStoreGlobal(
*name, *global, cell, extra_ic_state); *name, *global, cell, strict_mode);
} else { } else {
if (lookup->holder() != *receiver) return; if (lookup->holder() != *receiver) return;
maybe_code = StubCache::ComputeStoreNormal(extra_ic_state); maybe_code = StubCache::ComputeStoreNormal(strict_mode);
} }
break; break;
} }
@ -1559,13 +1571,13 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject()); AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->setter()) == 0) return; if (v8::ToCData<Address>(callback->setter()) == 0) return;
maybe_code = StubCache::ComputeStoreCallback( maybe_code = StubCache::ComputeStoreCallback(
*name, *receiver, callback, extra_ic_state); *name, *receiver, callback, strict_mode);
break; break;
} }
case INTERCEPTOR: { case INTERCEPTOR: {
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined()); ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
maybe_code = StubCache::ComputeStoreInterceptor( maybe_code = StubCache::ComputeStoreInterceptor(
*name, *receiver, extra_ic_state); *name, *receiver, strict_mode);
break; break;
} }
default: default:
@ -1582,7 +1594,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
} else if (state == MONOMORPHIC) { } else if (state == MONOMORPHIC) {
// Only move to megamorphic if the target changes. // Only move to megamorphic if the target changes.
if (target() != Code::cast(code)) { if (target() != Code::cast(code)) {
set_target(extra_ic_state == kStoreICStrict set_target((strict_mode == kStrictMode)
? megamorphic_stub_strict() ? megamorphic_stub_strict()
: megamorphic_stub()); : megamorphic_stub());
} }
@ -1598,6 +1610,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
MaybeObject* KeyedStoreIC::Store(State state, MaybeObject* KeyedStoreIC::Store(State state,
StrictModeFlag strict_mode,
Handle<Object> object, Handle<Object> object,
Handle<Object> key, Handle<Object> key,
Handle<Object> value) { Handle<Object> value) {
@ -1629,11 +1642,11 @@ MaybeObject* KeyedStoreIC::Store(State state,
// Update inline cache and stub cache. // Update inline cache and stub cache.
if (FLAG_use_ic) { if (FLAG_use_ic) {
UpdateCaches(&lookup, state, receiver, name, value); UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
} }
// Set the property. // Set the property.
return receiver->SetProperty(*name, *value, NONE); return receiver->SetProperty(*name, *value, NONE, strict_mode);
} }
  // Do not use ICs for objects that require access checks (including
@@ -1642,23 +1655,25 @@ MaybeObject* KeyedStoreIC::Store(State state,
  ASSERT(!(use_ic && object->IsJSGlobalProxy()));

  if (use_ic) {
-   Code* stub = generic_stub();
+   Code* stub =
+       (strict_mode == kStrictMode) ? generic_stub_strict() : generic_stub();
    if (state == UNINITIALIZED) {
      if (object->IsJSObject()) {
        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
        if (receiver->HasExternalArrayElements()) {
          MaybeObject* probe =
-             StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
+             StubCache::ComputeKeyedLoadOrStoreExternalArray(
+                 *receiver, true, strict_mode);
          stub = probe->IsFailure() ?
              NULL : Code::cast(probe->ToObjectUnchecked());
        } else if (receiver->HasPixelElements()) {
          MaybeObject* probe =
-             StubCache::ComputeKeyedStorePixelArray(*receiver);
+             StubCache::ComputeKeyedStorePixelArray(*receiver, strict_mode);
          stub = probe->IsFailure() ?
              NULL : Code::cast(probe->ToObjectUnchecked());
        } else if (key->IsSmi() && receiver->map()->has_fast_elements()) {
          MaybeObject* probe =
-             StubCache::ComputeKeyedStoreSpecialized(*receiver);
+             StubCache::ComputeKeyedStoreSpecialized(*receiver, strict_mode);
          stub = probe->IsFailure() ?
              NULL : Code::cast(probe->ToObjectUnchecked());
        }
@@ -1668,12 +1683,13 @@ MaybeObject* KeyedStoreIC::Store(State state,
  }

  // Set the property.
- return Runtime::SetObjectProperty(object, key, value, NONE);
+ return Runtime::SetObjectProperty(object, key, value, NONE, strict_mode);
}
void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
                                State state,
+                               StrictModeFlag strict_mode,
                                Handle<JSObject> receiver,
                                Handle<String> name,
                                Handle<Object> value) {
@@ -1700,8 +1716,8 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
  switch (type) {
    case FIELD: {
-     maybe_code = StubCache::ComputeKeyedStoreField(*name, *receiver,
-                                                    lookup->GetFieldIndex());
+     maybe_code = StubCache::ComputeKeyedStoreField(
+         *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
      break;
    }
    case MAP_TRANSITION: {
@@ -1710,8 +1726,8 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
        ASSERT(type == MAP_TRANSITION);
        Handle<Map> transition(lookup->GetTransitionMap());
        int index = transition->PropertyIndexFor(*name);
-       maybe_code = StubCache::ComputeKeyedStoreField(*name, *receiver,
-                                                      index, *transition);
+       maybe_code = StubCache::ComputeKeyedStoreField(
+           *name, *receiver, index, *transition, strict_mode);
        break;
      }
      // fall through.
@@ -1719,7 +1735,9 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
    default: {
      // Always rewrite to the generic case so that we do not
      // repeatedly try to rewrite.
-     maybe_code = generic_stub();
+     maybe_code = (strict_mode == kStrictMode)
+         ? generic_stub_strict()
+         : generic_stub();
      break;
    }
  }
@@ -1734,7 +1752,9 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
  if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
    set_target(Code::cast(code));
  } else if (state == MONOMORPHIC) {
-   set_target(megamorphic_stub());
+   set_target((strict_mode == kStrictMode)
+       ? megamorphic_stub_strict()
+       : megamorphic_stub());
  }

#ifdef DEBUG
@@ -1835,8 +1855,11 @@ MUST_USE_RESULT MaybeObject* StoreIC_Miss(Arguments args) {
  StoreIC ic;
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
  Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
- return ic.Store(state, extra_ic_state, args.at<Object>(0),
-                 args.at<String>(1), args.at<Object>(2));
+ return ic.Store(state,
+                 static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                 args.at<Object>(0),
+                 args.at<String>(1),
+                 args.at<Object>(2));
}
@@ -1900,7 +1923,11 @@ MUST_USE_RESULT MaybeObject* KeyedStoreIC_Miss(Arguments args) {
  ASSERT(args.length() == 3);
  KeyedStoreIC ic;
  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Store(state, args.at<Object>(0), args.at<Object>(1),
-                 args.at<Object>(2));
+ Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
+ return ic.Store(state,
+                 static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
+                 args.at<Object>(0),
+                 args.at<Object>(1),
+                 args.at<Object>(2));
}
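The two miss handlers above recover the caller's strict-mode setting from the bits the IC stub carries in its extra state. A minimal stand-alone sketch of that encoding (hypothetical, self-contained types; V8's real definitions live elsewhere in the tree), assuming, as the masking implies, that kStrictMode occupies a single bit of the extra-state word:

    // Illustrative stand-ins for V8's StrictModeFlag and ExtraICState.
    enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };
    typedef int ExtraICState;

    // Recover the strict-mode bit baked into a store stub's flags so the
    // dispatcher can pick a matching stub (e.g. the *_Strict builtins).
    StrictModeFlag RecoverStrictMode(ExtraICState extra_ic_state) {
      return static_cast<StrictModeFlag>(extra_ic_state & kStrictMode);
    }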

45
deps/v8/src/ic.h

@@ -398,16 +398,10 @@ class KeyedLoadIC: public IC {
class StoreIC: public IC {
 public:
- enum StoreICStrictMode {
-   kStoreICNonStrict = kNonStrictMode,
-   kStoreICStrict = kStrictMode
- };
  StoreIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_store_stub()); }

  MUST_USE_RESULT MaybeObject* Store(State state,
-                                    Code::ExtraICState extra_ic_state,
+                                    StrictModeFlag strict_mode,
                                     Handle<Object> object,
                                     Handle<String> name,
                                     Handle<Object> value);
@@ -416,10 +410,11 @@ class StoreIC: public IC {
  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
  static void GenerateMiss(MacroAssembler* masm);
  static void GenerateMegamorphic(MacroAssembler* masm,
-                                 Code::ExtraICState extra_ic_state);
+                                 StrictModeFlag strict_mode);
  static void GenerateArrayLength(MacroAssembler* masm);
  static void GenerateNormal(MacroAssembler* masm);
- static void GenerateGlobalProxy(MacroAssembler* masm);
+ static void GenerateGlobalProxy(MacroAssembler* masm,
+                                 StrictModeFlag strict_mode);

  // Clear the use of an inlined version.
  static void ClearInlinedVersion(Address address);
@@ -433,11 +428,18 @@ class StoreIC: public IC {
  // lookup result.
  void UpdateCaches(LookupResult* lookup,
                    State state,
-                   Code::ExtraICState extra_ic_state,
+                   StrictModeFlag strict_mode,
                    Handle<JSObject> receiver,
                    Handle<String> name,
                    Handle<Object> value);

+ void set_target(Code* code) {
+   // Strict mode must be preserved across IC patching.
+   ASSERT((code->extra_ic_state() & kStrictMode) ==
+          (target()->extra_ic_state() & kStrictMode));
+   IC::set_target(code);
+ }

  // Stub accessors.
  static Code* megamorphic_stub() {
    return Builtins::builtin(Builtins::StoreIC_Megamorphic);
@@ -473,6 +475,7 @@ class KeyedStoreIC: public IC {
  KeyedStoreIC() : IC(NO_EXTRA_FRAME) { }

  MUST_USE_RESULT MaybeObject* Store(State state,
+                                    StrictModeFlag strict_mode,
                                     Handle<Object> object,
                                     Handle<Object> name,
                                     Handle<Object> value);
@@ -480,8 +483,9 @@ class KeyedStoreIC: public IC {
  // Code generators for stub routines. Only called once at startup.
  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
  static void GenerateMiss(MacroAssembler* masm);
- static void GenerateRuntimeSetProperty(MacroAssembler* masm);
- static void GenerateGeneric(MacroAssembler* masm);
+ static void GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                        StrictModeFlag strict_mode);
+ static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);

  // Clear the inlined version so the IC is always hit.
  static void ClearInlinedVersion(Address address);
@@ -493,20 +497,37 @@ class KeyedStoreIC: public IC {
  // Update the inline cache.
  void UpdateCaches(LookupResult* lookup,
                    State state,
+                   StrictModeFlag strict_mode,
                    Handle<JSObject> receiver,
                    Handle<String> name,
                    Handle<Object> value);

+ void set_target(Code* code) {
+   // Strict mode must be preserved across IC patching.
+   ASSERT((code->extra_ic_state() & kStrictMode) ==
+          (target()->extra_ic_state() & kStrictMode));
+   IC::set_target(code);
+ }

  // Stub accessors.
  static Code* initialize_stub() {
    return Builtins::builtin(Builtins::KeyedStoreIC_Initialize);
  }
+ static Code* initialize_stub_strict() {
+   return Builtins::builtin(Builtins::KeyedStoreIC_Initialize_Strict);
+ }
  static Code* megamorphic_stub() {
    return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
  }
+ static Code* megamorphic_stub_strict() {
+   return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict);
+ }
  static Code* generic_stub() {
    return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
  }
+ static Code* generic_stub_strict() {
+   return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict);
+ }

  static void Clear(Address address, Code* target);
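Note the new set_target() overrides above: once an IC site has been compiled for strict code, every stub subsequently patched in must carry the same strict bit. Reusing the single-bit encoding sketched after the ic.cc hunks (an assumption, not V8's literal code), the invariant the ASSERTs enforce is simply:

    // Hypothetical helper expressing the ASSERT in the overrides above.
    bool PreservesStrictBit(int old_extra_state, int new_extra_state) {
      return (old_extra_state & kStrictMode) == (new_extra_state & kStrictMode);
    }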

90
deps/v8/src/liveobjectlist-inl.h

@@ -32,5 +32,95 @@
#include "liveobjectlist.h"

+namespace v8 {
+namespace internal {
+
+#ifdef LIVE_OBJECT_LIST
+
+void LiveObjectList::GCEpilogue() {
+  if (!NeedLOLProcessing()) return;
+  GCEpiloguePrivate();
+}
+
+void LiveObjectList::GCPrologue() {
+  if (!NeedLOLProcessing()) return;
+#ifdef VERIFY_LOL
+  if (FLAG_verify_lol) {
+    Verify();
+  }
+#endif
+}
+
+void LiveObjectList::IterateElements(ObjectVisitor* v) {
+  if (!NeedLOLProcessing()) return;
+  IterateElementsPrivate(v);
+}
+
+void LiveObjectList::ProcessNonLive(HeapObject* obj) {
+  // Only do work if we have at least one list to process.
+  if (last()) DoProcessNonLive(obj);
+}
+
+void LiveObjectList::UpdateReferencesForScavengeGC() {
+  if (LiveObjectList::NeedLOLProcessing()) {
+    UpdateLiveObjectListVisitor update_visitor;
+    LiveObjectList::IterateElements(&update_visitor);
+  }
+}
+
+LiveObjectList* LiveObjectList::FindLolForId(int id,
+                                             LiveObjectList* start_lol) {
+  if (id != 0) {
+    LiveObjectList* lol = start_lol;
+    while (lol != NULL) {
+      if (lol->id() == id) {
+        return lol;
+      }
+      lol = lol->prev_;
+    }
+  }
+  return NULL;
+}
+
+// Iterates the elements in every lol and returns the one that matches the
+// specified key. If no matching element is found, then it returns NULL.
+template <typename T>
+inline LiveObjectList::Element*
+LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*),
+                               T key) {
+  LiveObjectList* lol = last();
+  while (lol != NULL) {
+    Element* elements = lol->elements_;
+    for (int i = 0; i < lol->obj_count_; i++) {
+      Element* element = &elements[i];
+      if (GetValue(element) == key) {
+        return element;
+      }
+    }
+    lol = lol->prev_;
+  }
+  return NULL;
+}
+
+inline int LiveObjectList::GetElementId(LiveObjectList::Element* element) {
+  return element->id_;
+}
+
+inline HeapObject*
+LiveObjectList::GetElementObj(LiveObjectList::Element* element) {
+  return element->obj_;
+}
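The template above is typically driven with the two accessors that follow it. A stand-alone sketch of the same lookup pattern, with hypothetical types rather than V8's:

    #include <cstddef>

    struct Element { int id_; void* obj_; };
    struct List { List* prev_; Element* elements_; int obj_count_; };

    // Walk the chain of lists from the most recent one backwards and return
    // the first element whose extracted key matches, mirroring FindElementFor.
    template <typename T>
    Element* FindElementFor(T (*GetValue)(Element*), T key, List* last) {
      for (List* lol = last; lol != NULL; lol = lol->prev_) {
        for (int i = 0; i < lol->obj_count_; i++) {
          if (GetValue(&lol->elements_[i]) == key) return &lol->elements_[i];
        }
      }
      return NULL;  // no captured list contains the key
    }

    static int GetElementId(Element* element) { return element->id_; }
    // Usage: Element* e = FindElementFor<int>(GetElementId, 42, last);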
+
+#endif  // LIVE_OBJECT_LIST

} }  // namespace v8::internal

#endif  // V8_LIVEOBJECTLIST_INL_H_

2476
deps/v8/src/liveobjectlist.cc

File diff suppressed because it is too large

260
deps/v8/src/liveobjectlist.h

@@ -40,8 +40,231 @@ namespace internal {

#ifdef LIVE_OBJECT_LIST

+#ifdef DEBUG
+// The following symbol, when defined, enables thorough verification of lol
+// data. FLAG_verify_lol will also need to be set to true to enable the
+// verification.
+#define VERIFY_LOL
+#endif
+
+typedef int LiveObjectType;
+class LolFilter;
+class LiveObjectSummary;
+class DumpWriter;
+class SummaryWriter;
+
+// The LiveObjectList is both a mechanism for tracking a live capture of
+// objects in the JS heap and the data structure which represents each of
+// those captures. Unlike a snapshot, the lol is live. For example, if an
+// object in a captured lol dies and is collected by the GC, the lol will
+// reflect that the object is no longer available. The term LiveObjectList
+// (and lol) is used to describe both the mechanism and the data structure,
+// depending on the context of use.
+//
+// In captured lols, objects are tracked using their address and an object id.
+// The object id is unique. Once assigned to an object, the object id can
+// never be assigned to another object, unless all captured lols are deleted,
+// which allows the user to start over with a fresh set of lols and object
+// ids. The uniqueness of the object ids allows the user to track specific
+// objects and inspect their longevity while debugging JS code in execution.
+//
+// The lol comes with utility functions to capture, dump, summarize, and diff
+// captured lols, among other functionality. This functionality is accessible
+// via the v8 debugger interface.
+class LiveObjectList {
+ public:
+  inline static void GCEpilogue();
+  inline static void GCPrologue();
+  inline static void IterateElements(ObjectVisitor* v);
+  inline static void ProcessNonLive(HeapObject* obj);
+  inline static void UpdateReferencesForScavengeGC();
+
+  // Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
+  // compared/diff'ed using Dump(<lol id1>, <lol id2>, ...). This will yield
+  // a verbose dump of all the objects in the resultant lists.
+  // Similarly, a summarized result of a LOL listing or a diff can be
+  // obtained using Summarize(0, <lol id>) and Summarize(<lol id1>,
+  // <lol id2>, ...) respectively.
+
+  static MaybeObject* Capture();
+  static bool Delete(int id);
+  static MaybeObject* Dump(int id1,
+                           int id2,
+                           int start_idx,
+                           int dump_limit,
+                           Handle<JSObject> filter_obj);
+  static MaybeObject* Info(int start_idx, int dump_limit);
+  static MaybeObject* Summarize(int id1, int id2, Handle<JSObject> filter_obj);
+
+  static void Reset();
+  static Object* GetObj(int obj_id);
+  static int GetObjId(Object* obj);
+  static Object* GetObjId(Handle<String> address);
+  static MaybeObject* GetObjRetainers(int obj_id,
+                                      Handle<JSObject> instance_filter,
+                                      bool verbose,
+                                      int start,
+                                      int count,
+                                      Handle<JSObject> filter_obj);
+
+  static Object* GetPath(int obj_id1,
+                         int obj_id2,
+                         Handle<JSObject> instance_filter);
+  static Object* PrintObj(int obj_id);
+
+ private:
+  struct Element {
+    int id_;
+    HeapObject* obj_;
+  };
+
+  explicit LiveObjectList(LiveObjectList* prev, int capacity);
+  ~LiveObjectList();
+
+  static void GCEpiloguePrivate();
+  static void IterateElementsPrivate(ObjectVisitor* v);
+
+  static void DoProcessNonLive(HeapObject* obj);
+
+  static int CompareElement(const Element* a, const Element* b);
+
+  static Object* GetPathPrivate(HeapObject* obj1, HeapObject* obj2);
+
+  static int GetRetainers(Handle<HeapObject> target,
+                          Handle<JSObject> instance_filter,
+                          Handle<FixedArray> retainers_arr,
+                          int start,
+                          int dump_limit,
+                          int* total_count,
+                          LolFilter* filter,
+                          LiveObjectSummary* summary,
+                          JSFunction* arguments_function,
+                          Handle<Object> error);
+
+  static MaybeObject* DumpPrivate(DumpWriter* writer,
+                                  int start,
+                                  int dump_limit,
+                                  LolFilter* filter);
+  static MaybeObject* SummarizePrivate(SummaryWriter* writer,
+                                       LolFilter* filter,
+                                       bool is_tracking_roots);
+
+  static bool NeedLOLProcessing() { return (last() != NULL); }
+  static void NullifyNonLivePointer(HeapObject** p) {
+    // Mask out the low bit that marks this as a heap object. We'll use this
+    // cleared bit as an indicator that this pointer needs to be collected.
+    //
+    // Meanwhile, we still preserve its approximate value so that we don't
+    // have to re-sort the elements list all the time.
+    //
+    // Note: doing so also makes this HeapObject* look like an SMI. Hence,
+    // the GC pointer updater will ignore it when it gets scanned.
+    *p = reinterpret_cast<HeapObject*>((*p)->address());
+  }
+
+  LiveObjectList* prev() { return prev_; }
+  LiveObjectList* next() { return next_; }
+  int id() { return id_; }
+
+  static int list_count() { return list_count_; }
+  static LiveObjectList* last() { return last_; }
+
+  inline static LiveObjectList* FindLolForId(int id,
+                                             LiveObjectList* start_lol);
+
+  int TotalObjCount() { return GetTotalObjCountAndSize(NULL); }
+  int GetTotalObjCountAndSize(int* size_p);
+
+  bool Add(HeapObject* obj);
+  Element* Find(HeapObject* obj);
+  static void NullifyMostRecent(HeapObject* obj);
+  void Sort();
+  static void SortAll();
+
+  static void PurgeDuplicates();  // Only to be called by GCEpilogue.
+
+#ifdef VERIFY_LOL
+  static void Verify(bool match_heap_exactly = false);
+  static void VerifyNotInFromSpace();
+#endif
+
+  // Iterates the elements in every lol and returns the one that matches the
+  // specified key. If no matching element is found, then it returns NULL.
+  template <typename T>
+  inline static LiveObjectList::Element*
+  FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key);
+
+  inline static int GetElementId(Element* element);
+  inline static HeapObject* GetElementObj(Element* element);
+
+  // Instance fields.
+  LiveObjectList* prev_;
+  LiveObjectList* next_;
+  int id_;
+  int capacity_;
+  int obj_count_;
+  Element* elements_;
+
+  // Statics for managing all the lists.
+  static uint32_t next_element_id_;
+  static int list_count_;
+  static int last_id_;
+  static LiveObjectList* first_;
+  static LiveObjectList* last_;
+
+  friend class LolIterator;
+  friend class LolForwardIterator;
+  friend class LolDumpWriter;
+  friend class RetainersDumpWriter;
+  friend class RetainersSummaryWriter;
+  friend class UpdateLiveObjectListVisitor;
+};
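Taken together, the public API above supports a capture-and-compare workflow. A hedged sketch of that flow, assuming a LIVE_OBJECT_LIST build, ids handed out sequentially from 1, and eliding the MaybeObject failure checks real callers must perform (filter_obj and dump_limit are placeholders for caller-supplied values):

    LiveObjectList::Capture();                     // take lol #1
    // ... run the JS code under investigation ...
    LiveObjectList::Capture();                     // take lol #2
    LiveObjectList::Summarize(1, 2, filter_obj);   // summarized diff of #1 vs #2
    LiveObjectList::Dump(1, 2, 0, dump_limit, filter_obj);  // verbose diff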
+
+// Helper class for updating the LiveObjectList HeapObject pointers.
+class UpdateLiveObjectListVisitor: public ObjectVisitor {
+ public:
+  void VisitPointer(Object** p) { UpdatePointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Copy all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+ private:
+  // Based on Heap::ScavengeObject(), but only does forwarding of pointers
+  // to live new-space objects; it does not actually keep them alive.
+  void UpdatePointer(Object** p) {
+    Object* object = *p;
+    if (!Heap::InNewSpace(object)) return;
+
+    HeapObject* heap_obj = HeapObject::cast(object);
+    ASSERT(Heap::InFromSpace(heap_obj));
+
+    // We use the first word (where the map pointer usually is) of a heap
+    // object to record the forwarding pointer. A forwarding pointer can
+    // point to an old space, the code space, or the to space of the new
+    // generation.
+    MapWord first_word = heap_obj->map_word();
+
+    // If the first word is a forwarding address, the object has already
+    // been copied.
+    if (first_word.IsForwardingAddress()) {
+      *p = first_word.ToForwardingAddress();
+    } else {
+      // Otherwise, it's a dead object.
+      LiveObjectList::NullifyNonLivePointer(
+          reinterpret_cast<HeapObject**>(p));
+    }
+  }
+};
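NullifyNonLivePointer, called in the dead-object branch above, relies on V8's pointer tagging: heap-object pointers carry a set low bit, so clearing it yields an even word that pointer-updating code classifies as a small integer (SMI) and skips. A stand-alone illustration with hypothetical values, not V8 code:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kHeapObjectTag = 1;          // low bit set on heap pointers
      uintptr_t tagged = 0x1000 + kHeapObjectTag;  // a tagged HeapObject pointer
      uintptr_t nullified = tagged & ~kHeapObjectTag;  // what address() returns
      assert((nullified & 1) == 0);  // even word => looks like an SMI, ignored
      assert(nullified == 0x1000);   // approximate address is preserved
      return 0;
    }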
+#else  // !LIVE_OBJECT_LIST
+
+// Temporary stubbed-out LiveObjectList implementation.
class LiveObjectList {
 public:
  inline static void GCEpilogue() {}
@@ -50,30 +273,30 @@ class LiveObjectList {
  inline static void ProcessNonLive(HeapObject* obj) {}
  inline static void UpdateReferencesForScavengeGC() {}

- static MaybeObject* Capture() { return Heap::undefined_value(); }
- static bool Delete(int id) { return false; }
- static MaybeObject* Dump(int id1,
-                          int id2,
-                          int start_idx,
-                          int dump_limit,
-                          Handle<JSObject> filter_obj) {
+ inline static MaybeObject* Capture() { return Heap::undefined_value(); }
+ inline static bool Delete(int id) { return false; }
+ inline static MaybeObject* Dump(int id1,
+                                 int id2,
+                                 int start_idx,
+                                 int dump_limit,
+                                 Handle<JSObject> filter_obj) {
    return Heap::undefined_value();
  }
- static MaybeObject* Info(int start_idx, int dump_limit) {
+ inline static MaybeObject* Info(int start_idx, int dump_limit) {
    return Heap::undefined_value();
  }
- static MaybeObject* Summarize(int id1,
-                               int id2,
-                               Handle<JSObject> filter_obj) {
+ inline static MaybeObject* Summarize(int id1,
+                                      int id2,
+                                      Handle<JSObject> filter_obj) {
    return Heap::undefined_value();
  }
- static void Reset() {}
- static Object* GetObj(int obj_id) { return Heap::undefined_value(); }
- static Object* GetObjId(Handle<String> address) {
+ inline static void Reset() {}
+ inline static Object* GetObj(int obj_id) { return Heap::undefined_value(); }
+ inline static Object* GetObjId(Handle<String> address) {
    return Heap::undefined_value();
  }
- static MaybeObject* GetObjRetainers(int obj_id,
+ inline static MaybeObject* GetObjRetainers(int obj_id,
                                      Handle<JSObject> instance_filter,
                                      bool verbose,
                                      int start,
@@ -82,25 +305,12 @@ class LiveObjectList {
    return Heap::undefined_value();
  }

- static Object* GetPath(int obj_id1,
-                        int obj_id2,
-                        Handle<JSObject> instance_filter) {
+ inline static Object* GetPath(int obj_id1,
+                               int obj_id2,
+                               Handle<JSObject> instance_filter) {
    return Heap::undefined_value();
  }
- static Object* PrintObj(int obj_id) { return Heap::undefined_value(); }
-};
-
-#else  // !LIVE_OBJECT_LIST
-
-class LiveObjectList {
- public:
-  static void GCEpilogue() {}
-  static void GCPrologue() {}
-  static void IterateElements(ObjectVisitor* v) {}
-  static void ProcessNonLive(HeapObject* obj) {}
-  static void UpdateReferencesForScavengeGC() {}
-};
+ inline static Object* PrintObj(int obj_id) { return Heap::undefined_value(); }
};

7
deps/v8/src/mark-compact.cc

@@ -1353,6 +1353,9 @@ void MarkCompactCollector::MarkLiveObjects() {
  // Flush code from collected candidates.
  FlushCode::ProcessCandidates();
+
+ // Clean up dead objects from the runtime profiler.
+ RuntimeProfiler::RemoveDeadSamples();
}
@@ -1937,6 +1940,9 @@ static void SweepNewSpace(NewSpace* space) {
  // All pointers were updated. Update auxiliary allocation info.
  Heap::IncrementYoungSurvivorsCounter(survivors_size);
  space->set_age_mark(space->top());
+
+ // Update JSFunction pointers from the runtime profiler.
+ RuntimeProfiler::UpdateSamplesAfterScavenge();
}
@@ -2535,6 +2541,7 @@ void MarkCompactCollector::UpdatePointers() {
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor;
+ RuntimeProfiler::UpdateSamplesAfterCompact(&updating_visitor);
  Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  GlobalHandles::IterateWeakRoots(&updating_visitor);

8
deps/v8/src/messages.js

@@ -226,6 +226,10 @@ function FormatMessage(message) {
      strict_reserved_word: ["Use of future reserved word in strict mode"],
      strict_delete: ["Delete of an unqualified identifier in strict mode."],
      strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
+     strict_const: ["Use of const in strict mode."],
+     strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function."],
+     strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
+     strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
    };
  }
  var message_type = %MessageGetType(message);
@@ -1059,8 +1063,8 @@ function errorToString() {
  }
}

-%FunctionSetName(errorToString, 'toString');
-%SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM);
+InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);

// Boilerplate for exceptions for stack overflows. Used from
// Top::StackOverflow().

10
deps/v8/src/objects-inl.h

@@ -769,6 +769,10 @@ bool Object::HasSpecificClassOf(String* name) {
MaybeObject* Object::GetElement(uint32_t index) {
+ // GetElement can trigger a getter which can cause allocation.
+ // This was not always the case. This ASSERT is here to catch
+ // leftover incorrect uses.
+ ASSERT(Heap::IsAllocationAllowed());
  return GetElementWithReceiver(this, index);
}
@@ -2615,7 +2619,8 @@ Code::Flags Code::ComputeFlags(Kind kind,
  ASSERT(extra_ic_state == kNoExtraICState ||
         (kind == CALL_IC && (ic_state == MONOMORPHIC ||
                              ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)) ||
-        (kind == STORE_IC));
+        (kind == STORE_IC) ||
+        (kind == KEYED_STORE_IC));
  // Compute the bit mask.
  int bits = kind << kFlagsKindShift;
  if (in_loop) bits |= kFlagsICInLoopMask;
@@ -3737,7 +3742,8 @@ MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
  ASSERT(!IsJSGlobalProxy());
  return SetPropertyPostInterceptor(Heap::hidden_symbol(),
                                    hidden_obj,
-                                   DONT_ENUM);
+                                   DONT_ENUM,
+                                   kNonStrictMode);
}

59
deps/v8/src/objects.cc

@@ -1444,14 +1444,15 @@ MaybeObject* JSObject::AddProperty(String* name,
MaybeObject* JSObject::SetPropertyPostInterceptor(
    String* name,
    Object* value,
-   PropertyAttributes attributes) {
+   PropertyAttributes attributes,
+   StrictModeFlag strict) {
  // Check local property, ignore interceptor.
  LookupResult result;
  LocalLookupRealNamedProperty(name, &result);
  if (result.IsFound()) {
    // An existing property, a map transition or a null descriptor was
    // found. Use set property to handle all these cases.
-   return SetProperty(&result, name, value, attributes);
+   return SetProperty(&result, name, value, attributes, strict);
  }
  // Add a new real property.
  return AddProperty(name, value, attributes);
@@ -1576,7 +1577,8 @@ MaybeObject* JSObject::ConvertDescriptorToField(String* name,
MaybeObject* JSObject::SetPropertyWithInterceptor(
    String* name,
    Object* value,
-   PropertyAttributes attributes) {
+   PropertyAttributes attributes,
+   StrictModeFlag strict) {
  HandleScope scope;
  Handle<JSObject> this_handle(this);
  Handle<String> name_handle(name);
@@ -1605,7 +1607,8 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
  MaybeObject* raw_result =
      this_handle->SetPropertyPostInterceptor(*name_handle,
                                              *value_handle,
-                                             attributes);
+                                             attributes,
+                                             strict);
  RETURN_IF_SCHEDULED_EXCEPTION();
  return raw_result;
}
@@ -1613,10 +1616,11 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
MaybeObject* JSObject::SetProperty(String* name,
                                   Object* value,
-                                  PropertyAttributes attributes) {
+                                  PropertyAttributes attributes,
+                                  StrictModeFlag strict) {
  LookupResult result;
  LocalLookup(name, &result);
- return SetProperty(&result, name, value, attributes);
+ return SetProperty(&result, name, value, attributes, strict);
}
@@ -1896,7 +1900,8 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
MaybeObject* JSObject::SetProperty(LookupResult* result,
                                   String* name,
                                   Object* value,
-                                  PropertyAttributes attributes) {
+                                  PropertyAttributes attributes,
+                                  StrictModeFlag strict) {
  // Make sure that the top context does not change when doing callbacks or
  // interceptor calls.
  AssertNoContextChange ncc;
@@ -1923,7 +1928,8 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
    Object* proto = GetPrototype();
    if (proto->IsNull()) return value;
    ASSERT(proto->IsJSGlobalObject());
-   return JSObject::cast(proto)->SetProperty(result, name, value, attributes);
+   return JSObject::cast(proto)->SetProperty(
+       result, name, value, attributes, strict);
  }

  if (!result->IsProperty() && !IsJSContextExtensionObject()) {
@@ -1942,7 +1948,19 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
    // Neither properties nor transitions found.
    return AddProperty(name, value, attributes);
  }
- if (result->IsReadOnly() && result->IsProperty()) return value;
+ if (result->IsReadOnly() && result->IsProperty()) {
+   if (strict == kStrictMode) {
+     HandleScope scope;
+     Handle<String> key(name);
+     Handle<Object> holder(this);
+     Handle<Object> args[2] = { key, holder };
+     return Top::Throw(*Factory::NewTypeError("strict_read_only_property",
+                                              HandleVector(args, 2)));
+   } else {
+     return value;
+   }
+ }
  // This is a real property that is not read-only, or it is a
  // transition or null descriptor and there are no setters in the prototypes.
  switch (result->type()) {
@@ -1970,7 +1988,7 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
                                     value,
                                     result->holder());
    case INTERCEPTOR:
-     return SetPropertyWithInterceptor(name, value, attributes);
+     return SetPropertyWithInterceptor(name, value, attributes, strict);
    case CONSTANT_TRANSITION: {
      // If the same constant function is being added we can simply
      // transition to the target map.
@@ -5476,9 +5494,11 @@ uint32_t JSFunction::SourceHash() {
bool JSFunction::IsInlineable() {
  if (IsBuiltin()) return false;
+ SharedFunctionInfo* shared_info = shared();
  // Check that the function has a script associated with it.
- if (!shared()->script()->IsScript()) return false;
- Code* code = shared()->code();
+ if (!shared_info->script()->IsScript()) return false;
+ if (shared_info->optimization_disabled()) return false;
+ Code* code = shared_info->code();
  if (code->kind() == Code::OPTIMIZED_FUNCTION) return true;
  // If we never ran this (unlikely) then let's try to optimize it.
  if (code->kind() != Code::FUNCTION) return true;
@@ -6285,7 +6305,8 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
      }
      break;
    case STORE_IC:
-     if (extra == StoreIC::kStoreICStrict) {
+   case KEYED_STORE_IC:
+     if (extra == kStrictMode) {
        name = "STRICT";
      }
      break;
@@ -7277,8 +7298,10 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_FLOAT_ELEMENTS: {
-     MaybeObject* value = GetExternalElement(index);
-     if (!value->ToObjectUnchecked()->IsUndefined()) return value;
+     MaybeObject* maybe_value = GetExternalElement(index);
+     Object* value;
+     if (!maybe_value->ToObject(&value)) return maybe_value;
+     if (!value->IsUndefined()) return value;
      break;
    }
    case DICTIONARY_ELEMENTS: {
@@ -7374,8 +7397,10 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_FLOAT_ELEMENTS: {
-     MaybeObject* value = GetExternalElement(index);
-     if (!value->ToObjectUnchecked()->IsUndefined()) return value;
+     MaybeObject* maybe_value = GetExternalElement(index);
+     Object* value;
+     if (!maybe_value->ToObject(&value)) return maybe_value;
+     if (!value->IsUndefined()) return value;
      break;
    }
    case DICTIONARY_ELEMENTS: {

12
deps/v8/src/objects.h

@@ -1361,11 +1361,13 @@ class JSObject: public HeapObject {
  MUST_USE_RESULT MaybeObject* SetProperty(String* key,
                                           Object* value,
-                                          PropertyAttributes attributes);
+                                          PropertyAttributes attributes,
+                                          StrictModeFlag strict);
  MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
                                           String* key,
                                           Object* value,
-                                          PropertyAttributes attributes);
+                                          PropertyAttributes attributes,
+                                          StrictModeFlag strict);
  MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
      LookupResult* result,
      String* name,
@@ -1380,11 +1382,13 @@ class JSObject: public HeapObject {
  MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
      String* name,
      Object* value,
-     PropertyAttributes attributes);
+     PropertyAttributes attributes,
+     StrictModeFlag strict);
  MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor(
      String* name,
      Object* value,
-     PropertyAttributes attributes);
+     PropertyAttributes attributes,
+     StrictModeFlag strict);
  MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
      String* key,
      Object* value,

72
deps/v8/src/parser.cc

@@ -1106,7 +1106,20 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
    }

    Scanner::Location token_loc = scanner().peek_location();
-   Statement* stat = ParseStatement(NULL, CHECK_OK);
+   Statement* stat;
+   if (peek() == Token::FUNCTION) {
+     // FunctionDeclaration is only allowed in the context of SourceElements
+     // (Ecma 262 5th Edition, clause 14):
+     // SourceElement:
+     //    Statement
+     //    FunctionDeclaration
+     // A common language extension is to allow function declarations in
+     // place of any statement. This language extension is disabled in
+     // strict mode.
+     stat = ParseFunctionDeclaration(CHECK_OK);
+   } else {
+     stat = ParseStatement(NULL, CHECK_OK);
+   }

    if (stat == NULL || stat->IsEmpty()) {
      directive_prologue = false;  // End of directive prologue.
@@ -1263,8 +1276,17 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
      return result;
    }

-   case Token::FUNCTION:
+   case Token::FUNCTION: {
+     // In strict mode, FunctionDeclaration is only allowed in the context
+     // of SourceElements.
+     if (temp_scope_->StrictMode()) {
+       ReportMessageAt(scanner().peek_location(), "strict_function",
+                       Vector<const char*>::empty());
+       *ok = false;
+       return NULL;
+     }
      return ParseFunctionDeclaration(ok);
+   }

    case Token::NATIVE:
      return ParseNativeDeclaration(ok);
@@ -1515,6 +1537,11 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
    Consume(Token::VAR);
  } else if (peek() == Token::CONST) {
    Consume(Token::CONST);
+   if (temp_scope_->StrictMode()) {
+     ReportMessage("strict_const", Vector<const char*>::empty());
+     *ok = false;
+     return NULL;
+   }
    mode = Variable::CONST;
    is_const = true;
  } else {
@@ -1634,34 +1661,49 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
    if (top_scope_->is_global_scope()) {
      // Compute the arguments for the runtime call.
-     ZoneList<Expression*>* arguments = new ZoneList<Expression*>(2);
-     // Be careful not to assign a value to the global variable if
-     // we're in a with. The initialization value should not
-     // necessarily be stored in the global object in that case,
-     // which is why we need to generate a separate assignment node.
+     ZoneList<Expression*>* arguments = new ZoneList<Expression*>(3);
      arguments->Add(new Literal(name));  // we have at least 1 parameter
-     if (is_const || (value != NULL && !inside_with())) {
-       arguments->Add(value);
-       value = NULL;  // zap the value to avoid the unnecessary assignment
-     }
-     // Construct the call to Runtime::DeclareGlobal{Variable,Const}Locally
-     // and add it to the initialization statement block. Note that
-     // this function does different things depending on if we have
-     // 1 or 2 parameters.
      CallRuntime* initialize;
      if (is_const) {
+       arguments->Add(value);
+       value = NULL;  // zap the value to avoid the unnecessary assignment
+       // Construct the call to Runtime_InitializeConstGlobal
+       // and add it to the initialization statement block.
+       // Note that the function does different things depending on
+       // the number of arguments (1 or 2).
        initialize =
            new CallRuntime(
                Factory::InitializeConstGlobal_symbol(),
                Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
                arguments);
      } else {
+       // Add strict mode.
+       // We may want to pass a singleton here to avoid Literal allocations.
+       arguments->Add(NewNumberLiteral(
+           temp_scope_->StrictMode() ? kStrictMode : kNonStrictMode));
+       // Be careful not to assign a value to the global variable if
+       // we're in a with. The initialization value should not
+       // necessarily be stored in the global object in that case,
+       // which is why we need to generate a separate assignment node.
+       if (value != NULL && !inside_with()) {
+         arguments->Add(value);
+         value = NULL;  // zap the value to avoid the unnecessary assignment
+       }
+       // Construct the call to Runtime_InitializeVarGlobal
+       // and add it to the initialization statement block.
+       // Note that the function does different things depending on
+       // the number of arguments (2 or 3).
        initialize =
            new CallRuntime(
                Factory::InitializeVarGlobal_symbol(),
                Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
                arguments);
      }

      block->AddStatement(new ExpressionStatement(initialize));
    }

132
deps/v8/src/platform-solaris.cc

@@ -45,7 +45,7 @@
#include <errno.h>
#include <ieeefp.h>  // finite()
#include <signal.h>  // sigemptyset(), etc
-#include <sys/kdi_regs.h>
+#include <sys/regset.h>

#undef MAP_TYPE
@@ -612,11 +612,16 @@ static Sampler* active_sampler_ = NULL;
static pthread_t vm_tid_ = 0;

+static pthread_t GetThreadID() {
+  return pthread_self();
+}
+
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
- if (vm_tid_ != pthread_self()) return;
+ if (vm_tid_ != GetThreadID()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent();
@@ -627,17 +632,10 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = Top::current_vm_state();

-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EBP]);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RBP]);
-#else
- UNIMPLEMENTED();
-#endif
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
  active_sampler_->SampleStack(sample);
  active_sampler_->Tick(sample);
}
@@ -645,26 +643,86 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
class Sampler::PlatformData : public Malloced {
 public:
- PlatformData() {
-   signal_handler_installed_ = false;
+ enum SleepInterval {
+   FULL_INTERVAL,
+   HALF_INTERVAL
+ };
+
+ explicit PlatformData(Sampler* sampler)
+     : sampler_(sampler),
+       signal_handler_installed_(false),
+       vm_tgid_(getpid()),
+       signal_sender_launched_(false) {
  }
+
+ void SignalSender() {
+   while (sampler_->IsActive()) {
+     if (rate_limiter_.SuspendIfNecessary()) continue;
+     if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
+       SendProfilingSignal();
+       Sleep(HALF_INTERVAL);
+       RuntimeProfiler::NotifyTick();
+       Sleep(HALF_INTERVAL);
+     } else {
+       if (sampler_->IsProfiling()) SendProfilingSignal();
+       if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+       Sleep(FULL_INTERVAL);
+     }
+   }
+ }
+
+ void SendProfilingSignal() {
+   if (!signal_handler_installed_) return;
+   pthread_kill(vm_tid_, SIGPROF);
+ }
+
+ void Sleep(SleepInterval full_or_half) {
+   // Convert ms to us and subtract 100 us to compensate for delays
+   // occurring during signal delivery.
+   useconds_t interval = sampler_->interval_ * 1000 - 100;
+   if (full_or_half == HALF_INTERVAL) interval /= 2;
+   int result = usleep(interval);
+#ifdef DEBUG
+   if (result != 0 && errno != EINTR) {
+     fprintf(stderr,
+             "SignalSender usleep error; interval = %u, errno = %d\n",
+             interval,
+             errno);
+     ASSERT(result == 0 || errno == EINTR);
+   }
+#endif
+   USE(result);
+ }
+
+ Sampler* sampler_;
  bool signal_handler_installed_;
  struct sigaction old_signal_handler_;
- struct itimerval old_timer_value_;
+ int vm_tgid_;
+ bool signal_sender_launched_;
+ pthread_t signal_sender_thread_;
+ RuntimeProfilerRateLimiter rate_limiter_;
};
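To make the Sleep() arithmetic above concrete: with a sampler interval of, say, 1 ms (an illustrative value), FULL_INTERVAL sleeps 1 * 1000 - 100 = 900 us, and each HALF_INTERVAL sleeps 900 / 2 = 450 us, so interleaving a profiling signal and a runtime-profiler tick still costs roughly one full interval per loop iteration.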
+static void* SenderEntry(void* arg) {
+  Sampler::PlatformData* data =
+      reinterpret_cast<Sampler::PlatformData*>(arg);
+  data->SignalSender();
+  return 0;
+}
+
Sampler::Sampler(int interval)
    : interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
- data_ = new PlatformData();
+ data_ = new PlatformData(this);
}

Sampler::~Sampler() {
+ ASSERT(!data_->signal_sender_launched_);
  delete data_;
}
@@ -672,43 +730,53 @@ Sampler::~Sampler() {
void Sampler::Start() {
  // There can only be one active sampler at a time on POSIX
  // platforms.
- if (active_sampler_ != NULL) return;
+ ASSERT(!IsActive());
+ vm_tid_ = GetThreadID();

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ data_->signal_handler_installed_ =
+     sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0;

- // Set the itimer to generate a tick for each interval.
- itimerval itimer;
- itimer.it_interval.tv_sec = interval_ / 1000;
- itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
- itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
- itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
- setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
+ // Start a thread that sends SIGPROF signal to VM thread.
+ // Sending the signal ourselves instead of relying on itimer provides
+ // much better accuracy.
+ SetActive(true);
+ if (pthread_create(
+         &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
+   data_->signal_sender_launched_ = true;
+ }

  // Set this sampler as the active sampler.
  active_sampler_ = this;
- active_ = true;
}

void Sampler::Stop() {
+ SetActive(false);
+
+ // Wait for the signal sender to terminate (it will exit after setting
+ // active_ to false).
+ if (data_->signal_sender_launched_) {
+   Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
+   pthread_join(data_->signal_sender_thread_, NULL);
+   data_->signal_sender_launched_ = false;
+ }
+
  // Restore old signal handler.
  if (data_->signal_handler_installed_) {
-   setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
- active_ = false;
}

#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal
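The switch above, from setitimer() to a dedicated sender thread, is the heart of this file's change. A minimal stand-alone sketch of the pattern (a hypothetical program, not V8 code): a helper thread periodically pthread_kill()s the sampled thread with SIGPROF, which gives steadier sample spacing than an interval timer:

    #include <pthread.h>
    #include <signal.h>
    #include <unistd.h>

    static pthread_t sampled_thread;
    static volatile sig_atomic_t ticks = 0;

    static void OnProf(int) { ++ticks; }  // stands in for ProfilerSignalHandler

    static void* Sender(void*) {
      for (int i = 0; i < 5; ++i) {
        pthread_kill(sampled_thread, SIGPROF);  // signal the sampled thread only
        usleep(1000);                           // ~1 ms sampling interval
      }
      return NULL;
    }

    int main() {
      sampled_thread = pthread_self();
      struct sigaction sa = {};
      sa.sa_handler = OnProf;
      sa.sa_flags = SA_RESTART;  // restart interrupted syscalls, as the patch does
      sigaction(SIGPROF, &sa, NULL);

      pthread_t sender;
      pthread_create(&sender, NULL, Sender, NULL);
      pthread_join(sender, NULL);  // SIGPROF is handled while we wait
      return ticks > 0 ? 0 : 1;
    }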

28
deps/v8/src/profile-generator-inl.h

@@ -121,34 +121,6 @@ uint64_t HeapEntry::id() {
  return id_adaptor.returned_id;
}

-template<class Visitor>
-void HeapEntriesMap::UpdateEntries(Visitor* visitor) {
-  for (HashMap::Entry* p = entries_.Start();
-       p != NULL;
-       p = entries_.Next(p)) {
-    EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
-    entry_info->entry = visitor->GetEntry(
-        reinterpret_cast<HeapObject*>(p->key),
-        entry_info->children_count,
-        entry_info->retainers_count);
-    entry_info->children_count = 0;
-    entry_info->retainers_count = 0;
-  }
-}
-
-bool HeapSnapshotGenerator::ReportProgress(bool force) {
-  const int kProgressReportGranularity = 10000;
-  if (control_ != NULL
-      && (force || progress_counter_ % kProgressReportGranularity == 0)) {
-    return
-        control_->ReportProgressValue(progress_counter_, progress_total_) ==
-        v8::ActivityControl::kContinue;
-  }
-  return true;
-}

} }  // namespace v8::internal

#endif  // ENABLE_LOGGING_AND_PROFILING

747
deps/v8/src/profile-generator.cc

@ -1177,12 +1177,6 @@ void HeapGraphPath::Print() {
} }
HeapObject *const HeapSnapshot::kInternalRootObject =
reinterpret_cast<HeapObject*>(1);
HeapObject *const HeapSnapshot::kGcRootsObject =
reinterpret_cast<HeapObject*>(2);
// It is very important to keep objects that form a heap snapshot // It is very important to keep objects that form a heap snapshot
// as small as possible. // as small as possible.
namespace { // Avoid littering the global namespace. namespace { // Avoid littering the global namespace.
@ -1253,19 +1247,28 @@ void HeapSnapshot::AllocateEntries(int entries_count,
} }
HeapEntry* HeapSnapshot::AddEntry(HeapObject* object, static void HeapEntryClearPaint(HeapEntry** entry_ptr) {
int children_count, (*entry_ptr)->clear_paint();
int retainers_count) { }
if (object == kInternalRootObject) {
void HeapSnapshot::ClearPaint() {
entries_.Iterate(HeapEntryClearPaint);
}
HeapEntry* HeapSnapshot::AddRootEntry(int children_count) {
ASSERT(root_entry_ == NULL); ASSERT(root_entry_ == NULL);
ASSERT(retainers_count == 0);
return (root_entry_ = AddEntry(HeapEntry::kObject, return (root_entry_ = AddEntry(HeapEntry::kObject,
"", "",
HeapObjectsMap::kInternalRootObjectId, HeapObjectsMap::kInternalRootObjectId,
0, 0,
children_count, children_count,
retainers_count)); 0));
} else if (object == kGcRootsObject) { }
HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count,
int retainers_count) {
ASSERT(gc_roots_entry_ == NULL); ASSERT(gc_roots_entry_ == NULL);
return (gc_roots_entry_ = AddEntry(HeapEntry::kObject, return (gc_roots_entry_ = AddEntry(HeapEntry::kObject,
"(GC roots)", "(GC roots)",
@ -1273,96 +1276,6 @@ HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
0, 0,
children_count, children_count,
retainers_count)); retainers_count));
} else if (object->IsJSFunction()) {
JSFunction* func = JSFunction::cast(object);
SharedFunctionInfo* shared = func->shared();
return AddEntry(object,
HeapEntry::kClosure,
collection_->GetName(String::cast(shared->name())),
children_count,
retainers_count);
} else if (object->IsJSRegExp()) {
JSRegExp* re = JSRegExp::cast(object);
return AddEntry(object,
HeapEntry::kRegExp,
collection_->GetName(re->Pattern()),
children_count,
retainers_count);
} else if (object->IsJSObject()) {
return AddEntry(object,
HeapEntry::kObject,
collection_->GetName(GetConstructorNameForHeapProfile(
JSObject::cast(object))),
children_count,
retainers_count);
} else if (object->IsString()) {
return AddEntry(object,
HeapEntry::kString,
collection_->GetName(String::cast(object)),
children_count,
retainers_count);
} else if (object->IsCode()) {
return AddEntry(object,
HeapEntry::kCode,
"",
children_count,
retainers_count);
} else if (object->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
return AddEntry(object,
HeapEntry::kCode,
collection_->GetName(String::cast(shared->name())),
children_count,
retainers_count);
} else if (object->IsScript()) {
Script* script = Script::cast(object);
return AddEntry(object,
HeapEntry::kCode,
script->name()->IsString() ?
collection_->GetName(String::cast(script->name())) : "",
children_count,
retainers_count);
} else if (object->IsFixedArray()) {
return AddEntry(object,
HeapEntry::kArray,
"",
children_count,
retainers_count);
} else if (object->IsHeapNumber()) {
return AddEntry(object,
HeapEntry::kHeapNumber,
"number",
children_count,
retainers_count);
}
return AddEntry(object,
HeapEntry::kHidden,
"system",
children_count,
retainers_count);
}
static void HeapEntryClearPaint(HeapEntry** entry_ptr) {
(*entry_ptr)->clear_paint();
}
void HeapSnapshot::ClearPaint() {
entries_.Iterate(HeapEntryClearPaint);
}
HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
HeapEntry::Type type,
const char* name,
int children_count,
int retainers_count) {
return AddEntry(type,
name,
collection_->GetObjectId(object->address()),
object->Size(),
children_count,
retainers_count);
} }
@ -1615,7 +1528,7 @@ HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
reinterpret_cast<HeapEntry*>(1); reinterpret_cast<HeapEntry*>(1);
HeapEntriesMap::HeapEntriesMap() HeapEntriesMap::HeapEntriesMap()
: entries_(HeapObjectsMatch), : entries_(HeapThingsMatch),
entries_count_(0), entries_count_(0),
total_children_count_(0), total_children_count_(0),
total_retainers_count_(0) { total_retainers_count_(0) {
@ -1629,8 +1542,23 @@ HeapEntriesMap::~HeapEntriesMap() {
} }
HeapEntry* HeapEntriesMap::Map(HeapObject* object) { void HeapEntriesMap::AllocateEntries() {
HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), false); for (HashMap::Entry* p = entries_.Start();
p != NULL;
p = entries_.Next(p)) {
EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
entry_info->entry = entry_info->allocator->AllocateEntry(
p->key,
entry_info->children_count,
entry_info->retainers_count);
entry_info->children_count = 0;
entry_info->retainers_count = 0;
}
}
HeapEntry* HeapEntriesMap::Map(HeapThing thing) {
HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
if (cache_entry != NULL) { if (cache_entry != NULL) {
EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(cache_entry->value); EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(cache_entry->value);
return entry_info->entry; return entry_info->entry;
@ -1640,15 +1568,16 @@ HeapEntry* HeapEntriesMap::Map(HeapObject* object) {
} }
void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) { void HeapEntriesMap::Pair(
HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true); HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry) {
HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
ASSERT(cache_entry->value == NULL); ASSERT(cache_entry->value == NULL);
cache_entry->value = new EntryInfo(entry); cache_entry->value = new EntryInfo(entry, allocator);
++entries_count_; ++entries_count_;
} }
void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to, void HeapEntriesMap::CountReference(HeapThing from, HeapThing to,
int* prev_children_count, int* prev_children_count,
int* prev_retainers_count) { int* prev_retainers_count) {
HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false); HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false);
@ -1671,7 +1600,7 @@ void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to,
HeapObjectsSet::HeapObjectsSet() HeapObjectsSet::HeapObjectsSet()
: entries_(HeapEntriesMap::HeapObjectsMatch) { : entries_(HeapEntriesMap::HeapThingsMatch) {
} }
@ -1700,206 +1629,144 @@ void HeapObjectsSet::Insert(Object* obj) {
} }
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot, HeapObject *const V8HeapExplorer::kInternalRootObject =
v8::ActivityControl* control) reinterpret_cast<HeapObject*>(1);
HeapObject *const V8HeapExplorer::kGcRootsObject =
reinterpret_cast<HeapObject*>(2);
V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
SnapshottingProgressReportingInterface* progress)
: snapshot_(snapshot), : snapshot_(snapshot),
control_(control), collection_(snapshot_->collection()),
collection_(snapshot->collection()), progress_(progress),
filler_(NULL) { filler_(NULL) {
} }
-class SnapshotCounter : public HeapSnapshotGenerator::SnapshotFillerInterface {
- public:
-  explicit SnapshotCounter(HeapEntriesMap* entries)
-      : entries_(entries) { }
-  HeapEntry* AddEntry(HeapObject* obj) {
-    entries_->Pair(obj, HeapEntriesMap::kHeapEntryPlaceholder);
-    return HeapEntriesMap::kHeapEntryPlaceholder;
-  }
-  void SetIndexedReference(HeapGraphEdge::Type,
-                           HeapObject* parent_obj,
-                           HeapEntry*,
-                           int,
-                           Object* child_obj,
-                           HeapEntry*) {
-    entries_->CountReference(parent_obj, HeapObject::cast(child_obj));
-  }
-  void SetNamedReference(HeapGraphEdge::Type,
-                         HeapObject* parent_obj,
-                         HeapEntry*,
-                         const char*,
-                         Object* child_obj,
-                         HeapEntry*) {
-    entries_->CountReference(parent_obj, HeapObject::cast(child_obj));
-  }
-  void SetRootShortcutReference(Object* child_obj, HeapEntry*) {
-    entries_->CountReference(
-        HeapSnapshot::kInternalRootObject, HeapObject::cast(child_obj));
-  }
-  void SetRootGcRootsReference() {
-    entries_->CountReference(
-        HeapSnapshot::kInternalRootObject, HeapSnapshot::kGcRootsObject);
-  }
-  void SetStrongRootReference(Object* child_obj, HeapEntry*) {
-    entries_->CountReference(
-        HeapSnapshot::kGcRootsObject, HeapObject::cast(child_obj));
-  }
- private:
-  HeapEntriesMap* entries_;
-};
-
-
-class SnapshotFiller : public HeapSnapshotGenerator::SnapshotFillerInterface {
- public:
-  explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
-      : snapshot_(snapshot),
-        collection_(snapshot->collection()),
-        entries_(entries) { }
-  HeapEntry* AddEntry(HeapObject* obj) {
-    UNREACHABLE();
-    return NULL;
-  }
-  void SetIndexedReference(HeapGraphEdge::Type type,
-                           HeapObject* parent_obj,
-                           HeapEntry* parent_entry,
-                           int index,
-                           Object* child_obj,
-                           HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(parent_obj,
-                             HeapObject::cast(child_obj),
-                             &child_index,
-                             &retainer_index);
-    parent_entry->SetIndexedReference(
-        type, child_index, index, child_entry, retainer_index);
-  }
-  void SetNamedReference(HeapGraphEdge::Type type,
-                         HeapObject* parent_obj,
-                         HeapEntry* parent_entry,
-                         const char* reference_name,
-                         Object* child_obj,
-                         HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(parent_obj, HeapObject::cast(child_obj),
-                             &child_index, &retainer_index);
-    parent_entry->SetNamedReference(type,
-                                    child_index,
-                                    reference_name,
-                                    child_entry,
-                                    retainer_index);
-  }
-  void SetRootGcRootsReference() {
-    int child_index, retainer_index;
-    entries_->CountReference(HeapSnapshot::kInternalRootObject,
-                             HeapSnapshot::kGcRootsObject,
-                             &child_index,
-                             &retainer_index);
-    snapshot_->root()->SetIndexedReference(HeapGraphEdge::kElement,
-                                           child_index,
-                                           child_index + 1,
-                                           snapshot_->gc_roots(),
-                                           retainer_index);
-  }
-  void SetRootShortcutReference(Object* child_obj,
-                                HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(HeapSnapshot::kInternalRootObject,
-                             HeapObject::cast(child_obj),
-                             &child_index,
-                             &retainer_index);
-    snapshot_->root()->SetNamedReference(HeapGraphEdge::kShortcut,
-                                         child_index,
-                                         collection_->GetName(child_index + 1),
-                                         child_entry,
-                                         retainer_index);
-  }
-  void SetStrongRootReference(Object* child_obj,
-                              HeapEntry* child_entry) {
-    int child_index, retainer_index;
-    entries_->CountReference(HeapSnapshot::kGcRootsObject,
-                             HeapObject::cast(child_obj),
-                             &child_index,
-                             &retainer_index);
-    snapshot_->gc_roots()->SetIndexedReference(HeapGraphEdge::kElement,
-                                               child_index,
-                                               child_index + 1,
-                                               child_entry,
-                                               retainer_index);
-  }
- private:
-  HeapSnapshot* snapshot_;
-  HeapSnapshotsCollection* collection_;
-  HeapEntriesMap* entries_;
-};
-
-
-class SnapshotAllocator {
- public:
-  explicit SnapshotAllocator(HeapSnapshot* snapshot)
-      : snapshot_(snapshot) { }
-  HeapEntry* GetEntry(
-      HeapObject* obj, int children_count, int retainers_count) {
-    HeapEntry* entry =
-        snapshot_->AddEntry(obj, children_count, retainers_count);
-    ASSERT(entry != NULL);
-    return entry;
-  }
- private:
-  HeapSnapshot* snapshot_;
-};
+V8HeapExplorer::~V8HeapExplorer() {
+}
+
+
+HeapEntry* V8HeapExplorer::AllocateEntry(
+    HeapThing ptr, int children_count, int retainers_count) {
+  return AddEntry(
+      reinterpret_cast<HeapObject*>(ptr), children_count, retainers_count);
+}
-class RootsReferencesExtractor : public ObjectVisitor {
- public:
-  explicit RootsReferencesExtractor(HeapSnapshotGenerator* generator)
-      : generator_(generator) {
-  }
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) generator_->SetGcRootsReference(*p);
-  }
- private:
-  HeapSnapshotGenerator* generator_;
-};
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
+                                    int children_count,
+                                    int retainers_count) {
+  if (object == kInternalRootObject) {
+    ASSERT(retainers_count == 0);
+    return snapshot_->AddRootEntry(children_count);
+  } else if (object == kGcRootsObject) {
+    return snapshot_->AddGcRootsEntry(children_count, retainers_count);
+  } else if (object->IsJSFunction()) {
+    JSFunction* func = JSFunction::cast(object);
+    SharedFunctionInfo* shared = func->shared();
+    return AddEntry(object,
+                    HeapEntry::kClosure,
+                    collection_->GetName(String::cast(shared->name())),
+                    children_count,
+                    retainers_count);
+  } else if (object->IsJSRegExp()) {
+    JSRegExp* re = JSRegExp::cast(object);
+    return AddEntry(object,
+                    HeapEntry::kRegExp,
+                    collection_->GetName(re->Pattern()),
+                    children_count,
+                    retainers_count);
+  } else if (object->IsJSObject()) {
+    return AddEntry(object,
+                    HeapEntry::kObject,
+                    collection_->GetName(GetConstructorNameForHeapProfile(
+                        JSObject::cast(object))),
+                    children_count,
+                    retainers_count);
+  } else if (object->IsString()) {
+    return AddEntry(object,
+                    HeapEntry::kString,
+                    collection_->GetName(String::cast(object)),
+                    children_count,
+                    retainers_count);
+  } else if (object->IsCode()) {
+    return AddEntry(object,
+                    HeapEntry::kCode,
+                    "",
+                    children_count,
+                    retainers_count);
+  } else if (object->IsSharedFunctionInfo()) {
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+    return AddEntry(object,
+                    HeapEntry::kCode,
+                    collection_->GetName(String::cast(shared->name())),
+                    children_count,
+                    retainers_count);
+  } else if (object->IsScript()) {
+    Script* script = Script::cast(object);
+    return AddEntry(object,
+                    HeapEntry::kCode,
+                    script->name()->IsString() ?
+                        collection_->GetName(String::cast(script->name())) : "",
+                    children_count,
+                    retainers_count);
+  } else if (object->IsFixedArray()) {
+    return AddEntry(object,
+                    HeapEntry::kArray,
+                    "",
+                    children_count,
+                    retainers_count);
+  } else if (object->IsHeapNumber()) {
+    return AddEntry(object,
+                    HeapEntry::kHeapNumber,
+                    "number",
+                    children_count,
+                    retainers_count);
+  }
+  return AddEntry(object,
+                  HeapEntry::kHidden,
+                  "system",
+                  children_count,
+                  retainers_count);
+}
-bool HeapSnapshotGenerator::GenerateSnapshot() {
-  AssertNoAllocation no_alloc;
-  SetProgressTotal(4);  // 2 passes + dominators + sizes.
-
-  // Pass 1. Iterate heap contents to count entries and references.
-  if (!CountEntriesAndReferences()) return false;
-
-  // Allocate and fill entries in the snapshot, allocate references.
-  snapshot_->AllocateEntries(entries_.entries_count(),
-                             entries_.total_children_count(),
-                             entries_.total_retainers_count());
-  SnapshotAllocator allocator(snapshot_);
-  entries_.UpdateEntries(&allocator);
-
-  // Pass 2. Fill references.
-  if (!FillReferences()) return false;
-
-  if (!SetEntriesDominators()) return false;
-  if (!ApproximateRetainedSizes()) return false;
-
-  progress_counter_ = progress_total_;
-  if (!ReportProgress(true)) return false;
-  return true;
-}
-
-
-HeapEntry* HeapSnapshotGenerator::GetEntry(Object* obj) {
-  if (!obj->IsHeapObject()) return NULL;
-  HeapObject* object = HeapObject::cast(obj);
-  HeapEntry* entry = entries_.Map(object);
-  // A new entry.
-  if (entry == NULL) entry = filler_->AddEntry(object);
-  return entry;
-}
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
+                                    HeapEntry::Type type,
+                                    const char* name,
+                                    int children_count,
+                                    int retainers_count) {
+  return snapshot_->AddEntry(type,
+                             name,
+                             collection_->GetObjectId(object->address()),
+                             object->Size(),
+                             children_count,
+                             retainers_count);
+}
+
+
+void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
+  filler->AddEntry(kInternalRootObject);
+  filler->AddEntry(kGcRootsObject);
+}
+
+
+int V8HeapExplorer::EstimateObjectsCount() {
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  int objects_count = 0;
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next(), ++objects_count) {}
+  return objects_count;
+}
 class IndexedReferencesExtractor : public ObjectVisitor {
  public:
-  IndexedReferencesExtractor(HeapSnapshotGenerator* generator,
+  IndexedReferencesExtractor(V8HeapExplorer* generator,
                              HeapObject* parent_obj,
                              HeapEntry* parent_entry,
                              HeapObjectsSet* known_references = NULL)
@@ -1917,7 +1784,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
     }
   }
  private:
-  HeapSnapshotGenerator* generator_;
+  V8HeapExplorer* generator_;
   HeapObject* parent_obj_;
   HeapEntry* parent_;
   HeapObjectsSet* known_references_;
@@ -1925,7 +1792,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
 };

-void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) {
+void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
   HeapEntry* entry = GetEntry(obj);
   if (entry == NULL) return;  // No interest in this object.
@@ -1969,7 +1836,7 @@ void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) {
 }

-void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
                                                      HeapEntry* entry) {
   if (js_obj->IsJSFunction()) {
     HandleScope hs;
@@ -1992,7 +1859,7 @@ void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
 }

-void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
                                                       HeapEntry* entry) {
   if (js_obj->HasFastProperties()) {
     DescriptorArray* descs = js_obj->map()->instance_descriptors();
@@ -2034,7 +1901,7 @@ void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj,
 }

-void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
                                                      HeapEntry* entry) {
   if (js_obj->HasFastElements()) {
     FixedArray* elements = FixedArray::cast(js_obj->elements());
@@ -2061,7 +1928,7 @@ void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
 }

-void HeapSnapshotGenerator::ExtractInternalReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
                                                       HeapEntry* entry) {
   int length = js_obj->GetInternalFieldCount();
   for (int i = 0; i < length; ++i) {
@@ -2071,7 +1938,52 @@ void HeapSnapshotGenerator::ExtractInternalReferences(JSObject* js_obj,
 }
+HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
+  if (!obj->IsHeapObject()) return NULL;
+  return filler_->FindOrAddEntry(obj);
+}
+
+
+class RootsReferencesExtractor : public ObjectVisitor {
+ public:
+  explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
+      : explorer_(explorer) {
+  }
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p);
+  }
+ private:
+  V8HeapExplorer* explorer_;
+};
+
+
+bool V8HeapExplorer::IterateAndExtractReferences(
+    SnapshotFillerInterface* filler) {
+  filler_ = filler;
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  bool interrupted = false;
+  // Heap iteration with filtering must be finished in any case.
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next(), progress_->ProgressStep()) {
+    if (!interrupted) {
+      ExtractReferences(obj);
+      if (!progress_->ProgressReport(false)) interrupted = true;
+    }
+  }
+  if (interrupted) {
+    filler_ = NULL;
+    return false;
+  }
+  SetRootGcRootsReference();
+  RootsReferencesExtractor extractor(this);
+  Heap::IterateRoots(&extractor, VISIT_ALL);
+  filler_ = NULL;
+  return progress_->ProgressReport(false);
+}

-void HeapSnapshotGenerator::SetClosureReference(HeapObject* parent_obj,
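The loop above deliberately drives the filtering heap iterator all the way to the end even after the client cancels; only the extraction work is skipped. A minimal standalone sketch of that pattern, with hypothetical names (Progress, IterateAll are illustrative, not the V8 API):

    #include <vector>

    struct Progress {
      int steps = 0;
      int limit;
      explicit Progress(int limit) : limit(limit) {}
      bool Report() { return ++steps <= limit; }  // false == client cancelled
    };

    template <typename T, typename Fn>
    bool IterateAll(const std::vector<T>& items, Progress* progress, Fn extract) {
      bool interrupted = false;
      for (const T& item : items) {   // the iterator is always exhausted
        if (!interrupted) {
          extract(item);              // real work stops at cancellation
          if (!progress->Report()) interrupted = true;
        }
      }
      return !interrupted;
    }

Breaking out of the loop early would leave the filtering iterator in an inconsistent state, hence the interrupted flag instead of a break.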
+void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
                                          HeapEntry* parent_entry,
                                          String* reference_name,
                                          Object* child_obj) {
@@ -2088,7 +2000,7 @@ void HeapSnapshotGenerator::SetClosureReference(HeapObject* parent_obj,
 }

-void HeapSnapshotGenerator::SetElementReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
                                          HeapEntry* parent_entry,
                                          int index,
                                          Object* child_obj) {
@@ -2105,7 +2017,7 @@ void HeapSnapshotGenerator::SetElementReference(HeapObject* parent_obj,
 }

-void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
                                           HeapEntry* parent_entry,
                                           const char* reference_name,
                                           Object* child_obj) {
@@ -2122,7 +2034,7 @@ void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
 }

-void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
                                           HeapEntry* parent_entry,
                                           int index,
                                           Object* child_obj) {
@@ -2139,7 +2051,7 @@ void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
 }

-void HeapSnapshotGenerator::SetHiddenReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
                                         HeapEntry* parent_entry,
                                         int index,
                                         Object* child_obj) {
@@ -2155,7 +2067,7 @@ void HeapSnapshotGenerator::SetHiddenReference(HeapObject* parent_obj,
 }

-void HeapSnapshotGenerator::SetPropertyReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
                                           HeapEntry* parent_entry,
                                           String* reference_name,
                                           Object* child_obj) {
@@ -2174,7 +2086,7 @@ void HeapSnapshotGenerator::SetPropertyReference(HeapObject* parent_obj,
 }

-void HeapSnapshotGenerator::SetPropertyShortcutReference(
+void V8HeapExplorer::SetPropertyShortcutReference(
     HeapObject* parent_obj,
     HeapEntry* parent_entry,
     String* reference_name,
@@ -2191,52 +2103,221 @@ void HeapSnapshotGenerator::SetPropertyShortcutReference(
 }

-void HeapSnapshotGenerator::SetRootGcRootsReference() {
-  filler_->SetRootGcRootsReference();
+void V8HeapExplorer::SetRootGcRootsReference() {
+  filler_->SetIndexedAutoIndexReference(
+      HeapGraphEdge::kElement,
+      kInternalRootObject, snapshot_->root(),
+      kGcRootsObject, snapshot_->gc_roots());
 }

-void HeapSnapshotGenerator::SetRootShortcutReference(Object* child_obj) {
+void V8HeapExplorer::SetRootShortcutReference(Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   ASSERT(child_entry != NULL);
-  filler_->SetRootShortcutReference(child_obj, child_entry);
+  filler_->SetNamedAutoIndexReference(
+      HeapGraphEdge::kShortcut,
+      kInternalRootObject, snapshot_->root(),
+      child_obj, child_entry);
 }

-void HeapSnapshotGenerator::SetGcRootsReference(Object* child_obj) {
+void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
-    filler_->SetStrongRootReference(child_obj, child_entry);
+    filler_->SetIndexedAutoIndexReference(
+        HeapGraphEdge::kElement,
+        kGcRootsObject, snapshot_->gc_roots(),
+        child_obj, child_entry);
   }
 }
+HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
+                                             v8::ActivityControl* control)
+    : snapshot_(snapshot),
+      control_(control),
+      v8_heap_explorer_(snapshot_, this) {
+}
+
+
+class SnapshotCounter : public SnapshotFillerInterface {
+ public:
+  SnapshotCounter(HeapEntriesAllocator* allocator, HeapEntriesMap* entries)
+      : allocator_(allocator), entries_(entries) { }
+  HeapEntry* AddEntry(HeapThing ptr) {
+    entries_->Pair(ptr, allocator_, HeapEntriesMap::kHeapEntryPlaceholder);
+    return HeapEntriesMap::kHeapEntryPlaceholder;
+  }
+  HeapEntry* FindOrAddEntry(HeapThing ptr) {
+    HeapEntry* entry = entries_->Map(ptr);
+    return entry != NULL ? entry : AddEntry(ptr);
+  }
+  void SetIndexedReference(HeapGraphEdge::Type,
+                           HeapThing parent_ptr,
+                           HeapEntry*,
+                           int,
+                           HeapThing child_ptr,
+                           HeapEntry*) {
+    entries_->CountReference(parent_ptr, child_ptr);
+  }
+  void SetIndexedAutoIndexReference(HeapGraphEdge::Type,
+                                    HeapThing parent_ptr,
+                                    HeapEntry*,
+                                    HeapThing child_ptr,
+                                    HeapEntry*) {
+    entries_->CountReference(parent_ptr, child_ptr);
+  }
+  void SetNamedReference(HeapGraphEdge::Type,
+                         HeapThing parent_ptr,
+                         HeapEntry*,
+                         const char*,
+                         HeapThing child_ptr,
+                         HeapEntry*) {
+    entries_->CountReference(parent_ptr, child_ptr);
+  }
+  void SetNamedAutoIndexReference(HeapGraphEdge::Type,
+                                  HeapThing parent_ptr,
+                                  HeapEntry*,
+                                  HeapThing child_ptr,
+                                  HeapEntry*) {
+    entries_->CountReference(parent_ptr, child_ptr);
+  }
+ private:
+  HeapEntriesAllocator* allocator_;
+  HeapEntriesMap* entries_;
+};
+
+
+class SnapshotFiller : public SnapshotFillerInterface {
+ public:
+  explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
+      : snapshot_(snapshot),
+        collection_(snapshot->collection()),
+        entries_(entries) { }
+  HeapEntry* AddEntry(HeapThing ptr) {
+    UNREACHABLE();
+    return NULL;
+  }
+  HeapEntry* FindOrAddEntry(HeapThing ptr) {
+    HeapEntry* entry = entries_->Map(ptr);
+    return entry != NULL ? entry : AddEntry(ptr);
+  }
+  void SetIndexedReference(HeapGraphEdge::Type type,
+                           HeapThing parent_ptr,
+                           HeapEntry* parent_entry,
+                           int index,
+                           HeapThing child_ptr,
+                           HeapEntry* child_entry) {
+    int child_index, retainer_index;
+    entries_->CountReference(
+        parent_ptr, child_ptr, &child_index, &retainer_index);
+    parent_entry->SetIndexedReference(
+        type, child_index, index, child_entry, retainer_index);
+  }
+  void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+                                    HeapThing parent_ptr,
+                                    HeapEntry* parent_entry,
+                                    HeapThing child_ptr,
+                                    HeapEntry* child_entry) {
+    int child_index, retainer_index;
+    entries_->CountReference(
+        parent_ptr, child_ptr, &child_index, &retainer_index);
+    parent_entry->SetIndexedReference(
+        type, child_index, child_index + 1, child_entry, retainer_index);
+  }
+  void SetNamedReference(HeapGraphEdge::Type type,
+                         HeapThing parent_ptr,
+                         HeapEntry* parent_entry,
+                         const char* reference_name,
+                         HeapThing child_ptr,
+                         HeapEntry* child_entry) {
+    int child_index, retainer_index;
+    entries_->CountReference(
+        parent_ptr, child_ptr, &child_index, &retainer_index);
+    parent_entry->SetNamedReference(
+        type, child_index, reference_name, child_entry, retainer_index);
+  }
+  void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+                                  HeapThing parent_ptr,
+                                  HeapEntry* parent_entry,
+                                  HeapThing child_ptr,
+                                  HeapEntry* child_entry) {
+    int child_index, retainer_index;
+    entries_->CountReference(
+        parent_ptr, child_ptr, &child_index, &retainer_index);
+    parent_entry->SetNamedReference(type,
+                                    child_index,
+                                    collection_->GetName(child_index + 1),
+                                    child_entry,
+                                    retainer_index);
+  }
+ private:
+  HeapSnapshot* snapshot_;
+  HeapSnapshotsCollection* collection_;
+  HeapEntriesMap* entries_;
+};
+
+
+bool HeapSnapshotGenerator::GenerateSnapshot() {
+  AssertNoAllocation no_alloc;
+  SetProgressTotal(4);  // 2 passes + dominators + sizes.
+
+  // Pass 1. Iterate heap contents to count entries and references.
+  if (!CountEntriesAndReferences()) return false;
+
+  // Allocate and fill entries in the snapshot, allocate references.
+  snapshot_->AllocateEntries(entries_.entries_count(),
+                             entries_.total_children_count(),
+                             entries_.total_retainers_count());
+  entries_.AllocateEntries();
+
+  // Pass 2. Fill references.
+  if (!FillReferences()) return false;
+
+  if (!SetEntriesDominators()) return false;
+  if (!ApproximateRetainedSizes()) return false;
+
+  progress_counter_ = progress_total_;
+  if (!ProgressReport(true)) return false;
+  return true;
+}
+
+
+void HeapSnapshotGenerator::ProgressStep() {
+  ++progress_counter_;
+}
+
+
+bool HeapSnapshotGenerator::ProgressReport(bool force) {
+  const int kProgressReportGranularity = 10000;
+  if (control_ != NULL
+      && (force || progress_counter_ % kProgressReportGranularity == 0)) {
+    return
+        control_->ReportProgressValue(progress_counter_, progress_total_) ==
+        v8::ActivityControl::kContinue;
+  }
+  return true;
+}
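ProgressReport only reaches the client every kProgressReportGranularity steps unless forced, so the hot iteration loops pay almost nothing for progress tracking. A self-contained sketch of the throttling idea (Throttled and OnProgress are invented names standing in for the generator and the v8::ActivityControl callback):

    class Throttled {
     public:
      explicit Throttled(int granularity) : granularity_(granularity) {}
      bool Step(bool force = false) {
        ++counter_;
        if (force || counter_ % granularity_ == 0) {
          return OnProgress(counter_);  // false aborts the whole operation
        }
        return true;                    // skip the callback, keep going
      }
     private:
      bool OnProgress(int value) { return true; }  // stub for the client hook
      int counter_ = 0;
      int granularity_;
    };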
 void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
   if (control_ == NULL) return;
-
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  int objects_count = 0;
-  for (HeapObject* obj = iterator.next();
-       obj != NULL;
-       obj = iterator.next(), ++objects_count) {}
-  progress_total_ = objects_count * iterations_count;
+  progress_total_ = v8_heap_explorer_.EstimateObjectsCount() * iterations_count;
   progress_counter_ = 0;
 }

 bool HeapSnapshotGenerator::CountEntriesAndReferences() {
-  SnapshotCounter counter(&entries_);
-  filler_ = &counter;
-  filler_->AddEntry(HeapSnapshot::kInternalRootObject);
-  filler_->AddEntry(HeapSnapshot::kGcRootsObject);
-  return IterateAndExtractReferences();
+  SnapshotCounter counter(&v8_heap_explorer_, &entries_);
+  v8_heap_explorer_.AddRootEntries(&counter);
+  return v8_heap_explorer_.IterateAndExtractReferences(&counter);
 }

 bool HeapSnapshotGenerator::FillReferences() {
   SnapshotFiller filler(snapshot_, &entries_);
-  filler_ = &filler;
-  return IterateAndExtractReferences();
+  return v8_heap_explorer_.IterateAndExtractReferences(&filler);
 }
@@ -2322,7 +2403,7 @@ bool HeapSnapshotGenerator::BuildDominatorTree(
     int remaining = entries_length - changed;
     if (remaining < 0) remaining = 0;
     progress_counter_ = base_progress_counter + remaining;
-    if (!ReportProgress(true)) return false;
+    if (!ProgressReport(true)) return false;
   }
   return true;
 }
@@ -2352,7 +2433,7 @@ bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
   }
   for (int i = 0;
        i < snapshot_->entries()->length();
-       ++i, IncProgressCounter()) {
+       ++i, ProgressStep()) {
     HeapEntry* entry = snapshot_->entries()->at(i);
     int entry_size = entry->self_size();
     for (HeapEntry* dominator = entry->dominator();
@@ -2360,32 +2441,12 @@ bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
          entry = dominator, dominator = entry->dominator()) {
       dominator->add_retained_size(entry_size);
     }
-    if (!ReportProgress()) return false;
+    if (!ProgressReport()) return false;
   }
   return true;
 }

-bool HeapSnapshotGenerator::IterateAndExtractReferences() {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  bool interrupted = false;
-  // Heap iteration with filtering must be finished in any case.
-  for (HeapObject* obj = iterator.next();
-       obj != NULL;
-       obj = iterator.next(), IncProgressCounter()) {
-    if (!interrupted) {
-      ExtractReferences(obj);
-      if (!ReportProgress()) interrupted = true;
-    }
-  }
-  if (interrupted) return false;
-  SetRootGcRootsReference();
-  RootsReferencesExtractor extractor(this);
-  Heap::IterateRoots(&extractor, VISIT_ALL);
-  return ReportProgress();
-}

 void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
   raw_additions_root_ =
       NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));
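The refactored GenerateSnapshot keeps the original two-pass shape: pass 1 only counts nodes and edges so that exactly-sized storage can be allocated, and pass 2 fills the same references into the pre-sized arrays. A minimal sketch of that scheme with simplified data structures (not the V8 classes):

    #include <cstdio>
    #include <vector>

    struct Node { int child_count = 0; std::vector<int> children; };

    int main() {
      std::vector<Node> nodes(3);
      auto count_edge = [&](int from) { ++nodes[from].child_count; };
      auto fill_edge  = [&](int from, int to) { nodes[from].children.push_back(to); };

      count_edge(0); count_edge(0); count_edge(1);        // pass 1: count only
      for (Node& n : nodes) n.children.reserve(n.child_count);  // allocate once
      fill_edge(0, 1); fill_edge(0, 2); fill_edge(1, 2);  // pass 2: no realloc
      std::printf("node 0 has %zu children\n", nodes[0].children.size());
    }

Walking the heap twice is the price paid for never reallocating while references are being recorded, which matters because extraction runs under AssertNoAllocation.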

166
deps/v8/src/profile-generator.h

@@ -681,14 +681,14 @@ class HeapSnapshot {
   void AllocateEntries(
       int entries_count, int children_count, int retainers_count);
-  HeapEntry* AddEntry(
-      HeapObject* object, int children_count, int retainers_count);
   HeapEntry* AddEntry(HeapEntry::Type type,
                       const char* name,
                       uint64_t id,
                       int size,
                       int children_count,
                       int retainers_count);
+  HeapEntry* AddRootEntry(int children_count);
+  HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
   void ClearPaint();
   HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
   HeapEntry* GetEntryById(uint64_t id);
@@ -701,15 +701,7 @@ class HeapSnapshot {
   void Print(int max_depth);
   void PrintEntriesSize();

-  static HeapObject* const kInternalRootObject;
-  static HeapObject* const kGcRootsObject;
-
  private:
-  HeapEntry* AddEntry(HeapObject* object,
-                      HeapEntry::Type type,
-                      const char* name,
-                      int children_count,
-                      int retainers_count);
   HeapEntry* GetNextEntryToInit();

   HeapSnapshotsCollection* collection_;
@@ -873,6 +865,20 @@ class HeapSnapshotsCollection {
 };

+// A typedef for referencing anything that can be snapshotted living
+// in any kind of heap memory.
+typedef void* HeapThing;
+
+
+// An interface that creates HeapEntries by HeapThings.
+class HeapEntriesAllocator {
+ public:
+  virtual ~HeapEntriesAllocator() { }
+  virtual HeapEntry* AllocateEntry(
+      HeapThing ptr, int children_count, int retainers_count) = 0;
+};
+
+
 // The HeapEntriesMap instance is used to track a mapping between
 // real heap objects and their representations in heap snapshots.
 class HeapEntriesMap {
@@ -880,13 +886,12 @@ class HeapEntriesMap {
   HeapEntriesMap();
   ~HeapEntriesMap();

-  HeapEntry* Map(HeapObject* object);
-  void Pair(HeapObject* object, HeapEntry* entry);
-  void CountReference(HeapObject* from, HeapObject* to,
+  void AllocateEntries();
+  HeapEntry* Map(HeapThing thing);
+  void Pair(HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry);
+  void CountReference(HeapThing from, HeapThing to,
                       int* prev_children_count = NULL,
                       int* prev_retainers_count = NULL);
-  template<class Visitor>
-  void UpdateEntries(Visitor* visitor);

   int entries_count() { return entries_count_; }
   int total_children_count() { return total_children_count_; }
@@ -896,18 +901,25 @@ class HeapEntriesMap {
  private:
   struct EntryInfo {
-    explicit EntryInfo(HeapEntry* entry)
-        : entry(entry), children_count(0), retainers_count(0) { }
+    EntryInfo(HeapEntry* entry, HeapEntriesAllocator* allocator)
+        : entry(entry),
+          allocator(allocator),
+          children_count(0),
+          retainers_count(0) {
+    }
     HeapEntry* entry;
+    HeapEntriesAllocator* allocator;
     int children_count;
     int retainers_count;
   };

-  static uint32_t Hash(HeapObject* object) {
+  static uint32_t Hash(HeapThing thing) {
     return ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object)));
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)));
+  }
+  static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
+    return key1 == key2;
   }
-  static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; }

   HashMap entries_;
   int entries_count_;
@@ -934,52 +946,70 @@ class HeapObjectsSet {
 };
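The HeapThing generalization works because the map never dereferences its keys: any pointer can serve as a key by hashing its integer value, with equality as plain pointer identity. A freestanding sketch of that keying scheme (the mixing step stands in for V8's ComputeIntegerHash and is an assumption, not its actual formula):

    #include <cstdint>

    static uint32_t Hash(void* thing) {
      uintptr_t v = reinterpret_cast<uintptr_t>(thing);
      v ^= v >> 16;                      // cheap integer mix so nearby
      return static_cast<uint32_t>(v);   // pointers spread across buckets
    }

    static bool Match(void* key1, void* key2) { return key1 == key2; }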
-class HeapSnapshotGenerator {
- public:
-  class SnapshotFillerInterface {
-   public:
-    virtual ~SnapshotFillerInterface() { }
-    virtual HeapEntry* AddEntry(HeapObject* obj) = 0;
-    virtual void SetIndexedReference(HeapGraphEdge::Type type,
-                                     HeapObject* parent_obj,
-                                     HeapEntry* parent_entry,
-                                     int index,
-                                     Object* child_obj,
-                                     HeapEntry* child_entry) = 0;
-    virtual void SetNamedReference(HeapGraphEdge::Type type,
-                                   HeapObject* parent_obj,
-                                   HeapEntry* parent_entry,
-                                   const char* reference_name,
-                                   Object* child_obj,
-                                   HeapEntry* child_entry) = 0;
-    virtual void SetRootGcRootsReference() = 0;
-    virtual void SetRootShortcutReference(Object* child_obj,
-                                          HeapEntry* child_entry) = 0;
-    virtual void SetStrongRootReference(Object* child_obj,
-                                        HeapEntry* child_entry) = 0;
-  };
-
-  HeapSnapshotGenerator(HeapSnapshot* snapshot,
-                        v8::ActivityControl* control);
-  bool GenerateSnapshot();
+// An interface used to populate a snapshot with nodes and edges.
+class SnapshotFillerInterface {
+ public:
+  virtual ~SnapshotFillerInterface() { }
+  virtual HeapEntry* AddEntry(HeapThing ptr) = 0;
+  virtual HeapEntry* FindOrAddEntry(HeapThing ptr) = 0;
+  virtual void SetIndexedReference(HeapGraphEdge::Type type,
+                                   HeapThing parent_ptr,
+                                   HeapEntry* parent_entry,
+                                   int index,
+                                   HeapThing child_ptr,
+                                   HeapEntry* child_entry) = 0;
+  virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+                                            HeapThing parent_ptr,
+                                            HeapEntry* parent_entry,
+                                            HeapThing child_ptr,
+                                            HeapEntry* child_entry) = 0;
+  virtual void SetNamedReference(HeapGraphEdge::Type type,
+                                 HeapThing parent_ptr,
+                                 HeapEntry* parent_entry,
+                                 const char* reference_name,
+                                 HeapThing child_ptr,
+                                 HeapEntry* child_entry) = 0;
+  virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+                                          HeapThing parent_ptr,
+                                          HeapEntry* parent_entry,
+                                          HeapThing child_ptr,
+                                          HeapEntry* child_entry) = 0;
+};
+
+
+class SnapshottingProgressReportingInterface {
+ public:
+  virtual ~SnapshottingProgressReportingInterface() { }
+  virtual void ProgressStep() = 0;
+  virtual bool ProgressReport(bool force) = 0;
+};
+
+
+// An implementation of V8 heap graph extractor.
+class V8HeapExplorer : public HeapEntriesAllocator {
+ public:
+  V8HeapExplorer(HeapSnapshot* snapshot,
+                 SnapshottingProgressReportingInterface* progress);
+  ~V8HeapExplorer();
+  virtual HeapEntry* AllocateEntry(
+      HeapThing ptr, int children_count, int retainers_count);
+  void AddRootEntries(SnapshotFillerInterface* filler);
+  int EstimateObjectsCount();
+  bool IterateAndExtractReferences(SnapshotFillerInterface* filler);

  private:
-  bool ApproximateRetainedSizes();
-  bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
-                          Vector<HeapEntry*>* dominators);
-  bool CountEntriesAndReferences();
-  HeapEntry* GetEntry(Object* obj);
-  void IncProgressCounter() { ++progress_counter_; }
+  HeapEntry* AddEntry(
+      HeapObject* object, int children_count, int retainers_count);
+  HeapEntry* AddEntry(HeapObject* object,
+                      HeapEntry::Type type,
+                      const char* name,
+                      int children_count,
+                      int retainers_count);
   void ExtractReferences(HeapObject* obj);
   void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
-  bool FillReferences();
-  void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
-  bool IterateAndExtractReferences();
-  inline bool ReportProgress(bool force = false);
-  bool SetEntriesDominators();
   void SetClosureReference(HeapObject* parent_obj,
                            HeapEntry* parent,
                            String* reference_name,
@@ -1011,24 +1041,54 @@ class HeapSnapshotGenerator {
   void SetRootShortcutReference(Object* child);
   void SetRootGcRootsReference();
   void SetGcRootsReference(Object* child);
-  void SetProgressTotal(int iterations_count);
+
+  HeapEntry* GetEntry(Object* obj);

   HeapSnapshot* snapshot_;
-  v8::ActivityControl* control_;
   HeapSnapshotsCollection* collection_;
-  // Mapping from HeapObject* pointers to HeapEntry* pointers.
-  HeapEntriesMap entries_;
-  SnapshotFillerInterface* filler_;
+  SnapshottingProgressReportingInterface* progress_;
   // Used during references extraction to mark heap objects that
   // are references via non-hidden properties.
   HeapObjectsSet known_references_;
-  // Used during snapshot generation.
-  int progress_counter_;
-  int progress_total_;
+  SnapshotFillerInterface* filler_;
+
+  static HeapObject* const kInternalRootObject;
+  static HeapObject* const kGcRootsObject;

   friend class IndexedReferencesExtractor;
   friend class RootsReferencesExtractor;

-  DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
+  DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
+};
+
+
+class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
+ public:
+  HeapSnapshotGenerator(HeapSnapshot* snapshot,
+                        v8::ActivityControl* control);
+  bool GenerateSnapshot();
+
+ private:
+  bool ApproximateRetainedSizes();
+  bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
+                          Vector<HeapEntry*>* dominators);
+  bool CountEntriesAndReferences();
+  bool FillReferences();
+  void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
+  void ProgressStep();
+  bool ProgressReport(bool force = false);
+  bool SetEntriesDominators();
+  void SetProgressTotal(int iterations_count);
+
+  HeapSnapshot* snapshot_;
+  v8::ActivityControl* control_;
+  V8HeapExplorer v8_heap_explorer_;
+  // Mapping from HeapThing pointers to HeapEntry* pointers.
+  HeapEntriesMap entries_;
+  // Used during snapshot generation.
+  int progress_counter_;
+  int progress_total_;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
 };

71
deps/v8/src/runtime-profiler.cc

@@ -35,6 +35,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "mark-compact.h"
 #include "scopeinfo.h"
 #include "top.h"
@@ -100,11 +101,6 @@ static int sampler_ticks_until_threshold_adjustment =
 // The ratio of ticks spent in JS code in percent.
 static Atomic32 js_ratio;

-// The JSFunctions in the sampler window are not GC safe. Old-space
-// pointers are not cleared during mark-sweep collection and therefore
-// the window might contain stale pointers. The window is updated on
-// scavenges and (parts of it) cleared on mark-sweep and
-// mark-sweep-compact.
 static Object* sampler_window[kSamplerWindowSize] = { NULL, };
 static int sampler_window_position = 0;
 static int sampler_window_weight[kSamplerWindowSize] = { 0, };
@@ -134,7 +130,6 @@ void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
 static bool IsOptimizable(JSFunction* function) {
-  if (Heap::InNewSpace(function)) return false;
   Code* code = function->code();
   return code->kind() == Code::FUNCTION && code->optimizable();
 }
@@ -208,16 +203,6 @@ static void ClearSampleBuffer() {
 }

-static void ClearSampleBufferNewSpaceEntries() {
-  for (int i = 0; i < kSamplerWindowSize; i++) {
-    if (Heap::InNewSpace(sampler_window[i])) {
-      sampler_window[i] = NULL;
-      sampler_window_weight[i] = 0;
-    }
-  }
-}
-
 static int LookupSample(JSFunction* function) {
   int weight = 0;
   for (int i = 0; i < kSamplerWindowSize; i++) {
@@ -372,24 +357,6 @@ void RuntimeProfiler::NotifyTick() {
 }

-void RuntimeProfiler::MarkCompactPrologue(bool is_compacting) {
-  if (is_compacting) {
-    // Clear all samples before mark-sweep-compact because every
-    // function might move.
-    ClearSampleBuffer();
-  } else {
-    // Clear only new space entries on mark-sweep since none of the
-    // old-space functions will move.
-    ClearSampleBufferNewSpaceEntries();
-  }
-}
-
-
-bool IsEqual(void* first, void* second) {
-  return first == second;
-}
-
 void RuntimeProfiler::Setup() {
   ClearSampleBuffer();
   // If the ticker hasn't already started, make sure to do so to get
@@ -411,13 +378,41 @@ void RuntimeProfiler::TearDown() {
 }

-Object** RuntimeProfiler::SamplerWindowAddress() {
-  return sampler_window;
+int RuntimeProfiler::SamplerWindowSize() {
+  return kSamplerWindowSize;
 }

-int RuntimeProfiler::SamplerWindowSize() {
-  return kSamplerWindowSize;
+// Update the pointers in the sampler window after a GC.
+void RuntimeProfiler::UpdateSamplesAfterScavenge() {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    Object* function = sampler_window[i];
+    if (function != NULL && Heap::InNewSpace(function)) {
+      MapWord map_word = HeapObject::cast(function)->map_word();
+      if (map_word.IsForwardingAddress()) {
+        sampler_window[i] = map_word.ToForwardingAddress();
+      } else {
+        sampler_window[i] = NULL;
+      }
+    }
+  }
+}
+
+
+void RuntimeProfiler::RemoveDeadSamples() {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    Object* function = sampler_window[i];
+    if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
+      sampler_window[i] = NULL;
+    }
+  }
+}
+
+
+void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    visitor->VisitPointer(&sampler_window[i]);
+  }
 }
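Instead of wiping the sampler window around GC, the new code repairs it: after a copying collection, a new-space pointer is either dead or carries a forwarding address, so the window keeps the forwarded pointer and nulls the rest. A simplified sketch of that fix-up (Obj and in_new_space are stand-ins, not V8 types):

    struct Obj { Obj* forwarding = nullptr; };  // set by the copying collector

    static void UpdateWindow(Obj** window, int size, bool (*in_new_space)(Obj*)) {
      for (int i = 0; i < size; i++) {
        Obj* fn = window[i];
        if (fn != nullptr && in_new_space(fn)) {
          // nullptr here means the object did not survive the scavenge.
          window[i] = fn->forwarding;
        }
      }
    }

Keeping samples alive across GC is what lets IsOptimizable drop its new-space bail-out above: sampled functions no longer have to live in old space to be trusted.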

5
deps/v8/src/runtime-profiler.h

@@ -47,9 +47,10 @@ class RuntimeProfiler : public AllStatic {
   static void Reset();
   static void TearDown();

-  static void MarkCompactPrologue(bool is_compacting);
-  static Object** SamplerWindowAddress();
   static int SamplerWindowSize();
+  static void UpdateSamplesAfterScavenge();
+  static void RemoveDeadSamples();
+  static void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
 };

932
deps/v8/src/runtime.cc

File diff suppressed because it is too large

27
deps/v8/src/runtime.h

@@ -241,7 +241,7 @@ namespace internal {
   F(ResolvePossiblyDirectEval, 4, 2) \
   F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
   \
-  F(SetProperty, -1 /* 3 or 4 */, 1) \
+  F(SetProperty, -1 /* 4 or 5 */, 1) \
   F(DefineOrRedefineDataProperty, 4, 1) \
   F(DefineOrRedefineAccessorProperty, 5, 1) \
   F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
@@ -288,12 +288,12 @@ namespace internal {
   F(DeleteContextSlot, 2, 1) \
   F(LoadContextSlot, 2, 2) \
   F(LoadContextSlotNoReferenceError, 2, 2) \
-  F(StoreContextSlot, 3, 1) \
+  F(StoreContextSlot, 4, 1) \
   \
   /* Declarations and initialization */ \
-  F(DeclareGlobals, 3, 1) \
+  F(DeclareGlobals, 4, 1) \
   F(DeclareContextSlot, 4, 1) \
-  F(InitializeVarGlobal, -1 /* 1 or 2 */, 1) \
+  F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
   F(InitializeConstGlobal, 2, 1) \
   F(InitializeConstContextSlot, 3, 1) \
   F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
@@ -376,7 +376,21 @@ namespace internal {
   \
   F(SetFlags, 1, 1) \
   F(CollectGarbage, 1, 1) \
-  F(GetHeapUsage, 0, 1)
+  F(GetHeapUsage, 0, 1) \
+  \
+  /* LiveObjectList support */ \
+  F(HasLOLEnabled, 0, 1) \
+  F(CaptureLOL, 0, 1) \
+  F(DeleteLOL, 1, 1) \
+  F(DumpLOL, 5, 1) \
+  F(GetLOLObj, 1, 1) \
+  F(GetLOLObjId, 1, 1) \
+  F(GetLOLObjRetainers, 6, 1) \
+  F(GetLOLPath, 3, 1) \
+  F(InfoLOL, 2, 1) \
+  F(PrintLOLObj, 1, 1) \
+  F(ResetLOL, 0, 1) \
+  F(SummarizeLOL, 3, 1)

 #else
 #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
@@ -538,7 +552,8 @@ class Runtime : public AllStatic {
       Handle<Object> object,
       Handle<Object> key,
       Handle<Object> value,
-      PropertyAttributes attr);
+      PropertyAttributes attr,
+      StrictModeFlag strict);

   MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty(
       Handle<JSObject> object,
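The F(...) rows above are a classic X-macro list: each entry carries a runtime function's name, its argument count (-1 meaning variable), and its result size, and the same list is expanded under different definitions of F. A miniature of the pattern, with invented names purely for illustration:

    #define RUNTIME_LIST(F) \
      F(Add, 2, 1)          \
      F(Print, -1, 1)

    // One expansion declares an enum of IDs...
    #define DECLARE_ID(name, nargs, ressize) kRuntime##name,
    enum RuntimeId { RUNTIME_LIST(DECLARE_ID) kNumFunctions };
    #undef DECLARE_ID
    // ...another expansion can emit a {name, nargs} descriptor table from
    // the very same list, so names and argument counts never drift apart.

That single-source property is why the strict-mode change only has to edit the argument counts in one place.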

6
deps/v8/src/spaces.h

@@ -2121,6 +2121,12 @@ class MapSpace : public FixedSpace {
     accounting_stats_.DeallocateBytes(accounting_stats_.Size());
     accounting_stats_.AllocateBytes(new_size);

+    // Flush allocation watermarks.
+    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
+      p->SetAllocationWatermark(p->AllocationTop());
+    }
+    top_page->SetAllocationWatermark(new_top);
+
 #ifdef DEBUG
     if (FLAG_enable_slow_asserts) {
       intptr_t actual_size = 0;

78
deps/v8/src/stub-cache.cc

@@ -498,13 +498,13 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
                                           JSObject* receiver,
                                           int field_index,
                                           Map* transition,
-                                          Code::ExtraICState extra_ic_state) {
+                                          StrictModeFlag strict_mode) {
   PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
   Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::STORE_IC, type, extra_ic_state);
+      Code::STORE_IC, type, strict_mode);
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    StoreStubCompiler compiler(extra_ic_state);
+    StoreStubCompiler compiler(strict_mode);
     { MaybeObject* maybe_code =
           compiler.CompileStoreField(receiver, field_index, transition, name);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -521,13 +521,15 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
 }

-MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) {
+MaybeObject* StubCache::ComputeKeyedStoreSpecialized(
+    JSObject* receiver,
+    StrictModeFlag strict_mode) {
   Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL);
+      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
   String* name = Heap::KeyedStoreSpecialized_symbol();
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler;
+    KeyedStoreStubCompiler compiler(strict_mode);
     { MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver);
       if (!maybe_code->ToObject(&code)) return maybe_code;
     }
@@ -542,7 +544,9 @@ MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) {
 }

-MaybeObject* StubCache::ComputeKeyedStorePixelArray(JSObject* receiver) {
+MaybeObject* StubCache::ComputeKeyedStorePixelArray(
+    JSObject* receiver,
+    StrictModeFlag strict_mode) {
   // Using NORMAL as the PropertyType for array element stores is a misuse. The
   // generated stub always accesses fast elements, not slow-mode fields, but
   // some property type is required for the stub lookup. Note that overloading
@@ -550,11 +554,11 @@ MaybeObject* StubCache::ComputeKeyedStorePixelArray(JSObject* receiver) {
   // other keyed field stores. This is guaranteed to be the case since all field
   // keyed stores that are not array elements go through a generic builtin stub.
   Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL);
+      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
   String* name = Heap::KeyedStorePixelArray_symbol();
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler;
+    KeyedStoreStubCompiler compiler(strict_mode);
     { MaybeObject* maybe_code = compiler.CompileStorePixelArray(receiver);
       if (!maybe_code->ToObject(&code)) return maybe_code;
     }
@@ -598,11 +602,13 @@ ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {

 MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
     JSObject* receiver,
-    bool is_store) {
+    bool is_store,
+    StrictModeFlag strict_mode) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(
           is_store ? Code::KEYED_STORE_IC : Code::KEYED_LOAD_IC,
-          NORMAL);
+          NORMAL,
+          strict_mode);
   ExternalArrayType array_type =
       ElementsKindToExternalArrayType(receiver->GetElementsKind());
   String* name =
@@ -615,9 +621,9 @@ MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     ExternalArrayStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          is_store ? compiler.CompileKeyedStoreStub(array_type, flags) :
-          compiler.CompileKeyedLoadStub(array_type, flags);
+    { MaybeObject* maybe_code = is_store
+          ? compiler.CompileKeyedStoreStub(array_type, flags)
+          : compiler.CompileKeyedLoadStub(array_type, flags);
       if (!maybe_code->ToObject(&code)) return maybe_code;
     }
     if (is_store) {
@@ -637,8 +643,8 @@ MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
 }

-MaybeObject* StubCache::ComputeStoreNormal(Code::ExtraICState extra_ic_state) {
-  return Builtins::builtin(extra_ic_state == StoreIC::kStoreICStrict
+MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
+  return Builtins::builtin((strict_mode == kStrictMode)
                            ? Builtins::StoreIC_Normal_Strict
                            : Builtins::StoreIC_Normal);
 }
@@ -647,12 +653,12 @@ MaybeObject* StubCache::ComputeStoreNormal(Code::ExtraICState extra_ic_state) {
 MaybeObject* StubCache::ComputeStoreGlobal(String* name,
                                            GlobalObject* receiver,
                                            JSGlobalPropertyCell* cell,
-                                           Code::ExtraICState extra_ic_state) {
+                                           StrictModeFlag strict_mode) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::STORE_IC, NORMAL, extra_ic_state);
+      Code::STORE_IC, NORMAL, strict_mode);
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    StoreStubCompiler compiler(extra_ic_state);
+    StoreStubCompiler compiler(strict_mode);
     { MaybeObject* maybe_code =
           compiler.CompileStoreGlobal(receiver, cell, name);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -673,13 +679,13 @@ MaybeObject* StubCache::ComputeStoreCallback(
     String* name,
     JSObject* receiver,
     AccessorInfo* callback,
-    Code::ExtraICState extra_ic_state) {
+    StrictModeFlag strict_mode) {
   ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
   Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::STORE_IC, CALLBACKS, extra_ic_state);
+      Code::STORE_IC, CALLBACKS, strict_mode);
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    StoreStubCompiler compiler(extra_ic_state);
+    StoreStubCompiler compiler(strict_mode);
     { MaybeObject* maybe_code =
           compiler.CompileStoreCallback(receiver, callback, name);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -699,12 +705,12 @@ MaybeObject* StubCache::ComputeStoreCallback(
 MaybeObject* StubCache::ComputeStoreInterceptor(
     String* name,
     JSObject* receiver,
-    Code::ExtraICState extra_ic_state) {
+    StrictModeFlag strict_mode) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::STORE_IC, INTERCEPTOR, extra_ic_state);
+      Code::STORE_IC, INTERCEPTOR, strict_mode);
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    StoreStubCompiler compiler(extra_ic_state);
+    StoreStubCompiler compiler(strict_mode);
     { MaybeObject* maybe_code =
           compiler.CompileStoreInterceptor(receiver, name);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -724,12 +730,14 @@ MaybeObject* StubCache::ComputeStoreInterceptor(
 MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
                                                JSObject* receiver,
                                                int field_index,
-                                               Map* transition) {
+                                               Map* transition,
+                                               StrictModeFlag strict_mode) {
   PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(
+      Code::KEYED_STORE_IC, type, strict_mode);
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler;
+    KeyedStoreStubCompiler compiler(strict_mode);
     { MaybeObject* maybe_code =
           compiler.CompileStoreField(receiver, field_index, transition, name);
       if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -1417,12 +1425,17 @@ MaybeObject* LoadPropertyWithInterceptorForCall(Arguments args) {

 MaybeObject* StoreInterceptorProperty(Arguments args) {
+  ASSERT(args.length() == 4);
   JSObject* recv = JSObject::cast(args[0]);
   String* name = String::cast(args[1]);
   Object* value = args[2];
+  StrictModeFlag strict =
+      static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
+  ASSERT(strict == kStrictMode || strict == kNonStrictMode);
   ASSERT(recv->HasNamedInterceptor());
   PropertyAttributes attr = NONE;
-  MaybeObject* result = recv->SetPropertyWithInterceptor(name, value, attr);
+  MaybeObject* result = recv->SetPropertyWithInterceptor(
+      name, value, attr, strict);
   return result;
 }
@@ -1675,8 +1688,8 @@ MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {

 MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type,
-                                                    extra_ic_state_);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(
+      Code::STORE_IC, type, strict_mode_);
   MaybeObject* result = GetCodeWithFlags(flags, name);
   if (!result->IsFailure()) {
     PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG,
@@ -1691,7 +1704,8 @@ MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {

 MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(
+      Code::KEYED_STORE_IC, type, strict_mode_);
   MaybeObject* result = GetCodeWithFlags(flags, name);
   if (!result->IsFailure()) {
     PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
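Threading strict_mode into ComputeMonomorphicFlags matters for correctness, not just bookkeeping: the flags are the cache key, so a strict and a sloppy stub for the same property must hash differently or a sloppy-mode store stub could be reused in strict code and silently skip throwing. A sketch of the idea with an invented bit layout (not V8's actual Code::Flags encoding):

    #include <cstdint>

    enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };

    static uint32_t ComputeFlags(uint32_t kind, uint32_t type,
                                 StrictModeFlag strict_mode) {
      // Reserving one low bit for strictness keeps otherwise-identical
      // stubs from colliding in the code cache.
      return (kind << 8) | (type << 1) | static_cast<uint32_t>(strict_mode);
    }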

33
deps/v8/src/stub-cache.h

@@ -143,27 +143,27 @@ class StubCache : public AllStatic {
       JSObject* receiver,
       int field_index,
       Map* transition,
-      Code::ExtraICState extra_ic_state);
+      StrictModeFlag strict_mode);

   MUST_USE_RESULT static MaybeObject* ComputeStoreNormal(
-      Code::ExtraICState extra_ic_state);
+      StrictModeFlag strict_mode);

   MUST_USE_RESULT static MaybeObject* ComputeStoreGlobal(
       String* name,
       GlobalObject* receiver,
       JSGlobalPropertyCell* cell,
-      Code::ExtraICState extra_ic_state);
+      StrictModeFlag strict_mode);

   MUST_USE_RESULT static MaybeObject* ComputeStoreCallback(
       String* name,
       JSObject* receiver,
       AccessorInfo* callback,
-      Code::ExtraICState extra_ic_state);
+      StrictModeFlag strict_mode);

   MUST_USE_RESULT static MaybeObject* ComputeStoreInterceptor(
       String* name,
       JSObject* receiver,
-      Code::ExtraICState extra_ic_state);
+      StrictModeFlag strict_mode);

   // ---
@@ -171,17 +171,21 @@ class StubCache : public AllStatic {
       String* name,
       JSObject* receiver,
       int field_index,
-      Map* transition = NULL);
+      Map* transition,
+      StrictModeFlag strict_mode);

   MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
-      JSObject* receiver);
+      JSObject* receiver,
+      StrictModeFlag strict_mode);

   MUST_USE_RESULT static MaybeObject* ComputeKeyedStorePixelArray(
-      JSObject* receiver);
+      JSObject* receiver,
+      StrictModeFlag strict_mode);

   MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
       JSObject* receiver,
-      bool is_store);
+      bool is_store,
+      StrictModeFlag strict_mode);

   // ---
@@ -628,8 +632,8 @@ class KeyedLoadStubCompiler: public StubCompiler {

 class StoreStubCompiler: public StubCompiler {
  public:
-  explicit StoreStubCompiler(Code::ExtraICState extra_ic_state)
-      : extra_ic_state_(extra_ic_state) { }
+  explicit StoreStubCompiler(StrictModeFlag strict_mode)
+      : strict_mode_(strict_mode) { }

   MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
                                                  int index,
@@ -649,12 +653,15 @@ class StoreStubCompiler: public StubCompiler {
  private:
   MaybeObject* GetCode(PropertyType type, String* name);

-  Code::ExtraICState extra_ic_state_;
+  StrictModeFlag strict_mode_;
 };


 class KeyedStoreStubCompiler: public StubCompiler {
  public:
+  explicit KeyedStoreStubCompiler(StrictModeFlag strict_mode)
+      : strict_mode_(strict_mode) { }
+
   MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
                                                  int index,
                                                  Map* transition,
@@ -666,6 +673,8 @@ class KeyedStoreStubCompiler: public StubCompiler {

  private:
   MaybeObject* GetCode(PropertyType type, String* name);
+
+  StrictModeFlag strict_mode_;
 };

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script. // cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3 #define MAJOR_VERSION 3
#define MINOR_VERSION 1 #define MINOR_VERSION 1
#define BUILD_NUMBER 6 #define BUILD_NUMBER 8
#define PATCH_LEVEL 0 #define PATCH_LEVEL 0
#define CANDIDATE_VERSION false #define CANDIDATE_VERSION false

6
deps/v8/src/virtual-frame-heavy-inl.h

@ -82,10 +82,8 @@ void VirtualFrame::Push(Register reg, TypeInfo info) {
} }
void VirtualFrame::Push(Handle<Object> value) { bool VirtualFrame::ConstantPoolOverflowed() {
FrameElement element = return FrameElement::ConstantPoolOverflowed();
FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
elements_.Add(element);
} }

2
deps/v8/src/x64/assembler-x64-inl.h

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:

24
deps/v8/src/x64/assembler-x64.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -2995,6 +2995,28 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
} }
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x54);
emit_sse_operand(dst, src);
}
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x66);
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0x56);
emit_sse_operand(dst, src);
}
void Assembler::xorpd(XMMRegister dst, XMMRegister src) { void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;

4
deps/v8/src/x64/assembler-x64.h

@ -30,7 +30,7 @@
// The original source code covered by the above license above has been // The original source code covered by the above license above has been
// modified significantly by Google Inc. // modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// A lightweight X64 Assembler. // A lightweight X64 Assembler.
@ -1284,6 +1284,8 @@ class Assembler : public Malloced {
void mulsd(XMMRegister dst, XMMRegister src); void mulsd(XMMRegister dst, XMMRegister src);
void divsd(XMMRegister dst, XMMRegister src); void divsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
void orpd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src); void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src); void sqrtsd(XMMRegister dst, XMMRegister src);

2
deps/v8/src/x64/builtins-x64.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:

321
deps/v8/src/x64/code-stubs-x64.cc

@ -1506,12 +1506,25 @@ void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
void TranscendentalCacheStub::Generate(MacroAssembler* masm) { void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Input on stack: // TAGGED case:
// Input:
// rsp[8]: argument (should be number). // rsp[8]: argument (should be number).
// rsp[0]: return address. // rsp[0]: return address.
// Output:
// rax: tagged double result.
// UNTAGGED case:
// Input::
// rsp[0]: return address.
// xmm1: untagged double input argument
// Output:
// xmm1: untagged double result.
Label runtime_call; Label runtime_call;
Label runtime_call_clear_stack; Label runtime_call_clear_stack;
Label input_not_smi; Label skip_cache;
const bool tagged = (argument_type_ == TAGGED);
if (tagged) {
NearLabel input_not_smi;
NearLabel loaded; NearLabel loaded;
// Test that rax is a number. // Test that rax is a number.
__ movq(rax, Operand(rsp, kPointerSize)); __ movq(rax, Operand(rsp, kPointerSize));
@ -1519,18 +1532,18 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Input is a smi. Untag and load it onto the FPU stack. // Input is a smi. Untag and load it onto the FPU stack.
// Then load the bits of the double into rbx. // Then load the bits of the double into rbx.
__ SmiToInteger32(rax, rax); __ SmiToInteger32(rax, rax);
__ subq(rsp, Immediate(kPointerSize)); __ subq(rsp, Immediate(kDoubleSize));
__ cvtlsi2sd(xmm1, rax); __ cvtlsi2sd(xmm1, rax);
__ movsd(Operand(rsp, 0), xmm1); __ movsd(Operand(rsp, 0), xmm1);
__ movq(rbx, xmm1); __ movq(rbx, xmm1);
__ movq(rdx, xmm1); __ movq(rdx, xmm1);
__ fld_d(Operand(rsp, 0)); __ fld_d(Operand(rsp, 0));
__ addq(rsp, Immediate(kPointerSize)); __ addq(rsp, Immediate(kDoubleSize));
__ jmp(&loaded); __ jmp(&loaded);
__ bind(&input_not_smi); __ bind(&input_not_smi);
// Check if input is a HeapNumber. // Check if input is a HeapNumber.
__ Move(rbx, Factory::heap_number_map()); __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
__ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ j(not_equal, &runtime_call); __ j(not_equal, &runtime_call);
// Input is a HeapNumber. Push it on the FPU stack and load its // Input is a HeapNumber. Push it on the FPU stack and load its
@ -1538,8 +1551,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset)); __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
__ movq(rdx, rbx); __ movq(rdx, rbx);
__ bind(&loaded); __ bind(&loaded);
// ST[0] == double value } else { // UNTAGGED.
__ movq(rbx, xmm1);
__ movq(rdx, xmm1);
}
// ST[0] == double value, if TAGGED.
// rbx = bits of double value. // rbx = bits of double value.
// rdx = also bits of double value. // rdx = also bits of double value.
// Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic): // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
@ -1571,7 +1590,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// rax points to the cache for the type type_. // rax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime. // If NULL, the cache hasn't been initialized yet, so go through runtime.
__ testq(rax, rax); __ testq(rax, rax);
__ j(zero, &runtime_call_clear_stack); __ j(zero, &runtime_call_clear_stack); // Only clears stack if TAGGED.
#ifdef DEBUG #ifdef DEBUG
// Check that the layout of cache elements match expectations. // Check that the layout of cache elements match expectations.
{ // NOLINT - doesn't like a single brace on a line. { // NOLINT - doesn't like a single brace on a line.
@ -1597,30 +1616,70 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &cache_miss); __ j(not_equal, &cache_miss);
// Cache hit! // Cache hit!
__ movq(rax, Operand(rcx, 2 * kIntSize)); __ movq(rax, Operand(rcx, 2 * kIntSize));
if (tagged) {
__ fstp(0); // Clear FPU stack. __ fstp(0); // Clear FPU stack.
__ ret(kPointerSize); __ ret(kPointerSize);
} else { // UNTAGGED.
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
}
__ bind(&cache_miss); __ bind(&cache_miss);
// Update cache with new value. // Update cache with new value.
Label nan_result; if (tagged) {
GenerateOperation(masm, &nan_result);
__ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack); __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
} else { // UNTAGGED.
__ AllocateHeapNumber(rax, rdi, &skip_cache);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
__ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
}
GenerateOperation(masm);
__ movq(Operand(rcx, 0), rbx); __ movq(Operand(rcx, 0), rbx);
__ movq(Operand(rcx, 2 * kIntSize), rax); __ movq(Operand(rcx, 2 * kIntSize), rax);
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
if (tagged) {
__ ret(kPointerSize); __ ret(kPointerSize);
} else { // UNTAGGED.
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
__ subq(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), xmm1);
__ fld_d(Operand(rsp, 0));
GenerateOperation(masm);
__ fstp_d(Operand(rsp, 0));
__ movsd(xmm1, Operand(rsp, 0));
__ addq(rsp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
__ EnterInternalFrame();
// Allocate an unused object bigger than a HeapNumber.
__ Push(Smi::FromInt(2 * kDoubleSize));
__ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
__ LeaveInternalFrame();
__ Ret();
}
// Call runtime, doing whatever allocation and cleanup is necessary.
if (tagged) {
__ bind(&runtime_call_clear_stack); __ bind(&runtime_call_clear_stack);
__ fstp(0); __ fstp(0);
__ bind(&runtime_call); __ bind(&runtime_call);
__ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
} else { // UNTAGGED.
__ bind(&nan_result); __ bind(&runtime_call_clear_stack);
__ fstp(0); // Remove argument from FPU stack. __ bind(&runtime_call);
__ LoadRoot(rax, Heap::kNanValueRootIndex); __ AllocateHeapNumber(rax, rdi, &skip_cache);
__ movq(Operand(rcx, 0), rbx); __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
__ movq(Operand(rcx, 2 * kIntSize), rax); __ EnterInternalFrame();
__ ret(kPointerSize); __ push(rax);
__ CallRuntime(RuntimeFunction(), 1);
__ LeaveInternalFrame();
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ Ret();
}
} }
@ -1637,9 +1696,9 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
} }
void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm, void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
Label* on_nan_result) {
// Registers: // Registers:
// rax: Newly allocated HeapNumber, which must be preserved.
// rbx: Bits of input double. Must be preserved. // rbx: Bits of input double. Must be preserved.
// rcx: Pointer to cache entry. Must be preserved. // rcx: Pointer to cache entry. Must be preserved.
// st(0): Input double // st(0): Input double
@ -1661,9 +1720,18 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
__ j(below, &in_range); __ j(below, &in_range);
// Check for infinity and NaN. Both return NaN for sin. // Check for infinity and NaN. Both return NaN for sin.
__ cmpl(rdi, Immediate(0x7ff)); __ cmpl(rdi, Immediate(0x7ff));
__ j(equal, on_nan_result); NearLabel non_nan_result;
__ j(not_equal, &non_nan_result);
// Input is +/-Infinity or NaN. Result is NaN.
__ fstp(0);
__ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
__ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&non_nan_result);
// Use fpmod to restrict argument to the range +/-2*PI. // Use fpmod to restrict argument to the range +/-2*PI.
__ movq(rdi, rax); // Save rax before using fnstsw_ax.
__ fldpi(); __ fldpi();
__ fadd(0); __ fadd(0);
__ fld(1); __ fld(1);
@ -1696,6 +1764,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
// FPU Stack: input % 2*pi, 2*pi, // FPU Stack: input % 2*pi, 2*pi,
__ fstp(0); __ fstp(0);
// FPU Stack: input % 2*pi // FPU Stack: input % 2*pi
__ movq(rax, rdi); // Restore rax, pointer to the new HeapNumber.
__ bind(&in_range); __ bind(&in_range);
switch (type_) { switch (type_) {
case TranscendentalCache::SIN: case TranscendentalCache::SIN:
@ -1948,8 +2017,8 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
__ AbortIfSmi(rax); __ AbortIfSmi(rax);
} }
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
__ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &slow); __ j(not_equal, &slow);
// Operand is a float, negate its value by flipping sign bit. // Operand is a float, negate its value by flipping sign bit.
__ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset)); __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
@ -1978,8 +2047,8 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
} }
// Check if the operand is a heap number. // Check if the operand is a heap number.
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
__ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex); Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &slow); __ j(not_equal, &slow);
// Convert the heap number in rax to an untagged integer in rcx. // Convert the heap number in rax to an untagged integer in rcx.
@ -2012,6 +2081,157 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
} }
void MathPowStub::Generate(MacroAssembler* masm) {
// Registers are used as follows:
// rdx = base
// rax = exponent
// rcx = temporary, result
Label allocate_return, call_runtime;
// Load input parameters.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
__ movq(rax, Operand(rsp, 1 * kPointerSize));
// Save 1 in xmm3 - we need this several times later on.
__ movl(rcx, Immediate(1));
__ cvtlsi2sd(xmm3, rcx);
Label exponent_nonsmi;
Label base_nonsmi;
// If the exponent is a heap number go to that specific case.
__ JumpIfNotSmi(rax, &exponent_nonsmi);
__ JumpIfNotSmi(rdx, &base_nonsmi);
// Optimized version when both exponent and base are smis.
Label powi;
__ SmiToInteger32(rdx, rdx);
__ cvtlsi2sd(xmm0, rdx);
__ jmp(&powi);
// Exponent is a smi and base is a heapnumber.
__ bind(&base_nonsmi);
__ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Optimized version of pow if exponent is a smi.
// xmm0 contains the base.
__ bind(&powi);
__ SmiToInteger32(rax, rax);
// Save exponent in base as we need to check if exponent is negative later.
// We know that base and exponent are in different registers.
__ movq(rdx, rax);
// Get absolute value of exponent.
NearLabel no_neg;
__ cmpl(rax, Immediate(0));
__ j(greater_equal, &no_neg);
__ negl(rax);
__ bind(&no_neg);
// Load xmm1 with 1.
__ movsd(xmm1, xmm3);
NearLabel while_true;
NearLabel no_multiply;
__ bind(&while_true);
__ shrl(rax, Immediate(1));
__ j(not_carry, &no_multiply);
__ mulsd(xmm1, xmm0);
__ bind(&no_multiply);
__ mulsd(xmm0, xmm0);
__ j(not_zero, &while_true);
// Base has the original value of the exponent - if the exponent is
// negative return 1/result.
__ testl(rdx, rdx);
__ j(positive, &allocate_return);
// Special case if xmm1 has reached infinity.
__ divsd(xmm3, xmm1);
__ movsd(xmm1, xmm3);
__ xorpd(xmm0, xmm0);
__ ucomisd(xmm0, xmm1);
__ j(equal, &call_runtime);
__ jmp(&allocate_return);
// Exponent (or both) is a heapnumber - no matter what we should now work
// on doubles.
__ bind(&exponent_nonsmi);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
// Test if exponent is nan.
__ ucomisd(xmm1, xmm1);
__ j(parity_even, &call_runtime);
NearLabel base_not_smi;
NearLabel handle_special_cases;
__ JumpIfNotSmi(rdx, &base_not_smi);
__ SmiToInteger32(rdx, rdx);
__ cvtlsi2sd(xmm0, rdx);
__ jmp(&handle_special_cases);
__ bind(&base_not_smi);
__ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
__ andl(rcx, Immediate(HeapNumber::kExponentMask));
__ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
// base is NaN or +/-Infinity
__ j(greater_equal, &call_runtime);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// base is in xmm0 and exponent is in xmm1.
__ bind(&handle_special_cases);
NearLabel not_minus_half;
// Test for -0.5.
// Load xmm2 with -0.5.
__ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
__ movq(xmm2, rcx);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
__ j(not_equal, &not_minus_half);
// Calculates reciprocal of square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorpd(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ divsd(xmm3, xmm1);
__ movsd(xmm1, xmm3);
__ jmp(&allocate_return);
// Test for 0.5.
__ bind(&not_minus_half);
// Load xmm2 with 0.5.
// Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
__ addsd(xmm2, xmm3);
// xmm2 now has 0.5.
__ ucomisd(xmm2, xmm1);
__ j(not_equal, &call_runtime);
// Calculates square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorpd(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ bind(&allocate_return);
__ AllocateHeapNumber(rcx, rax, &call_runtime);
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
__ movq(rax, rcx);
__ ret(2 * kPointerSize);
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax. // The key is in rdx and the parameter count is in rax.
@ -4613,6 +4833,61 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kStringCompare, 2, 1); __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
} }
void StringCharAtStub::Generate(MacroAssembler* masm) {
// Expects two arguments (object, index) on the stack:
// Stack frame on entry.
// rsp[0]: return address
// rsp[8]: index
// rsp[16]: object
Register object = rbx;
Register index = rax;
Register scratch1 = rcx;
Register scratch2 = rdx;
Register result = rax;
__ pop(scratch1); // Return address.
__ pop(index);
__ pop(object);
__ push(scratch1);
Label need_conversion;
Label index_out_of_range;
Label done;
StringCharAtGenerator generator(object,
index,
scratch1,
scratch2,
result,
&need_conversion,
&need_conversion,
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm);
__ jmp(&done);
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
__ Move(result, Factory::empty_string());
__ jmp(&done);
__ bind(&need_conversion);
// Move smi zero into the result register, which will trigger
// conversion.
__ Move(result, Smi::FromInt(0));
__ jmp(&done);
StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm, call_helper);
__ bind(&done);
__ ret(0);
}
void ICCompareStub::GenerateSmis(MacroAssembler* masm) { void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS); ASSERT(state_ == CompareIC::SMIS);
NearLabel miss; NearLabel miss;

18
deps/v8/src/x64/code-stubs-x64.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -39,15 +39,23 @@ namespace internal {
// TranscendentalCache runtime function. // TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub { class TranscendentalCacheStub: public CodeStub {
public: public:
explicit TranscendentalCacheStub(TranscendentalCache::Type type) enum ArgumentType {
: type_(type) {} TAGGED = 0,
UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
};
explicit TranscendentalCacheStub(TranscendentalCache::Type type,
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm); void Generate(MacroAssembler* masm);
private: private:
TranscendentalCache::Type type_; TranscendentalCache::Type type_;
ArgumentType argument_type_;
Major MajorKey() { return TranscendentalCache; } Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; } int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction(); Runtime::FunctionId RuntimeFunction();
void GenerateOperation(MacroAssembler* masm, Label* on_nan_result); void GenerateOperation(MacroAssembler* masm);
}; };

2
deps/v8/src/x64/codegen-x64-inl.h

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved. // Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:

38
deps/v8/src/x64/codegen-x64.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -2747,7 +2747,8 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
frame_->EmitPush(rsi); // The context is the first argument. frame_->EmitPush(rsi); // The context is the first argument.
frame_->EmitPush(kScratchRegister); frame_->EmitPush(kScratchRegister);
frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0)); frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3); frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored. // Return value is ignored.
} }
@ -4605,7 +4606,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
// by initialization. // by initialization.
value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else { } else {
value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3); frame_->Push(Smi::FromInt(strict_mode_flag()));
value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
} }
// Storing a variable must keep the (new) value on the expression // Storing a variable must keep the (new) value on the expression
// stack. This is necessary for compiling chained assignment // stack. This is necessary for compiling chained assignment
@ -4914,8 +4916,9 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->key()); Load(property->key());
Load(property->value()); Load(property->value());
if (property->emit_store()) { if (property->emit_store()) {
frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
// Ignore the result. // Ignore the result.
Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3); Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
} else { } else {
frame_->Drop(3); frame_->Drop(3);
} }
@ -7030,7 +7033,8 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1); ASSERT_EQ(args->length(), 1);
Load(args->at(0)); Load(args->at(0));
TranscendentalCacheStub stub(TranscendentalCache::SIN); TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1); Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result); frame_->Push(&result);
} }
@ -7039,7 +7043,8 @@ void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1); ASSERT_EQ(args->length(), 1);
Load(args->at(0)); Load(args->at(0));
TranscendentalCacheStub stub(TranscendentalCache::COS); TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1); Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result); frame_->Push(&result);
} }
@ -7048,7 +7053,8 @@ void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) { void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1); ASSERT_EQ(args->length(), 1);
Load(args->at(0)); Load(args->at(0));
TranscendentalCacheStub stub(TranscendentalCache::LOG); TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1); Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result); frame_->Push(&result);
} }
@ -8072,8 +8078,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
public: public:
DeferredReferenceSetKeyedValue(Register value, DeferredReferenceSetKeyedValue(Register value,
Register key, Register key,
Register receiver) Register receiver,
: value_(value), key_(key), receiver_(receiver) { StrictModeFlag strict_mode)
: value_(value),
key_(key),
receiver_(receiver),
strict_mode_(strict_mode) {
set_comment("[ DeferredReferenceSetKeyedValue"); set_comment("[ DeferredReferenceSetKeyedValue");
} }
@ -8086,6 +8096,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
Register key_; Register key_;
Register receiver_; Register receiver_;
Label patch_site_; Label patch_site_;
StrictModeFlag strict_mode_;
}; };
@ -8137,7 +8148,9 @@ void DeferredReferenceSetKeyedValue::Generate() {
} }
// Call the IC stub. // Call the IC stub.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); Handle<Code> ic(Builtins::builtin(
(strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET); __ Call(ic, RelocInfo::CODE_TARGET);
// The delta from the start of the map-compare instructions (initial movq) // The delta from the start of the map-compare instructions (initial movq)
// to the test instruction. We use masm_-> directly here instead of the // to the test instruction. We use masm_-> directly here instead of the
@ -8478,7 +8491,8 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
DeferredReferenceSetKeyedValue* deferred = DeferredReferenceSetKeyedValue* deferred =
new DeferredReferenceSetKeyedValue(result.reg(), new DeferredReferenceSetKeyedValue(result.reg(),
key.reg(), key.reg(),
receiver.reg()); receiver.reg(),
strict_mode_flag());
// Check that the receiver is not a smi. // Check that the receiver is not a smi.
__ JumpIfSmi(receiver.reg(), deferred->entry_label()); __ JumpIfSmi(receiver.reg(), deferred->entry_label());
@ -8540,7 +8554,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
deferred->BindExit(); deferred->BindExit();
} else { } else {
result = frame()->CallKeyedStoreIC(); result = frame()->CallKeyedStoreIC(strict_mode_flag());
// Make sure that we do not have a test instruction after the // Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to // call. A test instruction after the call is used to
// indicate that we have generated an inline version of the // indicate that we have generated an inline version of the

2
deps/v8/src/x64/codegen-x64.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:

2
deps/v8/src/x64/cpu-x64.cc

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:

2
deps/v8/src/x64/debug-x64.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:

146
deps/v8/src/x64/deoptimizer-x64.cc

@ -224,7 +224,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
// ok: // ok:
// //
ASSERT(*(call_target_address - 3) == 0x73 && // jae ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x05 && // offset *(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call *(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x90; // nop *(call_target_address - 3) = 0x90; // nop
*(call_target_address - 2) = 0x90; // nop *(call_target_address - 2) = 0x90; // nop
@ -245,14 +245,154 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) == 0x90 && // nop *(call_target_address - 2) == 0x90 && // nop
*(call_target_address - 1) == 0xe8); // call *(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae *(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x05; // offset *(call_target_address - 2) = 0x07; // offset
Assembler::set_target_address_at(call_target_address, Assembler::set_target_address_at(call_target_address,
check_code->entry()); check_code->entry());
} }
static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
// Read the number of frames.
value = it.Next();
if (value == 1) return i;
}
}
UNREACHABLE();
return -1;
}
void Deoptimizer::DoComputeOsrOutputFrame() { void Deoptimizer::DoComputeOsrOutputFrame() {
UNIMPLEMENTED(); DeoptimizationInputData* data = DeoptimizationInputData::cast(
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
ASSERT(bailout_id_ == ast_id);
int bailout_id = LookupBailoutId(data, ast_id);
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
TranslationIterator iterator(translations, translation_index);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
USE(function);
ASSERT(function == function_);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
unsigned fixed_size = ComputeFixedSize(function_);
unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
PrintF(" => node=%u, frame=%d->%d]\n",
ast_id,
input_frame_size,
output_frame_size);
}
// There's only one output frame in the OSR case.
output_count_ = 1;
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
unsigned output_offset = output_frame_size - kPointerSize;
int parameter_count = function_->shared()->formal_parameter_count() + 1;
for (int i = 0; i < parameter_count; ++i) {
output_[0]->SetFrameSlot(output_offset, 0);
output_offset -= kPointerSize;
}
// Translate the incoming parameters. This may overwrite some of the
// incoming argument slots we've just cleared.
int input_offset = input_frame_size - kPointerSize;
bool ok = true;
int limit = input_offset - (parameter_count * kPointerSize);
while (ok && input_offset > limit) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
for (int i = 0; ok && i < 4; i++) {
intptr_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] (fixed part)\n",
output_offset,
input_value,
input_offset);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
output_offset -= kPointerSize;
}
// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// If translation of any command failed, continue using the input frame.
if (!ok) {
delete output_[0];
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
intptr_t pc = reinterpret_cast<intptr_t>(
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function));
function->PrintName();
PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
}
} }

12
deps/v8/src/x64/disasm-x64.cc

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved. // Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -1040,14 +1040,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer(", %s", NameOfXMMRegister(regop)); AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else { } else {
const char* mnemonic = "?"; const char* mnemonic = "?";
if (opcode == 0x57) { if (opcode == 0x50) {
mnemonic = "movmskpd";
} else if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x56) {
mnemonic = "orpd";
} else if (opcode == 0x57) {
mnemonic = "xorpd"; mnemonic = "xorpd";
} else if (opcode == 0x2E) { } else if (opcode == 0x2E) {
mnemonic = "ucomisd"; mnemonic = "ucomisd";
} else if (opcode == 0x2F) { } else if (opcode == 0x2F) {
mnemonic = "comisd"; mnemonic = "comisd";
} else if (opcode == 0x50) {
mnemonic = "movmskpd";
} else { } else {
UnimplementedInstruction(); UnimplementedInstruction();
} }

2
deps/v8/src/x64/frames-x64.cc

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved. // Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:

2
deps/v8/src/x64/frames-x64.h

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved. // Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:

387
deps/v8/src/x64/full-codegen-x64.cc

@ -207,24 +207,24 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Move(dot_arguments_slot, rcx, rbx, rdx); Move(dot_arguments_slot, rcx, rbx, rdx);
} }
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
// Visit the declarations and body unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ Declarations");
scope()->VisitIllegalRedeclaration(this);
} else {
{ Comment cmnt(masm_, "[ Declarations"); { Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a // For named function expressions, declare the function name as a
// constant. // constant.
if (scope()->is_function_scope() && scope()->function() != NULL) { if (scope()->is_function_scope() && scope()->function() != NULL) {
EmitDeclaration(scope()->function(), Variable::CONST, NULL); EmitDeclaration(scope()->function(), Variable::CONST, NULL);
} }
// Visit all the explicit declarations unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
scope()->VisitIllegalRedeclaration(this);
} else {
VisitDeclarations(scope()->declarations()); VisitDeclarations(scope()->declarations());
} }
}
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
{ Comment cmnt(masm_, "[ Stack check"); { Comment cmnt(masm_, "[ Stack check");
PrepareForBailout(info->function(), NO_REGISTERS); PrepareForBailout(info->function(), NO_REGISTERS);
@ -241,9 +241,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
VisitStatements(function()->body()); VisitStatements(function()->body());
ASSERT(loop_depth() == 0); ASSERT(loop_depth() == 0);
} }
}
// Always emit a 'return undefined' in case control fell off the end of
// the body.
{ Comment cmnt(masm_, "[ return <undefined>;"); { Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the body.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex); __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
EmitReturnSequence(); EmitReturnSequence();
} }
@ -267,6 +269,13 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
// the deoptimization input data found in the optimized code. // the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId()); RecordStackCheck(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
// in a test instruction after the call so we can extract it from the OSR
// builtin.
ASSERT(loop_depth() > 0);
__ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
__ bind(&ok); __ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR // Record a mapping of the OSR id to this PC. This is used if the OSR
@ -318,13 +327,6 @@ void FullCodeGenerator::EmitReturnSequence() {
} }
FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
Token::Value op, Expression* left, Expression* right) {
ASSERT(ShouldInlineSmiCase(op));
return kNoConstants;
}
void FullCodeGenerator::EffectContext::Plug(Slot* slot) const { void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
} }
@ -543,7 +545,7 @@ void FullCodeGenerator::DoTest(Label* if_true,
__ j(equal, if_true); __ j(equal, if_true);
__ CompareRoot(result_register(), Heap::kFalseValueRootIndex); __ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
__ j(equal, if_false); __ j(equal, if_false);
ASSERT_EQ(0, kSmiTag); STATIC_ASSERT(kSmiTag == 0);
__ SmiCompare(result_register(), Smi::FromInt(0)); __ SmiCompare(result_register(), Smi::FromInt(0));
__ j(equal, if_false); __ j(equal, if_false);
Condition is_smi = masm_->CheckSmi(result_register()); Condition is_smi = masm_->CheckSmi(result_register());
@ -733,7 +735,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
prop->key()->AsLiteral()->handle()->IsSmi()); prop->key()->AsLiteral()->handle()->IsSmi());
__ Move(rcx, prop->key()->AsLiteral()->handle()); __ Move(rcx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); Handle<Code> ic(Builtins::builtin(is_strict()
? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET); EmitCallIC(ic, RelocInfo::CODE_TARGET);
} }
} }
@ -750,7 +754,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ push(rsi); // The context is the first argument. __ push(rsi); // The context is the first argument.
__ Push(pairs); __ Push(pairs);
__ Push(Smi::FromInt(is_eval() ? 1 : 0)); __ Push(Smi::FromInt(is_eval() ? 1 : 0));
__ CallRuntime(Runtime::kDeclareGlobals, 3); __ Push(Smi::FromInt(strict_mode_flag()));
__ CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored. // Return value is ignored.
} }
@ -851,7 +856,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
VisitForAccumulatorValue(stmt->enumerable()); VisitForAccumulatorValue(stmt->enumerable());
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex); __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit); __ j(equal, &exit);
__ CompareRoot(rax, Heap::kNullValueRootIndex); Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmpq(rax, null_value);
__ j(equal, &exit); __ j(equal, &exit);
// Convert the object to a JS object. // Convert the object to a JS object.
@ -865,12 +872,61 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert); __ bind(&done_convert);
__ push(rax); __ push(rax);
// BUG(867): Check cache validity in generated code. This is a fast // Check cache validity in generated code. This is a fast case for
// case for the JSObject::IsSimpleEnum cache validity checks. If we // the JSObject::IsSimpleEnum cache validity checks. If we cannot
// cannot guarantee cache validity, call the runtime system to check // guarantee cache validity, call the runtime system to check cache
// cache validity or get the property names in a fixed array. // validity or get the property names in a fixed array.
Label next, call_runtime;
Register empty_fixed_array_value = r8;
__ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Register empty_descriptor_array_value = r9;
__ LoadRoot(empty_descriptor_array_value,
Heap::kEmptyDescriptorArrayRootIndex);
__ movq(rcx, rax);
__ bind(&next);
// Check that there are no elements. Register rcx contains the
// current JS object we've reached through the prototype chain.
__ cmpq(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
__ j(not_equal, &call_runtime);
// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in rbx for the subsequent
// prototype load.
__ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
__ cmpq(rdx, empty_descriptor_array_value);
__ j(equal, &call_runtime);
// Check that there is an enum cache in the non-empty instance
// descriptors (rdx). This is the case if the next enumeration
// index field does not contain a smi.
__ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
__ JumpIfSmi(rdx, &call_runtime);
// For all objects but the receiver, check that the cache is empty.
NearLabel check_prototype;
__ cmpq(rcx, rax);
__ j(equal, &check_prototype);
__ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmpq(rdx, empty_fixed_array_value);
__ j(not_equal, &call_runtime);
// Load the prototype from the map and loop if non-null.
__ bind(&check_prototype);
__ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
__ cmpq(rcx, null_value);
__ j(not_equal, &next);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
NearLabel use_cache;
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache);
// Get the set of properties to enumerate. // Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(rax); // Duplicate the enumerable object on the stack. __ push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1); __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
@ -883,6 +939,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(not_equal, &fixed_array); __ j(not_equal, &fixed_array);
// We got a map in register rax. Get the enumeration cache from it. // We got a map in register rax. Get the enumeration cache from it.
__ bind(&use_cache);
__ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset)); __ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
__ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset)); __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset)); __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
@ -971,8 +1028,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) { bool pretenure) {
// Use the fast case closure allocation code that allocates in new // Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning. // space for nested functions that don't need literals cloning. If
if (scope()->is_function_scope() && // we're running with the --always-opt or the --prepare-always-opt
// flag, we need to use the runtime function so that the new function
// we are creating here gets a chance to have its code optimized and
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt &&
!FLAG_prepare_always_opt &&
scope()->is_function_scope() &&
info->num_literals() == 0 && info->num_literals() == 0 &&
!pretenure) { !pretenure) {
FastNewClosureStub stub; FastNewClosureStub stub;
@ -1082,8 +1145,11 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
// Check that last extension is NULL. // Check that last extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow); __ j(not_equal, slow);
__ movq(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
return ContextOperand(temp, slot->index()); // This function is used only for loads, not stores, so it's safe to
// return an rsi-based operand (the write barrier cannot be allowed to
// destroy the rsi register).
return ContextOperand(context, slot->index());
} }
@ -1333,7 +1399,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key); VisitForStackValue(key);
VisitForStackValue(value); VisitForStackValue(value);
if (property->emit_store()) { if (property->emit_store()) {
__ CallRuntime(Runtime::kSetProperty, 3); __ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ CallRuntime(Runtime::kSetProperty, 4);
} else { } else {
__ Drop(3); __ Drop(3);
} }
@ -1509,14 +1576,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
} }
Token::Value op = expr->binary_op(); Token::Value op = expr->binary_op();
ConstantOperand constant = ShouldInlineSmiCase(op)
? GetConstantOperand(op, expr->target(), expr->value())
: kNoConstants;
ASSERT(constant == kRightConstant || constant == kNoConstants);
if (constant == kNoConstants) {
__ push(rax); // Left operand goes on the stack. __ push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value()); VisitForAccumulatorValue(expr->value());
}
OverwriteMode mode = expr->value()->ResultOverwriteAllowed() OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT ? OVERWRITE_RIGHT
@ -1528,8 +1589,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
op, op,
mode, mode,
expr->target(), expr->target(),
expr->value(), expr->value());
constant);
} else { } else {
EmitBinaryOp(op, mode); EmitBinaryOp(op, mode);
} }
@ -1580,10 +1640,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op, Token::Value op,
OverwriteMode mode, OverwriteMode mode,
Expression* left, Expression* left,
Expression* right, Expression* right) {
ConstantOperand constant) {
ASSERT(constant == kNoConstants); // Only handled case.
// Do combined smi check of the operands. Left operand is on the // Do combined smi check of the operands. Left operand is on the
// stack (popped into rdx). Right operand is in rax but moved into // stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier. // rcx to make the shifts easier.
@ -1680,7 +1737,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ movq(rdx, rax); __ movq(rdx, rax);
__ pop(rax); // Restore value. __ pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle()); __ Move(rcx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET); EmitCallIC(ic, RelocInfo::CODE_TARGET);
break; break;
} }
@ -1701,7 +1760,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ pop(rdx); __ pop(rdx);
} }
__ pop(rax); // Restore value. __ pop(rax); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET); EmitCallIC(ic, RelocInfo::CODE_TARGET);
break; break;
} }
@ -1730,57 +1791,76 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
: Builtins::StoreIC_Initialize)); : Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) { } else if (op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization // Like var declarations, const declarations are hoisted to function
// of const variables. Const assignments are simply skipped. // scope. However, unlike var initializers, const initializers are able
Label done; // to drill a hole to that function context, even from inside a 'with'
// context. We thus bypass the normal static scope lookup.
Slot* slot = var->AsSlot(); Slot* slot = var->AsSlot();
Label skip;
switch (slot->type()) { switch (slot->type()) {
case Slot::PARAMETER: case Slot::PARAMETER:
// No const parameters.
UNREACHABLE();
break;
case Slot::LOCAL: case Slot::LOCAL:
if (op == Token::INIT_CONST) {
// Detect const reinitialization by checking for the hole value.
__ movq(rdx, Operand(rbp, SlotOffset(slot))); __ movq(rdx, Operand(rbp, SlotOffset(slot)));
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex); __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done); __ j(not_equal, &skip);
__ movq(Operand(rbp, SlotOffset(slot)), rax);
break;
case Slot::CONTEXT: {
__ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
__ movq(rdx, ContextOperand(rcx, slot->index()));
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
__ movq(ContextOperand(rcx, slot->index()), rax);
int offset = Context::SlotOffset(slot->index());
__ movq(rdx, rax); // Preserve the stored value in eax.
__ RecordWrite(rcx, offset, rdx, rbx);
break;
} }
case Slot::LOOKUP:
__ push(rax);
__ push(rsi);
__ Push(var->name());
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
break;
}
__ bind(&skip);
} else if (var->mode() != Variable::CONST) {
// Perform the assignment for non-const variables. Const assignments
// are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
// Perform the assignment. // Perform the assignment.
__ movq(Operand(rbp, SlotOffset(slot)), rax); __ movq(Operand(rbp, SlotOffset(slot)), rax);
break; break;
case Slot::CONTEXT: { case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, rcx); MemOperand target = EmitSlotSearch(slot, rcx);
if (op == Token::INIT_CONST) {
// Detect const reinitialization by checking for the hole value.
__ movq(rdx, target);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done);
}
// Perform the assignment and issue the write barrier. // Perform the assignment and issue the write barrier.
__ movq(target, rax); __ movq(target, rax);
// The value of the assignment is in rax. RecordWrite clobbers its // The value of the assignment is in rax. RecordWrite clobbers its
// register arguments. // register arguments.
__ movq(rdx, rax); __ movq(rdx, rax);
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; int offset = Context::SlotOffset(slot->index());
__ RecordWrite(rcx, offset, rdx, rbx); __ RecordWrite(rcx, offset, rdx, rbx);
break; break;
} }
case Slot::LOOKUP: case Slot::LOOKUP:
// Call the runtime for the assignment. The runtime will ignore // Call the runtime for the assignment.
// const reinitialization.
__ push(rax); // Value. __ push(rax); // Value.
__ push(rsi); // Context. __ push(rsi); // Context.
__ Push(var->name()); __ Push(var->name());
if (op == Token::INIT_CONST) { __ Push(Smi::FromInt(strict_mode_flag()));
// The runtime will ignore const redeclaration. __ CallRuntime(Runtime::kStoreContextSlot, 4);
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
__ CallRuntime(Runtime::kStoreContextSlot, 3);
}
break; break;
} }
__ bind(&done);
} }
} }
@@ -1809,7 +1889,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   } else {
     __ pop(rdx);
   }
-  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      is_strict() ? Builtins::StoreIC_Initialize_Strict
+                  : Builtins::StoreIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1847,7 +1929,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   }
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+                  : Builtins::KeyedStoreIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
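Every store-IC site in this diff now selects a strict-mode variant of its initialize stub, so that failed assignments can throw instead of being silently ignored. The selection pattern, written out as a hypothetical standalone helper (the diff inlines the ternary at each call site with V8's real Builtins constants):

    // Hypothetical model of the per-site stub selection.
    enum StoreStub { StoreIC_Initialize, StoreIC_Initialize_Strict };

    StoreStub StoreStubFor(bool is_strict) {
      return is_strict ? StoreIC_Initialize_Strict : StoreIC_Initialize;
    }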
@@ -1963,6 +2047,27 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
 }
 
 
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+                                                      int arg_count) {
+  // Push copy of the first argument or undefined if it doesn't exist.
+  if (arg_count > 0) {
+    __ push(Operand(rsp, arg_count * kPointerSize));
+  } else {
+    __ PushRoot(Heap::kUndefinedValueRootIndex);
+  }
+
+  // Push the receiver of the enclosing function and do runtime call.
+  __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
+
+  // Push the strict mode flag.
+  __ Push(Smi::FromInt(strict_mode_flag()));
+
+  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
+                 : Runtime::kResolvePossiblyDirectEval, 4);
+}
+
+
 void FullCodeGenerator::VisitCall(Call* expr) {
 #ifdef DEBUG
   // We want to verify that RecordJSReturnSite gets called on all paths
@@ -1990,21 +2095,30 @@ void FullCodeGenerator::VisitCall(Call* expr) {
       VisitForStackValue(args->at(i));
     }
 
-    // Push copy of the function - found below the arguments.
-    __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
-
-    // Push copy of the first argument or undefined if it doesn't exist.
-    if (arg_count > 0) {
-      __ push(Operand(rsp, arg_count * kPointerSize));
-    } else {
-      __ PushRoot(Heap::kUndefinedValueRootIndex);
+    // If we know that eval can only be shadowed by eval-introduced
+    // variables we attempt to load the global eval function directly
+    // in generated code. If we succeed, there is no need to perform a
+    // context lookup in the runtime system.
+    Label done;
+    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
+      Label slow;
+      EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
+                                        NOT_INSIDE_TYPEOF,
+                                        &slow);
+      // Push the function and resolve eval.
+      __ push(rax);
+      EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+      __ jmp(&done);
+      __ bind(&slow);
     }
 
-    // Push the receiver of the enclosing function and do runtime call.
-    __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
-    // Push the strict mode flag.
-    __ Push(Smi::FromInt(strict_mode_flag()));
-    __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
+    // Push copy of the function (found below the arguments) and
+    // resolve eval.
+    __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+    EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+    if (done.is_linked()) {
+      __ bind(&done);
+    }
 
     // The runtime call returns a pair of values in rax (function) and
     // rdx (receiver). Touch up the stack with the right values.
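Two paths now reach the eval resolution: when the callee is a DYNAMIC_GLOBAL slot and no 'with'/eval extension object intervenes, the generated code loads the global eval directly and calls the NoLookup runtime entry; otherwise it falls back to the full context lookup. Either way the runtime receives four values. A sketch of that argument tuple (an illustrative model inferred from the four pushes above, not a quote of runtime.cc):

    // Illustrative argument bundle for kResolvePossiblyDirectEval[NoLookup].
    struct ResolveEvalArgs {
      void* callee;           // the function being called (possibly eval)
      void* first_argument;   // args[0], or undefined when arg_count == 0
      void* receiver;         // receiver of the enclosing function
      int   strict_mode_smi;  // strict-mode flag of the calling code
    };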
@@ -2621,7 +2735,8 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  __ CallRuntime(Runtime::kMath_pow, 2);
+  MathPowStub stub;
+  __ CallStub(&stub);
   context()->Plug(rax);
 }
@@ -2805,7 +2920,8 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
 
 void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::SIN);
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::TAGGED);
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2815,7 +2931,8 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
 
 void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::COS);
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::TAGGED);
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2825,7 +2942,8 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
 
 void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::LOG);
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::TAGGED);
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2877,7 +2995,73 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
+  Label done;
+  Label slow_case;
+  Register object = rax;
+  Register index_1 = rbx;
+  Register index_2 = rcx;
+  Register elements = rdi;
+  Register temp = rdx;
+  __ movq(object, Operand(rsp, 2 * kPointerSize));
+  // Fetch the map and check if array is in fast case.
+  // Check that object doesn't require security checks and
+  // has no indexed interceptor.
+  __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
+  __ j(below, &slow_case);
+  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
+           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+  __ j(not_zero, &slow_case);
+
+  // Check the object's elements are in fast case and writable.
+  __ movq(elements, FieldOperand(object, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &slow_case);
+
+  // Check that both indices are smis.
+  __ movq(index_1, Operand(rsp, 1 * kPointerSize));
+  __ movq(index_2, Operand(rsp, 0 * kPointerSize));
+  __ JumpIfNotBothSmi(index_1, index_2, &slow_case);
+
+  // Check that both indices are valid.
+  // The JSArray length field is a smi since the array is in fast case mode.
+  __ movq(temp, FieldOperand(object, JSArray::kLengthOffset));
+  __ SmiCompare(temp, index_1);
+  __ j(below_equal, &slow_case);
+  __ SmiCompare(temp, index_2);
+  __ j(below_equal, &slow_case);
+
+  __ SmiToInteger32(index_1, index_1);
+  __ SmiToInteger32(index_2, index_2);
+  // Bring addresses into index_1 and index_2.
+  __ lea(index_1, FieldOperand(elements, index_1, times_pointer_size,
+                               FixedArray::kHeaderSize));
+  __ lea(index_2, FieldOperand(elements, index_2, times_pointer_size,
+                               FixedArray::kHeaderSize));
+
+  // Swap elements.  Use object and temp as scratch registers.
+  __ movq(object, Operand(index_1, 0));
+  __ movq(temp, Operand(index_2, 0));
+  __ movq(Operand(index_2, 0), object);
+  __ movq(Operand(index_1, 0), temp);
+
+  Label new_space;
+  __ InNewSpace(elements, temp, equal, &new_space);
+
+  __ movq(object, elements);
+  __ RecordWriteHelper(object, index_1, temp);
+  __ RecordWriteHelper(elements, index_2, temp);
+
+  __ bind(&new_space);
+  // We are done. Drop elements from the stack, and return undefined.
+  __ addq(rsp, Immediate(3 * kPointerSize));
+  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&slow_case);
   __ CallRuntime(Runtime::kSwapElements, 3);
+
+  __ bind(&done);
   context()->Plug(rax);
 }
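The inlined fast path above avoids the runtime call for the common case. Its guards and the swap itself, modeled in plain C++ (TrySwapFast and the vector-of-pointers representation are illustrative, not V8 types):

    #include <utility>
    #include <vector>

    // Illustrative model of the fast path: bail out (slow case) unless both
    // indices are valid for a fast-elements array, then swap in place. The
    // real code must additionally run the write barrier when the backing
    // store lives in old space, which this model omits.
    bool TrySwapFast(std::vector<void*>& elements, long i, long j) {
      long n = static_cast<long>(elements.size());
      if (i < 0 || j < 0 || i >= n || j >= n) return false;  // -> runtime
      std::swap(elements[i], elements[j]);
      return true;
    }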
@@ -3000,9 +3184,12 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
+
+  if (FLAG_debug_code) {
+    __ AbortIfNotString(rax);
+  }
+
   __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
   ASSERT(String::kHashShift >= kSmiTagSize);
   __ IndexFromHash(rax, rax);
@@ -3355,7 +3542,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     case NAMED_PROPERTY: {
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       __ pop(rdx);
-      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(
+          is_strict() ? Builtins::StoreIC_Initialize_Strict
+                      : Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3370,7 +3559,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
     case KEYED_PROPERTY: {
       __ pop(rcx);
       __ pop(rdx);
-      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(
+          is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+                      : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3715,6 +3906,22 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
 
 void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+      __ IncrementCounter(&Counters::named_load_full, 1);
+      break;
+    case Code::KEYED_LOAD_IC:
+      __ IncrementCounter(&Counters::keyed_load_full, 1);
+      break;
+    case Code::STORE_IC:
+      __ IncrementCounter(&Counters::named_store_full, 1);
+      break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(&Counters::keyed_store_full, 1);
+    default:
+      break;
+  }
   __ call(ic, RelocInfo::CODE_TARGET);
   if (patch_site != NULL && patch_site->is_bound()) {
     patch_site->EmitPatchInfo();

27
deps/v8/src/x64/ic-x64.cc

@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -766,7 +766,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
 }
 
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -813,7 +814,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&slow);
   __ Integer32ToSmi(rcx, rcx);
   __ bind(&slow_with_tagged_index);
-  GenerateRuntimeSetProperty(masm);
+  GenerateRuntimeSetProperty(masm, strict_mode);
   // Never returns to here.
 
   // Check whether the elements is a pixel array.
@@ -1474,7 +1475,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                  Code::ExtraICState extra_ic_state) {
+                                  StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -1486,7 +1487,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
   Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC,
-                                         extra_ic_state);
+                                         strict_mode);
   StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
 
   // Cache miss: Jump to runtime.
@@ -1593,7 +1594,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
 }
 
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+                                  StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -1604,14 +1606,17 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
   __ push(rdx);
   __ push(rcx);
   __ push(rax);
-  __ push(rbx);
+  __ Push(Smi::FromInt(NONE));  // PropertyAttributes
+  __ Push(Smi::FromInt(strict_mode));
+  __ push(rbx);  // return address
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
 
 
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                              StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -1623,10 +1628,12 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
   __ push(rdx);  // receiver
   __ push(rcx);  // key
   __ push(rax);  // value
+  __ Push(Smi::FromInt(NONE));         // PropertyAttributes
+  __ Push(Smi::FromInt(strict_mode));  // Strict mode.
   __ push(rbx);  // return address
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
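Both of these runtime stores now pass five arguments instead of three; the two extra slots carry the property attributes (NONE) and the strict-mode flag. A hedged model of what the runtime side does with the flag (the shape is inferred from the five pushes above, not quoted from runtime.cc):

    #include <stdexcept>

    enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };

    // Illustrative runtime-side view of the five-argument kSetProperty call:
    // strict mode turns a silently ignored failed store into a thrown error.
    void SetPropertyModel(bool store_succeeded, StrictModeFlag strict_mode) {
      if (!store_succeeded && strict_mode == kStrictMode) {
        throw std::runtime_error("TypeError: assignment failed");
      }
    }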

2
deps/v8/src/x64/jump-target-x64.cc

@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

241
deps/v8/src/x64/lithium-codegen-x64.cc

@@ -77,6 +77,7 @@ bool LCodeGen::GenerateCode() {
   return GeneratePrologue() &&
          GenerateBody() &&
          GenerateDeferredCode() &&
+         GenerateJumpTable() &&
          GenerateSafepointTable();
 }
@@ -240,6 +241,16 @@ LInstruction* LCodeGen::GetNextInstruction() {
 }
 
+bool LCodeGen::GenerateJumpTable() {
+  for (int i = 0; i < jump_table_.length(); i++) {
+    JumpTableEntry* info = jump_table_[i];
+    __ bind(&(info->label_));
+    __ Jump(info->address_, RelocInfo::RUNTIME_ENTRY);
+  }
+  return !is_aborted();
+}
+
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
   for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
@@ -512,10 +523,17 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
   if (cc == no_condition) {
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    NearLabel done;
-    __ j(NegateCondition(cc), &done);
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-    __ bind(&done);
+    JumpTableEntry* jump_info = NULL;
+    // We often have several deopts to the same entry, reuse the last
+    // jump entry if this is the case.
+    if (jump_table_.length() > 0 &&
+        jump_table_[jump_table_.length() - 1]->address_ == entry) {
+      jump_info = jump_table_[jump_table_.length() - 1];
+    } else {
+      jump_info = new JumpTableEntry(entry);
+      jump_table_.Add(jump_info);
+    }
+    __ j(cc, &jump_info->label_);
   }
 }
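Each conditional deopt site previously emitted a full jump-to-runtime sequence inline; it now emits only a short conditional jump to a shared per-target trampoline that GenerateJumpTable binds at the end of the code object. The dedup compares against the most recent entry only, which suffices because deopts to the same target tend to be emitted back to back. A standalone sketch of that reuse rule (Entry and GetEntry are illustrative names):

    #include <vector>

    struct Entry { const void* address; /* plus a Label in the real code */ };

    // Illustrative: reuse the last table entry when the target repeats,
    // otherwise append a new one - mirroring DeoptimizeIf above.
    Entry* GetEntry(std::vector<Entry*>& table, const void* addr) {
      if (!table.empty() && table.back()->address == addr) return table.back();
      table.push_back(new Entry{addr});
      return table.back();
    }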
@@ -527,7 +545,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   Handle<DeoptimizationInputData> data =
       Factory::NewDeoptimizationInputData(length, TENURED);
 
-  data->SetTranslationByteArray(*translations_.CreateByteArray());
+  Handle<ByteArray> translations = translations_.CreateByteArray();
+  data->SetTranslationByteArray(*translations);
   data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
 
   Handle<FixedArray> literals =
@@ -686,13 +705,13 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
       break;
     }
     case CodeStub::StringCharAt: {
-      // TODO(1116): Add StringCharAt stub to x64.
-      Abort("Unimplemented: %s", "StringCharAt Stub");
+      StringCharAtStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::MathPow: {
-      // TODO(1115): Add MathPow stub to x64.
-      Abort("Unimplemented: %s", "MathPow Stub");
+      MathPowStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::NumberToString: {
@@ -711,7 +730,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
       break;
     }
     case CodeStub::TranscendentalCache: {
-      TranscendentalCacheStub stub(instr->transcendental_type());
+      TranscendentalCacheStub stub(instr->transcendental_type(),
+                                   TranscendentalCacheStub::TAGGED);
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
@@ -1579,7 +1599,20 @@ static Condition BranchCondition(HHasInstanceType* instr) {
 
 void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
-  Abort("Unimplemented: %s", "DoHasInstanceType");
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ testl(input, Immediate(kSmiTagMask));
+  NearLabel done, is_false;
+  __ j(zero, &is_false);
+  __ CmpObjectType(input, TestType(instr->hydrogen()), result);
+  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  __ jmp(&done);
+  __ bind(&is_false);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
 }
@@ -1599,7 +1632,17 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
 
 void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
-  Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  __ testl(FieldOperand(input, String::kHashFieldOffset),
+           Immediate(String::kContainsCachedArrayIndexMask));
+  NearLabel done;
+  __ j(zero, &done);  // Keep 'true' only when the mask bits are clear.
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
 }
@@ -1795,9 +1838,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   __ push(ToRegister(instr->InputAt(0)));
   __ Push(instr->function());
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ movq(kScratchRegister, rax);
   __ PopSafepointRegisters();
   __ testq(kScratchRegister, kScratchRegister);
@@ -2271,12 +2312,105 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
 
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoDeferredMathAbsTaggedHeapNumber");
+  Register input_reg = ToRegister(instr->InputAt(0));
+  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(not_equal, instr->environment());
+
+  Label done;
+  Register tmp = input_reg.is(rax) ? rcx : rax;
+  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  Label negative;
+  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it. We do not need to patch the stack since |input| and
+  // |result| are the same register and |input| will be restored
+  // unchanged by popping safepoint registers.
+  __ testl(tmp, Immediate(HeapNumber::kSignMask));
+  __ j(not_zero, &negative);
+  __ jmp(&done);
+
+  __ bind(&negative);
+
+  Label allocated, slow;
+  __ AllocateHeapNumber(tmp, tmp2, &slow);
+  __ jmp(&allocated);
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  // Set the pointer to the new heap number in tmp.
+  if (!tmp.is(rax)) {
+    __ movq(tmp, rax);
+  }
+
+  // Restore input_reg after call to runtime.
+  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+  __ bind(&allocated);
+  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ shl(tmp2, Immediate(1));
+  __ shr(tmp2, Immediate(1));
+  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+  __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+  __ bind(&done);
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+  Register input_reg = ToRegister(instr->InputAt(0));
+  __ testl(input_reg, input_reg);
+  Label is_positive;
+  __ j(not_sign, &is_positive);
+  __ negl(input_reg);  // Sets flags.
+  DeoptimizeIf(negative, instr->environment());
+  __ bind(&is_positive);
 }
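The shl/shr-by-one pair in the deferred path is a branch-free fabs on the raw HeapNumber payload: shifting the 64-bit value left then right by one bit clears the IEEE-754 sign bit and leaves exponent and mantissa intact. The same computation in portable C++ (illustrative, not V8 code):

    #include <cstdint>
    #include <cstring>

    // What "shl tmp2, 1; shr tmp2, 1" computes on the number's bits.
    double AbsViaShift(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits = (bits << 1) >> 1;  // drop the sign bit
      std::memcpy(&x, &bits, sizeof x);
      return x;
    }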
 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoMathAbs");
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LUnaryMathOperation* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+   private:
+    LUnaryMathOperation* instr_;
+  };
+
+  ASSERT(instr->InputAt(0)->Equals(instr->result()));
+  Representation r = instr->hydrogen()->value()->representation();
+
+  if (r.IsDouble()) {
+    XMMRegister scratch = xmm0;
+    XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+    __ xorpd(scratch, scratch);
+    __ subsd(scratch, input_reg);
+    __ andpd(input_reg, scratch);
+  } else if (r.IsInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {  // Tagged case.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input_reg = ToRegister(instr->InputAt(0));
+    // Smi check.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
 }
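The double fast path uses another branch-free identity: x and 0.0 - x differ only in the sign bit, so ANDing their bit patterns yields |x|, which is exactly what the xorpd/subsd/andpd triple computes. A portable check of the identity (illustrative):

    #include <cstdint>
    #include <cstring>

    // Model of: xorpd scratch,scratch; subsd scratch,input; andpd input,scratch.
    double AbsViaAnd(double x) {
      double neg = 0.0 - x;  // flips only the sign bit
      uint64_t a, b;
      std::memcpy(&a, &x, sizeof a);
      std::memcpy(&b, &neg, sizeof b);
      uint64_t r = a & b;    // sign bits disagree, so the AND clears them
      std::memcpy(&x, &r, sizeof x);
      return x;
    }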
@@ -2355,22 +2489,78 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
 
 void LCodeGen::DoPower(LPower* instr) {
-  Abort("Unimplemented: %s", "DoPower");
+  LOperand* left = instr->InputAt(0);
+  XMMRegister left_reg = ToDoubleRegister(left);
+  ASSERT(!left_reg.is(xmm1));
+  LOperand* right = instr->InputAt(1);
+  XMMRegister result_reg = ToDoubleRegister(instr->result());
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  if (exponent_type.IsDouble()) {
+    __ PrepareCallCFunction(2);
+    // Move arguments to correct registers
+    __ movsd(xmm0, left_reg);
+    ASSERT(ToDoubleRegister(right).is(xmm1));
+    __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+  } else if (exponent_type.IsInteger32()) {
+    __ PrepareCallCFunction(2);
+    // Move arguments to correct registers: xmm0 and edi (not rdi).
+    // On Windows, the registers are xmm0 and edx.
+    __ movsd(xmm0, left_reg);
+#ifdef _WIN64
+    ASSERT(ToRegister(right).is(rdx));
+#else
+    ASSERT(ToRegister(right).is(rdi));
+#endif
+    __ CallCFunction(ExternalReference::power_double_int_function(), 2);
+  } else {
+    ASSERT(exponent_type.IsTagged());
+    CpuFeatures::Scope scope(SSE2);
+    Register right_reg = ToRegister(right);
+
+    Label non_smi, call;
+    __ JumpIfNotSmi(right_reg, &non_smi);
+    __ SmiToInteger32(right_reg, right_reg);
+    __ cvtlsi2sd(xmm1, right_reg);
+    __ jmp(&call);
+
+    __ bind(&non_smi);
+    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, kScratchRegister);
+    DeoptimizeIf(not_equal, instr->environment());
+    __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
+
+    __ bind(&call);
+    __ PrepareCallCFunction(2);
+    // Move arguments to correct registers xmm0 and xmm1.
+    __ movsd(xmm0, left_reg);
+    // Right argument is already in xmm1.
+    __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+  }
+  // Return value is in xmm0.
+  __ movsd(result_reg, xmm0);
 }
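DoPower routes every case to a C helper: double exponents go straight to power_double_double, untagged int32 exponents to power_double_int, and tagged exponents are first unboxed into xmm1 (deoptimizing if they are not heap numbers). An integer-power helper of this kind is typically exponentiation by squaring; a hedged standalone version (V8's actual helper may differ in edge-case handling):

    // Illustrative power_double_int: exponentiation by squaring.
    double PowerDoubleInt(double base, int exponent) {
      bool negative = exponent < 0;
      unsigned e = negative ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
      double result = 1.0;
      for (; e > 0; e >>= 1) {
        if (e & 1) result *= base;  // multiply in the set bits
        base *= base;               // square once per bit position
      }
      return negative ? 1.0 / result : result;
    }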
 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoMathLog");
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
 
 
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoMathCos");
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
 
 
 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoMathSin");
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
@@ -2414,6 +2604,7 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   int arity = instr->arity();
   Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
@@ -2506,7 +2697,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->hydrogen()->name());
-  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
+                         : Builtins::StoreIC_Initialize));
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
@@ -2575,7 +2768,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->value()).is(rax));
 
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+                         : Builtins::KeyedStoreIC_Initialize));
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }

12
deps/v8/src/x64/lithium-codegen-x64.h

@@ -53,6 +53,7 @@ class LCodeGen BASE_EMBEDDED {
         current_instruction_(-1),
         instructions_(chunk->instructions()),
         deoptimizations_(4),
+        jump_table_(4),
         deoptimization_literals_(8),
         inlined_function_count_(0),
         scope_(chunk->graph()->info()->scope()),
@@ -147,6 +148,7 @@ class LCodeGen BASE_EMBEDDED {
   bool GeneratePrologue();
   bool GenerateBody();
   bool GenerateDeferredCode();
+  bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
   void CallCode(Handle<Code> code,
@@ -186,6 +188,7 @@ class LCodeGen BASE_EMBEDDED {
   XMMRegister ToDoubleRegister(int index) const;
 
   // Specific math operations - used from DoUnaryMathOperation.
+  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
   void DoMathAbs(LUnaryMathOperation* instr);
   void DoMathFloor(LUnaryMathOperation* instr);
   void DoMathRound(LUnaryMathOperation* instr);
@@ -233,6 +236,14 @@ class LCodeGen BASE_EMBEDDED {
   // Emits code for pushing a constant operand.
   void EmitPushConstantOperand(LOperand* operand);
 
+  struct JumpTableEntry {
+    inline JumpTableEntry(Address address)
+        : label_(),
+          address_(address) { }
+    Label label_;
+    Address address_;
+  };
+
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -241,6 +252,7 @@ class LCodeGen BASE_EMBEDDED {
   int current_instruction_;
   const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry*> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;

34
deps/v8/src/x64/lithium-x64.cc

@@ -1158,9 +1158,8 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
 
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new LInstanceOfKnownGlobal(UseRegisterAtStart(instr->value()));
-  MarkAsSaveDoubles(result);
-  return AssignEnvironment(AssignPointerMap(DefineFixed(result, rax)));
+      new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax));
+  return MarkAsCall(DefineFixed(result, rax), instr);
 }
@@ -1436,8 +1435,22 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
 
 LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  Abort("Unimplemented: %s", "DoPower");
-  return NULL;
+  ASSERT(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  ASSERT(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), xmm2);
+  LOperand* right = exponent_type.IsDouble() ?
+      UseFixedDouble(instr->right(), xmm1) :
+#ifdef _WIN64
+      UseFixed(instr->right(), rdx);
+#else
+      UseFixed(instr->right(), rdi);
+#endif
+  LPower* result = new LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
 }
@@ -1502,8 +1515,10 @@ LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
 
 LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
-  Abort("Unimplemented: %s", "DoHasInstanceType");
-  return NULL;
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new LHasInstanceType(value));
 }
@@ -1516,8 +1531,9 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
     HHasCachedArrayIndex* instr) {
-  Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
-  return NULL;
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+  return DefineAsRegister(new LHasCachedArrayIndex(value));
 }

Some files were not shown because too many files changed in this diff
