v8: upgrade to v3.19.13

v0.11.3-release
Ben Noordhuis 12 years ago
commit 6dd78074a3
Changed files (changed lines per file in parentheses):

  1. deps/v8/ChangeLog (102)
  2. deps/v8/Makefile.nacl (2)
  3. deps/v8/build/common.gypi (188)
  4. deps/v8/include/v8-profiler.h (24)
  5. deps/v8/include/v8.h (567)
  6. deps/v8/samples/lineprocessor.cc (27)
  7. deps/v8/samples/process.cc (110)
  8. deps/v8/samples/shell.cc (50)
  9. deps/v8/src/accessors.cc (16)
  10. deps/v8/src/api.cc (344)
  11. deps/v8/src/api.h (12)
  12. deps/v8/src/arguments.h (24)
  13. deps/v8/src/arm/assembler-arm.cc (3)
  14. deps/v8/src/arm/builtins-arm.cc (50)
  15. deps/v8/src/arm/code-stubs-arm.cc (361)
  16. deps/v8/src/arm/codegen-arm.cc (44)
  17. deps/v8/src/arm/codegen-arm.h (2)
  18. deps/v8/src/arm/deoptimizer-arm.cc (2)
  19. deps/v8/src/arm/full-codegen-arm.cc (174)
  20. deps/v8/src/arm/ic-arm.cc (23)
  21. deps/v8/src/arm/lithium-arm.cc (206)
  22. deps/v8/src/arm/lithium-arm.h (91)
  23. deps/v8/src/arm/lithium-codegen-arm.cc (698)
  24. deps/v8/src/arm/lithium-codegen-arm.h (12)
  25. deps/v8/src/arm/lithium-gap-resolver-arm.cc (8)
  26. deps/v8/src/arm/macro-assembler-arm.cc (35)
  27. deps/v8/src/arm/macro-assembler-arm.h (11)
  28. deps/v8/src/arm/regexp-macro-assembler-arm.cc (57)
  29. deps/v8/src/arm/regexp-macro-assembler-arm.h (4)
  30. deps/v8/src/arm/stub-cache-arm.cc (200)
  31. deps/v8/src/array.js (188)
  32. deps/v8/src/assert-scope.h (168)
  33. deps/v8/src/ast.cc (91)
  34. deps/v8/src/ast.h (392)
  35. deps/v8/src/atomicops_internals_mips_gcc.h (17)
  36. deps/v8/src/bootstrapper.cc (55)
  37. deps/v8/src/bootstrapper.h (9)
  38. deps/v8/src/builtins.cc (87)
  39. deps/v8/src/builtins.h (2)
  40. deps/v8/src/checks.cc (2)
  41. deps/v8/src/code-stubs-hydrogen.cc (344)
  42. deps/v8/src/code-stubs.cc (103)
  43. deps/v8/src/code-stubs.h (333)
  44. deps/v8/src/codegen.cc (12)
  45. deps/v8/src/codegen.h (12)
  46. deps/v8/src/compiler.cc (38)
  47. deps/v8/src/compiler.h (2)
  48. deps/v8/src/contexts.h (8)
  49. deps/v8/src/cpu-profiler.cc (3)
  50. deps/v8/src/d8-posix.cc (97)
  51. deps/v8/src/d8.cc (236)
  52. deps/v8/src/d8.h (94)
  53. deps/v8/src/debug.cc (49)
  54. deps/v8/src/debug.h (4)
  55. deps/v8/src/deoptimizer.cc (88)
  56. deps/v8/src/deoptimizer.h (39)
  57. deps/v8/src/disassembler.cc (4)
  58. deps/v8/src/elements.cc (8)
  59. deps/v8/src/execution.cc (5)
  60. deps/v8/src/extensions/externalize-string-extension.cc (32)
  61. deps/v8/src/extensions/externalize-string-extension.h (4)
  62. deps/v8/src/extensions/gc-extension.cc (3)
  63. deps/v8/src/extensions/gc-extension.h (2)
  64. deps/v8/src/extensions/statistics-extension.cc (6)
  65. deps/v8/src/extensions/statistics-extension.h (2)
  66. deps/v8/src/factory.cc (54)
  67. deps/v8/src/factory.h (23)
  68. deps/v8/src/flag-definitions.h (20)
  69. deps/v8/src/frames.cc (2)
  70. deps/v8/src/full-codegen.cc (15)
  71. deps/v8/src/full-codegen.h (5)
  72. deps/v8/src/gdb-jit.cc (2)
  73. deps/v8/src/generator.js (19)
  74. deps/v8/src/global-handles.cc (146)
  75. deps/v8/src/global-handles.h (15)
  76. deps/v8/src/handles-inl.h (113)
  77. deps/v8/src/handles.cc (21)
  78. deps/v8/src/handles.h (54)
  79. deps/v8/src/heap-inl.h (64)
  80. deps/v8/src/heap-snapshot-generator.cc (10)
  81. deps/v8/src/heap.cc (293)
  82. deps/v8/src/heap.h (139)
  83. deps/v8/src/hydrogen-environment-liveness.cc (267)
  84. deps/v8/src/hydrogen-environment-liveness.h (94)
  85. deps/v8/src/hydrogen-gvn.cc (855)
  86. deps/v8/src/hydrogen-gvn.h (123)
  87. deps/v8/src/hydrogen-instructions.cc (505)
  88. deps/v8/src/hydrogen-instructions.h (611)
  89. deps/v8/src/hydrogen.cc (2264)
  90. deps/v8/src/hydrogen.h (301)
  91. deps/v8/src/ia32/assembler-ia32-inl.h (7)
  92. deps/v8/src/ia32/assembler-ia32.cc (2)
  93. deps/v8/src/ia32/assembler-ia32.h (2)
  94. deps/v8/src/ia32/builtins-ia32.cc (52)
  95. deps/v8/src/ia32/code-stubs-ia32.cc (397)
  96. deps/v8/src/ia32/codegen-ia32.cc (44)
  97. deps/v8/src/ia32/codegen-ia32.h (2)
  98. deps/v8/src/ia32/deoptimizer-ia32.cc (2)
  99. deps/v8/src/ia32/full-codegen-ia32.cc (170)
  100. deps/v8/src/ia32/ic-ia32.cc (24)

deps/v8/ChangeLog (102)

@@ -1,3 +1,105 @@
2013-06-11: Version 3.19.13

        Performance and stability improvements on all platforms.


2013-06-10: Version 3.19.12

        Fixed arguments array access. (Chromium issue 247303)

        Fixed bug in LookupForWrite. (Chromium issue 242332)

        Performance and stability improvements on all platforms.


2013-06-07: Version 3.19.11

        Performance and stability improvements on all platforms.


2013-06-06: Version 3.19.10

        Performance and stability improvements on all platforms.


2013-06-05: Version 3.19.9

        Implemented Load IC support for loading properties from primitive
        values to avoid perpetual soft deopts. (Chromium issue 242512)

        Implemented freeing of PerThreadAssertData when possible to avoid
        memory leak. (Chromium issue 246567)

        Removed V8_USE_OLD_STYLE_PERSISTENT_HANDLE_VISITORS.

        Performance and stability improvements on all platforms.


2013-06-03: Version 3.19.8

        Fixed bug with inlining 'Array' function. (Chromium issue 244461)

        Fixed initialization of literal objects. (Chromium issue 245424)

        Fixed function name inferred inside closures. (Chromium issue 224884)

        Performance and stability improvements on all platforms.


2013-05-31: Version 3.19.7

        Added support for //# sourceURL similar to deprecated //@ sourceURL one.
        (issue 2702)

        Made sure IfBuilder::Return clears the current block.
        (Chromium issue 243868)

        Fixed two CPU profiler tests on ARM and MIPS simulators.
        (issue 2628)

        Fixed idle incremental GC for large objects.
        (Chromium issue 241815)

        Disabled --optimize-constructed-arrays due to crashes.
        (Chromium issue 244461)

        Performance and stability improvements on all platforms.


2013-05-28: Version 3.19.6

        Fixed IfBuilder::Deopt to clear the current block.
        (Chromium issue 243868)

        Performance and stability improvements on all platforms.


2013-05-27: Version 3.19.5

        Reset regexp parser flag after scanning ahead for capture groups.
        (issue 2690)

        Removed flakiness in test-cpu-profiler/SampleWhenFrameIsNotSetup.
        (issue 2628)

        Performance and stability improvements on all platforms.


2013-05-24: Version 3.19.4

        Fixed edge case in stack trace formatting. (Chromium issue 237617)

        Fixed embedded new-space pointer in LCmpObjectEqAndBranch.
        (Chromium issue 240032)

        Made Object.freeze fast. (issue 1858, Chromium issue 115960)

        Fixed bogus deopt in BuildEmitDeepCopy for holey arrays.
        (Chromium issue 242924)

        Performance and stability improvements on all platforms.


2013-05-22: Version 3.19.3

        Performance and stability improvements on all platforms.

deps/v8/Makefile.nacl (2)

@@ -46,7 +46,7 @@ else
endif
endif
TOOLCHAIN_PATH = ${NACL_SDK_ROOT}/toolchain
TOOLCHAIN_PATH = $(realpath ${NACL_SDK_ROOT}/toolchain)
NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(ARCH), nacl_ia32)

deps/v8/build/common.gypi (188)

@@ -129,22 +129,13 @@
'defines': [
'V8_TARGET_ARCH_ARM',
],
'variables': {
'armsimulator': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: arm" && echo "no" || echo "yes")',
},
'conditions': [
[ 'v8_can_use_unaligned_accesses=="true"', {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=1',
],
}, {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
['armsimulator=="no"', {
'target_conditions': [
['_toolset=="target"', {
'target_conditions': [
['_toolset=="host"', {
'variables': {
'armcompiler': '<!($(echo ${CXX_host:-$(which g++)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")',
},
'conditions': [
['armcompiler=="yes"', {
'conditions': [
[ 'armv7==1', {
'cflags': ['-march=armv7-a',],
@@ -159,9 +150,9 @@
[ 'arm_fpu!="default"', {
'cflags': ['-mfpu=<(arm_fpu)',],
}],
]
],
}],
]
],
}],
[ 'arm_float_abi!="default"', {
'cflags': ['-mfloat-abi=<(arm_float_abi)',],
@@ -172,63 +163,149 @@
[ 'arm_thumb==0', {
'cflags': ['-marm',],
}],
[ 'arm_test=="on"', {
'defines': [
'ARM_TEST',
],
}],
],
}, {
# armcompiler=="no"
'conditions': [
[ 'armv7==1 or armv7=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
'conditions': [
[ 'arm_fpu=="default"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
[ 'arm_fpu=="neon" or arm_neon==1', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
],
}],
[ 'arm_float_abi=="hard"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
],
}],
[ 'arm_float_abi=="softfp" or arm_float_abi=="default"', {
'defines': [
'USE_EABI_HARDFLOAT=0',
],
}],
],
}],
],
'conditions': [
[ 'arm_test=="on"', {
'defines': [
'ARM_TEST',
],
}],
],
}],
['armsimulator=="yes"', {
'defines': [
'ARM_TEST',
],
}], # _toolset=="host"
['_toolset=="target"', {
'variables': {
'armcompiler': '<!($(echo ${CXX_target:-<(CXX)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")',
},
'conditions': [
[ 'armv7==1 or armv7=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
['armcompiler=="yes"', {
'conditions': [
[ 'arm_fpu=="default"', {
[ 'armv7==1', {
'cflags': ['-march=armv7-a',],
}],
[ 'armv7==1 or armv7=="default"', {
'conditions': [
[ 'arm_neon==1', {
'cflags': ['-mfpu=neon',],
},
{
'conditions': [
[ 'arm_fpu!="default"', {
'cflags': ['-mfpu=<(arm_fpu)',],
}],
],
}],
],
}],
[ 'arm_float_abi!="default"', {
'cflags': ['-mfloat-abi=<(arm_float_abi)',],
}],
[ 'arm_thumb==1', {
'cflags': ['-mthumb',],
}],
[ 'arm_thumb==0', {
'cflags': ['-marm',],
}],
[ 'arm_test=="on"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'ARM_TEST',
],
}],
[ 'arm_fpu=="vfpv3-d16"', {
],
}, {
# armcompiler=="no"
'conditions': [
[ 'armv7==1 or armv7=="default"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
'conditions': [
[ 'arm_fpu=="default"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
[ 'arm_fpu=="neon" or arm_neon==1', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
],
}],
[ 'arm_fpu=="vfpv3"', {
[ 'arm_float_abi=="hard"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
'USE_EABI_HARDFLOAT=1',
],
}],
[ 'arm_fpu=="neon" or arm_neon==1', {
[ 'arm_float_abi=="softfp" or arm_float_abi=="default"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
'USE_EABI_HARDFLOAT=0',
],
}],
],
}],
[ 'arm_float_abi=="hard"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
],
}],
[ 'arm_float_abi=="softfp" or arm_float_abi=="default"', {
'defines': [
'USE_EABI_HARDFLOAT=0',
'ARM_TEST',
],
}],
]
}],
],
}], # _toolset=="target"
],
}], # v8_target_arch=="arm"
['v8_target_arch=="ia32"', {
@@ -453,6 +530,15 @@
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
'cflags!': [
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
'-O3',
],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [

deps/v8/include/v8-profiler.h (24)

@@ -184,19 +184,21 @@ class V8EXPORT CpuProfiler {
V8_DEPRECATED(static const CpuProfile* GetProfile(
int index,
Handle<Value> security_token = Handle<Value>()));
/** Returns a profile by index. */
const CpuProfile* GetCpuProfile(
/** Deprecated. Use GetCpuProfile with single parameter. */
V8_DEPRECATED(const CpuProfile* GetCpuProfile(
int index,
Handle<Value> security_token = Handle<Value>());
Handle<Value> security_token));
/** Returns a profile by index. */
const CpuProfile* GetCpuProfile(int index);
/** Deprecated. Use FindProfile instead. */
V8_DEPRECATED(static const CpuProfile* FindProfile(
unsigned uid,
Handle<Value> security_token = Handle<Value>()));
/** Returns a profile by uid. */
const CpuProfile* FindCpuProfile(
V8_DEPRECATED(const CpuProfile* FindCpuProfile(
unsigned uid,
Handle<Value> security_token = Handle<Value>());
Handle<Value> security_token = Handle<Value>()));
/** Deprecated. Use StartCpuProfiling instead. */
V8_DEPRECATED(static void StartProfiling(Handle<String> title,
@@ -218,13 +220,17 @@ class V8EXPORT CpuProfiler {
V8_DEPRECATED(static const CpuProfile* StopProfiling(
Handle<String> title,
Handle<Value> security_token = Handle<Value>()));
/**
* Deprecated. Use StopCpuProfiling with one parameter instead.
*/
V8_DEPRECATED(const CpuProfile* StopCpuProfiling(
Handle<String> title,
Handle<Value> security_token));
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
const CpuProfile* StopCpuProfiling(
Handle<String> title,
Handle<Value> security_token = Handle<Value>());
const CpuProfile* StopCpuProfiling(Handle<String> title);
/** Deprecated. Use DeleteAllCpuProfiles instead. */
V8_DEPRECATED(static void DeleteAllProfiles());
@@ -438,7 +444,7 @@ class V8EXPORT HeapProfiler {
/** Deprecated. Use FindHeapSnapshot instead. */
V8_DEPRECATED(static const HeapSnapshot* FindSnapshot(unsigned uid));
/** Returns a profile by uid. */
const HeapSnapshot* FindHeapSnapshot(unsigned uid);
V8_DEPRECATED(const HeapSnapshot* FindHeapSnapshot(unsigned uid));
/** Deprecated. Use GetObjectId instead. */
V8_DEPRECATED(static SnapshotObjectId GetSnapshotObjectId(
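
For reference, a minimal usage sketch of the surviving, non-deprecated profiler entry points declared above. Obtaining the profiler through v8::Isolate::GetCpuProfiler() and the exact StartCpuProfiling signature are assumptions about this V8 line, not shown in the diff:

    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    v8::CpuProfiler* profiler = isolate->GetCpuProfiler();  // assumed accessor
    v8::Handle<v8::String> title = v8::String::New("startup");
    profiler->StartCpuProfiling(title);
    // ... run some JavaScript ...
    const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
    // Profiles can also be fetched by plain index, with no security token:
    const v8::CpuProfile* first = profiler->GetCpuProfile(0);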

deps/v8/include/v8.h (567)

File diff suppressed because it is too large

deps/v8/samples/lineprocessor.cc (27)

@@ -25,10 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove
#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#include <v8.h>
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -106,8 +102,8 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
v8::Handle<v8::String> ReadFile(const char* name);
v8::Handle<v8::String> ReadLine();
v8::Handle<v8::Value> Print(const v8::Arguments& args);
v8::Handle<v8::Value> ReadLine(const v8::Arguments& args);
void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args);
bool RunCppCycle(v8::Handle<v8::Script> script,
v8::Local<v8::Context> context,
bool report_exceptions);
@@ -130,7 +126,9 @@ void DispatchDebugMessages() {
// think about.
v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::HandleScope handle_scope(isolate);
v8::Context::Scope scope(isolate, debug_message_context);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, debug_message_context);
v8::Context::Scope scope(context);
v8::Debug::ProcessDebugMessages();
}
@@ -220,8 +218,7 @@ int RunMain(int argc, char* argv[]) {
v8::Context::Scope context_scope(context);
#ifdef ENABLE_DEBUGGER_SUPPORT
debug_message_context =
v8::Persistent<v8::Context>::New(isolate, context);
debug_message_context.Reset(isolate, context);
v8::Locker locker(isolate);
@@ -396,7 +393,7 @@ void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
// The callback that is invoked by v8 whenever the JavaScript 'print'
// function is called. Prints its arguments on stdout separated by
// spaces and ending with a newline.
v8::Handle<v8::Value> Print(const v8::Arguments& args) {
void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
bool first = true;
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope(args.GetIsolate());
@@ -411,17 +408,17 @@ v8::Handle<v8::Value> Print(const v8::Arguments& args) {
}
printf("\n");
fflush(stdout);
return v8::Undefined();
}
// The callback that is invoked by v8 whenever the JavaScript 'read_line'
// function is called. Reads a string from standard input and returns.
v8::Handle<v8::Value> ReadLine(const v8::Arguments& args) {
void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() > 0) {
return v8::ThrowException(v8::String::New("Unexpected arguments"));
v8::ThrowException(v8::String::New("Unexpected arguments"));
return;
}
return ReadLine();
args.GetReturnValue().Set(ReadLine());
}
v8::Handle<v8::String> ReadLine() {
@@ -437,7 +434,7 @@ v8::Handle<v8::String> ReadLine() {
}
if (res == NULL) {
v8::Handle<v8::Primitive> t = v8::Undefined();
return v8::Handle<v8::String>(v8::String::Cast(*t));
return v8::Handle<v8::String>::Cast(t);
}
// Remove newline char
for (char* pos = buffer; *pos != '\0'; pos++) {
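
The pattern behind all of these sample edits, as a hedged sketch (Echo is a hypothetical callback, not part of the sample): the old style returned a v8::Handle<v8::Value> from a v8::Arguments callback, while the new style returns void and reports results through args.GetReturnValue(), following a thrown exception with a bare return:

    void Echo(const v8::FunctionCallbackInfo<v8::Value>& args) {
      if (args.Length() < 1) {
        v8::ThrowException(v8::String::New("Expected one argument"));
        return;                            // was: return v8::ThrowException(...)
      }
      args.GetReturnValue().Set(args[0]);  // was: return args[0]
    }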

deps/v8/samples/process.cc (110)

@@ -25,11 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove this
#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
#include <v8.h>
#include <string>
@@ -107,18 +102,21 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
static Handle<ObjectTemplate> MakeMapTemplate(Isolate* isolate);
// Callbacks that access the individual fields of request objects.
static Handle<Value> GetPath(Local<String> name, const AccessorInfo& info);
static Handle<Value> GetReferrer(Local<String> name,
const AccessorInfo& info);
static Handle<Value> GetHost(Local<String> name, const AccessorInfo& info);
static Handle<Value> GetUserAgent(Local<String> name,
const AccessorInfo& info);
static void GetPath(Local<String> name,
const PropertyCallbackInfo<Value>& info);
static void GetReferrer(Local<String> name,
const PropertyCallbackInfo<Value>& info);
static void GetHost(Local<String> name,
const PropertyCallbackInfo<Value>& info);
static void GetUserAgent(Local<String> name,
const PropertyCallbackInfo<Value>& info);
// Callbacks that access maps
static Handle<Value> MapGet(Local<String> name, const AccessorInfo& info);
static Handle<Value> MapSet(Local<String> name,
Local<Value> value,
const AccessorInfo& info);
static void MapGet(Local<String> name,
const PropertyCallbackInfo<Value>& info);
static void MapSet(Local<String> name,
Local<Value> value,
const PropertyCallbackInfo<Value>& info);
// Utility methods for wrapping C++ objects as JavaScript objects,
// and going back again.
@@ -142,13 +140,12 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
// -------------------------
static Handle<Value> LogCallback(const Arguments& args) {
if (args.Length() < 1) return Undefined();
static void LogCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1) return;
HandleScope scope(args.GetIsolate());
Handle<Value> arg = args[0];
String::Utf8Value value(arg);
HttpRequestProcessor::Log(*value);
return Undefined();
}
@@ -168,11 +165,12 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// is what we need for the reference to remain after we return from
// this method. That persistent handle has to be disposed in the
// destructor.
context_.Reset(GetIsolate(), Context::New(GetIsolate(), NULL, global));
v8::Handle<v8::Context> context = Context::New(GetIsolate(), NULL, global);
context_.Reset(GetIsolate(), context);
// Enter the new context so all the following operations take place
// within it.
Context::Scope context_scope(GetIsolate(), context_);
Context::Scope context_scope(context);
// Make the options mapping available within the context
if (!InstallMaps(opts, output))
@@ -185,7 +183,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// The script compiled and ran correctly. Now we fetch out the
// Process function from the global object.
Handle<String> process_name = String::New("Process");
Handle<Value> process_val = context_->Global()->Get(process_name);
Handle<Value> process_val = context->Global()->Get(process_name);
// If there is no Process function, or if it is not a function,
// bail out
@@ -196,7 +194,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// Store the function in a Persistent handle, since we also want
// that to remain after this call returns
process_ = Persistent<Function>::New(GetIsolate(), process_fun);
process_.Reset(GetIsolate(), process_fun);
// All done; all went well
return true;
@@ -239,11 +237,14 @@ bool JsHttpRequestProcessor::InstallMaps(map<string, string>* opts,
// Wrap the map object in a JavaScript wrapper
Handle<Object> opts_obj = WrapMap(opts);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(GetIsolate(), context_);
// Set the options object as a property on the global object.
context_->Global()->Set(String::New("options"), opts_obj);
context->Global()->Set(String::New("options"), opts_obj);
Handle<Object> output_obj = WrapMap(output);
context_->Global()->Set(String::New("output"), output_obj);
context->Global()->Set(String::New("output"), output_obj);
return true;
}
@@ -253,9 +254,12 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
// Create a handle scope to keep the temporary object references.
HandleScope handle_scope(GetIsolate());
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(GetIsolate(), context_);
// Enter this processor's context so all the remaining operations
// take place there
Context::Scope context_scope(GetIsolate(), context_);
Context::Scope context_scope(context);
// Wrap the C++ request object in a JavaScript wrapper
Handle<Object> request_obj = WrapRequest(request);
@@ -267,7 +271,9 @@ bool JsHttpRequestProcessor::Process(HttpRequest* request) {
// and one argument, the request.
const int argc = 1;
Handle<Value> argv[argc] = { request_obj };
Handle<Value> result = process_->Call(context_->Global(), argc, argv);
v8::Local<v8::Function> process =
v8::Local<v8::Function>::New(GetIsolate(), process_);
Handle<Value> result = process->Call(context->Global(), argc, argv);
if (result.IsEmpty()) {
String::Utf8Value error(try_catch.Exception());
Log(*error);
@@ -306,7 +312,7 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// It only has to be created once, which we do on demand.
if (map_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeMapTemplate(GetIsolate());
map_template_ = Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
map_template_.Reset(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ =
Local<ObjectTemplate>::New(GetIsolate(), map_template_);
@@ -346,8 +352,8 @@ string ObjectToString(Local<Value> value) {
}
Handle<Value> JsHttpRequestProcessor::MapGet(Local<String> name,
const AccessorInfo& info) {
void JsHttpRequestProcessor::MapGet(Local<String> name,
const PropertyCallbackInfo<Value>& info) {
// Fetch the map wrapped by this object.
map<string, string>* obj = UnwrapMap(info.Holder());
@@ -358,17 +364,18 @@ Handle<Value> JsHttpRequestProcessor::MapGet(Local<String> name,
map<string, string>::iterator iter = obj->find(key);
// If the key is not present return an empty handle as signal
if (iter == obj->end()) return Handle<Value>();
if (iter == obj->end()) return;
// Otherwise fetch the value and wrap it in a JavaScript string
const string& value = (*iter).second;
return String::New(value.c_str(), static_cast<int>(value.length()));
info.GetReturnValue().Set(
String::New(value.c_str(), static_cast<int>(value.length())));
}
Handle<Value> JsHttpRequestProcessor::MapSet(Local<String> name,
Local<Value> value_obj,
const AccessorInfo& info) {
void JsHttpRequestProcessor::MapSet(Local<String> name,
Local<Value> value_obj,
const PropertyCallbackInfo<Value>& info) {
// Fetch the map wrapped by this object.
map<string, string>* obj = UnwrapMap(info.Holder());
@@ -380,7 +387,7 @@ Handle<Value> JsHttpRequestProcessor::MapSet(Local<String> name,
(*obj)[key] = value;
// Return the value; any non-empty handle will work.
return value_obj;
info.GetReturnValue().Set(value_obj);
}
@@ -413,8 +420,7 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// It only has to be created once, which we do on demand.
if (request_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeRequestTemplate(GetIsolate());
request_template_ =
Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
request_template_.Reset(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ =
Local<ObjectTemplate>::New(GetIsolate(), request_template_);
@@ -448,8 +454,8 @@ HttpRequest* JsHttpRequestProcessor::UnwrapRequest(Handle<Object> obj) {
}
Handle<Value> JsHttpRequestProcessor::GetPath(Local<String> name,
const AccessorInfo& info) {
void JsHttpRequestProcessor::GetPath(Local<String> name,
const PropertyCallbackInfo<Value>& info) {
// Extract the C++ request object from the JavaScript wrapper.
HttpRequest* request = UnwrapRequest(info.Holder());
@@ -457,31 +463,37 @@ Handle<Value> JsHttpRequestProcessor::GetPath(Local<String> name,
const string& path = request->Path();
// Wrap the result in a JavaScript string and return it.
return String::New(path.c_str(), static_cast<int>(path.length()));
info.GetReturnValue().Set(
String::New(path.c_str(), static_cast<int>(path.length())));
}
Handle<Value> JsHttpRequestProcessor::GetReferrer(Local<String> name,
const AccessorInfo& info) {
void JsHttpRequestProcessor::GetReferrer(
Local<String> name,
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Referrer();
return String::New(path.c_str(), static_cast<int>(path.length()));
info.GetReturnValue().Set(
String::New(path.c_str(), static_cast<int>(path.length())));
}
Handle<Value> JsHttpRequestProcessor::GetHost(Local<String> name,
const AccessorInfo& info) {
void JsHttpRequestProcessor::GetHost(Local<String> name,
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->Host();
return String::New(path.c_str(), static_cast<int>(path.length()));
info.GetReturnValue().Set(
String::New(path.c_str(), static_cast<int>(path.length())));
}
Handle<Value> JsHttpRequestProcessor::GetUserAgent(Local<String> name,
const AccessorInfo& info) {
void JsHttpRequestProcessor::GetUserAgent(
Local<String> name,
const PropertyCallbackInfo<Value>& info) {
HttpRequest* request = UnwrapRequest(info.Holder());
const string& path = request->UserAgent();
return String::New(path.c_str(), static_cast<int>(path.length()));
info.GetReturnValue().Set(
String::New(path.c_str(), static_cast<int>(path.length())));
}
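
The persistent-handle pattern process.cc now follows, as a minimal sketch (the Processor class here is hypothetical): Persistent<T>::New() is gone, so a persistent is populated with Reset() and must be turned back into a Local explicitly before it can be used:

    class Processor {
     public:
      void Init(v8::Isolate* isolate) {
        v8::HandleScope scope(isolate);
        v8::Handle<v8::Context> context = v8::Context::New(isolate);
        context_.Reset(isolate, context);  // was: Persistent<Context>::New(...)
      }
      void Run(v8::Isolate* isolate) {
        v8::HandleScope scope(isolate);
        v8::Local<v8::Context> context =
            v8::Local<v8::Context>::New(isolate, context_);  // explicit Local
        v8::Context::Scope context_scope(context);  // was: (isolate, context_)
        // ... evaluate scripts inside the context ...
      }
     private:
      v8::Persistent<v8::Context> context_;
    };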

deps/v8/samples/shell.cc (50)

@@ -25,11 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove this
#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
#include <v8.h>
#include <assert.h>
#include <fcntl.h>
@@ -58,11 +53,11 @@ bool ExecuteString(v8::Isolate* isolate,
v8::Handle<v8::Value> name,
bool print_result,
bool report_exceptions);
v8::Handle<v8::Value> Print(const v8::Arguments& args);
v8::Handle<v8::Value> Read(const v8::Arguments& args);
v8::Handle<v8::Value> Load(const v8::Arguments& args);
v8::Handle<v8::Value> Quit(const v8::Arguments& args);
v8::Handle<v8::Value> Version(const v8::Arguments& args);
void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
v8::Handle<v8::String> ReadFile(const char* name);
void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
@@ -121,7 +116,7 @@ v8::Handle<v8::Context> CreateShellContext(v8::Isolate* isolate) {
// The callback that is invoked by v8 whenever the JavaScript 'print'
// function is called. Prints its arguments on stdout separated by
// spaces and ending with a newline.
v8::Handle<v8::Value> Print(const v8::Arguments& args) {
void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
bool first = true;
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope(args.GetIsolate());
@@ -136,70 +131,73 @@ v8::Handle<v8::Value> Print(const v8::Arguments& args) {
}
printf("\n");
fflush(stdout);
return v8::Undefined();
}
// The callback that is invoked by v8 whenever the JavaScript 'read'
// function is called. This function loads the content of the file named in
// the argument into a JavaScript string.
v8::Handle<v8::Value> Read(const v8::Arguments& args) {
void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
return v8::ThrowException(v8::String::New("Bad parameters"));
v8::ThrowException(v8::String::New("Bad parameters"));
return;
}
v8::String::Utf8Value file(args[0]);
if (*file == NULL) {
return v8::ThrowException(v8::String::New("Error loading file"));
v8::ThrowException(v8::String::New("Error loading file"));
return;
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
return v8::ThrowException(v8::String::New("Error loading file"));
v8::ThrowException(v8::String::New("Error loading file"));
return;
}
return source;
args.GetReturnValue().Set(source);
}
// The callback that is invoked by v8 whenever the JavaScript 'load'
// function is called. Loads, compiles and executes its argument
// JavaScript file.
v8::Handle<v8::Value> Load(const v8::Arguments& args) {
void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope(args.GetIsolate());
v8::String::Utf8Value file(args[i]);
if (*file == NULL) {
return v8::ThrowException(v8::String::New("Error loading file"));
v8::ThrowException(v8::String::New("Error loading file"));
return;
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
return v8::ThrowException(v8::String::New("Error loading file"));
v8::ThrowException(v8::String::New("Error loading file"));
return;
}
if (!ExecuteString(args.GetIsolate(),
source,
v8::String::New(*file),
false,
false)) {
return v8::ThrowException(v8::String::New("Error executing file"));
v8::ThrowException(v8::String::New("Error executing file"));
return;
}
}
return v8::Undefined();
}
// The callback that is invoked by v8 whenever the JavaScript 'quit'
// function is called. Quits.
v8::Handle<v8::Value> Quit(const v8::Arguments& args) {
void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
// If not arguments are given args[0] will yield undefined which
// converts to the integer value 0.
int exit_code = args[0]->Int32Value();
fflush(stdout);
fflush(stderr);
exit(exit_code);
return v8::Undefined();
}
v8::Handle<v8::Value> Version(const v8::Arguments& args) {
return v8::String::New(v8::V8::GetVersion());
void Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(v8::String::New(v8::V8::GetVersion()));
}

deps/v8/src/accessors.cc (16)

@@ -687,7 +687,7 @@ const AccessorDescriptor Accessors::FunctionArguments = {
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
: frame_iterator_(isolate),
functions_(2),
index_(0) {
@@ -734,13 +734,13 @@ class FrameFunctionIterator {
MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
AssertNoAllocation no_alloc;
DisallowHeapAllocation no_allocation;
JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
if (holder->shared()->native()) return isolate->heap()->null_value();
Handle<JSFunction> function(holder, isolate);
FrameFunctionIterator it(isolate, no_alloc);
FrameFunctionIterator it(isolate, no_allocation);
// Find the function from the frames.
if (!it.Find(*function)) {
@@ -793,9 +793,9 @@ const AccessorDescriptor Accessors::FunctionCaller = {
// Accessors::MakeModuleExport
//
static v8::Handle<v8::Value> ModuleGetExport(
static void ModuleGetExport(
v8::Local<v8::String> property,
const v8::AccessorInfo& info) {
const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
@@ -807,16 +807,16 @@ static v8::Handle<v8::Value> ModuleGetExport(
isolate->ScheduleThrow(
*isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1)));
return v8::Handle<v8::Value>();
return;
}
return v8::Utils::ToLocal(Handle<Object>(value, isolate));
info.GetReturnValue().Set(v8::Utils::ToLocal(Handle<Object>(value, isolate)));
}
static void ModuleSetExport(
v8::Local<v8::String> property,
v8::Local<v8::Value> value,
const v8::AccessorInfo& info) {
const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
ASSERT(context->IsModuleContext());
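
The same migration applied to accessors, as a hedged sketch (GetAnswer and the template setup are hypothetical; the SetAccessor overload for the new callback type is assumed to exist in this V8 line): a getter now takes a PropertyCallbackInfo<Value> and returns void, setting its result on info.GetReturnValue(), and returning without setting anything plays the role of the old empty handle:

    void GetAnswer(v8::Local<v8::String> name,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(v8::Integer::New(42));
    }

    // Registration, e.g. on an ObjectTemplate:
    templ->SetAccessor(v8::String::New("answer"), GetAnswer);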

deps/v8/src/api.cc (344)

@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#include "api.h"
#include <string.h> // For memcpy, strlen.
@@ -35,6 +32,7 @@
#include "../include/v8-debug.h"
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
#include "assert-scope.h"
#include "bootstrapper.h"
#include "code-stubs.h"
#include "compiler.h"
@@ -625,31 +623,22 @@ i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
}
void V8::MakeWeak(i::Isolate* isolate,
i::Object** object,
void V8::MakeWeak(i::Object** object,
void* parameters,
RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "MakeWeak");
isolate->global_handles()->MakeWeak(object,
parameters,
weak_reference_callback,
near_death_callback);
RevivableCallback weak_reference_callback) {
i::GlobalHandles::MakeWeak(object,
parameters,
weak_reference_callback);
}
void V8::ClearWeak(i::Isolate* isolate, i::Object** obj) {
LOG_API(isolate, "ClearWeak");
isolate->global_handles()->ClearWeakness(obj);
void V8::ClearWeak(i::Object** obj) {
i::GlobalHandles::ClearWeakness(obj);
}
void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "DisposeGlobal");
if (!isolate->IsInitialized()) return;
isolate->global_handles()->Destroy(obj);
void V8::DisposeGlobal(i::Object** obj) {
i::GlobalHandles::Destroy(obj);
}
// --- H a n d l e s ---
@@ -686,19 +675,7 @@ HandleScope::~HandleScope() {
void HandleScope::Leave() {
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
current->level--;
ASSERT(current->level >= 0);
current->next = prev_next_;
if (current->limit != prev_limit_) {
current->limit = prev_limit_;
i::HandleScope::DeleteExtensions(isolate_);
}
#ifdef ENABLE_EXTRA_CHECKS
i::HandleScope::ZapRange(prev_next_, prev_limit_);
#endif
return i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
}
@@ -909,7 +886,8 @@ void NeanderArray::add(i::Handle<i::Object> value) {
int length = this->length();
int size = obj_.size();
if (length == size - 1) {
i::Handle<i::FixedArray> new_elms = FACTORY->NewFixedArray(2 * size);
i::Factory* factory = i::Isolate::Current()->factory();
i::Handle<i::FixedArray> new_elms = factory->NewFixedArray(2 * size);
for (int i = 0; i < length; i++)
new_elms->set(i + 1, get(i));
obj_.value()->set_elements(*new_elms);
@@ -985,7 +963,7 @@ void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
template<typename Callback>
static Local<FunctionTemplate> FunctionTemplateNew(
Callback callback_in,
Callback callback,
v8::Handle<Value> data,
v8::Handle<Signature> signature,
int length) {
@@ -1001,10 +979,8 @@ static Local<FunctionTemplate> FunctionTemplateNew(
int next_serial_number = isolate->next_serial_number();
isolate->set_next_serial_number(next_serial_number + 1);
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback_in != 0) {
if (callback != 0) {
if (data.IsEmpty()) data = v8::Undefined();
InvocationCallback callback =
i::CallbackTable::Register(isolate, callback_in);
Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
obj->set_length(length);
@@ -1228,7 +1204,7 @@ int TypeSwitch::match(v8::Handle<Value> value) {
template<typename Callback>
static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template,
Callback callback,
Callback callback_in,
v8::Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(function_template)->GetIsolate();
if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return;
@@ -1238,6 +1214,8 @@ static void FunctionTemplateSetCallHandler(FunctionTemplate* function_template,
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
FunctionCallback callback =
i::CallbackTable::Register(isolate, callback_in);
SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
@@ -1284,9 +1262,11 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::ExecutableAccessorInfo> obj =
isolate->factory()->NewExecutableAccessorInfo();
AccessorGetter getter = i::CallbackTable::Register(isolate, getter_in);
AccessorGetterCallback getter =
i::CallbackTable::Register(isolate, getter_in);
SET_FIELD_WRAPPED(obj, set_getter, getter);
AccessorSetter setter = i::CallbackTable::Register(isolate, setter_in);
AccessorSetterCallback setter =
i::CallbackTable::Register(isolate, setter_in);
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
obj->set_data(*Utils::OpenHandle(*data));
@@ -1389,16 +1369,19 @@ static void SetNamedInstancePropertyHandler(
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
NamedPropertyGetter getter = i::CallbackTable::Register(isolate, getter_in);
NamedPropertyGetterCallback getter =
i::CallbackTable::Register(isolate, getter_in);
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
NamedPropertySetter setter = i::CallbackTable::Register(isolate, setter_in);
NamedPropertySetterCallback setter =
i::CallbackTable::Register(isolate, setter_in);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
NamedPropertyQuery query = i::CallbackTable::Register(isolate, query_in);
NamedPropertyQueryCallback query =
i::CallbackTable::Register(isolate, query_in);
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
NamedPropertyDeleter remover =
NamedPropertyDeleterCallback remover =
i::CallbackTable::Register(isolate, remover_in);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
NamedPropertyEnumerator enumerator =
NamedPropertyEnumeratorCallback enumerator =
i::CallbackTable::Register(isolate, enumerator_in);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
@@ -1434,18 +1417,19 @@ static void SetIndexedInstancePropertyHandler(
i::Handle<i::InterceptorInfo> obj =
i::Handle<i::InterceptorInfo>::cast(struct_obj);
IndexedPropertyGetter getter =
IndexedPropertyGetterCallback getter =
i::CallbackTable::Register(isolate, getter_in);
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
IndexedPropertySetter setter =
IndexedPropertySetterCallback setter =
i::CallbackTable::Register(isolate, setter_in);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
IndexedPropertyQuery query = i::CallbackTable::Register(isolate, query_in);
IndexedPropertyQueryCallback query =
i::CallbackTable::Register(isolate, query_in);
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
IndexedPropertyDeleter remover =
IndexedPropertyDeleterCallback remover =
i::CallbackTable::Register(isolate, remover_in);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
IndexedPropertyEnumerator enumerator =
IndexedPropertyEnumeratorCallback enumerator =
i::CallbackTable::Register(isolate, enumerator_in);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
@@ -1471,7 +1455,7 @@ static void SetInstanceCallAsFunctionHandler(
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
InvocationCallback callback =
FunctionCallback callback =
i::CallbackTable::Register(isolate, callback_in);
SET_FIELD_WRAPPED(obj, set_callback, callback);
if (data.IsEmpty()) data = v8::Undefined();
@@ -3879,7 +3863,8 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate);
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
@@ -3893,7 +3878,8 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj);
i::Handle<i::String> key_string =
isolate->factory()->InternalizeString(key_obj);
self->DeleteHiddenProperty(*key_string);
return true;
}
@@ -4317,6 +4303,124 @@ bool String::IsOneByte() const {
return str->HasOnlyOneByteChars();
}
// Helpers for ContainsOnlyOneByteHelper
template<size_t size> struct OneByteMask;
template<> struct OneByteMask<4> {
static const uint32_t value = 0xFF00FF00;
};
template<> struct OneByteMask<8> {
static const uint64_t value = V8_2PART_UINT64_C(0xFF00FF00, FF00FF00);
};
static const uintptr_t kOneByteMask = OneByteMask<sizeof(uintptr_t)>::value;
static const uintptr_t kAlignmentMask = sizeof(uintptr_t) - 1;
static inline bool Unaligned(const uint16_t* chars) {
return reinterpret_cast<const uintptr_t>(chars) & kAlignmentMask;
}
static inline const uint16_t* Align(const uint16_t* chars) {
return reinterpret_cast<uint16_t*>(
reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
}
class ContainsOnlyOneByteHelper {
public:
ContainsOnlyOneByteHelper() : is_one_byte_(true) {}
bool Check(i::String* string) {
i::ConsString* cons_string = i::String::VisitFlat(this, string, 0);
if (cons_string == NULL) return is_one_byte_;
return CheckCons(cons_string);
}
void VisitOneByteString(const uint8_t* chars, int length) {
// Nothing to do.
}
void VisitTwoByteString(const uint16_t* chars, int length) {
// Accumulated bits.
uintptr_t acc = 0;
// Align to uintptr_t.
const uint16_t* end = chars + length;
while (Unaligned(chars) && chars != end) {
acc |= *chars++;
}
// Read word aligned in blocks,
// checking the return value at the end of each block.
const uint16_t* aligned_end = Align(end);
const int increment = sizeof(uintptr_t)/sizeof(uint16_t);
const int inner_loops = 16;
while (chars + inner_loops*increment < aligned_end) {
for (int i = 0; i < inner_loops; i++) {
acc |= *reinterpret_cast<const uintptr_t*>(chars);
chars += increment;
}
// Check for early return.
if ((acc & kOneByteMask) != 0) {
is_one_byte_ = false;
return;
}
}
// Read the rest.
while (chars != end) {
acc |= *chars++;
}
// Check result.
if ((acc & kOneByteMask) != 0) is_one_byte_ = false;
}
private:
bool CheckCons(i::ConsString* cons_string) {
while (true) {
// Check left side if flat.
i::String* left = cons_string->first();
i::ConsString* left_as_cons =
i::String::VisitFlat(this, left, 0);
if (!is_one_byte_) return false;
// Check right side if flat.
i::String* right = cons_string->second();
i::ConsString* right_as_cons =
i::String::VisitFlat(this, right, 0);
if (!is_one_byte_) return false;
// Standard recurse/iterate trick.
if (left_as_cons != NULL && right_as_cons != NULL) {
if (left->length() < right->length()) {
CheckCons(left_as_cons);
cons_string = right_as_cons;
} else {
CheckCons(right_as_cons);
cons_string = left_as_cons;
}
// Check fast return.
if (!is_one_byte_) return false;
continue;
}
// Descend left in place.
if (left_as_cons != NULL) {
cons_string = left_as_cons;
continue;
}
// Descend right in place.
if (right_as_cons != NULL) {
cons_string = right_as_cons;
continue;
}
// Terminate.
break;
}
return is_one_byte_;
}
bool is_one_byte_;
DISALLOW_COPY_AND_ASSIGN(ContainsOnlyOneByteHelper);
};
bool String::ContainsOnlyOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(),
"v8::String::ContainsOnlyOneByte()")) {
return false;
}
if (str->HasOnlyOneByteChars()) return true;
ContainsOnlyOneByteHelper helper;
return helper.Check(*str);
}
class Utf8LengthHelper : public i::AllStatic {
public:
@@ -5056,6 +5160,15 @@ void v8::V8::SetJitCodeEventHandler(
isolate->logger()->SetCodeEventHandler(options, event_handler);
}
void v8::V8::SetArrayBufferAllocator(
ArrayBuffer::Allocator* allocator) {
if (!ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
"v8::V8::SetArrayBufferAllocator",
"ArrayBufferAllocator might only be set once"))
return;
i::V8::SetArrayBufferAllocator(allocator);
}
bool v8::V8::Dispose() {
i::Isolate* isolate = i::Isolate::Current();
@@ -5107,8 +5220,9 @@ class VisitorAdapter : public i::ObjectVisitor {
UNREACHABLE();
}
virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)),
class_id);
Value* value = ToApi<Value>(i::Handle<i::Object>(p));
visitor_->VisitPersistentHandle(
reinterpret_cast<Persistent<Value>*>(&value), class_id);
}
private:
PersistentHandleVisitor* visitor_;
@@ -5119,7 +5233,7 @@ void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
i::AssertNoAllocation no_allocation;
i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
@@ -5132,7 +5246,7 @@ void v8::V8::VisitHandlesForPartialDependence(
ASSERT(isolate == i::Isolate::Current());
IsDeadCheck(isolate, "v8::V8::VisitHandlesForPartialDependence");
i::AssertNoAllocation no_allocation;
i::DisallowHeapAllocation no_allocation;
VisitorAdapter visitor_adapter(visitor);
isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds(
@@ -5914,13 +6028,14 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
i::Isolate* isolate = i::Isolate::Current();
uint8_t flags_buf[3];
int num_flags = 0;
if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
return FACTORY->InternalizeOneByteString(
return isolate->factory()->InternalizeOneByteString(
i::Vector<const uint8_t>(flags_buf, num_flags));
}
@@ -6019,19 +6134,48 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
}
size_t v8::ArrayBuffer::ByteLength() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
bool v8::ArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_length()->Number());
ApiCheck(!obj->is_external(),
"v8::ArrayBuffer::Externalize",
"ArrayBuffer already externalized");
obj->set_is_external(true);
size_t byte_length = static_cast<size_t>(obj->byte_length()->Number());
Contents contents;
contents.data_ = obj->backing_store();
contents.byte_length_ = byte_length;
return contents;
}
void* v8::ArrayBuffer::Data() const {
void v8::ArrayBuffer::Neuter() {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
ApiCheck(obj->is_external(),
"v8::ArrayBuffer::Neuter",
"Only externalized ArrayBuffers can be neutered");
LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
ENTER_V8(isolate);
for (i::Handle<i::Object> array_obj(obj->weak_first_array(), isolate);
*array_obj != i::Smi::FromInt(0);) {
i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*array_obj));
typed_array->Neuter();
array_obj = i::handle(typed_array->weak_next(), isolate);
}
obj->Neuter();
}
size_t v8::ArrayBuffer::ByteLength() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ArrayBuffer::Data()")) return 0;
if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
return obj->backing_store();
return static_cast<size_t>(obj->byte_length()->Number());
}
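
A usage sketch for the externalization API added in this hunk (the Contents::Data() accessor is an assumption based on the data_ field set above): Externalize() hands the backing store to the embedder, and only an externalized buffer may be neutered:

    v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(1024);
    v8::ArrayBuffer::Contents contents = ab->Externalize();  // checks !is_external()
    ab->Neuter();  // detaches the buffer and walks its weak list of typed arrays
    void* backing = contents.Data();  // assumed accessor for contents.data_
    // The embedder must eventually release 'backing' through the allocator it
    // registered with v8::V8::SetArrayBufferAllocator().
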
@@ -6054,7 +6198,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) {
ENTER_V8(isolate);
i::Handle<i::JSArrayBuffer> obj =
isolate->factory()->NewJSArrayBuffer();
i::Runtime::SetupArrayBuffer(isolate, obj, data, byte_length);
i::Runtime::SetupArrayBuffer(isolate, obj, true, data, byte_length);
return Utils::ToLocal(obj);
}
@@ -6121,6 +6265,9 @@ i::Handle<i::JSTypedArray> NewTypedArray(
obj->set_buffer(*buffer);
obj->set_weak_next(buffer->weak_first_array());
buffer->set_weak_first_array(*obj);
i::Handle<i::Object> byte_offset_object = isolate->factory()->NewNumber(
static_cast<double>(byte_offset));
obj->set_byte_offset(*byte_offset_object);
@@ -6265,14 +6412,12 @@ Local<Integer> v8::Integer::NewFromUnsigned(uint32_t value, Isolate* isolate) {
#ifdef DEBUG
v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate)
: isolate_(isolate),
last_state_(i::EnterAllocationScope(
reinterpret_cast<i::Isolate*>(isolate), false)) {
v8::AssertNoGCScope::AssertNoGCScope(v8::Isolate* isolate) {
disallow_heap_allocation_ = new i::DisallowHeapAllocation();
}
v8::AssertNoGCScope::~AssertNoGCScope() {
i::ExitAllocationScope(reinterpret_cast<i::Isolate*>(isolate_), last_state_);
delete static_cast<i::DisallowHeapAllocation*>(disallow_heap_allocation_);
}
#endif
@@ -6359,42 +6504,6 @@ void V8::SetFailedAccessCheckCallbackFunction(
}
void V8::AddObjectGroup(Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
isolate->global_handles()->AddObjectGroup(
reinterpret_cast<i::Object***>(objects), length, info);
}
void V8::AddObjectGroup(Isolate* exported_isolate,
Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
ASSERT(isolate == i::Isolate::Current());
if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
isolate->global_handles()->AddObjectGroup(
reinterpret_cast<i::Object***>(objects), length, info);
}
void V8::AddImplicitReferences(Persistent<Object> parent,
Persistent<Value>* children,
size_t length) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return;
STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
isolate->global_handles()->AddImplicitReferences(
i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
reinterpret_cast<i::Object***>(children), length);
}
intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
@@ -7228,6 +7337,12 @@ const CpuProfile* CpuProfiler::GetCpuProfile(int index,
}
const CpuProfile* CpuProfiler::GetCpuProfile(int index) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->GetProfile(NULL, index));
}
const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
Handle<Value> security_token) {
i::Isolate* isolate = i::Isolate::Current();
@@ -7287,6 +7402,14 @@ const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title,
}
const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
return reinterpret_cast<const CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
NULL,
*Utils::OpenHandle(*title)));
}
void CpuProfiler::DeleteAllProfiles() {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles");
@@ -7826,8 +7949,7 @@ DeferredHandles* HandleScopeImplementer::Detach(Object** prev_limit) {
while (!blocks_.is_empty()) {
Object** block_start = blocks_.last();
Object** block_limit = &block_start[kHandleBlockSize];
// We should not need to check for NoHandleAllocation here. Assert
// this.
// We should not need to check for SealHandleScope here. Assert this.
ASSERT(prev_limit == block_limit ||
!(block_start <= prev_limit && prev_limit <= block_limit));
if (prev_limit == block_limit) break;
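
As an aside on the ContainsOnlyOneByteHelper added to api.cc above, a standalone sketch of the bit trick it relies on (scalar variant; the real helper also reads word-sized blocks and picks a 32- or 64-bit mask by pointer size):

    #include <stdint.h>
    #include <stddef.h>

    static const uint32_t kOneByteMask32 = 0xFF00FF00;  // high byte of each lane

    bool OnlyOneByte(const uint16_t* chars, size_t length) {
      uint32_t acc = 0;
      // OR-ing accumulates every bit set anywhere in the string...
      for (size_t i = 0; i < length; i++) acc |= chars[i];
      // ...so one AND at the end detects any code unit above 0xFF.
      return (acc & kOneByteMask32) == 0;
    }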

deps/v8/src/api.h (12)

@@ -126,8 +126,9 @@ template <typename T> inline T ToCData(v8::internal::Object* obj) {
template <typename T>
inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return FACTORY->NewForeign(
return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
@@ -636,8 +637,13 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
internal::Object** block_start = blocks_.last();
internal::Object** block_limit = block_start + kHandleBlockSize;
#ifdef DEBUG
// NoHandleAllocation may make the prev_limit to point inside the block.
if (block_start <= prev_limit && prev_limit <= block_limit) break;
// SealHandleScope may make the prev_limit to point inside the block.
if (block_start <= prev_limit && prev_limit <= block_limit) {
#ifdef ENABLE_EXTRA_CHECKS
internal::HandleScope::ZapRange(prev_limit, block_limit);
#endif
break;
}
#else
if (prev_limit == block_limit) break;
#endif

deps/v8/src/arguments.h (24)

@@ -52,7 +52,8 @@ class Arguments BASE_EMBEDDED {
Object*& operator[] (int index) {
ASSERT(0 <= index && index < length_);
return arguments_[-index];
return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) -
index * kPointerSize));
}
template <class S> Handle<S> at(int index) {
@@ -152,8 +153,7 @@ class Arguments BASE_EMBEDDED {
// TODO(dcarney): Remove this class when old callbacks are gone.
class CallbackTable {
public:
// TODO(dcarney): Flip this when it makes sense for performance.
static const bool kStoreVoidFunctions = true;
static const bool kStoreVoidFunctions = false;
static inline bool ReturnsVoid(Isolate* isolate, void* function) {
CallbackTable* table = isolate->callback_table();
bool contains =
@@ -171,13 +171,13 @@ class CallbackTable {
}
#define WRITE_REGISTER(OldFunction, NewFunction) \
static OldFunction Register(Isolate* isolate, NewFunction f) { \
InsertCallback(isolate, FunctionToVoidPtr(f), true); \
return reinterpret_cast<OldFunction>(f); \
static NewFunction Register(Isolate* isolate, OldFunction f) { \
InsertCallback(isolate, FunctionToVoidPtr(f), false); \
return reinterpret_cast<NewFunction>(f); \
} \
\
static OldFunction Register(Isolate* isolate, OldFunction f) { \
InsertCallback(isolate, FunctionToVoidPtr(f), false); \
static NewFunction Register(Isolate* isolate, NewFunction f) { \
InsertCallback(isolate, FunctionToVoidPtr(f), true); \
return f; \
}
FOR_EACH_CALLBACK_TABLE_MAPPING(WRITE_REGISTER)
@@ -254,6 +254,10 @@ class PropertyCallbackArguments
values[T::kHolderIndex] = holder;
values[T::kDataIndex] = data;
values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
// Here the hole is set as default value.
// It cannot escape into js as it's remove in Call below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
ASSERT(values[T::kHolderIndex]->IsHeapObject());
ASSERT(values[T::kIsolateIndex]->IsSmi());
@@ -314,6 +318,10 @@ class FunctionCallbackArguments
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// The hole is set as the default return value here.
// It cannot escape into JS, as it is removed in Call below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
ASSERT(values[T::kCalleeIndex]->IsJSFunction());
ASSERT(values[T::kHolderIndex]->IsHeapObject());

3
deps/v8/src/arm/assembler-arm.cc

@@ -308,7 +308,7 @@ Operand::Operand(Handle<Object> handle) {
#ifdef DEBUG
Isolate* isolate = Isolate::Current();
#endif
ALLOW_HANDLE_DEREF(isolate, "using and embedding raw address");
AllowDeferredHandleDereference using_raw_address;
rm_ = no_reg;
// Verify that all Objects referred to by code are NOT in new space.
Object* obj = *handle;
@@ -1368,6 +1368,7 @@ void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
void Assembler::sdiv(Register dst, Register src1, Register src2,
Condition cond) {
ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
ASSERT(IsEnabled(SUDIV));
emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
src2.code()*B8 | B4 | src1.code());
}
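sdiv now asserts IsEnabled(SUDIV) before emitting. A hedged sketch of the guarded-emission pattern call sites use, mirroring the CpuFeatureScope usage visible further down in this diff:

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm, SUDIV);  // makes IsEnabled(SUDIV) hold
    __ sdiv(r0, r0, r1);                 // hardware integer division
  } else {
    // fall back to a VFP-based or runtime division path
  }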

50
deps/v8/src/arm/builtins-arm.cc

@@ -480,15 +480,20 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code if the specialized code cannot handle the
// construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->InternalArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
if (FLAG_optimize_constructed_arrays) {
// Tail call a stub.
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code if the specialized code cannot handle the
// construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->InternalArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
}
@@ -513,15 +518,24 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code if the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->ArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
if (FLAG_optimize_constructed_arrays) {
// Tail call a stub.
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ mov(r2, Operand(undefined_sentinel));
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, &generic_array_code);
// Jump to the generic array code if the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->ArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
}

361
deps/v8/src/arm/code-stubs-arm.cc

@@ -30,7 +30,6 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
@@ -45,7 +44,6 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
static Register registers[] = { r3, r2, r1 };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}
@@ -57,7 +55,6 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
static Register registers[] = { r3, r2, r1, r0 };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
}
@@ -80,7 +77,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -91,7 +87,6 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { r1 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -127,8 +122,8 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
descriptor->miss_handler_ =
ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate);
descriptor->SetMissHandler(
ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}
@@ -150,7 +145,29 @@ static void InitializeArrayConstructorDescriptor(
descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ArrayConstructor_StubFailure);
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
}
static void InitializeInternalArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// r0 -- number of arguments
// r1 -- constructor function
static Register registers[] = { r1 };
descriptor->register_param_count_ = 1;
if (constant_stack_parameter_count != 0) {
// The stack parameter count covers the constructor pointer and one argument.
descriptor->stack_parameter_count_ = &r0;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}
@@ -175,6 +192,40 @@ void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
void ToBooleanStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { r0 };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
}
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -218,7 +269,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
for (int i = 0; i < param_count; ++i) {
__ push(descriptor->register_params_[i]);
}
ExternalReference miss = descriptor->miss_handler_;
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
}
@@ -649,7 +700,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ cmp(r0, r1);
__ b(ne, &not_identical);
// Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
@@ -1207,116 +1258,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
Label patch;
const Register map = r9.is(tos_) ? r7 : r9;
// undefined -> false.
CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
// Boolean -> its value.
CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
// 'null' -> false.
CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
if (types_.Contains(SMI)) {
// Smis: 0 -> false, all other -> true
__ SmiTst(tos_);
// tos_ contains the correct return value already
__ Ret(eq);
} else if (types_.NeedsMap()) {
// If we need a map later and have a Smi -> patch.
__ JumpIfSmi(tos_, &patch);
}
if (types_.NeedsMap()) {
__ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
if (types_.CanBeUndetectable()) {
__ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
// Undetectable -> false.
__ mov(tos_, Operand::Zero(), LeaveCC, ne);
__ Ret(ne);
}
}
if (types_.Contains(SPEC_OBJECT)) {
// Spec object -> true.
__ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
// tos_ contains the correct non-zero return value already.
__ Ret(ge);
}
if (types_.Contains(STRING)) {
// String value -> false iff empty.
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
__ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
__ Ret(lt); // the string length is OK as the return value
}
if (types_.Contains(HEAP_NUMBER)) {
// Heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
__ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non zero value by default.
// Hence we only need to overwrite "tos_" with zero to return false for
// FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
__ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
__ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
__ Ret();
__ bind(&not_heap_number);
}
__ bind(&patch);
GenerateTypeTransition(masm);
}
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
Type type,
Heap::RootListIndex value,
bool result) {
if (types_.Contains(type)) {
// If we see an expected oddball, return its ToBoolean value tos_.
__ LoadRoot(ip, value);
__ cmp(tos_, ip);
// The value of a root is never NULL, so we can avoid loading a non-null
// value into tos_ when we want to return 'true'.
if (!result) {
__ mov(tos_, Operand::Zero(), LeaveCC, eq);
}
__ Ret(eq);
}
}
void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
if (!tos_.is(r3)) {
__ mov(r3, Operand(tos_));
}
__ mov(r2, Operand(Smi::FromInt(tos_.code())));
__ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
__ Push(r3, r2, r1);
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
3,
1);
}
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -1766,6 +1707,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
__ Ret();
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm, SUDIV);
Label result_not_zero;
__ bind(&div_with_sdiv);
@@ -1822,6 +1764,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
__ Ret();
if (CpuFeatures::IsSupported(SUDIV)) {
CpuFeatureScope scope(masm, SUDIV);
__ bind(&modulo_with_sdiv);
__ mov(scratch2, right);
// Perform modulus with sdiv and mls.
@@ -2130,7 +2073,14 @@ void BinaryOpStub_GenerateSmiCode(
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label not_smis, call_runtime;
Label right_arg_changed, call_runtime;
if (op_ == Token::MOD && has_fixed_right_arg_) {
// It is guaranteed that the value will fit into a Smi, because if it
// didn't, we wouldn't be here, see BinaryOp_Patch.
__ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
__ b(ne, &right_arg_changed);
}
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
@@ -2147,6 +2097,7 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
// Code falls through if the result is not returned as either a smi or heap
// number.
__ bind(&right_arg_changed);
GenerateTypeTransition(masm);
__ bind(&call_runtime);
@@ -2259,42 +2210,25 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
UNREACHABLE();
}
if (op_ != Token::DIV) {
// These operations produce an integer result.
// Try to return a smi if we can.
// Otherwise return a heap number if allowed, or jump to type
// transition.
if (result_type_ <= BinaryOpIC::INT32) {
__ TryDoubleToInt32Exact(scratch1, d5, d8);
// If the ne condition is set, result does
// not fit in a 32-bit integer.
__ b(ne, &transition);
} else {
__ vcvt_s32_f64(s8, d5);
__ vmov(scratch1, s8);
}
// Check if the result fits in a smi.
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
// If not try to return a heap number.
__ b(mi, &return_heap_number);
// Check for minus zero. Return heap number for minus zero if
// double results are allowed; otherwise transition.
if (result_type_ <= BinaryOpIC::INT32) {
__ TryDoubleToInt32Exact(scratch1, d5, d8);
// If the ne condition is set, result does
// not fit in a 32-bit integer.
__ b(ne, &transition);
// Try to tag the result as a Smi, return heap number on overflow.
__ SmiTag(scratch1, SetCC);
__ b(vs, &return_heap_number);
// Check for minus zero, transition in that case (because we need
// to return a heap number).
Label not_zero;
__ cmp(scratch1, Operand::Zero());
ASSERT(kSmiTag == 0);
__ b(ne, &not_zero);
__ vmov(scratch2, d5.high());
__ tst(scratch2, Operand(HeapNumber::kSignMask));
__ b(ne, result_type_ <= BinaryOpIC::INT32 ? &transition
: &return_heap_number);
__ b(ne, &transition);
__ bind(&not_zero);
// Tag the result and return.
__ SmiTag(r0, scratch1);
__ mov(r0, scratch1);
__ Ret();
} else {
// DIV just falls through to allocating a heap number.
}
__ bind(&return_heap_number);
@@ -2318,6 +2252,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// to type transition.
} else {
if (has_fixed_right_arg_) {
__ Vmov(d8, fixed_right_arg_value(), scratch1);
__ VFPCompareAndSetFlags(d1, d8);
__ b(ne, &transition);
}
// We preserved r0 and r1 to be able to call runtime.
// Save the left value on the stack.
__ Push(r5, r4);
@@ -4689,7 +4629,6 @@ static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// megamorphic.
// r1 : the function to call
// r2 : cache cell for call target
ASSERT(!FLAG_optimize_constructed_arrays);
Label done;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -7336,6 +7275,10 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
stub.GetCode(isolate)->set_is_pregenerated(true);
if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
T stub1(kind, true);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
}
}
@@ -7350,6 +7293,21 @@ void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
}
void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
Isolate* isolate) {
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few stub variants.
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
stubh1.GetCode(isolate)->set_is_pregenerated(true);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
stubh2.GetCode(isolate)->set_is_pregenerated(true);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
stubh3.GetCode(isolate)->set_is_pregenerated(true);
}
}
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count_ == ANY)
@@ -7436,6 +7394,105 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
}
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
Label not_zero_case, not_one_case;
Label normal_sequence;
__ tst(r0, r0);
__ b(ne, &not_zero_case);
InternalArrayNoArgumentConstructorStub stub0(kind);
__ TailCallStub(&stub0);
__ bind(&not_zero_case);
__ cmp(r0, Operand(1));
__ b(gt, &not_one_case);
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array;
// look at the first argument.
__ ldr(r3, MemOperand(sp, 0));
__ cmp(r3, Operand::Zero());
__ b(eq, &normal_sequence);
InternalArraySingleArgumentConstructorStub
stub1_holey(GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey);
}
__ bind(&normal_sequence);
InternalArraySingleArgumentConstructorStub stub1(kind);
__ TailCallStub(&stub1);
__ bind(&not_one_case);
InternalArrayNArgumentsConstructorStub stubN(kind);
__ TailCallStub(&stubN);
}
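GenerateCase dispatches on argc, with the single-argument case possibly switching to a holey kind when a non-zero length is requested. A hedged scalar model of that selection (stub names abbreviated for illustration):

  enum ElementsKind { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };

  // Mirrors the branches above: argc == 0, argc > 1, or argc == 1 with a
  // holey upgrade when the kind is packed and the requested length is not 0.
  const char* SelectStub(int argc, int first_arg, ElementsKind kind) {
    if (argc == 0) return "NoArgumentConstructorStub";
    if (argc > 1) return "NArgumentsConstructorStub";
    if (kind == FAST_ELEMENTS && first_arg != 0) {
      return "SingleArgumentConstructorStub(holey)";  // new Array(n), n > 0
    }
    return "SingleArgumentConstructorStub";
  }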
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
// -- r1 : constructor
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// The following smi test catches both NULL and a Smi.
__ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected initial map for Array function");
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, "Unexpected initial map for Array function");
}
if (FLAG_optimize_constructed_arrays) {
// Figure out the right elements kind
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into r3. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
__ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
if (FLAG_debug_code) {
Label done;
__ cmp(r3, Operand(FAST_ELEMENTS));
__ b(eq, &done);
__ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
__ Assert(eq,
"Invalid ElementsKind for InternalArray or InternalPackedArray");
__ bind(&done);
}
Label fast_elements_case;
__ cmp(r3, Operand(FAST_ELEMENTS));
__ b(eq, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
} else {
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
}
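The Ubfx above pulls the elements kind out of the map's second bit field. A hedged scalar model of that decode, taking the shift and width as parameters rather than asserting their concrete values:

  static int DecodeElementsKind(uint32_t bit_field2,
                                int kElementsKindShift,
                                int kElementsKindBitCount) {
    return static_cast<int>(
        (bit_field2 >> kElementsKindShift) &
        ((1u << kElementsKindBitCount) - 1));
  }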
#undef __
} } // namespace v8::internal

44
deps/v8/src/arm/codegen-arm.cc

@@ -504,50 +504,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
}
void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
String::Encoding encoding,
Register string,
Register index,
Register value) {
if (FLAG_debug_code) {
__ SmiTst(index);
__ Check(eq, "Non-smi index");
__ SmiTst(value);
__ Check(eq, "Non-smi value");
__ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
__ cmp(index, ip);
__ Check(lt, "Index is too large");
__ cmp(index, Operand(Smi::FromInt(0)));
__ Check(ge, "Index is negative");
__ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(eq, "Unexpected string type");
}
__ add(ip,
string,
Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ SmiUntag(value, value);
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
if (encoding == String::ONE_BYTE_ENCODING) {
// Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
__ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
} else {
// No need to untag a smi for two-byte addressing.
__ strh(value, MemOperand(ip, index)); // LSL(1 - kSmiTagSize).
}
}
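SeqStringSetCharGenerator moves out of codegen-arm.cc; the same stores now live in full-codegen (see EmitOneByteSeqStringSetChar later in this diff). The addressing trick it used, sketched under the assumption of 32-bit smis with tag 0 and one tag bit:

  // index arrives smi-tagged: smi_index == raw_index << 1.
  uint32_t OneByteOffset(uint32_t smi_index) {
    return smi_index >> 1;   // the LSR, kSmiTagSize in the strb operand
  }
  uint32_t TwoByteOffset(uint32_t smi_index) {
    return smi_index;        // already raw_index * 2; strh needs no untag
  }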
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}

2
deps/v8/src/arm/codegen-arm.h

@@ -51,7 +51,7 @@ class CodeGenerator: public AstVisitor {
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,

2
deps/v8/src/arm/deoptimizer-arm.cc

@@ -48,7 +48,7 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());
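AssertNoAllocation becomes DisallowHeapAllocation as part of this upgrade's assert-scope rework. A minimal sketch of the RAII pattern such scopes implement, with illustrative names (the real class is per-isolate and compiled out of release builds):

  struct DisallowGuard {              // illustrative, not the real v8 class
    DisallowGuard()  { ++depth(); }
    ~DisallowGuard() { --depth(); }
    static int& depth() { static int d = 0; return d; }
    static bool allowed() { return depth() == 0; }
  };
  // Allocation sites then ASSERT(DisallowGuard::allowed()) in debug builds.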

174
deps/v8/src/arm/full-codegen-arm.cc

@@ -678,8 +678,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
ToBooleanStub stub(result_register());
__ CallStub(&stub, condition->test_id());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -1081,9 +1081,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
// Get the object to enumerate over. Both SpiderMonkey and JSC
// ignore null and undefined in contrast to the specification; see
// ECMA-262 section 12.6.4.
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
@@ -1259,6 +1258,65 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Comment cmnt(masm_, "[ ForOfStatement");
SetStatementPosition(stmt);
Iteration loop_statement(this, stmt);
increment_loop_depth();
// var iterator = iterable[@@iterator]()
VisitForAccumulatorValue(stmt->assign_iterator());
// As with for-in, skip the loop if the iterator is null or undefined.
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(eq, loop_statement.break_label());
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, loop_statement.break_label());
// Convert the iterator to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r0, &convert);
__ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &done_convert);
__ bind(&convert);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
__ push(r0);
// Loop entry.
__ bind(loop_statement.continue_label());
// result = iterator.next()
VisitForEffect(stmt->next_result());
// if (result.done) break;
Label result_not_done;
VisitForControl(stmt->result_done(),
loop_statement.break_label(),
&result_not_done,
&result_not_done);
__ bind(&result_not_done);
// each = result.value
VisitForEffect(stmt->assign_each());
// Generate code for the body of the loop.
Visit(stmt->body());
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
__ jmp(loop_statement.continue_label());
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
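VisitForOfStatement is new here. A hedged host-side model of the protocol the emitted code follows, with an illustrative iterator standing in for iterable[@@iterator]():

  #include <vector>

  struct IterResult { bool done; int value; };

  struct Iterator {                       // illustrative stand-in
    std::vector<int> items;
    size_t pos = 0;
    IterResult next() {
      if (pos == items.size()) return {true, 0};
      return {false, items[pos++]};
    }
  };

  void ForOf(Iterator it, void (*body)(int)) {
    for (;;) {
      IterResult result = it.next();      // result = iterator.next()
      if (result.done) break;             // if (result.done) break;
      body(result.value);                 // each = result.value; loop body
    }
  }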
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1971,10 +2029,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
// Initial send value is undefined.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(&l_send);
__ b(&l_next);
// catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
__ bind(&l_catch);
@@ -1983,11 +2041,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(r3); // iter
__ push(r0); // exception
__ mov(r0, r3); // iter
__ push(r0); // push LoadIC state
__ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw"
Handle<Code> throw_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(throw_ic); // iter.throw in r0
__ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state
__ jmp(&l_call);
// try { received = yield result.value }
@@ -2007,17 +2063,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_resume); // received in r0
__ PopTryHandler();
// receiver = iter; f = iter.send; arg = received;
__ bind(&l_send);
// receiver = iter; f = iter.next; arg = received;
__ bind(&l_next);
__ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
__ push(r3); // iter
__ push(r0); // received
__ mov(r0, r3); // iter
__ push(r0); // push LoadIC state
__ LoadRoot(r2, Heap::ksend_stringRootIndex); // "send"
Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(send_ic); // iter.send in r0
__ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state
__ LoadRoot(r2, Heap::knext_stringRootIndex); // "next"
Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(next_ic); // iter.next in r0
// result = f.call(receiver, arg);
__ bind(&l_call);
@@ -2045,13 +2099,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(r1); // result
__ push(r0); // result.value
__ mov(r0, r1); // result
__ push(r0); // push LoadIC state
__ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in r0
__ add(sp, sp, Operand(kPointerSize)); // drop LoadIC state
ToBooleanStub stub(r0);
__ CallStub(&stub);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ cmp(r0, Operand(0));
__ b(eq, &l_try);
@@ -2122,7 +2174,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
if (resume_mode == JSGeneratorObject::SEND) {
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
__ cmp(r3, Operand(0));
__ b(ne, &slow_resume);
@@ -3013,7 +3065,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// string "valueOf" the result is false.
// The use of ip to store the valueOf string assumes that it is not otherwise
// used in the loop below.
__ mov(ip, Operand(FACTORY->value_of_string()));
__ mov(ip, Operand(isolate()->factory()->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ ldr(r3, MemOperand(r4, 0));
@@ -3425,19 +3477,56 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
Register index,
Register value,
uint32_t encoding_mask) {
__ SmiTst(index);
__ Check(eq, "Non-smi index");
__ SmiTst(value);
__ Check(eq, "Non-smi value");
__ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
__ cmp(index, ip);
__ Check(lt, "Index is too large");
__ cmp(index, Operand(Smi::FromInt(0)));
__ Check(ge, "Index is negative");
__ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
__ cmp(ip, Operand(encoding_mask));
__ Check(eq, "Unexpected string type");
}
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
Register string = r0;
Register index = r1;
Register value = r2;
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
__ pop(r2);
__ pop(r1);
__ pop(value);
__ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
context()->Plug(r0);
if (FLAG_debug_code) {
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
}
__ SmiUntag(value, value);
__ add(ip,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
context()->Plug(string);
}
@@ -3445,15 +3534,28 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
Register string = r0;
Register index = r1;
Register value = r2;
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
__ pop(r2);
__ pop(r1);
__ pop(value);
__ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
context()->Plug(r0);
if (FLAG_debug_code) {
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
}
__ SmiUntag(value, value);
__ add(ip,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ strh(value, MemOperand(ip, index));
context()->Plug(string);
}
@@ -4663,9 +4765,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
EqualityKind kind = expr->op() == Token::EQ_STRICT
? kStrictEquality : kNonStrictEquality;
if (kind == kStrictEquality) {
if (expr->op() == Token::EQ_STRICT) {
Heap::RootListIndex nil_value = nil == kNullValue ?
Heap::kNullValueRootIndex :
Heap::kUndefinedValueRootIndex;
@@ -4673,9 +4773,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
__ cmp(r0, r1);
Split(eq, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
kNonStrictEquality,
nil);
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
__ cmp(r0, Operand(0));
Split(ne, if_true, if_false, fall_through);
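EmitLiteralCompareNil now derives strictness from the token directly, and only the non-strict case goes through CompareNilIC. A hedged model of the two flavours (the Value struct is illustrative, not the real tagged type):

  enum NilValue { kNullValue, kUndefinedValue };

  struct Value {                  // illustrative only
    bool is_null;
    bool is_undefined;
    bool is_undetectable;         // document.all-style objects
  };

  bool CompareNil(const Value& v, NilValue nil, bool strict) {
    if (strict) {                 // x === null  /  x === undefined
      return nil == kNullValue ? v.is_null : v.is_undefined;
    }
    // x == null and x == undefined are equivalent: true for null,
    // undefined, or an undetectable object.
    return v.is_null || v.is_undefined || v.is_undetectable;
  }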

23
deps/v8/src/arm/ic-arm.cc

@@ -646,15 +646,11 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
}
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
// Probe the stub cache.
@@ -674,7 +670,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;
@@ -695,7 +690,6 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Isolate* isolate = masm->isolate();
@@ -711,6 +705,20 @@
}
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -----------------------------------
__ mov(r3, r0);
__ Push(r3, r2);
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
Register object,
Register key,
@@ -878,9 +886,6 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
}
Object* KeyedLoadIC_Miss(Arguments args);
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- lr : return address

206
deps/v8/src/arm/lithium-arm.cc

@@ -369,8 +369,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
hydrogen()->access().PrintTo(stream);
stream->Add(" <- ");
value()->PrintTo(stream);
}
@@ -406,7 +405,14 @@ void LStoreKeyed::PrintDataTo(StringStream* stream) {
} else {
stream->Add("] <- ");
}
value()->PrintTo(stream);
if (value() == NULL) {
ASSERT(hydrogen()->IsConstantHoleStore() &&
hydrogen()->value()->representation().IsDouble());
stream->Add("<the hole(nan)>");
} else {
value()->PrintTo(stream);
}
}
@@ -699,6 +705,12 @@ LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
}
LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
UNREACHABLE();
return NULL;
}
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -711,9 +723,9 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
if (instr->representation().IsSmiOrTagged()) {
ASSERT(instr->left()->representation().IsSmiOrTagged());
ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
@@ -781,8 +793,8 @@ LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
op == Token::SUB);
HValue* left = instr->left();
HValue* right = instr->right();
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
ASSERT(left->representation().IsSmiOrTagged());
ASSERT(right->representation().IsSmiOrTagged());
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
LArithmeticT* result =
@@ -1304,9 +1316,9 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
return DefineAsRegister(new(zone()) LBitI(left, right));
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
ASSERT(instr->representation().IsSmiOrTagged());
ASSERT(instr->left()->representation().IsSmiOrTagged());
ASSERT(instr->right()->representation().IsSmiOrTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
@@ -1333,18 +1345,14 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
LDivI* div =
new(zone()) LDivI(value, UseOrConstant(instr->right()));
new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
return AssignEnvironment(DefineSameAsFirst(div));
}
// TODO(1042) The fixed register allocation
// is needed because we call TypeRecordingBinaryOpStub from
// the generated code, which requires registers r0
// and r1 to be used. We should remove that
// when we provide a native implementation.
LOperand* dividend = UseFixed(instr->left(), r0);
LOperand* divisor = UseFixed(instr->right(), r1);
return AssignEnvironment(AssignPointerMap(
DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
return AssignEnvironment(DefineAsRegister(div));
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1434,43 +1442,61 @@ LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
HValue* left = instr->left();
HValue* right = instr->right();
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LModI* mod;
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
if (instr->HasPowerOf2Divisor()) {
ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
LOperand* value = UseRegisterAtStart(instr->left());
mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
} else {
LOperand* dividend = UseRegister(instr->left());
LOperand* divisor = UseRegister(instr->right());
mod = new(zone()) LModI(dividend,
divisor,
TempRegister(),
FixedTemp(d10),
FixedTemp(d11));
}
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
instr->CheckFlag(HValue::kCanBeDivByZero) ||
instr->CheckFlag(HValue::kCanOverflow)) {
ASSERT(!right->CanBeZero());
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
UseOrConstant(right));
LInstruction* result = DefineAsRegister(mod);
return (left->CanBeNegative() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero))
? AssignEnvironment(result)
: result;
} else if (instr->has_fixed_right_arg()) {
LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
UseRegisterAtStart(right));
return AssignEnvironment(DefineAsRegister(mod));
} else if (CpuFeatures::IsSupported(SUDIV)) {
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right));
LInstruction* result = DefineAsRegister(mod);
return (right->CanBeZero() ||
(left->RangeCanInclude(kMinInt) &&
right->RangeCanInclude(-1) &&
instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
(left->CanBeNegative() &&
instr->CanBeZero() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero)))
? AssignEnvironment(result)
: result;
} else {
return DefineAsRegister(mod);
LModI* mod = new(zone()) LModI(UseRegister(left),
UseRegister(right),
FixedTemp(d10),
FixedTemp(d11));
LInstruction* result = DefineAsRegister(mod);
return (right->CanBeZero() ||
(left->CanBeNegative() &&
instr->CanBeZero() &&
instr->CheckFlag(HValue::kBailoutOnMinusZero)))
? AssignEnvironment(result)
: result;
}
} else if (instr->representation().IsTagged()) {
} else if (instr->representation().IsSmiOrTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
// We call a C function for double modulo. It can't trigger a GC.
// We need to use fixed result register for the call.
// We call a C function for double modulo. It can't trigger a GC. We need
// to use a fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
LOperand* left = UseFixedDouble(instr->left(), d1);
LOperand* right = UseFixedDouble(instr->right(), d2);
LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, d1), instr);
LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
UseFixedDouble(left, d1),
UseFixedDouble(right, d2));
return MarkAsCall(DefineFixedDouble(mod, d1), instr);
}
}
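DoMod's HasPowerOf2Divisor path depends on sign-preserving masking. A hedged scalar model; the kMinInt and minus-zero corner cases are excluded here because, as above, those are handled by deopt environments rather than inline:

  int32_t ModPowerOf2(int32_t lhs, int32_t divisor) {
    // |divisor| is 2^k; callers exclude lhs == kMinInt, mirroring the
    // deopt conditions assigned in DoMod.
    uint32_t mask = static_cast<uint32_t>(divisor < 0 ? -divisor : divisor) - 1;
    if (lhs >= 0) return static_cast<int32_t>(static_cast<uint32_t>(lhs) & mask);
    uint32_t magnitude = 0u - static_cast<uint32_t>(lhs);   // |lhs| without UB
    return -static_cast<int32_t>(magnitude & mask);         // sign of dividend
  }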
@@ -1618,7 +1644,7 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->representation().IsSmiOrTagged());
return DoArithmeticT(Token::ADD, instr);
}
}
@@ -1682,9 +1708,10 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
if (r.IsSmiOrInteger32()) {
ASSERT(instr->left()->representation().IsSmiOrInteger32());
ASSERT(instr->left()->representation().Equals(
instr->right()->representation()));
LOperand* left = UseRegisterOrConstantAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
return new(zone()) LCmpIDAndBranch(left, right);
@@ -1887,12 +1914,26 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
if (from.IsSmi()) {
if (to.IsTagged()) {
LOperand* value = UseRegister(instr->value());
return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
from = Representation::Tagged();
}
if (from.IsTagged()) {
if (to.IsDouble()) {
info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->type().IsSmi()) {
return DefineSameAsFirst(new(zone()) LDummyUse(value));
}
return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
} else {
ASSERT(to.IsInteger32());
LOperand* value = NULL;
@@ -1927,6 +1968,10 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
} else if (to.IsSmi()) {
LOperand* value = UseRegister(instr->value());
return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToSmi(value,
TempRegister(), TempRegister())));
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
@@ -1949,6 +1994,15 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LNumberTagI* result = new(zone()) LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* result =
DefineSameAsFirst(new(zone()) LInteger32ToSmi(value));
if (val->HasRange() && val->range()->IsInSmiRange()) {
return result;
}
return AssignEnvironment(result);
} else {
ASSERT(to.IsDouble());
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
@@ -1986,18 +2040,6 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckSmi(value));
}
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -2020,7 +2062,7 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
ASSERT(input_rep.IsTagged());
ASSERT(input_rep.IsSmiOrTagged());
// Register allocator doesn't (yet) support allocation of double
// temps. Reserve d1 explicitly.
LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
@@ -2038,7 +2080,9 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
if (r.IsSmi()) {
return DefineAsRegister(new(zone()) LConstantS);
} else if (r.IsInteger32()) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
return DefineAsRegister(new(zone()) LConstantD);
@@ -2154,7 +2198,7 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
instr->key()->representation().IsSmi());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
@@ -2164,7 +2208,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
if (instr->representation().IsDouble()) {
obj = UseTempRegister(instr->elements());
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->representation().IsSmiOrTagged());
obj = UseRegisterAtStart(instr->elements());
}
result = new(zone()) LLoadKeyed(obj, key);
@@ -2214,7 +2258,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
val = UseTempRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(instr->value()->representation().IsTagged());
ASSERT(instr->value()->representation().IsSmiOrTagged());
object = UseTempRegister(instr->elements());
val = needs_write_barrier ? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
@@ -2293,13 +2337,14 @@ LInstruction* LChunkBuilder::DoTrapAllocationMemento(
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool is_in_object = instr->access().IsInobject();
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
instr->NeedsWriteBarrierForMap();
LOperand* obj;
if (needs_write_barrier) {
obj = instr->is_in_object()
obj = is_in_object
? UseRegister(instr->object())
: UseTempRegister(instr->object());
} else {
@@ -2323,10 +2368,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
(FLAG_track_heap_object_fields &&
instr->field_representation().IsHeapObject())) {
return AssignEnvironment(result);
if (FLAG_track_heap_object_fields &&
instr->field_representation().IsHeapObject()) {
if (!instr->value()->type().IsHeapObject()) {
return AssignEnvironment(result);
}
}
return result;
}
@@ -2370,14 +2416,6 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
}
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
info()->MarkAsDeferredCalling();
LAllocateObject* result =
new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
info()->MarkAsDeferredCalling();
LOperand* size = instr->size()->IsConstant()
@@ -2467,7 +2505,7 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
index = UseOrConstant(instr->index());
} else {
length = UseTempRegister(instr->length());
index = Use(instr->index());
index = UseRegisterAtStart(instr->index());
}
return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
}

91
deps/v8/src/arm/lithium-arm.h

@@ -49,7 +49,6 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
V(AllocateObject) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -87,6 +86,7 @@ class LCodeGen;
V(CmpT) \
V(ConstantD) \
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
V(Context) \
V(DebugBreak) \
@@ -95,6 +95,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(DoubleToSmi) \
V(DummyUse) \
V(ElementsKind) \
V(FixedArrayBaseLength) \
@@ -111,6 +112,7 @@ class LCodeGen;
V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Integer32ToSmi) \
V(Uint32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
@@ -573,51 +575,39 @@ class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
};
class LModI: public LTemplateInstruction<1, 2, 3> {
class LModI: public LTemplateInstruction<1, 2, 2> {
public:
// Used when the right hand is a constant power of 2.
LModI(LOperand* left,
LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = NULL;
temps_[1] = NULL;
temps_[2] = NULL;
}
// Used for the standard case.
LModI(LOperand* left,
LOperand* right,
LOperand* temp,
LOperand* temp2,
LOperand* temp3) {
LOperand* temp = NULL,
LOperand* temp2 = NULL) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
temps_[1] = temp2;
temps_[2] = temp3;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
LOperand* temp3() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
class LDivI: public LTemplateInstruction<1, 2, 0> {
class LDivI: public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right) {
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
@@ -1204,6 +1194,15 @@ class LConstantI: public LTemplateInstruction<1, 0, 0> {
};
class LConstantS: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
DECLARE_HYDROGEN_ACCESSOR(Constant)
Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
};
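LConstantS materializes Smi::FromInt(...) directly. A sketch of the 32-bit smi encoding the surrounding code assumes (tag 0, one tag bit), including the round-trip check that the overflow-flag test in GenerateInt32Stub performs in hardware:

  #include <cstdint>

  static bool IsValidSmi(int32_t value) {
    int64_t tagged = static_cast<int64_t>(value) * 2;
    return tagged >= INT32_MIN && tagged <= INT32_MAX;
  }
  static int32_t SmiTag(int32_t value) { return value << 1; }  // needs IsValidSmi
  static int32_t SmiUntag(int32_t smi) { return smi >> 1; }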
class LConstantD: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
@@ -1954,6 +1953,19 @@ class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
};
class LInteger32ToSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToSmi(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
class LUint32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LUint32ToDouble(LOperand* value) {
@@ -2007,6 +2019,25 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
};
class LDoubleToSmi: public LTemplateInstruction<1, 1, 2> {
public:
LDoubleToSmi(LOperand* value, LOperand* temp, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp;
temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
public:
@@ -2111,9 +2142,6 @@ class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
virtual void PrintDataTo(StringStream* stream);
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
Representation representation() const {
return hydrogen()->field_representation();
@@ -2352,7 +2380,7 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
};
class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
class LCheckSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
@@ -2416,21 +2444,6 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
public:
LAllocateObject(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
temps_[1] = temp2;
}
LOperand* temp() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
};
class LAllocate: public LTemplateInstruction<1, 2, 2> {
public:
LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {

698
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

12
deps/v8/src/arm/lithium-codegen-arm.h

@@ -119,6 +119,7 @@ class LCodeGen BASE_EMBEDDED {
SwVfpRegister flt_scratch,
DwVfpRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
@@ -126,6 +127,7 @@ class LCodeGen BASE_EMBEDDED {
MemOperand ToHighMemOperand(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
bool IsSmi(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
@@ -138,10 +140,6 @@ class LCodeGen BASE_EMBEDDED {
void FinishCode(Handle<Code> code);
// Deferred code support.
void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
LOperand* left_argument,
LOperand* right_argument,
Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
@@ -155,13 +153,11 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoCheckMapCommon(Register map_reg, Handle<Map> map,
CompareMapMode mode, LEnvironment* env);
void DoCheckMapCommon(Register map_reg, Handle<Map> map, LEnvironment* env);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -334,7 +330,7 @@ class LCodeGen BASE_EMBEDDED {
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
DwVfpRegister result,
bool deoptimize_on_undefined,
bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode);

8
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@@ -248,7 +248,9 @@ void LGapResolver::EmitMove(int index) {
LConstantOperand* constant_source = LConstantOperand::cast(source);
if (destination->IsRegister()) {
Register dst = cgen_->ToRegister(destination);
if (cgen_->IsInteger32(constant_source)) {
if (cgen_->IsSmi(constant_source)) {
__ mov(dst, Operand(cgen_->ToSmi(constant_source)));
} else if (cgen_->IsInteger32(constant_source)) {
__ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
} else {
__ LoadObject(dst, cgen_->ToHandle(constant_source));
@@ -256,7 +258,9 @@ void LGapResolver::EmitMove(int index) {
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsInteger32(constant_source)) {
if (cgen_->IsSmi(constant_source)) {
__ mov(kSavedValueRegister, Operand(cgen_->ToSmi(constant_source)));
} else if (cgen_->IsInteger32(constant_source)) {
__ mov(kSavedValueRegister,
Operand(cgen_->ToInteger32(constant_source)));
} else {

35
deps/v8/src/arm/macro-assembler-arm.cc

@@ -74,7 +74,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
@@ -163,7 +163,7 @@ int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id,
Condition cond) {
ALLOW_HANDLE_DEREF(isolate(), "using raw address");
AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
@@ -181,7 +181,7 @@ void MacroAssembler::Call(Handle<Code> code,
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
// 'code' is always generated ARM code, never THUMB code
ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
AllowDeferredHandleDereference embedding_raw_address;
Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
}
@@ -398,7 +398,7 @@ void MacroAssembler::StoreRoot(Register source,
void MacroAssembler::LoadHeapObject(Register result,
Handle<HeapObject> object) {
ALLOW_HANDLE_DEREF(isolate(), "using raw address");
AllowDeferredHandleDereference using_raw_address;
if (isolate()->heap()->InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(object);
@@ -2105,32 +2105,16 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
void MacroAssembler::CompareMap(Register obj,
Register scratch,
Handle<Map> map,
Label* early_success,
CompareMapMode mode) {
Label* early_success) {
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
CompareMap(scratch, map, early_success, mode);
CompareMap(scratch, map, early_success);
}
void MacroAssembler::CompareMap(Register obj_map,
Handle<Map> map,
Label* early_success,
CompareMapMode mode) {
Label* early_success) {
cmp(obj_map, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
ElementsKind kind = map->elements_kind();
if (IsFastElementsKind(kind)) {
bool packed = IsFastPackedElementsKind(kind);
Map* current_map = *map;
while (CanTransitionToMoreGeneralFastElementsKind(kind, packed)) {
kind = GetNextMoreGeneralFastElementsKind(kind, packed);
current_map = current_map->LookupElementsTransitionMap(kind);
if (!current_map) break;
b(eq, early_success);
cmp(obj_map, Operand(Handle<Map>(current_map)));
}
}
}
}
@@ -2138,14 +2122,13 @@ void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
SmiCheckType smi_check_type,
CompareMapMode mode) {
SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
Label success;
CompareMap(obj, scratch, map, &success, mode);
CompareMap(obj, scratch, map, &success);
b(ne, fail);
bind(&success);
}

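With CompareMapMode removed, every CompareMap/CheckMap above is an exact map-identity check; the loop that also accepted maps reachable through fast elements-kind transitions is deleted outright rather than hidden behind a default argument. A toy model of the tightened contract, using stand-in types:

#include <cassert>

struct Map { int id; };

// Exact check only: no walk over more-general elements-kind transition maps.
bool CheckMapExact(const Map* object_map, const Map* expected_map) {
  return object_map->id == expected_map->id;
}

int main() {
  Map a = {1}, transitioned = {2};
  assert(CheckMapExact(&a, &a));
  assert(!CheckMapExact(&transitioned, &a));  // transitions no longer pass
  return 0;
}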
11
deps/v8/src/arm/macro-assembler-arm.h

@@ -162,7 +162,7 @@ class MacroAssembler: public Assembler {
void LoadHeapObject(Register dst, Handle<HeapObject> object);
void LoadObject(Register result, Handle<Object> object) {
ALLOW_HANDLE_DEREF(isolate(), "heap object check");
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
LoadHeapObject(result, Handle<HeapObject>::cast(object));
} else {
@@ -884,15 +884,13 @@ class MacroAssembler: public Assembler {
void CompareMap(Register obj,
Register scratch,
Handle<Map> map,
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
Label* early_success);
// As above, but the map of the object is already loaded into the register
// which is preserved by the code generated.
void CompareMap(Register obj_map,
Handle<Map> map,
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
Label* early_success);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
@@ -902,8 +900,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Handle<Map> map,
Label* fail,
SmiCheckType smi_check_type,
CompareMapMode mode = REQUIRE_EXACT_MAP);
SmiCheckType smi_check_type);
void CheckMap(Register obj,

57
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -122,7 +122,7 @@ RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
int registers_to_save,
Zone* zone)
: NativeRegExpMacroAssembler(zone),
masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@@ -235,54 +235,6 @@ void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) {
}
void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string) {
if (on_failure == NULL) {
// Instead of inlining a backtrack for each test, (re)use the global
// backtrack target.
on_failure = &backtrack_label_;
}
if (check_end_of_string) {
// Is last character of required match inside string.
CheckPosition(cp_offset + str.length() - 1, on_failure);
}
__ add(r0, end_of_input_address(), Operand(current_input_offset()));
if (cp_offset != 0) {
int byte_offset = cp_offset * char_size();
__ add(r0, r0, Operand(byte_offset));
}
// r0 : Address of characters to match against str.
int stored_high_byte = 0;
for (int i = 0; i < str.length(); i++) {
if (mode_ == ASCII) {
__ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
ASSERT(str[i] <= String::kMaxOneByteCharCode);
__ cmp(r1, Operand(str[i]));
} else {
__ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
uc16 match_char = str[i];
int match_high_byte = (match_char >> 8);
if (match_high_byte == 0) {
__ cmp(r1, Operand(str[i]));
} else {
if (match_high_byte != stored_high_byte) {
__ mov(r2, Operand(match_high_byte));
stored_high_byte = match_high_byte;
}
__ add(r3, r2, Operand(match_char & 0xff));
__ cmp(r1, r3);
}
}
BranchOrBacktrack(ne, on_failure);
}
}
void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
__ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
__ cmp(current_input_offset(), r0);
@@ -556,7 +508,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
case 'd':
// Match ASCII digits ('0'..'9')
__ sub(r0, current_character(), Operand('0'));
__ cmp(current_character(), Operand('9' - '0'));
__ cmp(r0, Operand('9' - '0'));
BranchOrBacktrack(hi, on_no_match);
return true;
case 'D':
@@ -917,9 +869,8 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
Handle<Code> code = FACTORY->NewCode(code_desc,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
Handle<Code> code = isolate()->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}

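The one-line fix in CheckSpecialCharacterClass is easy to miss: the '\d' path subtracts '0' into r0, but the old code then compared the original character against '9' - '0', so the range test ran on the wrong register. The corrected unsigned range-check idiom, as a standalone sketch:

#include <cassert>

// After subtracting '0', one unsigned compare covers both bounds: values
// below '0' wrap to large unsigned numbers and fail the test.
bool IsAsciiDigit(unsigned int c) {
  return (c - '0') <= static_cast<unsigned int>('9' - '0');
}

int main() {
  assert(IsAsciiDigit('0') && IsAsciiDigit('9'));
  assert(!IsAsciiDigit('/') && !IsAsciiDigit(':') && !IsAsciiDigit('a'));
  return 0;
}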
4
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -53,10 +53,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
virtual void CheckCharacters(Vector<const uc16> str,
int cp_offset,
Label* on_failure,
bool check_end_of_string);
// A "greedy loop" is a loop that is both greedy and with a simple
// body. It has a particularly simple implementation.
virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);

200
deps/v8/src/arm/stub-cache-arm.cc

@@ -462,7 +462,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, REQUIRE_EXACT_MAP);
DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -581,6 +581,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
index -= object->map()->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -606,7 +608,9 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
name_reg,
scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
smi_check);
}
} else {
// Write to the properties array.
@@ -636,7 +640,9 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
name_reg,
receiver_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
smi_check);
}
}
@@ -665,7 +671,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -723,6 +729,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
// TODO(verwaest): Share this code as a code stub.
SmiCheck smi_check = representation.IsTagged()
? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -740,7 +748,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
name_reg,
scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
smi_check);
}
} else {
// Write to the properties array.
@@ -762,7 +772,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
name_reg,
receiver_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
smi_check);
}
}
@@ -881,11 +893,12 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -- sp[4] : callee JS function
// -- sp[8] : call data
// -- sp[12] : isolate
// -- sp[16] : ReturnValue
// -- sp[20] : last JS argument
// -- sp[16] : ReturnValue default value
// -- sp[20] : ReturnValue
// -- sp[24] : last JS argument
// -- ...
// -- sp[(argc + 4) * 4] : first JS argument
// -- sp[(argc + 5) * 4] : receiver
// -- sp[(argc + 5) * 4] : first JS argument
// -- sp[(argc + 6) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
@@ -902,13 +915,14 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
__ Move(r6, call_data);
}
__ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
// Store JS function, call data, isolate and ReturnValue.
// Store JS function, call data, isolate ReturnValue default and ReturnValue.
__ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
__ str(r5, MemOperand(sp, 4 * kPointerSize));
__ str(r5, MemOperand(sp, 5 * kPointerSize));
// Prepare arguments.
__ add(r2, sp, Operand(4 * kPointerSize));
__ add(r2, sp, Operand(5 * kPointerSize));
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -1247,8 +1261,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
Handle<Map> current_map(current->map());
// CheckMap implicitly loads the map of |reg| into |map_reg|.
__ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK,
ALLOW_ELEMENT_TRANSITION_MAPS);
__ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
} else {
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
}
@@ -1285,7 +1298,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
// Check the holder map.
__ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
DONT_DO_SMI_CHECK);
}
// Perform security check for access to the global object.
@@ -1422,10 +1435,12 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
}
__ Push(reg, scratch3());
__ mov(scratch3(),
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
__ mov(scratch4(), scratch3());
__ Push(scratch3(), scratch4());
__ mov(scratch4(),
Operand(ExternalReference::isolate_address(isolate())));
__ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
__ Push(scratch3(), scratch4(), name());
__ Push(scratch4(), name());
__ mov(r0, sp); // r0 = Handle<Name>
const int kApiStackSpace = 1;
@@ -1451,7 +1466,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback(
__ CallApiFunctionAndReturn(ref,
kStackUnwindSpace,
returns_handle,
3);
5);
}
@@ -2797,7 +2812,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
// Check that the map of the object hasn't changed.
__ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
DO_SMI_CHECK);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -3080,151 +3095,6 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
}
Handle<Code> ConstructStubCompiler::CompileConstructStub(
Handle<JSFunction> function) {
// ----------- S t a t e -------------
// -- r0 : argc
// -- r1 : constructor
// -- lr : return address
// -- [sp] : last argument
// -----------------------------------
Label generic_stub_call;
// Use r7 for holding undefined which is used in several places below.
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check to see whether there are any break points in the function code. If
// there are jump to the generic constructor stub which calls the actual
// code for the function thereby hitting the break points.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset));
__ cmp(r2, r7);
__ b(ne, &generic_stub_call);
#endif
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
// r7: undefined
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r2, &generic_stub_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &generic_stub_call);
#ifdef DEBUG
// Cannot construct functions this way.
// r0: argc
// r1: constructor function
// r2: initial map
// r7: undefined
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ Check(ne, "Function constructed by construct stub.");
#endif
// Now allocate the JSObject in new space.
// r0: argc
// r1: constructor function
// r2: initial map
// r7: undefined
ASSERT(function->has_initial_map());
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
#ifdef DEBUG
int instance_size = function->initial_map()->instance_size();
__ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
__ Check(eq, "Instance size of initial map changed.");
#endif
__ Allocate(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
// map and properties and elements are set to empty fixed array.
// r0: argc
// r1: constructor function
// r2: initial map
// r3: object size (in words)
// r4: JSObject (not tagged)
// r7: undefined
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r2, MemOperand(r5, kPointerSize, PostIndex));
ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
__ str(r6, MemOperand(r5, kPointerSize, PostIndex));
// Calculate the location of the first argument. The stack contains only the
// argc arguments.
__ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
// Fill all the in-object properties with undefined.
// r0: argc
// r1: first argument
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
// r7: undefined
// Fill the initialized properties with a constant value or a passed argument
// depending on the this.x = ...; assignment in the function.
Handle<SharedFunctionInfo> shared(function->shared());
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
Label not_passed, next;
// Check if the argument assigned to the property is actually passed.
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
__ cmp(r0, Operand(arg_number));
__ b(le, &not_passed);
// Argument passed - find it on the stack.
__ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize));
__ str(r2, MemOperand(r5, kPointerSize, PostIndex));
__ b(&next);
__ bind(&not_passed);
// Set the property to undefined.
__ str(r7, MemOperand(r5, kPointerSize, PostIndex));
__ bind(&next);
} else {
// Set the property to the constant value.
Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
isolate());
__ mov(r2, Operand(constant));
__ str(r2, MemOperand(r5, kPointerSize, PostIndex));
}
}
// Fill the unused in-object property fields with undefined.
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
__ str(r7, MemOperand(r5, kPointerSize, PostIndex));
}
// r0: argc
// r4: JSObject (not tagged)
// Move argc to r1 and the JSObject to return to r0 and tag it.
__ mov(r1, r0);
__ mov(r0, r4);
__ orr(r0, r0, Operand(kHeapObjectTag));
// r0: JSObject
// r1: argc
// Remove caller arguments and receiver from the stack and return.
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2));
__ add(sp, sp, Operand(kPointerSize));
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1, r1, r2);
__ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2);
__ Jump(lr);
// Jump to the generic stub in case the specialized code cannot handle the
// construction.
__ bind(&generic_stub_call);
Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
__ Jump(code, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode();
}
#undef __
#define __ ACCESS_MASM(masm)

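The GenerateFastApiDirectCall hunks shift every JS-argument offset by one word because a ReturnValue default-value slot now sits next to ReturnValue. Transcribing the updated layout comments into an enum of word offsets (illustrative only, not a V8 type):

// Word offsets from sp after the frame is set up (each slot is 4 bytes).
enum FastApiCallSlot {
  kCalleeJsFunction   = 1,  // sp[4]
  kCallData           = 2,  // sp[8]
  kIsolate            = 3,  // sp[12]
  kReturnValueDefault = 4,  // sp[16] -- the newly inserted slot
  kReturnValue        = 5,  // sp[20]
  kLastJsArgument     = 6   // sp[24]; first arg at sp[(argc + 5) * 4],
                            // receiver at sp[(argc + 6) * 4]
};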
188
deps/v8/src/array.js

@@ -395,6 +395,23 @@ function ArrayJoin(separator) {
}
function ObservedArrayPop(n) {
n--;
var value = this[n];
EnqueueSpliceRecord(this, n, [value], 0);
try {
BeginPerformSplice(this);
delete this[n];
this.length = n;
} finally {
EndPerformSplice(this);
}
return value;
}
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
@@ -408,6 +425,10 @@ function ArrayPop() {
this.length = n;
return;
}
if (%IsObserved(this))
return ObservedArrayPop.call(this, n);
n--;
var value = this[n];
delete this[n];
@@ -420,11 +441,10 @@ function ObservedArrayPush() {
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
EnqueueSpliceRecord(this, n, [], 0, m);
EnqueueSpliceRecord(this, n, [], m);
try {
BeginPerformSplice(this);
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
}
@@ -558,6 +578,22 @@ function ArrayReverse() {
}
function ObservedArrayShift(len) {
var first = this[0];
EnqueueSpliceRecord(this, 0, [first], 0);
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 1, len, 0);
this.length = len - 1;
} finally {
EndPerformSplice(this);
}
return first;
}
function ArrayShift() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
@@ -571,9 +607,12 @@ function ArrayShift() {
return;
}
if (%IsObserved(this))
return ObservedArrayShift.call(this, len);
var first = this[0];
if (IS_ARRAY(this) && !%IsObserved(this)) {
if (IS_ARRAY(this)) {
SmartMove(this, 0, 1, len, 0);
} else {
SimpleMove(this, 0, 1, len, 0);
@@ -584,6 +623,25 @@ function ArrayShift() {
return first;
}
function ObservedArrayUnshift() {
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
EnqueueSpliceRecord(this, 0, [], num_arguments);
try {
BeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
for (var i = 0; i < num_arguments; i++) {
this[i] = %_Arguments(i);
}
this.length = len + num_arguments;
} finally {
EndPerformSplice(this);
}
return len + num_arguments;
}
function ArrayUnshift(arg1) { // length == 1
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
@@ -591,10 +649,13 @@ function ArrayUnshift(arg1) { // length == 1
["Array.prototype.unshift"]);
}
if (%IsObserved(this))
return ObservedArrayUnshift.apply(this, arguments);
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
if (IS_ARRAY(this) && !%IsObserved(this)) {
if (IS_ARRAY(this)) {
SmartMove(this, 0, 0, len, num_arguments);
} else {
SimpleMove(this, 0, 0, len, num_arguments);
@@ -655,52 +716,99 @@ function ArraySlice(start, end) {
}
function ArraySplice(start, delete_count) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.splice"]);
}
var num_arguments = %_ArgumentsLength();
var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
function ComputeSpliceStartIndex(start_i, len) {
if (start_i < 0) {
start_i += len;
if (start_i < 0) start_i = 0;
} else {
if (start_i > len) start_i = len;
return start_i < 0 ? 0 : start_i;
}
return start_i > len ? len : start_i;
}
function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) {
// SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
// given as a request to delete all the elements from the start.
// And it differs from the case of undefined delete count.
// This does not follow ECMA-262, but we do the same for
// compatibility.
var del_count = 0;
if (num_arguments == 1) {
del_count = len - start_i;
} else {
del_count = TO_INTEGER(delete_count);
if (del_count < 0) del_count = 0;
if (del_count > len - start_i) del_count = len - start_i;
}
if (num_arguments == 1)
return len - start_i;
del_count = TO_INTEGER(delete_count);
if (del_count < 0)
return 0;
if (del_count > len - start_i)
return len - start_i;
return del_count;
}
function ObservedArraySplice(start, delete_count) {
var num_arguments = %_ArgumentsLength();
var len = TO_UINT32(this.length);
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
start_i);
var deleted_elements = [];
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
try {
BeginPerformSplice(this);
// Number of elements to add.
var num_additional_args = 0;
if (num_arguments > 2) {
num_additional_args = num_arguments - 2;
SimpleSlice(this, start_i, del_count, len, deleted_elements);
SimpleMove(this, start_i, del_count, len, num_elements_to_add);
// Insert the arguments into the resulting array in
// place of the deleted elements.
var i = start_i;
var arguments_index = 2;
var arguments_length = %_ArgumentsLength();
while (arguments_index < arguments_length) {
this[i++] = %_Arguments(arguments_index++);
}
this.length = len - del_count + num_elements_to_add;
} finally {
EndPerformSplice(this);
if (deleted_elements.length || num_elements_to_add) {
EnqueueSpliceRecord(this,
start_i,
deleted_elements.slice(),
num_elements_to_add);
}
}
var use_simple_splice = true;
// Return the deleted elements.
return deleted_elements;
}
function ArraySplice(start, delete_count) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.splice"]);
}
if (%IsObserved(this))
return ObservedArraySplice.apply(this, arguments);
var num_arguments = %_ArgumentsLength();
var len = TO_UINT32(this.length);
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
start_i);
var deleted_elements = [];
deleted_elements.length = del_count;
var num_elements_to_add = num_arguments > 2 ? num_arguments - 2 : 0;
var use_simple_splice = true;
if (IS_ARRAY(this) &&
!%IsObserved(this) &&
num_additional_args !== del_count) {
num_elements_to_add !== del_count) {
// If we are only deleting/moving a few things near the end of the
// array then the simple version is going to be faster, because it
// doesn't touch most of the array.
@@ -712,10 +820,10 @@ function ArraySplice(start, delete_count) {
if (use_simple_splice) {
SimpleSlice(this, start_i, del_count, len, deleted_elements);
SimpleMove(this, start_i, del_count, len, num_additional_args);
SimpleMove(this, start_i, del_count, len, num_elements_to_add);
} else {
SmartSlice(this, start_i, del_count, len, deleted_elements);
SmartMove(this, start_i, del_count, len, num_additional_args);
SmartMove(this, start_i, del_count, len, num_elements_to_add);
}
// Insert the arguments into the resulting array in
@@ -726,7 +834,7 @@ function ArraySplice(start, delete_count) {
while (arguments_index < arguments_length) {
this[i++] = %_Arguments(arguments_index++);
}
this.length = len - del_count + num_additional_args;
this.length = len - del_count + num_elements_to_add;
// Return the deleted elements.
return deleted_elements;
@@ -1001,11 +1109,13 @@ function ArraySort(comparefn) {
max_prototype_element = CopyFromPrototype(this, length);
}
var num_non_undefined = %RemoveArrayHoles(this, length);
var num_non_undefined = %IsObserved(this) ?
-1 : %RemoveArrayHoles(this, length);
if (num_non_undefined == -1) {
// There were indexed accessors in the array. Move array holes and
// undefineds to the end using a Javascript function that is safe
// in the presence of accessors.
// The array is observed, or there were indexed accessors in the array.
// Move array holes and undefineds to the end using a Javascript function
// that is safe in the presence of accessors and is observable.
num_non_undefined = SafeRemoveArrayHoles(this);
}

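Factoring out ComputeSpliceStartIndex and ComputeSpliceDeleteCount lets ObservedArraySplice and the plain ArraySplice share one set of clamping rules. A direct C++ transcription of the two helpers (the originals above are JavaScript):

#include <cassert>

// Clamp the start index into [0, len].
long ComputeSpliceStartIndex(long start, long len) {
  if (start < 0) {
    start += len;
    return start < 0 ? 0 : start;
  }
  return start > len ? len : start;
}

// Clamp the delete count into [0, len - start]; a lone splice(start)
// deletes through to the end, matching the compatibility note above.
long ComputeSpliceDeleteCount(long count, int num_args, long len, long start) {
  if (num_args == 1) return len - start;
  if (count < 0) return 0;
  if (count > len - start) return len - start;
  return count;
}

int main() {
  assert(ComputeSpliceStartIndex(-2, 10) == 8);
  assert(ComputeSpliceDeleteCount(5, 2, 10, 8) == 2);
  return 0;
}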
168
deps/v8/src/assert-scope.h

@@ -0,0 +1,168 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ASSERT_SCOPE_H_
#define V8_ASSERT_SCOPE_H_
#include "allocation.h"
#include "platform.h"
namespace v8 {
namespace internal {
class Isolate;
enum PerThreadAssertType {
HEAP_ALLOCATION_ASSERT,
HANDLE_ALLOCATION_ASSERT,
HANDLE_DEREFERENCE_ASSERT,
DEFERRED_HANDLE_DEREFERENCE_ASSERT,
LAST_PER_THREAD_ASSERT_TYPE
};
#ifdef DEBUG
class PerThreadAssertData {
public:
PerThreadAssertData() : nesting_level_(0) {
for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
assert_states_[i] = true;
}
}
void set(PerThreadAssertType type, bool allow) {
assert_states_[type] = allow;
}
bool get(PerThreadAssertType type) const {
return assert_states_[type];
}
void increment_level() { ++nesting_level_; }
bool decrement_level() { return --nesting_level_ == 0; }
private:
bool assert_states_[LAST_PER_THREAD_ASSERT_TYPE];
int nesting_level_;
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
};
#endif // DEBUG
class PerThreadAssertScopeBase {
#ifdef DEBUG
protected:
PerThreadAssertScopeBase() {
data_ = AssertData();
data_->increment_level();
}
~PerThreadAssertScopeBase() {
if (!data_->decrement_level()) return;
for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
ASSERT(data_->get(static_cast<PerThreadAssertType>(i)));
}
delete data_;
Thread::SetThreadLocal(thread_local_key, NULL);
}
static PerThreadAssertData* AssertData() {
PerThreadAssertData* data = reinterpret_cast<PerThreadAssertData*>(
Thread::GetThreadLocal(thread_local_key));
if (data == NULL) {
data = new PerThreadAssertData();
Thread::SetThreadLocal(thread_local_key, data);
}
return data;
}
static Thread::LocalStorageKey thread_local_key;
PerThreadAssertData* data_;
friend class Isolate;
#endif // DEBUG
};
template <PerThreadAssertType type, bool allow>
class PerThreadAssertScope : public PerThreadAssertScopeBase {
public:
#ifndef DEBUG
PerThreadAssertScope() { }
static void SetIsAllowed(bool is_allowed) { }
#else
PerThreadAssertScope() {
old_state_ = data_->get(type);
data_->set(type, allow);
}
~PerThreadAssertScope() { data_->set(type, old_state_); }
static bool IsAllowed() { return AssertData()->get(type); }
private:
bool old_state_;
#endif
};
// Scope to document where we do not expect handles to be created.
typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>
DisallowHandleAllocation;
// Scope to introduce an exception to DisallowHandleAllocation.
typedef PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>
AllowHandleAllocation;
// Scope to document where we do not expect any allocation and GC.
typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>
DisallowHeapAllocation;
// Scope to introduce an exception to DisallowHeapAllocation.
typedef PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>
AllowHeapAllocation;
// Scope to document where we do not expect any handle dereferences.
typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>
DisallowHandleDereference;
// Scope to introduce an exception to DisallowHandleDereference.
typedef PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>
AllowHandleDereference;
// Scope to document where we do not expect deferred handles to be dereferenced.
typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
DisallowDeferredHandleDereference;
// Scope to introduce an exception to DisallowDeferredHandleDereference.
typedef PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
AllowDeferredHandleDereference;
} } // namespace v8::internal
#endif // V8_ASSERT_SCOPE_H_
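Typical usage of the new scopes, as a short sketch in which the function and its body are hypothetical and only the scope types come from this header: constructing a scope object flips the per-thread flag for the enclosing block, and a nested Allow* scope carves out an exception.

#include "assert-scope.h"

namespace v8 {
namespace internal {

void WalkRawHeapPointers(void* raw_object) {  // hypothetical function
  // While raw pointers into the heap are live, neither allocation (which
  // may trigger GC) nor handle dereference is allowed on this thread.
  DisallowHeapAllocation no_gc;
  DisallowHandleDereference no_deref;
  // ... inspect raw_object ...
  {
    // A nested exception: dereferences are fine again inside this block.
    AllowHandleDereference allow_deref;
  }
}

} }  // namespace v8::internal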

91
deps/v8/src/ast.cc

@@ -30,6 +30,7 @@
#include <cmath> // For isfinite.
#include "builtins.h"
#include "code-stubs.h"
#include "contexts.h"
#include "conversions.h"
#include "hashmap.h"
#include "parser.h"
@@ -181,9 +182,9 @@ LanguageMode FunctionLiteral::language_mode() const {
}
ObjectLiteral::Property::Property(Literal* key,
Expression* value,
Isolate* isolate) {
ObjectLiteralProperty::ObjectLiteralProperty(Literal* key,
Expression* value,
Isolate* isolate) {
emit_store_ = true;
key_ = key;
value_ = value;
@@ -201,7 +202,8 @@ ObjectLiteral::Property::Property(Literal* key,
}
ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
ObjectLiteralProperty::ObjectLiteralProperty(bool is_getter,
FunctionLiteral* value) {
emit_store_ = true;
value_ = value;
kind_ = is_getter ? GETTER : SETTER;
@@ -415,6 +417,16 @@ bool FunctionDeclaration::IsInlineable() const {
// ----------------------------------------------------------------------------
// Recording of type feedback
void ForInStatement::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
for_in_type_ = static_cast<ForInType>(oracle->ForInType(this));
}
void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
to_boolean_types_ = oracle->ToBooleanTypes(test_id());
}
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
Zone* zone) {
// Record type feedback from the oracle in the AST.
@@ -486,6 +498,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
store_mode_ = oracle->GetStoreMode(id);
type_ = oracle->IncrementType(this);
}
@@ -575,6 +588,32 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
}
Handle<JSObject> Call::GetPrototypeForPrimitiveCheck(
CheckType check, Isolate* isolate) {
v8::internal::Context* native_context = isolate->context()->native_context();
JSFunction* function = NULL;
switch (check) {
case RECEIVER_MAP_CHECK:
UNREACHABLE();
break;
case STRING_CHECK:
function = native_context->string_function();
break;
case SYMBOL_CHECK:
function = native_context->symbol_function();
break;
case NUMBER_CHECK:
function = native_context->number_function();
break;
case BOOLEAN_CHECK:
function = native_context->boolean_function();
break;
}
ASSERT(function != NULL);
return Handle<JSObject>(JSObject::cast(function->instance_prototype()));
}
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) {
is_monomorphic_ = oracle->CallIsMonomorphic(this);
@@ -606,8 +645,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
map = receiver_types_.at(0);
} else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK);
holder_ = Handle<JSObject>(
oracle->GetPrototypeForPrimitiveCheck(check_type_));
holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate());
map = Handle<Map>(holder_->map());
}
is_monomorphic_ = ComputeTarget(map, name);
@@ -617,10 +655,14 @@
void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
allocation_info_cell_ = oracle->GetCallNewAllocationInfoCell(this);
is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
elements_kind_ = oracle->GetCallNewElementsKind(this);
Object* value = allocation_info_cell_->value();
if (value->IsSmi()) {
elements_kind_ = static_cast<ElementsKind>(Smi::cast(value)->value());
}
}
}
@@ -632,6 +674,31 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
void UnaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
type_ = oracle->UnaryType(this);
}
void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
oracle->BinaryType(this, &left_type_, &right_type_, &result_type_,
&has_fixed_right_arg_, &fixed_right_arg_value_);
}
void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
oracle->CompareType(this, &left_type_, &right_type_, &overall_type_);
if (!overall_type_.IsUninitialized() && overall_type_.IsNonPrimitive() &&
(op_ == Token::EQ || op_ == Token::EQ_STRICT)) {
map_ = oracle->GetCompareMap(this);
} else {
// May be a compare to nil.
map_ = oracle->CompareNilMonomorphicReceiverType(this);
if (op_ != Token::EQ_STRICT)
compare_nil_types_ = oracle->CompareNilTypes(this);
}
}
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
@@ -723,12 +790,12 @@ Interval RegExpQuantifier::CaptureRegisters() {
bool RegExpAssertion::IsAnchoredAtStart() {
return type() == RegExpAssertion::START_OF_INPUT;
return assertion_type() == RegExpAssertion::START_OF_INPUT;
}
bool RegExpAssertion::IsAnchoredAtEnd() {
return type() == RegExpAssertion::END_OF_INPUT;
return assertion_type() == RegExpAssertion::END_OF_INPUT;
}
@@ -860,7 +927,7 @@ void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
switch (that->type()) {
switch (that->assertion_type()) {
case RegExpAssertion::START_OF_INPUT:
stream()->Add("@^i");
break;
@@ -1087,6 +1154,7 @@ DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForInStatement)
DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_CACHE_NODE(ModuleLiteral)
@@ -1115,6 +1183,7 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
Handle<String> Literal::ToString() {
if (handle_->IsString()) return Handle<String>::cast(handle_);
Factory* factory = Isolate::Current()->factory();
ASSERT(handle_->IsNumber());
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
@@ -1126,7 +1195,7 @@ Handle<String> Literal::ToString() {
} else {
str = DoubleToCString(handle_->Number(), buffer);
}
return FACTORY->NewStringFromAscii(CStrVector(str));
return factory->NewStringFromAscii(CStrVector(str));
}

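The new static Call::GetPrototypeForPrimitiveCheck pins down the mapping from a primitive check kind to the wrapper function in the native context, which RecordTypeFeedback previously fetched via the oracle. Its shape reduced to a toy, with strings standing in for the wrapper constructors:

#include <cassert>
#include <cstring>

enum CheckType { STRING_CHECK, SYMBOL_CHECK, NUMBER_CHECK, BOOLEAN_CHECK };

// Toy mapping: each primitive check selects the matching wrapper
// constructor; the holder is that function's instance prototype.
const char* WrapperForPrimitiveCheck(CheckType check) {
  switch (check) {
    case STRING_CHECK:  return "String";
    case SYMBOL_CHECK:  return "Symbol";
    case NUMBER_CHECK:  return "Number";
    case BOOLEAN_CHECK: return "Boolean";
  }
  return 0;  // RECEIVER_MAP_CHECK never reaches this helper
}

int main() {
  assert(std::strcmp(WrapperForPrimitiveCheck(NUMBER_CHECK), "Number") == 0);
  return 0;
}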
392
deps/v8/src/ast.h

@@ -39,6 +39,8 @@
#include "small-pointer-list.h"
#include "smart-pointers.h"
#include "token.h"
#include "type-info.h" // TODO(rossberg): this should eventually be removed
#include "types.h"
#include "utils.h"
#include "variables.h"
#include "interface.h"
@@ -88,6 +90,7 @@ namespace internal {
V(WhileStatement) \
V(ForStatement) \
V(ForInStatement) \
V(ForOfStatement) \
V(TryCatchStatement) \
V(TryFinallyStatement) \
V(DebuggerStatement)
@@ -162,9 +165,9 @@ typedef ZoneList<Handle<String> > ZoneStringList;
typedef ZoneList<Handle<Object> > ZoneObjectList;
#define DECLARE_NODE_TYPE(type) \
virtual void Accept(AstVisitor* v); \
virtual AstNode::Type node_type() const { return AstNode::k##type; } \
#define DECLARE_NODE_TYPE(type) \
virtual void Accept(AstVisitor* v); \
virtual AstNode::NodeType node_type() const { return AstNode::k##type; } \
template<class> friend class AstNodeFactory;
@@ -196,7 +199,7 @@ class AstProperties BASE_EMBEDDED {
class AstNode: public ZoneObject {
public:
#define DECLARE_TYPE_ENUM(type) k##type,
enum Type {
enum NodeType {
AST_NODE_LIST(DECLARE_TYPE_ENUM)
kInvalid = -1
};
@@ -211,7 +214,7 @@ class AstNode: public ZoneObject {
virtual ~AstNode() { }
virtual void Accept(AstVisitor* v) = 0;
virtual Type node_type() const = 0;
virtual NodeType node_type() const = 0;
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
@@ -353,6 +356,9 @@ class Expression: public AstNode {
// True iff the expression is the undefined literal.
bool IsUndefinedLiteral();
// Expression type
Handle<Type> type() { return type_; }
// Type feedback information for assignments and properties.
virtual bool IsMonomorphic() {
UNREACHABLE();
@@ -373,15 +379,23 @@ class Expression: public AstNode {
return STANDARD_STORE;
}
// TODO(rossberg): this should move to its own AST node eventually.
void RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle);
byte to_boolean_types() const { return to_boolean_types_; }
BailoutId id() const { return id_; }
TypeFeedbackId test_id() const { return test_id_; }
protected:
explicit Expression(Isolate* isolate)
: id_(GetNextId(isolate)),
: type_(Type::Any(), isolate),
id_(GetNextId(isolate)),
test_id_(GetNextId(isolate)) {}
private:
Handle<Type> type_;
byte to_boolean_types_;
const BailoutId id_;
const TypeFeedbackId test_id_;
};
@@ -389,7 +403,7 @@
class BreakableStatement: public Statement {
public:
enum Type {
enum BreakableType {
TARGET_FOR_ANONYMOUS,
TARGET_FOR_NAMED_ONLY
};
@@ -405,15 +419,18 @@ class BreakableStatement: public Statement {
Label* break_target() { return &break_target_; }
// Testers.
bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
bool is_target_for_anonymous() const {
return breakable_type_ == TARGET_FOR_ANONYMOUS;
}
BailoutId EntryId() const { return entry_id_; }
BailoutId ExitId() const { return exit_id_; }
protected:
BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
BreakableStatement(
Isolate* isolate, ZoneStringList* labels, BreakableType breakable_type)
: labels_(labels),
type_(type),
breakable_type_(breakable_type),
entry_id_(GetNextId(isolate)),
exit_id_(GetNextId(isolate)) {
ASSERT(labels == NULL || labels->length() > 0);
@@ -422,7 +439,7 @@ class BreakableStatement: public Statement {
private:
ZoneStringList* labels_;
Type type_;
BreakableType breakable_type_;
Label break_target_;
const BailoutId entry_id_;
const BailoutId exit_id_;
@@ -716,6 +733,7 @@ class IterationStatement: public BreakableStatement {
private:
Statement* body_;
Label continue_target_;
const BailoutId osr_entry_id_;
};
@@ -751,7 +769,9 @@ class DoWhileStatement: public IterationStatement {
private:
Expression* cond_;
int condition_position_;
const BailoutId continue_id_;
const BailoutId back_edge_id_;
};
@@ -788,8 +808,10 @@ class WhileStatement: public IterationStatement {
private:
Expression* cond_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
const BailoutId body_id_;
};
@@ -843,51 +865,142 @@ class ForStatement: public IterationStatement {
Statement* init_;
Expression* cond_;
Statement* next_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
const BailoutId continue_id_;
const BailoutId body_id_;
};
class ForInStatement: public IterationStatement {
class ForEachStatement: public IterationStatement {
public:
DECLARE_NODE_TYPE(ForInStatement)
enum VisitMode {
ENUMERATE, // for (each in subject) body;
ITERATE // for (each of subject) body;
};
void Initialize(Expression* each, Expression* enumerable, Statement* body) {
void Initialize(Expression* each, Expression* subject, Statement* body) {
IterationStatement::Initialize(body);
each_ = each;
enumerable_ = enumerable;
subject_ = subject;
}
Expression* each() const { return each_; }
Expression* enumerable() const { return enumerable_; }
Expression* subject() const { return subject_; }
virtual BailoutId ContinueId() const { return EntryId(); }
virtual BailoutId StackCheckId() const { return body_id_; }
BailoutId BodyId() const { return body_id_; }
BailoutId PrepareId() const { return prepare_id_; }
protected:
ForEachStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
each_(NULL),
subject_(NULL) {
}
private:
Expression* each_;
Expression* subject_;
};
class ForInStatement: public ForEachStatement {
public:
DECLARE_NODE_TYPE(ForInStatement)
Expression* enumerable() const {
return subject();
}
TypeFeedbackId ForInFeedbackId() const { return reuse(PrepareId()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
enum ForInType { FAST_FOR_IN, SLOW_FOR_IN };
ForInType for_in_type() const { return for_in_type_; }
BailoutId BodyId() const { return body_id_; }
BailoutId PrepareId() const { return prepare_id_; }
virtual BailoutId ContinueId() const { return EntryId(); }
virtual BailoutId StackCheckId() const { return body_id_; }
protected:
ForInStatement(Isolate* isolate, ZoneStringList* labels)
: IterationStatement(isolate, labels),
each_(NULL),
enumerable_(NULL),
: ForEachStatement(isolate, labels),
for_in_type_(SLOW_FOR_IN),
body_id_(GetNextId(isolate)),
prepare_id_(GetNextId(isolate)) {
}
private:
Expression* each_;
Expression* enumerable_;
ForInType for_in_type_;
const BailoutId body_id_;
const BailoutId prepare_id_;
};
class ForOfStatement: public ForEachStatement {
public:
DECLARE_NODE_TYPE(ForOfStatement)
void Initialize(Expression* each,
Expression* subject,
Statement* body,
Expression* assign_iterator,
Expression* next_result,
Expression* result_done,
Expression* assign_each) {
ForEachStatement::Initialize(each, subject, body);
assign_iterator_ = assign_iterator;
next_result_ = next_result;
result_done_ = result_done;
assign_each_ = assign_each;
}
Expression* iterable() const {
return subject();
}
// var iterator = iterable;
Expression* assign_iterator() const {
return assign_iterator_;
}
// var result = iterator.next();
Expression* next_result() const {
return next_result_;
}
// result.done
Expression* result_done() const {
return result_done_;
}
// each = result.value
Expression* assign_each() const {
return assign_each_;
}
virtual BailoutId ContinueId() const { return EntryId(); }
virtual BailoutId StackCheckId() const { return BackEdgeId(); }
BailoutId BackEdgeId() const { return back_edge_id_; }
protected:
ForOfStatement(Isolate* isolate, ZoneStringList* labels)
: ForEachStatement(isolate, labels),
assign_iterator_(NULL),
next_result_(NULL),
result_done_(NULL),
assign_each_(NULL),
back_edge_id_(GetNextId(isolate)) {
}
Expression* assign_iterator_;
Expression* next_result_;
Expression* result_done_;
Expression* assign_each_;
const BailoutId back_edge_id_;
};
class ExpressionStatement: public Statement {
public:
DECLARE_NODE_TYPE(ExpressionStatement)
@@ -1023,11 +1136,16 @@ class SwitchStatement: public BreakableStatement {
void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
tag_ = tag;
cases_ = cases;
switch_type_ = UNKNOWN_SWITCH;
}
Expression* tag() const { return tag_; }
ZoneList<CaseClause*>* cases() const { return cases_; }
enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH, GENERIC_SWITCH };
SwitchType switch_type() const { return switch_type_; }
void set_switch_type(SwitchType switch_type) { switch_type_ = switch_type; }
protected:
SwitchStatement(Isolate* isolate, ZoneStringList* labels)
: BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
@@ -1037,6 +1155,7 @@ class SwitchStatement: public BreakableStatement {
private:
Expression* tag_;
ZoneList<CaseClause*>* cases_;
SwitchType switch_type_;
};
@@ -1096,7 +1215,7 @@ class TargetCollector: public AstNode {
// Virtual behaviour. TargetCollectors are never part of the AST.
virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
virtual Type node_type() const { return kInvalid; }
virtual NodeType node_type() const { return kInvalid; }
virtual TargetCollector* AsTargetCollector() { return this; }
ZoneList<Label*>* targets() { return &targets_; }
@@ -1282,52 +1401,55 @@ class MaterializedLiteral: public Expression {
};
// Property is used for passing information
// about an object literal's properties from the parser
// to the code generator.
class ObjectLiteralProperty: public ZoneObject {
public:
enum Kind {
CONSTANT, // Property with constant value (compile time).
COMPUTED, // Property with computed value (execution time).
MATERIALIZED_LITERAL, // Property value is a materialized literal.
GETTER, SETTER, // Property is an accessor function.
PROTOTYPE // Property is __proto__.
};
ObjectLiteralProperty(Literal* key, Expression* value, Isolate* isolate);
Literal* key() { return key_; }
Expression* value() { return value_; }
Kind kind() { return kind_; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsMonomorphic() { return !receiver_type_.is_null(); }
Handle<Map> GetReceiverType() { return receiver_type_; }
bool IsCompileTimeValue();
void set_emit_store(bool emit_store);
bool emit_store();
protected:
template<class> friend class AstNodeFactory;
ObjectLiteralProperty(bool is_getter, FunctionLiteral* value);
void set_key(Literal* key) { key_ = key; }
private:
Literal* key_;
Expression* value_;
Kind kind_;
bool emit_store_;
Handle<Map> receiver_type_;
};
// An object literal has a boilerplate object that is used
// for minimizing the work when constructing it at runtime.
class ObjectLiteral: public MaterializedLiteral {
public:
// Property is used for passing information
// about an object literal's properties from the parser
// to the code generator.
class Property: public ZoneObject {
public:
enum Kind {
CONSTANT, // Property with constant value (compile time).
COMPUTED, // Property with computed value (execution time).
MATERIALIZED_LITERAL, // Property value is a materialized literal.
GETTER, SETTER, // Property is an accessor function.
PROTOTYPE // Property is __proto__.
};
Property(Literal* key, Expression* value, Isolate* isolate);
Literal* key() { return key_; }
Expression* value() { return value_; }
Kind kind() { return kind_; }
// Type feedback information.
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsMonomorphic() { return !receiver_type_.is_null(); }
Handle<Map> GetReceiverType() { return receiver_type_; }
bool IsCompileTimeValue();
void set_emit_store(bool emit_store);
bool emit_store();
protected:
template<class> friend class AstNodeFactory;
Property(bool is_getter, FunctionLiteral* value);
void set_key(Literal* key) { key_ = key; }
private:
Literal* key_;
Expression* value_;
Kind kind_;
bool emit_store_;
Handle<Map> receiver_type_;
};
typedef ObjectLiteralProperty Property;
DECLARE_NODE_TYPE(ObjectLiteral)
@@ -1590,6 +1712,11 @@ class Call: public Expression {
BailoutId ReturnId() const { return return_id_; }
// TODO(rossberg): this should really move somewhere else (and be merged with
// various similar methods in objects.cc), but for now...
static Handle<JSObject> GetPrototypeForPrimitiveCheck(
CheckType check, Isolate* isolate);
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
bool return_is_recorded_;
@@ -1636,10 +1763,13 @@ class CallNew: public Expression {
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual bool IsMonomorphic() { return is_monomorphic_; }
Handle<JSFunction> target() { return target_; }
Handle<JSFunction> target() const { return target_; }
ElementsKind elements_kind() const { return elements_kind_; }
Handle<JSGlobalPropertyCell> allocation_info_cell() const {
return allocation_info_cell_;
}
BailoutId ReturnId() const { return return_id_; }
ElementsKind elements_kind() const { return elements_kind_; }
protected:
CallNew(Isolate* isolate,
@@ -1651,8 +1781,8 @@ class CallNew: public Expression {
arguments_(arguments),
pos_(pos),
is_monomorphic_(false),
return_id_(GetNextId(isolate)),
elements_kind_(GetInitialFastElementsKind()) { }
elements_kind_(GetInitialFastElementsKind()),
return_id_(GetNextId(isolate)) { }
private:
Expression* expression_;
@@ -1661,9 +1791,10 @@ class CallNew: public Expression {
bool is_monomorphic_;
Handle<JSFunction> target_;
ElementsKind elements_kind_;
Handle<JSGlobalPropertyCell> allocation_info_cell_;
const BailoutId return_id_;
ElementsKind elements_kind_;
};
@@ -1713,6 +1844,8 @@ class UnaryOperation: public Expression {
BailoutId MaterializeFalseId() { return materialize_false_id_; }
TypeFeedbackId UnaryOperationFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
TypeInfo type() const { return type_; }
protected:
UnaryOperation(Isolate* isolate,
@@ -1733,6 +1866,8 @@ class UnaryOperation: public Expression {
Expression* expression_;
int pos_;
TypeInfo type_;
// For unary not (Token::NOT), the AST ids where true and false will
// actually be materialized, respectively.
const BailoutId materialize_true_id_;
@@ -1754,6 +1889,12 @@ class BinaryOperation: public Expression {
BailoutId RightId() const { return right_id_; }
TypeFeedbackId BinaryOperationFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
TypeInfo left_type() const { return left_type_; }
TypeInfo right_type() const { return right_type_; }
TypeInfo result_type() const { return result_type_; }
bool has_fixed_right_arg() const { return has_fixed_right_arg_; }
int fixed_right_arg_value() const { return fixed_right_arg_value_; }
protected:
BinaryOperation(Isolate* isolate,
@@ -1775,6 +1916,13 @@ class BinaryOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
TypeInfo left_type_;
TypeInfo right_type_;
TypeInfo result_type_;
bool has_fixed_right_arg_;
int fixed_right_arg_value_;
// The short-circuit logical operations need an AST ID for their
// right-hand subexpression.
const BailoutId right_id_;
@@ -1804,6 +1952,7 @@ class CountOperation: public Expression {
virtual KeyedAccessStoreMode GetStoreMode() {
return store_mode_;
}
TypeInfo type() const { return type_; }
BailoutId AssignmentId() const { return assignment_id_; }
@@ -1832,6 +1981,8 @@ class CountOperation: public Expression {
bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
TypeInfo type_;
Expression* expression_;
int pos_;
const BailoutId assignment_id_;
@@ -1851,6 +2002,12 @@ class CompareOperation: public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
TypeInfo left_type() const { return left_type_; }
TypeInfo right_type() const { return right_type_; }
TypeInfo overall_type() const { return overall_type_; }
byte compare_nil_types() const { return compare_nil_types_; }
Handle<Map> map() const { return map_; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -1876,6 +2033,12 @@ class CompareOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
TypeInfo left_type_;
TypeInfo right_type_;
TypeInfo overall_type_;
byte compare_nil_types_;
Handle<Map> map_;
};
@@ -2048,7 +2211,7 @@ class Throw: public Expression {
class FunctionLiteral: public Expression {
public:
enum Type {
enum FunctionType {
ANONYMOUS_EXPRESSION,
NAMED_EXPRESSION,
DECLARATION
@@ -2092,12 +2255,6 @@ class FunctionLiteral: public Expression {
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
int handler_count() { return handler_count_; }
bool has_only_simple_this_property_assignments() {
return HasOnlySimpleThisPropertyAssignments::decode(bitfield_);
}
Handle<FixedArray> this_property_assignments() {
return this_property_assignments_;
}
int parameter_count() { return parameter_count_; }
bool AllowsLazyCompilation();
@@ -2152,10 +2309,8 @@ class FunctionLiteral: public Expression {
int materialized_literal_count,
int expected_property_count,
int handler_count,
bool has_only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments,
int parameter_count,
Type type,
FunctionType function_type,
ParameterFlag has_duplicate_parameters,
IsFunctionFlag is_function,
IsParenthesizedFlag is_parenthesized,
@@ -2164,7 +2319,6 @@ class FunctionLiteral: public Expression {
name_(name),
scope_(scope),
body_(body),
this_property_assignments_(this_property_assignments),
inferred_name_(isolate->factory()->empty_string()),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
@@ -2172,10 +2326,8 @@ class FunctionLiteral: public Expression {
parameter_count_(parameter_count),
function_token_position_(RelocInfo::kNoPosition) {
bitfield_ =
HasOnlySimpleThisPropertyAssignments::encode(
has_only_simple_this_property_assignments) |
IsExpression::encode(type != DECLARATION) |
IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
IsExpression::encode(function_type != DECLARATION) |
IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters) |
IsFunction::encode(is_function) |
@@ -2187,7 +2339,6 @@ class FunctionLiteral: public Expression {
Handle<String> name_;
Scope* scope_;
ZoneList<Statement*>* body_;
Handle<FixedArray> this_property_assignments_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
@@ -2198,14 +2349,13 @@ class FunctionLiteral: public Expression {
int function_token_position_;
unsigned bitfield_;
class HasOnlySimpleThisPropertyAssignments: public BitField<bool, 0, 1> {};
class IsExpression: public BitField<bool, 1, 1> {};
class IsAnonymous: public BitField<bool, 2, 1> {};
class Pretenure: public BitField<bool, 3, 1> {};
class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
class IsParenthesized: public BitField<IsParenthesizedFlag, 6, 1> {};
class IsGenerator: public BitField<IsGeneratorFlag, 7, 1> {};
class IsExpression: public BitField<bool, 0, 1> {};
class IsAnonymous: public BitField<bool, 1, 1> {};
class Pretenure: public BitField<bool, 2, 1> {};
class HasDuplicateParameters: public BitField<ParameterFlag, 3, 1> {};
class IsFunction: public BitField<IsFunctionFlag, 4, 1> {};
class IsParenthesized: public BitField<IsParenthesizedFlag, 5, 1> {};
class IsGenerator: public BitField<IsGeneratorFlag, 6, 1> {};
};
@@ -2323,7 +2473,7 @@ class RegExpAlternative: public RegExpTree {
class RegExpAssertion: public RegExpTree {
public:
enum Type {
enum AssertionType {
START_OF_LINE,
START_OF_INPUT,
END_OF_LINE,
@@ -2331,7 +2481,7 @@ class RegExpAssertion: public RegExpTree {
BOUNDARY,
NON_BOUNDARY
};
explicit RegExpAssertion(Type type) : type_(type) { }
explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
virtual void* Accept(RegExpVisitor* visitor, void* data);
virtual RegExpNode* ToNode(RegExpCompiler* compiler,
RegExpNode* on_success);
@@ -2341,9 +2491,9 @@ class RegExpAssertion: public RegExpTree {
virtual bool IsAnchoredAtEnd();
virtual int min_match() { return 0; }
virtual int max_match() { return 0; }
Type type() { return type_; }
AssertionType assertion_type() { return assertion_type_; }
private:
Type type_;
AssertionType assertion_type_;
};
@@ -2456,13 +2606,13 @@ class RegExpText: public RegExpTree {
class RegExpQuantifier: public RegExpTree {
public:
enum Type { GREEDY, NON_GREEDY, POSSESSIVE };
RegExpQuantifier(int min, int max, Type type, RegExpTree* body)
enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
: body_(body),
min_(min),
max_(max),
min_match_(min * body->min_match()),
type_(type) {
quantifier_type_(type) {
if (max > 0 && body->max_match() > kInfinity / max) {
max_match_ = kInfinity;
} else {
@@ -2486,9 +2636,9 @@ class RegExpQuantifier: public RegExpTree {
virtual int max_match() { return max_match_; }
int min() { return min_; }
int max() { return max_; }
bool is_possessive() { return type_ == POSSESSIVE; }
bool is_non_greedy() { return type_ == NON_GREEDY; }
bool is_greedy() { return type_ == GREEDY; }
bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
bool is_non_greedy() { return quantifier_type_ == NON_GREEDY; }
bool is_greedy() { return quantifier_type_ == GREEDY; }
RegExpTree* body() { return body_; }
private:
@ -2497,7 +2647,7 @@ class RegExpQuantifier: public RegExpTree {
int max_;
int min_match_;
int max_match_;
Type type_;
QuantifierType quantifier_type_;
};
@ -2788,10 +2938,25 @@ class AstNodeFactory BASE_EMBEDDED {
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
STATEMENT_WITH_LABELS(ForStatement)
STATEMENT_WITH_LABELS(ForInStatement)
STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
ZoneStringList* labels) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels);
VISIT_AND_RETURN(ForInStatement, stmt);
}
case ForEachStatement::ITERATE: {
ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels);
VISIT_AND_RETURN(ForOfStatement, stmt);
}
}
UNREACHABLE();
return NULL;
}
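The new factory entry point folds for-in and for-of construction into one call that dispatches on VisitMode. A self-contained sketch of the same shape, with stand-in types rather than v8's AST classes:

#include <cstdio>
#include <cstdlib>

// One entry point, one subclass per visit mode. Names are illustrative.
struct ForEachStmt {
  enum VisitMode { ENUMERATE, ITERATE };  // for-in vs. for-of
  virtual const char* kind() const = 0;
  virtual ~ForEachStmt() {}
};
struct ForInStmt : ForEachStmt {
  const char* kind() const { return "for-in"; }
};
struct ForOfStmt : ForEachStmt {
  const char* kind() const { return "for-of"; }
};

ForEachStmt* NewForEach(ForEachStmt::VisitMode mode) {
  switch (mode) {
    case ForEachStmt::ENUMERATE: return new ForInStmt();
    case ForEachStmt::ITERATE:   return new ForOfStmt();
  }
  // Mirrors the UNREACHABLE(); return NULL; tail in the real factory:
  // the switch is exhaustive, so this only quiets the compiler.
  abort();
}

int main() {
  ForEachStmt* s = NewForEach(ForEachStmt::ITERATE);
  printf("%s\n", s->kind());  // prints "for-of"
  delete s;
  return 0;
}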
ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
VISIT_AND_RETURN(ModuleStatement, stmt)
@ -3028,19 +3193,16 @@ class AstNodeFactory BASE_EMBEDDED {
int materialized_literal_count,
int expected_property_count,
int handler_count,
bool has_only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments,
int parameter_count,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::Type type,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::IsFunctionFlag is_function,
FunctionLiteral::IsParenthesizedFlag is_parenthesized,
FunctionLiteral::IsGeneratorFlag is_generator) {
FunctionLiteral* lit = new(zone_) FunctionLiteral(
isolate_, name, scope, body,
materialized_literal_count, expected_property_count, handler_count,
has_only_simple_this_property_assignments, this_property_assignments,
parameter_count, type, has_duplicate_parameters, is_function,
parameter_count, function_type, has_duplicate_parameters, is_function,
is_parenthesized, is_generator);
// Top-level literal doesn't count for the AST's properties.
if (is_function == FunctionLiteral::kIsFunction) {

17
deps/v8/src/atomicops_internals_mips_gcc.h

@ -30,8 +30,6 @@
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace v8 {
namespace internal {
@ -111,9 +109,9 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
ATOMICOPS_COMPILER_BARRIER();
MemoryBarrier();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
MemoryBarrier();
return res;
}
@ -126,19 +124,16 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
return res;
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@ -176,6 +171,4 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
} } // namespace v8::internal
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
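The substance of the MIPS change above: ATOMICOPS_COMPILER_BARRIER() only stops the compiler from reordering, while MemoryBarrier() also stops the CPU, which is what acquire/release operations actually require on a weakly ordered core. A sketch of the intended semantics in C++11 atomics terms; this is illustrative, not v8's implementation, which uses inline assembly:

#include <atomic>
#include <cassert>

typedef int Atomic32;

Atomic32 Acquire_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                Atomic32 old_value, Atomic32 new_value) {
  // Acquire: later loads/stores cannot float above the swap. The patch
  // expresses this as a plain CAS followed by MemoryBarrier().
  ptr->compare_exchange_strong(old_value, new_value,
                               std::memory_order_acquire,
                               std::memory_order_acquire);
  return old_value;  // updated to the previously stored value on failure
}

Atomic32 Release_CompareAndSwap(std::atomic<Atomic32>* ptr,
                                Atomic32 old_value, Atomic32 new_value) {
  // Release: earlier loads/stores cannot sink below the swap. The patch
  // expresses this as MemoryBarrier() followed by a plain CAS.
  ptr->compare_exchange_strong(old_value, new_value,
                               std::memory_order_release,
                               std::memory_order_relaxed);
  return old_value;
}

int main() {
  std::atomic<Atomic32> cell(1);
  assert(Acquire_CompareAndSwap(&cell, 1, 2) == 1);  // swapped, returns old
  assert(Release_CompareAndSwap(&cell, 7, 9) == 2);  // mismatch, no swap
  return 0;
}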

55
deps/v8/src/bootstrapper.cc

@ -1086,11 +1086,13 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->length_string(),
factory->undefined_value(), DONT_ENUM));
factory->undefined_value(), DONT_ENUM,
Object::FORCE_TAGGED));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->callee_string(),
factory->undefined_value(), DONT_ENUM));
factory->undefined_value(), DONT_ENUM,
Object::FORCE_TAGGED));
#ifdef DEBUG
LookupResult lookup(isolate);
@ -1320,10 +1322,11 @@ void Genesis::InitializeExperimentalGlobal() {
if (FLAG_harmony_array_buffer) {
// -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun =
InstallFunction(global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
InstallFunction(
global, "ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSizeWithInternalFields,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true, true);
native_context()->set_array_buffer_fun(*array_buffer_fun);
}
@ -1574,6 +1577,11 @@ void Genesis::InstallExperimentalNativeFunctions() {
}
if (FLAG_harmony_observation) {
INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
INSTALL_NATIVE(JSFunction, "EnqueueSpliceRecord", observers_enqueue_splice);
INSTALL_NATIVE(JSFunction, "BeginPerformSplice",
observers_begin_perform_splice);
INSTALL_NATIVE(JSFunction, "EndPerformSplice",
observers_end_perform_splice);
INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
observers_deliver_changes);
}
@ -1604,19 +1612,23 @@ Handle<JSFunction> Genesis::InstallInternalArray(
factory()->NewJSObject(isolate()->object_function(), TENURED);
SetPrototype(array_function, prototype);
array_function->shared()->set_construct_stub(
isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode));
if (FLAG_optimize_constructed_arrays) {
InternalArrayConstructorStub internal_array_constructor_stub(isolate());
Handle<Code> code = internal_array_constructor_stub.GetCode(isolate());
array_function->shared()->set_construct_stub(*code);
} else {
array_function->shared()->set_construct_stub(
isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode));
}
array_function->shared()->DontAdaptArguments();
MaybeObject* maybe_map = array_function->initial_map()->Copy();
Map* new_map;
if (!maybe_map->To(&new_map)) return Handle<JSFunction>::null();
new_map->set_elements_kind(elements_kind);
array_function->set_initial_map(new_map);
Handle<Map> original_map(array_function->initial_map());
Handle<Map> initial_map = factory()->CopyMap(original_map);
initial_map->set_elements_kind(elements_kind);
array_function->set_initial_map(*initial_map);
// Make "length" magic on instances.
Handle<Map> initial_map(array_function->initial_map());
Handle<DescriptorArray> array_descriptors(
factory()->NewDescriptorArray(0, 1));
DescriptorArray::WhitenessWitness witness(*array_descriptors);
@ -1870,14 +1882,11 @@ bool Genesis::InstallNatives() {
{
Handle<JSFunction> array_function =
InstallInternalArray(builtins, "InternalArray", FAST_HOLEY_ELEMENTS);
if (array_function.is_null()) return false;
native_context()->set_internal_array_function(*array_function);
}
{
Handle<JSFunction> array_function =
InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS);
if (array_function.is_null()) return false;
InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS);
}
if (FLAG_disable_native_files) {
@ -2129,7 +2138,8 @@ void Genesis::InstallJSFunctionResultCaches() {
#undef F
;
Handle<FixedArray> caches = FACTORY->NewFixedArray(kNumberOfCaches, TENURED);
Handle<FixedArray> caches =
factory()->NewFixedArray(kNumberOfCaches, TENURED);
int index = 0;
@ -2148,7 +2158,7 @@ void Genesis::InstallJSFunctionResultCaches() {
void Genesis::InitializeNormalizedMapCaches() {
Handle<FixedArray> array(
FACTORY->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
factory()->NewFixedArray(NormalizedMapCache::kEntries, TENURED));
native_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
}
@ -2508,14 +2518,13 @@ void Genesis::TransferIndexedProperties(Handle<JSObject> from,
// Cloning the elements array is sufficient.
Handle<FixedArray> from_elements =
Handle<FixedArray>(FixedArray::cast(from->elements()));
Handle<FixedArray> to_elements = FACTORY->CopyFixedArray(from_elements);
Handle<FixedArray> to_elements = factory()->CopyFixedArray(from_elements);
to->set_elements(*to_elements);
}
void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
HandleScope outer(isolate());
Factory* factory = isolate()->factory();
ASSERT(!from->IsJSArray());
ASSERT(!to->IsJSArray());
@ -2525,7 +2534,7 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
// Transfer the prototype (new map is needed).
Handle<Map> old_to_map = Handle<Map>(to->map());
Handle<Map> new_to_map = factory->CopyMap(old_to_map);
Handle<Map> new_to_map = factory()->CopyMap(old_to_map);
new_to_map->set_prototype(from->map()->prototype());
to->set_map(*new_to_map);
}
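These bootstrapper changes consistently swap the FACTORY macro for the factory() accessor of an isolate the caller already holds. Assuming FACTORY expands to a lookup of the current isolate's factory through thread-local state (the macro's definition is not shown in this diff), the point is to avoid repeated TLS lookups and keep multi-isolate code explicit about which isolate it touches. A toy sketch of the pattern, with stand-in types:

#include <cassert>

struct Factory { int allocations; };

struct Isolate {
  Factory* factory() { return &factory_; }
  static Isolate* Current();  // imagine a thread-local lookup here
  Factory factory_;
};

static Isolate g_isolate;  // stand-in for per-thread isolate state
Isolate* Isolate::Current() { return &g_isolate; }

struct Genesis {
  explicit Genesis(Isolate* isolate) : isolate_(isolate) {}
  Factory* factory() { return isolate_->factory(); }  // explicit, no TLS
  Isolate* isolate_;
};

int main() {
  Genesis genesis(&g_isolate);
  // Same object either way, but the explicit path does not consult
  // thread-local state on every use.
  assert(genesis.factory() == Isolate::Current()->factory());
  return 0;
}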

9
deps/v8/src/bootstrapper.h

@ -65,13 +65,14 @@ class SourceCodeCache BASE_EMBEDDED {
}
void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
HandleScope scope(shared->GetIsolate());
Isolate* isolate = shared->GetIsolate();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
int length = cache_->length();
Handle<FixedArray> new_array =
FACTORY->NewFixedArray(length + 2, TENURED);
Handle<FixedArray> new_array = factory->NewFixedArray(length + 2, TENURED);
cache_->CopyTo(0, *new_array, 0, cache_->length());
cache_ = *new_array;
Handle<String> str = FACTORY->NewStringFromAscii(name, TENURED);
Handle<String> str = factory->NewStringFromAscii(name, TENURED);
cache_->set(length, *str);
cache_->set(length + 1, *shared);
Script::cast(shared->script())->set_type(Smi::FromInt(type_));

87
deps/v8/src/builtins.cc

@ -194,64 +194,6 @@ BUILTIN(EmptyFunction) {
}
RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
// If we get 2 arguments then they are the stub parameters (constructor, type
// info). If we get 3, then the first one is a pointer to the arguments
// passed by the caller.
Arguments empty_args(0, NULL);
bool no_caller_args = args.length() == 2;
ASSERT(no_caller_args || args.length() == 3);
int parameters_start = no_caller_args ? 0 : 1;
Arguments* caller_args = no_caller_args
? &empty_args
: reinterpret_cast<Arguments*>(args[0]);
Handle<JSFunction> constructor = args.at<JSFunction>(parameters_start);
Handle<Object> type_info = args.at<Object>(parameters_start + 1);
bool holey = false;
if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) {
int value = Smi::cast((*caller_args)[0])->value();
holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray);
}
JSArray* array;
MaybeObject* maybe_array;
if (*type_info != isolate->heap()->undefined_value() &&
JSGlobalPropertyCell::cast(*type_info)->value()->IsSmi()) {
JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
Smi* smi = Smi::cast(cell->value());
ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
if (holey && !IsFastHoleyElementsKind(to_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
// Update the allocation site info to reflect the advice alteration.
cell->set_value(Smi::FromInt(to_kind));
}
maybe_array = isolate->heap()->AllocateJSObjectWithAllocationSite(
*constructor, type_info);
if (!maybe_array->To(&array)) return maybe_array;
} else {
ElementsKind kind = constructor->initial_map()->elements_kind();
ASSERT(kind == GetInitialFastElementsKind());
maybe_array = isolate->heap()->AllocateJSObject(*constructor);
if (!maybe_array->To(&array)) return maybe_array;
// We might need to transition to holey
if (holey) {
kind = GetHoleyElementsKind(kind);
maybe_array = array->TransitionElementsKind(kind);
if (maybe_array->IsFailure()) return maybe_array;
}
}
maybe_array = isolate->heap()->AllocateJSArrayStorage(array, 0, 0,
DONT_INITIALIZE_ARRAY_ELEMENTS);
if (maybe_array->IsFailure()) return maybe_array;
maybe_array = ArrayConstructInitializeElements(array, caller_args);
if (maybe_array->IsFailure()) return maybe_array;
return array;
}
static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
Isolate* isolate,
JSFunction* constructor) {
@ -563,7 +505,7 @@ BUILTIN(ArrayPush) {
}
// Add the provided values.
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int index = 0; index < to_add; index++) {
elms->set(index + len, args[index + 1], mode);
@ -612,7 +554,7 @@ BUILTIN(ArrayPush) {
}
// Add the provided values.
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
int index;
for (index = 0; index < to_add; index++) {
Object* arg = args[index + 1];
@ -695,7 +637,7 @@ BUILTIN(ArrayShift) {
// Shift the elements.
if (elms_obj->IsFixedArray()) {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
heap->MoveElements(elms, 0, 1, len - 1);
elms->set(len - 1, heap->the_hole_value());
} else {
@ -762,12 +704,12 @@ BUILTIN(ArrayUnshift) {
elms = new_elms;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
heap->MoveElements(elms, to_add, 0, len);
}
// Add the provided values.
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int i = 0; i < to_add; i++) {
elms->set(i, args[i + 1], mode);
@ -898,7 +840,7 @@ BUILTIN(ArraySlice) {
result_len,
result_len);
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
if (result_len == 0) return maybe_array;
if (!maybe_array->To(&result_array)) return maybe_array;
@ -1000,7 +942,7 @@ BUILTIN(ArraySplice) {
if (!maybe_array->To(&result_array)) return maybe_array;
if (actual_delete_count > 0) {
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure = accessor->CopyElements(
NULL, actual_start, elements_kind, result_array->elements(),
@ -1025,7 +967,7 @@ BUILTIN(ArraySplice) {
MoveDoubleElements(elms, delta, elms, 0, actual_start);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
heap->MoveElements(elms, delta, 0, actual_start);
}
@ -1041,7 +983,7 @@ BUILTIN(ArraySplice) {
FillWithHoles(elms, new_length, len);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
heap->MoveElements(elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
@ -1062,7 +1004,7 @@ BUILTIN(ArraySplice) {
MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->To(&new_elms)) return maybe_obj;
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
@ -1083,7 +1025,7 @@ BUILTIN(ArraySplice) {
elms_obj = new_elms;
elms_changed = true;
} else {
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
heap->MoveElements(elms, actual_start + item_count,
actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
@ -1102,7 +1044,7 @@ BUILTIN(ArraySplice) {
}
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
elms->set(k, args[3 + k - actual_start], mode);
@ -1466,6 +1408,11 @@ static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
}
static void Generate_LoadIC_Slow(MacroAssembler* masm) {
LoadIC::GenerateRuntimeGetProperty(masm);
}
static void Generate_KeyedLoadIC_Initialize(MacroAssembler* masm) {
KeyedLoadIC::GenerateInitialize(masm);
}
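The AssertNoAllocation to DisallowHeapAllocation renames throughout builtins.cc point at the new scope-guard types in src/assert-scope.h, also touched by this commit. A minimal sketch of the idiom with simplified stand-ins (the real guards are per-isolate, not a global flag): the guard flips a flag for its lexical scope, and the allocator asserts on it in debug builds.

#include <cassert>
#include <cstddef>
#include <new>

struct Heap {
  static bool allocation_disallowed;
  static void* Allocate(std::size_t bytes) {
    assert(!allocation_disallowed && "allocation inside a no-GC scope");
    return ::operator new(bytes);
  }
};
bool Heap::allocation_disallowed = false;

class DisallowHeapAllocation {
 public:
  DisallowHeapAllocation() : prev_(Heap::allocation_disallowed) {
    Heap::allocation_disallowed = true;
  }
  ~DisallowHeapAllocation() { Heap::allocation_disallowed = prev_; }
 private:
  bool prev_;  // restored on scope exit so guards nest cleanly
};

int main() {
  void* p = Heap::Allocate(16);  // fine outside a guard
  {
    DisallowHeapAllocation no_gc;
    // Heap::Allocate(16) here would trip the assert in a debug build.
  }
  ::operator delete(p);
  return 0;
}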

2
deps/v8/src/builtins.h

@ -143,6 +143,8 @@ enum BuiltinExtraArguments {
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Slow, LOAD_IC, GENERIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \

2
deps/v8/src/checks.cc

@ -36,6 +36,8 @@ static int fatal_error_handler_nesting_depth = 0;
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
i::AllowHandleDereference allow_deref;
i::AllowDeferredHandleDereference allow_deferred_deref;
fflush(stdout);
fflush(stderr);
fatal_error_handler_nesting_depth++;

344
deps/v8/src/code-stubs-hydrogen.cc

@ -36,10 +36,9 @@ namespace internal {
static LChunk* OptimizeGraph(HGraph* graph) {
Isolate* isolate = graph->isolate();
AssertNoAllocation no_gc;
NoHandleAllocation no_handles(isolate);
HandleDereferenceGuard no_deref(isolate, HandleDereferenceGuard::DISALLOW);
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
ASSERT(graph != NULL);
SmartArrayPointer<char> bailout_reason;
@ -100,7 +99,23 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
IfBuilder checker_;
};
enum ArgumentClass {
NONE,
SINGLE,
MULTIPLE
};
HValue* BuildArrayConstructor(ElementsKind kind,
bool disable_allocation_sites,
ArgumentClass argument_class);
HValue* BuildInternalArrayConstructor(ElementsKind kind,
ArgumentClass argument_class);
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
ElementsKind kind);
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
CompilationInfoWithZone info_;
@ -148,7 +163,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
HParameter::REGISTER_PARAMETER,
Representation::Integer32());
stack_parameter_count->set_type(HType::Smi());
// it's essential to bind this value to the environment in case of deopt
// It's essential to bind this value to the environment in case of deopt.
AddInstruction(stack_parameter_count);
start_environment->Bind(param_count, stack_parameter_count);
arguments_length_ = stack_parameter_count;
@ -169,7 +184,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
HValue* return_value = BuildCodeStub();
// We might have extra expressions to pop from the stack in addition to the
// arguments above
// arguments above.
HInstruction* stack_pop_count = stack_parameter_count;
if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) {
if (!stack_parameter_count->IsConstant() &&
@ -186,11 +201,12 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
}
}
if (!current_block()->IsFinished()) {
if (current_block() != NULL) {
HReturn* hreturn_instruction = new(zone) HReturn(return_value,
context_,
stack_pop_count);
current_block()->Finish(hreturn_instruction);
set_current_block(NULL);
}
return true;
}
@ -204,10 +220,10 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
protected:
virtual HValue* BuildCodeStub() {
if (casted_stub()->IsMiss()) {
return BuildCodeInitializedStub();
} else {
if (casted_stub()->IsUninitialized()) {
return BuildCodeUninitializedStub();
} else {
return BuildCodeInitializedStub();
}
}
@ -276,16 +292,17 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
if (descriptor->register_param_count_ < 0) {
stub->InitializeInterfaceDescriptor(isolate, descriptor);
}
// The miss case without stack parameters can use a light-weight stub to enter
// If we are uninitialized we can use a light-weight stub to enter
// the runtime, which is significantly faster than the standard
// stub-failure deopt mechanism.
if (stub->IsMiss() && descriptor->stack_parameter_count_ == NULL) {
if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
ASSERT(descriptor->stack_parameter_count_ == NULL);
return stub->GenerateLightweightMissCode(isolate);
} else {
CodeStubGraphBuilder<Stub> builder(stub);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen();
}
CodeStubGraphBuilder<Stub> builder(stub);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen();
}
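The rewritten branch reads: an uninitialized stub that has a registered miss handler never needs a hydrogen graph; it gets a small body that just calls into the runtime. A stripped-down sketch of that decision, stand-in types only:

#include <cstdio>

struct Descriptor { bool has_miss_handler; };
struct Stub { bool is_uninitialized; };

const char* Generate(const Stub& stub, const Descriptor& desc) {
  if (stub.is_uninitialized && desc.has_miss_handler) {
    // Cheap path: no graph building, just a trampoline to the handler.
    return "lightweight miss code";
  }
  // Full path: build the hydrogen graph, optimize, emit code.
  return "optimized stub code";
}

int main() {
  Stub stub = { true };
  Descriptor desc = { true };
  printf("%s\n", Generate(stub, desc));  // prints "lightweight miss code"
  return 0;
}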
@ -358,7 +375,6 @@ Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
HValue* undefined = graph()->GetConstantUndefined();
HInstruction* boilerplate =
@ -383,24 +399,17 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* size_in_bytes =
AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
if (FLAG_pretenure_literals) {
if (isolate()->heap()->ShouldGloballyPretenure()) {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
}
HInstruction* object =
AddInstruction(new(zone) HAllocate(context(),
size_in_bytes,
HType::JSObject(),
flags));
HInstruction* object = AddInstruction(new(zone)
HAllocate(context(), size_in_bytes, HType::JSObject(), flags));
for (int i = 0; i < size; i += kPointerSize) {
HInstruction* value =
AddInstruction(new(zone) HLoadNamedField(
boilerplate, true, Representation::Tagged(), i));
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
value, true,
Representation::Tagged(), i));
HObjectAccess access = HObjectAccess::ForJSObjectOffset(i);
AddStore(object, access, AddLoad(boilerplate, access));
}
checker.ElseDeopt();
@ -418,7 +427,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
HInstruction* load = BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), NULL, NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
false, NEVER_RETURN_HOLE, STANDARD_STORE, Representation::Tagged());
false, NEVER_RETURN_HOLE, STANDARD_STORE);
return load;
}
@ -430,11 +439,11 @@ Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
template<>
HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
Representation representation = casted_stub()->representation();
HInstruction* load = AddInstruction(DoBuildLoadNamedField(
GetParameter(0), casted_stub()->is_inobject(),
representation, casted_stub()->offset()));
return load;
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset()) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset());
return AddInstruction(BuildLoadNamedField(GetParameter(0), access,
casted_stub()->representation()));
}
@ -445,11 +454,11 @@ Handle<Code> LoadFieldStub::GenerateCode() {
template<>
HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() {
Representation representation = casted_stub()->representation();
HInstruction* load = AddInstruction(DoBuildLoadNamedField(
GetParameter(0), casted_stub()->is_inobject(),
representation, casted_stub()->offset()));
return load;
HObjectAccess access = casted_stub()->is_inobject() ?
HObjectAccess::ForJSObjectOffset(casted_stub()->offset()) :
HObjectAccess::ForBackingStoreOffset(casted_stub()->offset());
return AddInstruction(BuildLoadNamedField(GetParameter(0), access,
casted_stub()->representation()));
}
@ -463,8 +472,7 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
BuildUncheckedMonomorphicElementAccess(
GetParameter(0), GetParameter(1), GetParameter(2), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
true, NEVER_RETURN_HOLE, casted_stub()->store_mode(),
Representation::Tagged());
true, NEVER_RETURN_HOLE, casted_stub()->store_mode());
return GetParameter(2);
}
@ -487,8 +495,8 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
AddInstruction(new(zone) HTrapAllocationMemento(js_array));
HInstruction* array_length =
AddInstruction(HLoadNamedField::NewArrayLength(
zone, js_array, js_array, HType::Smi()));
AddLoad(js_array, HObjectAccess::ForArrayLength());
array_length->set_type(HType::Smi());
ElementsKind to_kind = casted_stub()->to_kind();
BuildNewSpaceArrayCheck(array_length, to_kind);
@ -507,27 +515,19 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
HInstruction* elements_length =
AddInstruction(new(zone) HFixedArrayBaseLength(elements));
HValue* new_elements =
BuildAllocateAndInitializeElements(context(), to_kind, elements_length);
HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
context(), to_kind, elements_length);
BuildCopyElements(context(), elements,
casted_stub()->from_kind(), new_elements,
to_kind, array_length, elements_length);
Factory* factory = isolate()->factory();
AddInstruction(new(zone) HStoreNamedField(js_array,
factory->elements_field_string(),
new_elements, true,
Representation::Tagged(),
JSArray::kElementsOffset));
AddStore(js_array, HObjectAccess::ForElementsPointer(), new_elements);
if_builder.End();
AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
map, true,
Representation::Tagged(),
JSArray::kMapOffset));
AddStore(js_array, HObjectAccess::ForMap(), map);
return js_array;
}
@ -536,40 +536,56 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
// ----------- S t a t e -------------
// -- Parameter 1 : type info cell
// -- Parameter 0 : constructor
// -----------------------------------
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
ElementsKind kind, bool disable_allocation_sites,
ArgumentClass argument_class) {
HValue* constructor = GetParameter(ArrayConstructorStubBase::kConstructor);
HValue* property_cell = GetParameter(ArrayConstructorStubBase::kPropertyCell);
HInstruction* array_function = BuildGetArrayFunction(context());
ArrayContextChecker(this,
GetParameter(ArrayConstructorStubBase::kConstructor),
array_function);
// Get the right map
// Should be a constant
JSArrayBuilder array_builder(
this,
casted_stub()->elements_kind(),
GetParameter(ArrayConstructorStubBase::kPropertyCell),
casted_stub()->mode());
return array_builder.AllocateEmptyArray();
ArrayContextChecker(this, constructor, array_function);
JSArrayBuilder array_builder(this, kind, property_cell,
disable_allocation_sites);
HValue* result = NULL;
switch (argument_class) {
case NONE:
result = array_builder.AllocateEmptyArray();
break;
case SINGLE:
result = BuildArraySingleArgumentConstructor(&array_builder);
break;
case MULTIPLE:
result = BuildArrayNArgumentsConstructor(&array_builder, kind);
break;
}
return result;
}
Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
HValue* CodeStubGraphBuilderBase::BuildInternalArrayConstructor(
ElementsKind kind, ArgumentClass argument_class) {
HValue* constructor = GetParameter(
InternalArrayConstructorStubBase::kConstructor);
JSArrayBuilder array_builder(this, kind, constructor);
HValue* result = NULL;
switch (argument_class) {
case NONE:
result = array_builder.AllocateEmptyArray();
break;
case SINGLE:
result = BuildArraySingleArgumentConstructor(&array_builder);
break;
case MULTIPLE:
result = BuildArrayNArgumentsConstructor(&array_builder, kind);
break;
}
return result;
}
template <>
HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
BuildCodeStub() {
HInstruction* array_function = BuildGetArrayFunction(context());
ArrayContextChecker(this,
GetParameter(ArrayConstructorStubBase::kConstructor),
array_function);
HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(
JSArrayBuilder* array_builder) {
// Smi check and range check on the input arg.
HValue* constant_one = graph()->GetConstant1();
HValue* constant_zero = graph()->GetConstant0();
@ -580,19 +596,13 @@ HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero));
HConstant* max_alloc_length =
new(zone()) HConstant(JSObject::kInitialMaxFastElementArray,
Representation::Tagged());
new(zone()) HConstant(JSObject::kInitialMaxFastElementArray);
AddInstruction(max_alloc_length);
const int initial_capacity = JSArray::kPreallocatedArrayElements;
HConstant* initial_capacity_node =
new(zone()) HConstant(initial_capacity, Representation::Tagged());
HConstant* initial_capacity_node = new(zone()) HConstant(initial_capacity);
AddInstruction(initial_capacity_node);
// Since we're forcing Integer32 representation for this HBoundsCheck,
// there's no need to Smi-check the index.
HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length,
ALLOW_SMI_KEY,
Representation::Tagged());
HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length);
IfBuilder if_builder(this);
if_builder.IfCompare(checked_arg, constant_zero, Token::EQ);
if_builder.Then();
@ -606,46 +616,23 @@ HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
// Figure out total size
HValue* length = Pop();
HValue* capacity = Pop();
JSArrayBuilder array_builder(
this,
casted_stub()->elements_kind(),
GetParameter(ArrayConstructorStubBase::kPropertyCell),
casted_stub()->mode());
return array_builder.AllocateArray(capacity, length, true);
}
Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
return array_builder->AllocateArray(capacity, length, true);
}
template <>
HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
HInstruction* array_function = BuildGetArrayFunction(context());
ArrayContextChecker(this,
GetParameter(ArrayConstructorStubBase::kConstructor),
array_function);
ElementsKind kind = casted_stub()->elements_kind();
HValue* length = GetArgumentsLength();
JSArrayBuilder array_builder(
this,
kind,
GetParameter(ArrayConstructorStubBase::kPropertyCell),
casted_stub()->mode());
HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
JSArrayBuilder* array_builder, ElementsKind kind) {
// In the multi-argument case a smi array needs to be filled with the
// hole, because we may have to bail out mid-copy when an argument is
// not compatible with a smi array. A double array is no problem, and
// neither is a fast (tagged) array, since doubles are boxed there.
HValue* length = GetArgumentsLength();
bool fill_with_hole = IsFastSmiElementsKind(kind);
HValue* new_object = array_builder.AllocateArray(length,
length,
fill_with_hole);
HValue* elements = array_builder.GetElementsLocation();
HValue* new_object = array_builder->AllocateArray(length,
length,
fill_with_hole);
HValue* elements = array_builder->GetElementsLocation();
ASSERT(elements != NULL);
// Now populate the elements correctly.
@ -659,39 +646,108 @@ HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt(
argument_elements, length, key));
// Checks to prevent incompatible stores
if (IsFastSmiElementsKind(kind)) {
AddInstruction(new(zone()) HCheckSmi(argument));
}
AddInstruction(new(zone()) HStoreKeyed(elements, key, argument, kind));
builder.EndBody();
return new_object;
}
template <>
HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
return BuildArrayConstructor(kind, disable_allocation_sites, NONE);
}
Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::
BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
return BuildArrayConstructor(kind, disable_allocation_sites, SINGLE);
}
Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
bool disable_allocation_sites = casted_stub()->disable_allocation_sites();
return BuildArrayConstructor(kind, disable_allocation_sites, MULTIPLE);
}
Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeUninitializedStub() {
HValue* CodeStubGraphBuilder<InternalArrayNoArgumentConstructorStub>::
BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
return BuildInternalArrayConstructor(kind, NONE);
}
Handle<Code> InternalArrayNoArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<InternalArraySingleArgumentConstructorStub>::
BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
return BuildInternalArrayConstructor(kind, SINGLE);
}
Handle<Code> InternalArraySingleArgumentConstructorStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<InternalArrayNArgumentsConstructorStub>::
BuildCodeStub() {
ElementsKind kind = casted_stub()->elements_kind();
return BuildInternalArrayConstructor(kind, MULTIPLE);
}
Handle<Code> InternalArrayNArgumentsConstructorStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
CompareNilICStub* stub = casted_stub();
HIfContinuation continuation;
Handle<Map> sentinel_map(graph()->isolate()->heap()->meta_map());
BuildCompareNil(GetParameter(0), stub->GetKind(),
BuildCompareNil(GetParameter(0),
stub->GetTypes(), sentinel_map,
RelocInfo::kNoPosition, &continuation);
IfBuilder if_nil(this, &continuation);
if_nil.Then();
if (continuation.IsFalseReachable()) {
if_nil.Else();
if_nil.Return(graph()->GetConstantSmi0());
if_nil.Return(graph()->GetConstant0());
}
if_nil.End();
return continuation.IsTrueReachable()
? graph()->GetConstantSmi1()
? graph()->GetConstant1()
: graph()->GetConstantUndefined();
}
@ -700,4 +756,24 @@ Handle<Code> CompareNilICStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
ToBooleanStub* stub = casted_stub();
IfBuilder if_true(this);
if_true.If<HBranch>(GetParameter(0), stub->GetTypes());
if_true.Then();
if_true.Return(graph()->GetConstant1());
if_true.Else();
if_true.End();
return graph()->GetConstant0();
}
Handle<Code> ToBooleanStub::GenerateCode() {
return DoGenerateCode(this);
}
} } // namespace v8::internal

103
deps/v8/src/code-stubs.cc

@ -45,7 +45,8 @@ CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
function_mode_(NOT_JS_FUNCTION_STUB_MODE),
register_params_(NULL),
deoptimization_handler_(NULL),
miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()) { }
miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()),
has_miss_handler_(false) { }
bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
@ -304,6 +305,27 @@ void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
}
InlineCacheState ICCompareStub::GetICState() {
CompareIC::State state = Max(left_, right_);
switch (state) {
case CompareIC::UNINITIALIZED:
return ::v8::internal::UNINITIALIZED;
case CompareIC::SMI:
case CompareIC::NUMBER:
case CompareIC::INTERNALIZED_STRING:
case CompareIC::STRING:
case CompareIC::UNIQUE_NAME:
case CompareIC::OBJECT:
case CompareIC::KNOWN_OBJECT:
return MONOMORPHIC;
case CompareIC::GENERIC:
return ::v8::internal::GENERIC;
}
UNREACHABLE();
return ::v8::internal::UNINITIALIZED;
}
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
@ -410,36 +432,44 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
void CompareNilICStub::Record(Handle<Object> object) {
ASSERT(types_ != Types::FullCompare());
if (equality_kind_ == kStrictEquality) {
// When testing for strict equality only one value will evaluate to true
types_.RemoveAll();
types_.Add((nil_value_ == kNullValue) ? NULL_TYPE:
UNDEFINED);
if (object->IsNull()) {
types_.Add(NULL_TYPE);
} else if (object->IsUndefined()) {
types_.Add(UNDEFINED);
} else if (object->IsUndetectableObject() ||
object->IsOddball() ||
!object->IsHeapObject()) {
types_ = Types::FullCompare();
} else if (IsMonomorphic()) {
types_ = Types::FullCompare();
} else {
if (object->IsNull()) {
types_.Add(NULL_TYPE);
} else if (object->IsUndefined()) {
types_.Add(UNDEFINED);
} else if (object->IsUndetectableObject() ||
object->IsOddball() ||
!object->IsHeapObject()) {
types_ = Types::FullCompare();
} else if (IsMonomorphic()) {
types_ = Types::FullCompare();
} else {
types_.Add(MONOMORPHIC_MAP);
}
types_.Add(MONOMORPHIC_MAP);
}
}
void CompareNilICStub::Types::TraceTransition(Types to) const {
#ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
static_cast<unsigned>(sizeof(buffer)));
StringStream stream(&allocator);
stream.Add("[CompareNilIC : ");
Print(&stream);
stream.Add("=>");
to.Print(&stream);
stream.Add("]\n");
stream.OutputToStdOut();
#endif
}
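TraceTransition formats into a fixed stack buffer through NoAllocationStringAllocator, so IC tracing stays safe in contexts where heap allocation is off limits. A simplified, self-contained version of that fixed-buffer stream idiom (not v8's actual StringStream API):

#include <cstddef>
#include <cstdio>
#include <cstring>

// Format into a caller-owned buffer, truncating instead of allocating.
class FixedBufferStream {
 public:
  FixedBufferStream(char* buf, std::size_t cap)
      : buf_(buf), cap_(cap), len_(0) {
    if (cap_ != 0) buf_[0] = '\0';
  }
  void Add(const char* s) {
    if (cap_ == 0) return;
    std::size_t n = std::strlen(s);
    if (len_ + n >= cap_) n = cap_ - len_ - 1;  // truncate, never overflow
    std::memcpy(buf_ + len_, s, n);
    len_ += n;
    buf_[len_] = '\0';
  }
  void OutputToStdOut() const { std::fputs(buf_, stdout); }
 private:
  char* buf_;
  std::size_t cap_;
  std::size_t len_;
};

int main() {
  char buffer[100];  // same capacity the tracing code above uses
  FixedBufferStream stream(buffer, sizeof(buffer));
  stream.Add("[CompareNilIC : ");
  stream.Add("(Null)");
  stream.Add("=>");
  stream.Add("(Null|MonomorphicMap)");
  stream.Add("]\n");
  stream.OutputToStdOut();
  return 0;
}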
void CompareNilICStub::PrintName(StringStream* stream) {
stream->Add("CompareNilICStub_");
types_.Print(stream);
stream->Add((nil_value_ == kNullValue) ? "(NullValue|":
"(UndefinedValue|");
stream->Add((equality_kind_ == kStrictEquality) ? "StrictEquality)":
"NonStrictEquality)");
}
@ -554,6 +584,14 @@ void CallConstructStub::PrintName(StringStream* stream) {
}
bool ToBooleanStub::Record(Handle<Object> object) {
Types old_types(types_);
bool to_boolean_value = types_.Record(object);
old_types.TraceTransition(types_);
return to_boolean_value;
}
void ToBooleanStub::PrintName(StringStream* stream) {
stream->Add("ToBooleanStub_");
types_.Print(stream);
@ -577,17 +615,19 @@ void ToBooleanStub::Types::Print(StringStream* stream) const {
void ToBooleanStub::Types::TraceTransition(Types to) const {
#ifdef DEBUG
if (!FLAG_trace_ic) return;
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
static_cast<unsigned>(sizeof(buffer)));
StringStream stream(&allocator);
stream.Add("[ToBooleanIC (");
stream.Add("[ToBooleanIC : ");
Print(&stream);
stream.Add("->");
stream.Add("=>");
to.Print(&stream);
stream.Add(")]\n");
stream.Add("]\n");
stream.OutputToStdOut();
#endif
}
@ -749,4 +789,19 @@ ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
}
void InternalArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
InternalArrayNoArgumentConstructorStub stub1(FAST_ELEMENTS);
InstallDescriptor(isolate, &stub1);
InternalArraySingleArgumentConstructorStub stub2(FAST_ELEMENTS);
InstallDescriptor(isolate, &stub2);
InternalArrayNArgumentsConstructorStub stub3(FAST_ELEMENTS);
InstallDescriptor(isolate, &stub3);
}
InternalArrayConstructorStub::InternalArrayConstructorStub(
Isolate* isolate) {
InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
}
} } // namespace v8::internal

333
deps/v8/src/code-stubs.h

@ -77,6 +77,9 @@ namespace internal {
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
V(InternalArrayNArgumentsConstructor) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
V(NameDictionaryLookup) \
@ -85,6 +88,7 @@ namespace internal {
V(StoreArrayLiteralElement) \
V(StubFailureTrampoline) \
V(ArrayConstructor) \
V(InternalArrayConstructor) \
V(ProfileEntryHook) \
/* IC Handler stubs */ \
V(LoadField) \
@ -277,7 +281,6 @@ struct CodeStubInterfaceDescriptor {
StubFunctionMode function_mode_;
Register* register_params_;
Address deoptimization_handler_;
ExternalReference miss_handler_;
int environment_length() const {
if (stack_parameter_count_ != NULL) {
@ -287,6 +290,24 @@ struct CodeStubInterfaceDescriptor {
}
bool initialized() const { return register_param_count_ >= 0; }
void SetMissHandler(ExternalReference handler) {
miss_handler_ = handler;
has_miss_handler_ = true;
}
ExternalReference miss_handler() {
ASSERT(has_miss_handler_);
return miss_handler_;
}
bool has_miss_handler() {
return has_miss_handler_;
}
private:
ExternalReference miss_handler_;
bool has_miss_handler_;
};
// A helper to make up for the fact that type Register is not fully
@ -300,12 +321,12 @@ struct CodeStubInterfaceDescriptor {
class HydrogenCodeStub : public CodeStub {
public:
enum InitializationState {
CODE_STUB_IS_NOT_MISS,
CODE_STUB_IS_MISS
UNINITIALIZED,
INITIALIZED
};
explicit HydrogenCodeStub(InitializationState state) {
is_miss_ = (state == CODE_STUB_IS_MISS);
explicit HydrogenCodeStub(InitializationState state = INITIALIZED) {
is_uninitialized_ = (state == UNINITIALIZED);
}
virtual Code::Kind GetCodeKind() const { return Code::STUB; }
@ -314,7 +335,7 @@ class HydrogenCodeStub : public CodeStub {
return isolate->code_stub_interface_descriptor(MajorKey());
}
bool IsMiss() { return is_miss_; }
bool IsUninitialized() { return is_uninitialized_; }
template<class SubClass>
static Handle<Code> GetUninitialized(Isolate* isolate) {
@ -339,11 +360,11 @@ class HydrogenCodeStub : public CodeStub {
void GenerateLightweightMiss(MacroAssembler* masm);
virtual int MinorKey() {
return IsMissBits::encode(is_miss_) |
return IsMissBits::encode(is_uninitialized_) |
MinorKeyBits::encode(NotMissMinorKey());
}
bool is_miss_;
bool is_uninitialized_;
};
@ -516,8 +537,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub {
FastCloneShallowArrayStub(Mode mode,
AllocationSiteMode allocation_site_mode,
int length)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS),
mode_(mode),
: mode_(mode),
allocation_site_mode_(allocation_site_mode),
length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
ASSERT_GE(length_, 0);
@ -577,8 +597,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub {
static const int kMaximumClonedProperties = 6;
explicit FastCloneShallowObjectStub(int length)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS),
length_(length) {
: length_(length) {
ASSERT_GE(length_, 0);
ASSERT_LE(length_, kMaximumClonedProperties);
}
@ -655,9 +674,23 @@ class ArrayConstructorStub: public PlatformCodeStub {
};
class InternalArrayConstructorStub: public PlatformCodeStub {
public:
explicit InternalArrayConstructorStub(Isolate* isolate);
void Generate(MacroAssembler* masm);
private:
virtual CodeStub::Major MajorKey() { return InternalArrayConstructor; }
virtual int MinorKey() { return 0; }
void GenerateCase(MacroAssembler* masm, ElementsKind kind);
};
class MathPowStub: public PlatformCodeStub {
public:
enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
explicit MathPowStub(ExponentType exponent_type)
: exponent_type_(exponent_type) { }
@ -763,7 +796,7 @@ class HICStub: public HydrogenCodeStub {
virtual InlineCacheState GetICState() { return MONOMORPHIC; }
protected:
HICStub() : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { }
HICStub() { }
class KindBits: public BitField<Code::Kind, 0, 4> {};
virtual Code::Kind kind() const = 0;
};
@ -870,7 +903,9 @@ class BinaryOpStub: public PlatformCodeStub {
platform_specific_bit_(false),
left_type_(BinaryOpIC::UNINITIALIZED),
right_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) {
result_type_(BinaryOpIC::UNINITIALIZED),
has_fixed_right_arg_(false),
encoded_right_arg_(encode_arg_value(1)) {
Initialize();
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@ -879,13 +914,17 @@ class BinaryOpStub: public PlatformCodeStub {
int key,
BinaryOpIC::TypeInfo left_type,
BinaryOpIC::TypeInfo right_type,
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
BinaryOpIC::TypeInfo result_type,
bool has_fixed_right_arg,
int32_t fixed_right_arg_value)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
platform_specific_bit_(PlatformSpecificBits::decode(key)),
left_type_(left_type),
right_type_(right_type),
result_type_(result_type) { }
result_type_(result_type),
has_fixed_right_arg_(has_fixed_right_arg),
encoded_right_arg_(encode_arg_value(fixed_right_arg_value)) { }
static void decode_types_from_minor_key(int minor_key,
BinaryOpIC::TypeInfo* left_type,
@ -903,6 +942,24 @@ class BinaryOpStub: public PlatformCodeStub {
return static_cast<Token::Value>(OpBits::decode(minor_key));
}
static bool decode_has_fixed_right_arg_from_minor_key(int minor_key) {
return HasFixedRightArgBits::decode(minor_key);
}
static int decode_fixed_right_arg_value_from_minor_key(int minor_key) {
return decode_arg_value(FixedRightArgValueBits::decode(minor_key));
}
int fixed_right_arg_value() const {
return decode_arg_value(encoded_right_arg_);
}
static bool can_encode_arg_value(int32_t value) {
return value > 0 &&
IsPowerOf2(value) &&
FixedRightArgValueBits::is_valid(WhichPowerOf2(value));
}
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
@ -918,15 +975,31 @@ class BinaryOpStub: public PlatformCodeStub {
BinaryOpIC::TypeInfo right_type_;
BinaryOpIC::TypeInfo result_type_;
bool has_fixed_right_arg_;
int encoded_right_arg_;
static int encode_arg_value(int32_t value) {
ASSERT(can_encode_arg_value(value));
return WhichPowerOf2(value);
}
static int32_t decode_arg_value(int value) {
return 1 << value;
}
virtual void PrintName(StringStream* stream);
// Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
// Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM.
// Note: We actually do not need 7 bits for the operation, just 4 bits to
// encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class PlatformSpecificBits: public BitField<bool, 9, 1> {};
class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
class HasFixedRightArgBits: public BitField<bool, 19, 1> {};
class FixedRightArgValueBits: public BitField<int, 20, 5> {};
Major MajorKey() { return BinaryOp; }
int MinorKey() {
@ -935,7 +1008,9 @@ class BinaryOpStub: public PlatformCodeStub {
| PlatformSpecificBits::encode(platform_specific_bit_)
| LeftTypeBits::encode(left_type_)
| RightTypeBits::encode(right_type_)
| ResultTypeBits::encode(result_type_);
| ResultTypeBits::encode(result_type_)
| HasFixedRightArgBits::encode(has_fixed_right_arg_)
| FixedRightArgValueBits::encode(encoded_right_arg_);
}
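Putting the widened key together: FFFFFHTTTRRRLLLPOOOOOOOMM is 2 mode bits, 7 op bits, 1 platform bit, 3 bits each for left/right/result type, 1 has-fixed-right-arg bit, and 5 bits holding log2 of the fixed argument, which is why can_encode_arg_value() insists on a positive power of two. A runnable sketch with simplified BitField stand-ins (field widths copied from the declarations above, everything else illustrative):

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct Field {
  static uint32_t encode(T v) { return static_cast<uint32_t>(v) << shift; }
  static T decode(uint32_t k) {
    return static_cast<T>((k >> shift) & ((1u << size) - 1));
  }
};
typedef Field<int, 0, 2>   ModeBits;
typedef Field<int, 2, 7>   OpBits;
typedef Field<bool, 9, 1>  PlatformSpecificBits;
typedef Field<int, 10, 3>  LeftTypeBits;
typedef Field<int, 13, 3>  RightTypeBits;
typedef Field<int, 16, 3>  ResultTypeBits;
typedef Field<bool, 19, 1> HasFixedRightArgBits;
typedef Field<int, 20, 5>  FixedRightArgValueBits;  // stores log2(value)

int main() {
  // Encode a key for something like `x % 8` with a known right operand.
  // The op id 27 is an arbitrary 7-bit value for illustration only.
  uint32_t key = ModeBits::encode(1) |
                 OpBits::encode(27) |
                 HasFixedRightArgBits::encode(true) |
                 FixedRightArgValueBits::encode(3);  // log2(8)
  assert(HasFixedRightArgBits::decode(key));
  assert((1 << FixedRightArgValueBits::decode(key)) == 8);
  return 0;
}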
@ -1005,6 +1080,8 @@ class ICCompareStub: public PlatformCodeStub {
return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
}
virtual InlineCacheState GetICState();
private:
class OpField: public BitField<int, 0, 3> { };
class LeftStateField: public BitField<int, 3, 4> { };
@ -1069,6 +1146,7 @@ class CompareNilICStub : public HydrogenCodeStub {
}
void Print(StringStream* stream) const;
void TraceTransition(Types to) const;
};
// At most 6 different types can be distinguished, because the Code object
@ -1076,23 +1154,21 @@ class CompareNilICStub : public HydrogenCodeStub {
// boolean flags we need to store. :-P
STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
CompareNilICStub(EqualityKind kind, NilValue nil, Types types)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), types_(types) {
equality_kind_ = kind;
CompareNilICStub(NilValue nil, Types types = Types())
: types_(types) {
nil_value_ = nil;
}
explicit CompareNilICStub(Code::ExtraICState ic_state)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
equality_kind_ = EqualityKindField::decode(ic_state);
CompareNilICStub(Code::ExtraICState ic_state,
InitializationState init_state = INITIALIZED)
: HydrogenCodeStub(init_state) {
nil_value_ = NilValueField::decode(ic_state);
types_ = Types(ExtractTypesFromExtraICState(ic_state));
}
static Handle<Code> GetUninitialized(Isolate* isolate,
EqualityKind kind,
NilValue nil) {
return CompareNilICStub(kind, nil, CODE_STUB_IS_MISS).GetCode(isolate);
return CompareNilICStub(nil, UNINITIALIZED).GetCode(isolate);
}
virtual void InitializeInterfaceDescriptor(
@ -1100,8 +1176,7 @@ class CompareNilICStub : public HydrogenCodeStub {
CodeStubInterfaceDescriptor* descriptor);
static void InitializeForIsolate(Isolate* isolate) {
CompareNilICStub compare_stub(kStrictEquality, kNullValue,
CODE_STUB_IS_MISS);
CompareNilICStub compare_stub(kNullValue, UNINITIALIZED);
compare_stub.InitializeInterfaceDescriptor(
isolate,
isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
@ -1121,53 +1196,38 @@ class CompareNilICStub : public HydrogenCodeStub {
Handle<Code> GenerateCode();
// extra ic state = nil_value | equality_kind | type_n-1 | ... | type_0
// extra ic state = nil_value | type_n-1 | ... | type_0
virtual Code::ExtraICState GetExtraICState() {
return NilValueField::encode(nil_value_) |
EqualityKindField::encode(equality_kind_) |
types_.ToIntegral();
}
static byte ExtractTypesFromExtraICState(
Code::ExtraICState state) {
return state & ((1<<NUMBER_OF_TYPES)-1);
return state & ((1 << NUMBER_OF_TYPES) - 1);
}
void Record(Handle<Object> object);
bool IsMonomorphic() const { return types_.Contains(MONOMORPHIC_MAP); }
EqualityKind GetKind() const { return equality_kind_; }
NilValue GetNilValue() const { return nil_value_; }
Types GetTypes() const { return types_; }
void ClearTypes() { types_.RemoveAll(); }
void SetKind(EqualityKind kind) { equality_kind_ = kind; }
virtual void PrintName(StringStream* stream);
private:
friend class CompareNilIC;
CompareNilICStub(EqualityKind kind, NilValue nil,
InitializationState init_state)
: HydrogenCodeStub(init_state), types_(0) {
equality_kind_ = kind;
nil_value_ = nil;
}
CompareNilICStub(Code::ExtraICState ic_state, InitializationState init_state)
CompareNilICStub(NilValue nil, InitializationState init_state)
: HydrogenCodeStub(init_state) {
equality_kind_ = EqualityKindField::decode(ic_state);
nil_value_ = NilValueField::decode(ic_state);
types_ = Types(ExtractTypesFromExtraICState(ic_state));
nil_value_ = nil;
}
class EqualityKindField : public BitField<EqualityKind, NUMBER_OF_TYPES, 1> {
};
class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES+1, 1> {};
class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES, 1> {};
virtual CodeStub::Major MajorKey() { return CompareNilIC; }
virtual int NotMissMinorKey() { return GetExtraICState(); }
EqualityKind equality_kind_;
NilValue nil_value_;
Types types_;
@ -1567,8 +1627,7 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
class KeyedLoadFastElementStub : public HydrogenCodeStub {
public:
KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
bit_field_ = ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array);
}
@ -1603,8 +1662,7 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
public:
KeyedStoreFastElementStub(bool is_js_array,
ElementsKind elements_kind,
KeyedAccessStoreMode mode)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
KeyedAccessStoreMode mode) {
bit_field_ = ElementsKindBits::encode(elements_kind) |
IsJSArrayBits::encode(is_js_array) |
StoreModeBits::encode(mode);
@ -1644,8 +1702,7 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub {
class TransitionElementsKindStub : public HydrogenCodeStub {
public:
TransitionElementsKindStub(ElementsKind from_kind,
ElementsKind to_kind)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
ElementsKind to_kind) {
bit_field_ = FromKindBits::encode(from_kind) |
ToKindBits::encode(to_kind);
}
@ -1678,20 +1735,22 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(ElementsKind kind, AllocationSiteMode mode)
: HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) {
ArrayConstructorStubBase(ElementsKind kind, bool disable_allocation_sites) {
// It only makes sense to override local allocation site behavior
// if there is a difference between the global allocation site policy
// for an ElementsKind and the desired usage of the stub.
ASSERT(!disable_allocation_sites ||
AllocationSiteInfo::GetMode(kind) == TRACK_ALLOCATION_SITE);
bit_field_ = ElementsKindBits::encode(kind) |
AllocationSiteModeBits::encode(mode == TRACK_ALLOCATION_SITE);
DisableAllocationSitesBits::encode(disable_allocation_sites);
}
ElementsKind elements_kind() const {
return ElementsKindBits::decode(bit_field_);
}
AllocationSiteMode mode() const {
return AllocationSiteModeBits::decode(bit_field_)
? TRACK_ALLOCATION_SITE
: DONT_TRACK_ALLOCATION_SITE;
bool disable_allocation_sites() const {
return DisableAllocationSitesBits::decode(bit_field_);
}
virtual bool IsPregenerated() { return true; }
@ -1706,7 +1765,7 @@ class ArrayConstructorStubBase : public HydrogenCodeStub {
int NotMissMinorKey() { return bit_field_; }
class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
class AllocationSiteModeBits: public BitField<bool, 8, 1> {};
class DisableAllocationSitesBits: public BitField<bool, 8, 1> {};
uint32_t bit_field_;
DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
@ -1717,8 +1776,8 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNoArgumentConstructorStub(
ElementsKind kind,
AllocationSiteMode mode = TRACK_ALLOCATION_SITE)
: ArrayConstructorStubBase(kind, mode) {
bool disable_allocation_sites = false)
: ArrayConstructorStubBase(kind, disable_allocation_sites) {
}
virtual Handle<Code> GenerateCode();
@ -1738,8 +1797,8 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
public:
ArraySingleArgumentConstructorStub(
ElementsKind kind,
AllocationSiteMode mode = TRACK_ALLOCATION_SITE)
: ArrayConstructorStubBase(kind, mode) {
bool disable_allocation_sites = false)
: ArrayConstructorStubBase(kind, disable_allocation_sites) {
}
virtual Handle<Code> GenerateCode();
@ -1759,8 +1818,8 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
public:
ArrayNArgumentsConstructorStub(
ElementsKind kind,
AllocationSiteMode mode = TRACK_ALLOCATION_SITE) :
ArrayConstructorStubBase(kind, mode) {
bool disable_allocation_sites = false)
: ArrayConstructorStubBase(kind, disable_allocation_sites) {
}
virtual Handle<Code> GenerateCode();
@ -1776,6 +1835,87 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
};
class InternalArrayConstructorStubBase : public HydrogenCodeStub {
public:
explicit InternalArrayConstructorStubBase(ElementsKind kind) {
kind_ = kind;
}
virtual bool IsPregenerated() { return true; }
static void GenerateStubsAheadOfTime(Isolate* isolate);
static void InstallDescriptors(Isolate* isolate);
// Parameters accessed via CodeStubGraphBuilder::GetParameter()
static const int kConstructor = 0;
ElementsKind elements_kind() const { return kind_; }
private:
int NotMissMinorKey() { return kind_; }
ElementsKind kind_;
DISALLOW_COPY_AND_ASSIGN(InternalArrayConstructorStubBase);
};
class InternalArrayNoArgumentConstructorStub : public
InternalArrayConstructorStubBase {
public:
explicit InternalArrayNoArgumentConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
private:
Major MajorKey() { return InternalArrayNoArgumentConstructor; }
DISALLOW_COPY_AND_ASSIGN(InternalArrayNoArgumentConstructorStub);
};
class InternalArraySingleArgumentConstructorStub : public
InternalArrayConstructorStubBase {
public:
explicit InternalArraySingleArgumentConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
private:
Major MajorKey() { return InternalArraySingleArgumentConstructor; }
DISALLOW_COPY_AND_ASSIGN(InternalArraySingleArgumentConstructorStub);
};
class InternalArrayNArgumentsConstructorStub : public
InternalArrayConstructorStubBase {
public:
explicit InternalArrayNArgumentsConstructorStub(ElementsKind kind)
: InternalArrayConstructorStubBase(kind) { }
virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
private:
Major MajorKey() { return InternalArrayNArgumentsConstructor; }
DISALLOW_COPY_AND_ASSIGN(InternalArrayNArgumentsConstructorStub);
};
class KeyedStoreElementStub : public PlatformCodeStub {
public:
KeyedStoreElementStub(bool is_js_array,
@ -1811,7 +1951,7 @@ class KeyedStoreElementStub : public PlatformCodeStub {
};
class ToBooleanStub: public PlatformCodeStub {
class ToBooleanStub: public HydrogenCodeStub {
public:
enum Type {
UNDEFINED,
@ -1845,31 +1985,54 @@ class ToBooleanStub: public PlatformCodeStub {
static Types no_types() { return Types(); }
static Types all_types() { return Types((1 << NUMBER_OF_TYPES) - 1); }
explicit ToBooleanStub(Register tos, Types types = Types())
: tos_(tos), types_(types) { }
explicit ToBooleanStub(Types types = Types())
: types_(types) { }
explicit ToBooleanStub(Code::ExtraICState state)
: types_(static_cast<byte>(state)) { }
bool Record(Handle<Object> object);
Types GetTypes() { return types_; }
virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
void Generate(MacroAssembler* masm);
virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; }
virtual void PrintName(StringStream* stream);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) |
types_.ToByte(); }
static void InitializeForIsolate(Isolate* isolate) {
ToBooleanStub stub;
stub.InitializeInterfaceDescriptor(
isolate,
isolate->code_stub_interface_descriptor(CodeStub::ToBoolean));
}
virtual void FinishCode(Handle<Code> code) {
code->set_to_boolean_state(types_.ToByte());
static Handle<Code> GetUninitialized(Isolate* isolate) {
return ToBooleanStub(UNINITIALIZED).GetCode(isolate);
}
void CheckOddball(MacroAssembler* masm,
Type type,
Heap::RootListIndex value,
bool result);
void GenerateTypeTransition(MacroAssembler* masm);
virtual Code::ExtraICState GetExtraICState() {
return types_.ToIntegral();
}
virtual InlineCacheState GetICState() {
if (types_.IsEmpty()) {
return ::v8::internal::UNINITIALIZED;
} else {
return MONOMORPHIC;
}
}
private:
Major MajorKey() { return ToBoolean; }
int NotMissMinorKey() { return GetExtraICState(); }
explicit ToBooleanStub(InitializationState init_state) :
HydrogenCodeStub(init_state) {}
Register tos_;
Types types_;
};
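Note: the hunks above convert ToBooleanStub from a platform stub into a Hydrogen stub, with the set of observed input types doubling as the IC state (empty set = UNINITIALIZED, anything else = MONOMORPHIC). A minimal, self-contained sketch of that bookkeeping follows; the names and the four-type subset are illustrative, not V8's actual definitions.

#include <cstdio>

// Illustrative stand-in for ToBooleanStub::Types: one bit per input type
// the IC has observed, serialized into the code object's extra IC state.
enum Type {
  UNDEFINED_T   = 1 << 0,
  BOOLEAN_T     = 1 << 1,
  SMI_T         = 1 << 2,
  HEAP_NUMBER_T = 1 << 3
};

struct Types {
  unsigned bits;
  Types() : bits(0) {}
  bool IsEmpty() const { return bits == 0; }  // -> UNINITIALIZED state
  void Record(Type t) { bits |= t; }          // Record() in the stub
  unsigned ToByte() const { return bits; }    // GetExtraICState()
};

int main() {
  Types types;
  std::printf("%s\n", types.IsEmpty() ? "UNINITIALIZED" : "MONOMORPHIC");
  types.Record(SMI_T);  // seeing a Smi input flips the IC to MONOMORPHIC
  std::printf("%s (byte %u)\n",
              types.IsEmpty() ? "UNINITIALIZED" : "MONOMORPHIC",
              types.ToByte());
  return 0;
}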

12
deps/v8/src/codegen.cc

@ -58,13 +58,12 @@ Comment::~Comment() {
#undef __
void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
#ifdef DEBUG
void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
bool print_source = false;
bool print_ast = false;
const char* ftype;
if (Isolate::Current()->bootstrapper()->IsActive()) {
if (info->isolate()->bootstrapper()->IsActive()) {
print_source = FLAG_print_builtin_source;
print_ast = FLAG_print_builtin_ast;
ftype = "builtin";
@ -75,17 +74,18 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
}
if (FLAG_trace_codegen || print_source || print_ast) {
PrintF("*** Generate code for %s function: ", ftype);
PrintF("[generating %s code for %s function: ", kind, ftype);
if (info->IsStub()) {
const char* name =
CodeStub::MajorName(info->code_stub()->MajorKey(), true);
PrintF("%s", name == NULL ? "<unknown>" : name);
} else {
info->function()->name()->ShortPrint();
PrintF("%s", *info->function()->debug_name()->ToCString());
}
PrintF(" ***\n");
PrintF("]\n");
}
#ifdef DEBUG
if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
PrettyPrinter().PrintProgram(info->function()));
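Note: MakeCodePrologue now takes the code kind as a string and reads the isolate from the CompilationInfo rather than Isolate::Current(). A toy reproduction of the new trace line, assuming call sites pass kinds such as "full" or "optimized" (an assumption; those strings are not shown in this excerpt):

#include <cstdio>

// Toy reproduction of the new MakeCodePrologue trace format (illustrative).
static void TracePrologue(const char* kind, const char* ftype,
                          const char* name) {
  std::printf("[generating %s code for %s function: %s]\n", kind, ftype, name);
}

int main() {
  TracePrologue("full", "user-defined", "fib");        // e.g. --trace-codegen
  TracePrologue("optimized", "builtin", "ArrayPush");  // kind strings assumed
  return 0;
}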

12
deps/v8/src/codegen.h

@ -113,18 +113,6 @@ class ElementsTransitionGenerator : public AllStatic {
};
class SeqStringSetCharGenerator : public AllStatic {
public:
static void Generate(MacroAssembler* masm,
String::Encoding encoding,
Register string,
Register index,
Register value);
private:
DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
};
} } // namespace v8::internal
#endif // V8_CODEGEN_H_

38
deps/v8/src/compiler.cc

@ -36,6 +36,7 @@
#include "deoptimizer.h"
#include "full-codegen.h"
#include "gdb-jit.h"
#include "typing.h"
#include "hydrogen.h"
#include "isolate-inl.h"
#include "lithium.h"
@ -361,11 +362,11 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
isolate()->GetHTracer()->TraceCompilation(info());
}
Handle<Context> native_context(
info()->closure()->context()->native_context());
oracle_ = new(info()->zone()) TypeFeedbackOracle(
code, native_context, isolate(), info()->zone());
graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
// Type-check the function.
AstTyper::Type(info());
graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
@ -392,9 +393,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
}
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
AssertNoAllocation no_gc;
NoHandleAllocation no_handles(isolate());
HandleDereferenceGuard no_deref(isolate(), HandleDereferenceGuard::DISALLOW);
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
@ -423,8 +424,7 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
// graph creation. To make sure that we don't encounter inconsistencies
// between graph creation and code generation, we disallow accessing
// objects through deferred handles during the latter, with exceptions.
HandleDereferenceGuard no_deref_deferred(
isolate(), HandleDereferenceGuard::DISALLOW_DEFERRED);
DisallowDeferredHandleDereference no_deferred_handle_deref;
Handle<Code> optimized_code = chunk_->Codegen();
if (optimized_code.is_null()) {
info()->set_bailout_reason("code generation failed");
@ -649,7 +649,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
// in that case too.
// Create a script object describing the script to be compiled.
Handle<Script> script = FACTORY->NewScript(source);
Handle<Script> script = isolate->factory()->NewScript(source);
if (natives == NATIVES_CODE) {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
}
@ -771,13 +771,6 @@ static bool InstallFullCode(CompilationInfo* info) {
int expected = lit->expected_property_count();
SetExpectedNofPropertiesFromEstimate(shared, expected);
// Set the optimization hints after performing lazy compilation, as
// these are not set when the function is set up as a lazily
// compiled function.
shared->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
// Check the function has compiled code.
ASSERT(shared->is_compiled());
shared->set_code_age(0);
@ -957,9 +950,6 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
if (status == OptimizingCompiler::SUCCEEDED) {
info.Detach();
shared->code()->set_profiler_ticks(0);
// Do a scavenge to put off the next scavenge as far as possible.
// This may ease the issue that GVN blocks the next scavenge.
isolate->heap()->CollectGarbage(NEW_SPACE, "parallel recompile");
isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
} else if (status == OptimizingCompiler::BAILED_OUT) {
isolate->clear_pending_exception();
@ -1054,6 +1044,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
info.SetLanguageMode(literal->scope()->language_mode());
Isolate* isolate = info.isolate();
Factory* factory = isolate->factory();
LiveEditFunctionTracker live_edit_tracker(isolate, literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
@ -1083,7 +1074,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Create a shared function info object.
Handle<SharedFunctionInfo> result =
FACTORY->NewSharedFunctionInfo(literal->name(),
factory->NewSharedFunctionInfo(literal->name(),
literal->materialized_literal_count(),
literal->is_generator(),
info.code(),
@ -1120,9 +1111,6 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->set_is_anonymous(lit->is_anonymous());
function_info->set_is_toplevel(is_toplevel);
function_info->set_inferred_name(*lit->inferred_name());
function_info->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_allows_lazy_compilation_without_context(
lit->AllowsLazyCompilationWithoutContext());
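Note: this file shows the commit-wide renaming of the assertion scopes (AssertNoAllocation becomes DisallowHeapAllocation, NoHandleAllocation becomes SealHandleScope, the HandleDereferenceGuard variants become DisallowHandleDereference and DisallowDeferredHandleDereference), all from the new assert-scope.h; the same substitution recurs in debug.cc, deoptimizer.cc, disassembler.cc, elements.cc, and execution.cc below. A simplified sketch of the guard pattern, assuming a single global flag instead of V8's per-isolate, per-assert-type implementation:

#include <cassert>

// Simplified stand-in for v8::internal::DisallowHeapAllocation; the real
// class (assert-scope.h) keeps per-isolate, per-assert-type state.
class DisallowHeapAllocation {
 public:
  DisallowHeapAllocation() : prev_(allowed_) { allowed_ = false; }
  ~DisallowHeapAllocation() { allowed_ = prev_; }  // scopes nest and restore
  static bool IsAllowed() { return allowed_; }
 private:
  bool prev_;
  static bool allowed_;
};
bool DisallowHeapAllocation::allowed_ = true;

static void Allocate() { assert(DisallowHeapAllocation::IsAllowed()); }

int main() {
  Allocate();                              // allowed
  {
    DisallowHeapAllocation no_allocation;  // as in OptimizeGraph() above
    // Allocate();                         // would trip the assert here
  }
  Allocate();                              // allowed again
  return 0;
}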

2
deps/v8/src/compiler.h

@ -449,7 +449,6 @@ class OptimizingCompiler: public ZoneObject {
public:
explicit OptimizingCompiler(CompilationInfo* info)
: info_(info),
oracle_(NULL),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
@ -478,7 +477,6 @@ class OptimizingCompiler: public ZoneObject {
private:
CompilationInfo* info_;
TypeFeedbackOracle* oracle_;
HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;

8
deps/v8/src/contexts.h

@ -172,6 +172,11 @@ enum BindingFlags {
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \
V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, \
observers_begin_perform_splice) \
V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \
observers_end_perform_splice) \
V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
@ -317,6 +322,9 @@ class Context: public FixedArray {
DERIVED_SET_TRAP_INDEX,
PROXY_ENUMERATE_INDEX,
OBSERVERS_NOTIFY_CHANGE_INDEX,
OBSERVERS_ENQUEUE_SPLICE_INDEX,
OBSERVERS_BEGIN_SPLICE_INDEX,
OBSERVERS_END_SPLICE_INDEX,
OBSERVERS_DELIVER_CHANGES_INDEX,
GENERATOR_FUNCTION_MAP_INDEX,
STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
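Note: the contexts.h hunks extend an X-macro list, so each new V(INDEX, type, name) entry must also be added to the enum below it, which the second hunk does. A compact illustration of the technique (the list name and entries here are invented; V8's real expansion also feeds the bootstrapper and serializer):

#include <cstdio>

// X-macro list in the style of the native-context field list above.
#define DEMO_FIELDS(V)                  \
  V(NOTIFY_CHANGE_INDEX, notify_change) \
  V(ENQUEUE_SPLICE_INDEX, enqueue_splice)

// Expansion 1: the slot indices (mirrors the enum inside Context).
enum Slot {
#define DECLARE_SLOT(index, name) index,
  DEMO_FIELDS(DECLARE_SLOT)
#undef DECLARE_SLOT
  SLOT_COUNT
};

// Expansion 2: generated accessors, one per entry.
#define DECLARE_ACCESSOR(index, name) static int name() { return index; }
DEMO_FIELDS(DECLARE_ACCESSOR)
#undef DECLARE_ACCESSOR

int main() {
  std::printf("%d slots, enqueue_splice at index %d\n",
              static_cast<int>(SLOT_COUNT), enqueue_splice());
  return 0;
}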

3
deps/v8/src/cpu-profiler.cc

@ -461,10 +461,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
// Enumerate stuff we already have in the heap.
if (isolate_->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
bool saved_log_code_flag = FLAG_log_code;
FLAG_log_code = true;
isolate_->logger()->LogCodeObjects();
FLAG_log_code = saved_log_code_flag;
}
isolate_->logger()->LogCompiledFunctions();
isolate_->logger()->LogAccessorCallbacks();

97
deps/v8/src/d8-posix.cc

@ -238,7 +238,7 @@ class ExecArgs {
// Gets the optional timeouts from the arguments to the system() call.
static bool GetTimeouts(const Arguments& args,
static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
int* read_timeout,
int* total_timeout) {
if (args.Length() > 3) {
@ -448,25 +448,28 @@ static bool WaitForChild(int pid,
// Implementation of the system() function (see d8.h for details).
Handle<Value> Shell::System(const Arguments& args) {
void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
int read_timeout = -1;
int total_timeout = -1;
if (!GetTimeouts(args, &read_timeout, &total_timeout)) return v8::Undefined();
if (!GetTimeouts(args, &read_timeout, &total_timeout)) return;
Handle<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
return ThrowException(String::New("system: Argument 2 must be an array"));
ThrowException(String::New("system: Argument 2 must be an array"));
return;
}
command_args = Handle<Array>::Cast(args[1]);
} else {
command_args = Array::New(0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
return ThrowException(String::New("Too many arguments to system()"));
ThrowException(String::New("Too many arguments to system()"));
return;
}
if (args.Length() < 1) {
return ThrowException(String::New("Too few arguments to system()"));
ThrowException(String::New("Too few arguments to system()"));
return;
}
struct timeval start_time;
@ -474,16 +477,18 @@ Handle<Value> Shell::System(const Arguments& args) {
ExecArgs exec_args;
if (!exec_args.Init(args[0], command_args)) {
return v8::Undefined();
return;
}
int exec_error_fds[2];
int stdout_fds[2];
if (pipe(exec_error_fds) != 0) {
return ThrowException(String::New("pipe syscall failed."));
ThrowException(String::New("pipe syscall failed."));
return;
}
if (pipe(stdout_fds) != 0) {
return ThrowException(String::New("pipe syscall failed."));
ThrowException(String::New("pipe syscall failed."));
return;
}
pid_t pid = fork();
@ -499,7 +504,7 @@ Handle<Value> Shell::System(const Arguments& args) {
OpenFDCloser error_read_closer(exec_error_fds[kReadFD]);
OpenFDCloser stdout_read_closer(stdout_fds[kReadFD]);
if (!ChildLaunchedOK(exec_error_fds)) return v8::Undefined();
if (!ChildLaunchedOK(exec_error_fds)) return;
Handle<Value> accumulator = GetStdout(stdout_fds[kReadFD],
start_time,
@ -507,7 +512,8 @@ Handle<Value> Shell::System(const Arguments& args) {
total_timeout);
if (accumulator->IsUndefined()) {
kill(pid, SIGINT); // On timeout, kill the subprocess.
return accumulator;
args.GetReturnValue().Set(accumulator);
return;
}
if (!WaitForChild(pid,
@ -515,42 +521,47 @@ Handle<Value> Shell::System(const Arguments& args) {
start_time,
read_timeout,
total_timeout)) {
return v8::Undefined();
return;
}
return scope.Close(accumulator);
args.GetReturnValue().Set(accumulator);
}
Handle<Value> Shell::ChangeDirectory(const Arguments& args) {
void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "chdir() takes one argument";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.chdir(): String conversion of argument failed.";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
if (chdir(*directory) != 0) {
return ThrowException(String::New(strerror(errno)));
ThrowException(String::New(strerror(errno)));
return;
}
return v8::Undefined();
}
Handle<Value> Shell::SetUMask(const Arguments& args) {
void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "umask() takes one argument";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
if (args[0]->IsNumber()) {
mode_t mask = args[0]->Int32Value();
int previous = umask(mask);
return Number::New(previous);
args.GetReturnValue().Set(previous);
return;
} else {
const char* message = "umask() argument must be numeric";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
}
@ -598,79 +609,85 @@ static bool mkdirp(char* directory, mode_t mask) {
}
Handle<Value> Shell::MakeDirectory(const Arguments& args) {
void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mode_t mask = 0777;
if (args.Length() == 2) {
if (args[1]->IsNumber()) {
mask = args[1]->Int32Value();
} else {
const char* message = "mkdirp() second argument must be numeric";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
} else if (args.Length() != 1) {
const char* message = "mkdirp() takes one or two arguments";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.mkdirp(): String conversion of argument failed.";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
mkdirp(*directory, mask);
return v8::Undefined();
}
Handle<Value> Shell::RemoveDirectory(const Arguments& args) {
void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "rmdir() takes one or two arguments";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
String::Utf8Value directory(args[0]);
if (*directory == NULL) {
const char* message = "os.rmdir(): String conversion of argument failed.";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
rmdir(*directory);
return v8::Undefined();
}
Handle<Value> Shell::SetEnvironment(const Arguments& args) {
void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
const char* message = "setenv() takes two arguments";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
String::Utf8Value var(args[0]);
String::Utf8Value value(args[1]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
if (*value == NULL) {
const char* message =
"os.setenv(): String conversion of variable contents failed.";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
setenv(*var, *value, 1);
return v8::Undefined();
}
Handle<Value> Shell::UnsetEnvironment(const Arguments& args) {
void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
String::Utf8Value var(args[0]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
return ThrowException(String::New(message));
ThrowException(String::New(message));
return;
}
unsetenv(*var);
return v8::Undefined();
}
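Note: every os.* binding in this file undergoes the same mechanical rewrite: the old style returned a Handle&lt;Value&gt; (propagating ThrowException's return value), while the new FunctionCallback style returns void and reports through args.GetReturnValue(). A hedged sketch of the new shape; the function name and message are hypothetical:

#include <v8.h>
using namespace v8;

// Hypothetical binding in the new callback style (not part of the patch).
void Answer(const FunctionCallbackInfo<Value>& args) {
  if (args.Length() != 0) {
    ThrowException(String::New("answer() takes no arguments"));
    return;                      // the pending exception is the result
  }
  args.GetReturnValue().Set(42); // leaving it unset yields undefined
}

Registration is unchanged in spirit: such a callback would still be attached via FunctionTemplate::New(Answer), as d8 appears to do for the real os.* methods.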

236
deps/v8/src/d8.cc

@ -40,11 +40,6 @@
#include <string.h>
#include <sys/stat.h>
// TODO(dcarney): remove
#define V8_ALLOW_ACCESS_TO_PERSISTENT_ARROW
#define V8_ALLOW_ACCESS_TO_RAW_HANDLE_CONSTRUCTOR
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#ifdef V8_SHARED
#include <assert.h>
#endif // V8_SHARED
@ -243,8 +238,10 @@ bool Shell::ExecuteString(Isolate* isolate,
#if !defined(V8_SHARED)
} else {
v8::TryCatch try_catch;
Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
Handle<Object> global = context->Global();
Handle<Value> fun = global->Get(String::New("Stringify"));
Handle<Value> argv[1] = { result };
Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv);
@ -266,8 +263,7 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_current_ = 0;
data_->realm_switch_ = 0;
data_->realms_ = new Persistent<Context>[1];
data_->realms_[0] =
Persistent<Context>::New(data_->isolate_, Context::GetEntered());
data_->realms_[0].Reset(data_->isolate_, Context::GetEntered());
data_->realm_shared_.Clear();
}
@ -291,143 +287,152 @@ int PerIsolateData::RealmFind(Handle<Context> context) {
// Realm.current() returns the index of the currently active realm.
Handle<Value> Shell::RealmCurrent(const Arguments& args) {
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
int index = data->RealmFind(Context::GetEntered());
if (index == -1) return Undefined(isolate);
return Number::New(index);
if (index == -1) return;
args.GetReturnValue().Set(index);
}
// Realm.owner(o) returns the index of the realm that created o.
Handle<Value> Shell::RealmOwner(const Arguments& args) {
void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsObject()) {
return Throw("Invalid argument");
Throw("Invalid argument");
return;
}
int index = data->RealmFind(args[0]->ToObject()->CreationContext());
if (index == -1) return Undefined(isolate);
return Number::New(index);
if (index == -1) return;
args.GetReturnValue().Set(index);
}
// Realm.global(i) returns the global object of realm i.
// (Note that properties of global objects cannot be read/written cross-realm.)
Handle<Value> Shell::RealmGlobal(const Arguments& args) {
void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData* data = PerIsolateData::Get(args.GetIsolate());
if (args.Length() < 1 || !args[0]->IsNumber()) {
return Throw("Invalid argument");
Throw("Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
return Throw("Invalid realm index");
Throw("Invalid realm index");
return;
}
return data->realms_[index]->Global();
args.GetReturnValue().Set(
Local<Context>::New(args.GetIsolate(), data->realms_[index])->Global());
}
// Realm.create() creates a new realm and returns its index.
Handle<Value> Shell::RealmCreate(const Arguments& args) {
void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
Persistent<Context>* old_realms = data->realms_;
int index = data->realm_count_;
data->realms_ = new Persistent<Context>[++data->realm_count_];
for (int i = 0; i < index; ++i) data->realms_[i] = old_realms[i];
for (int i = 0; i < index; ++i) {
data->realms_[i].Reset(isolate, old_realms[i]);
}
delete[] old_realms;
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
data->realms_[index] = Persistent<Context>::New(
data->realms_[index].Reset(
isolate, Context::New(isolate, NULL, global_template));
return Number::New(index);
args.GetReturnValue().Set(index);
}
// Realm.dispose(i) disposes the reference to the realm i.
Handle<Value> Shell::RealmDispose(const Arguments& args) {
void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
return Throw("Invalid argument");
Throw("Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty() ||
index == 0 ||
index == data->realm_current_ || index == data->realm_switch_) {
return Throw("Invalid realm index");
Throw("Invalid realm index");
return;
}
data->realms_[index].Dispose(isolate);
data->realms_[index].Clear();
return Undefined(isolate);
}
// Realm.switch(i) switches to the realm i for consecutive interactive inputs.
Handle<Value> Shell::RealmSwitch(const Arguments& args) {
void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsNumber()) {
return Throw("Invalid argument");
Throw("Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
return Throw("Invalid realm index");
Throw("Invalid realm index");
return;
}
data->realm_switch_ = index;
return Undefined(isolate);
}
// Realm.eval(i, s) evaluates s in realm i and returns the result.
Handle<Value> Shell::RealmEval(const Arguments& args) {
void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) {
return Throw("Invalid argument");
Throw("Invalid argument");
return;
}
int index = args[0]->Uint32Value();
if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) {
return Throw("Invalid realm index");
Throw("Invalid realm index");
return;
}
Handle<Script> script = Script::New(args[1]->ToString());
if (script.IsEmpty()) return Undefined(isolate);
if (script.IsEmpty()) return;
Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
Handle<Value> result = script->Run();
realm->Exit();
return result;
args.GetReturnValue().Set(result);
}
// Realm.shared is an accessor for a single shared value across realms.
Handle<Value> Shell::RealmSharedGet(Local<String> property,
const AccessorInfo& info) {
void Shell::RealmSharedGet(Local<String> property,
const PropertyCallbackInfo<Value>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (data->realm_shared_.IsEmpty()) return Undefined(isolate);
return Local<Value>::New(isolate, data->realm_shared_);
if (data->realm_shared_.IsEmpty()) return;
info.GetReturnValue().Set(data->realm_shared_);
}
void Shell::RealmSharedSet(Local<String> property,
Local<Value> value,
const AccessorInfo& info) {
const PropertyCallbackInfo<void>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose(isolate);
data->realm_shared_ = Persistent<Value>::New(isolate, value);
data->realm_shared_.Reset(isolate, value);
}
Handle<Value> Shell::Print(const Arguments& args) {
Handle<Value> val = Write(args);
void Shell::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
Write(args);
printf("\n");
fflush(stdout);
return val;
}
Handle<Value> Shell::Write(const Arguments& args) {
void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
if (i != 0) {
@ -437,7 +442,10 @@ Handle<Value> Shell::Write(const Arguments& args) {
// Explicitly catch potential exceptions in toString().
v8::TryCatch try_catch;
Handle<String> str_obj = args[i]->ToString();
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (try_catch.HasCaught()) {
try_catch.ReThrow();
return;
}
v8::String::Utf8Value str(str_obj);
int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
@ -446,32 +454,31 @@ Handle<Value> Shell::Write(const Arguments& args) {
Exit(1);
}
}
return Undefined(args.GetIsolate());
}
Handle<Value> Shell::EnableProfiler(const Arguments& args) {
void Shell::EnableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args) {
V8::ResumeProfiler();
return Undefined(args.GetIsolate());
}
Handle<Value> Shell::DisableProfiler(const Arguments& args) {
void Shell::DisableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args) {
V8::PauseProfiler();
return Undefined(args.GetIsolate());
}
Handle<Value> Shell::Read(const Arguments& args) {
void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value file(args[0]);
if (*file == NULL) {
return Throw("Error loading file");
Throw("Error loading file");
return;
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
return Throw("Error loading file");
Throw("Error loading file");
return;
}
return source;
args.GetReturnValue().Set(source);
}
@ -505,47 +512,52 @@ Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
}
Handle<Value> Shell::Load(const Arguments& args) {
void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
String::Utf8Value file(args[i]);
if (*file == NULL) {
return Throw("Error loading file");
Throw("Error loading file");
return;
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
return Throw("Error loading file");
Throw("Error loading file");
return;
}
if (!ExecuteString(args.GetIsolate(),
source,
String::New(*file),
false,
true)) {
return Throw("Error executing file");
Throw("Error executing file");
return;
}
}
return Undefined(args.GetIsolate());
}
Handle<Value> Shell::Quit(const Arguments& args) {
void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
int exit_code = args[0]->Int32Value();
OnExit();
exit(exit_code);
return Undefined(args.GetIsolate());
}
Handle<Value> Shell::Version(const Arguments& args) {
return String::New(V8::GetVersion());
void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(String::New(V8::GetVersion()));
}
void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
Handle<Context> utility_context;
bool enter_context = !Context::InContext();
if (enter_context) utility_context_->Enter();
if (enter_context) {
utility_context = Local<Context>::New(isolate, utility_context_);
utility_context->Enter();
}
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
@ -582,7 +594,7 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
}
printf("\n");
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
if (enter_context) utility_context_->Exit();
if (enter_context) utility_context->Exit();
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
}
@ -592,11 +604,15 @@ Handle<Array> Shell::GetCompletions(Isolate* isolate,
Handle<String> text,
Handle<String> full) {
HandleScope handle_scope(isolate);
Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
v8::Local<v8::Context> utility_context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(utility_context);
Handle<Object> global = utility_context->Global();
Handle<Value> fun = global->Get(String::New("GetCompletions"));
static const int kArgc = 3;
Handle<Value> argv[kArgc] = { evaluation_context_->Global(), text, full };
v8::Local<v8::Context> evaluation_context =
v8::Local<v8::Context>::New(isolate, evaluation_context_);
Handle<Value> argv[kArgc] = { evaluation_context->Global(), text, full };
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
return handle_scope.Close(Handle<Array>::Cast(val));
}
@ -606,8 +622,10 @@ Handle<Array> Shell::GetCompletions(Isolate* isolate,
Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
Handle<String> message) {
HandleScope handle_scope(isolate);
Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
Handle<Object> global = context->Global();
Handle<Value> fun = global->Get(String::New("DebugMessageDetails"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { message };
@ -619,8 +637,10 @@ Handle<Object> Shell::DebugMessageDetails(Isolate* isolate,
Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
Handle<String> command) {
HandleScope handle_scope(isolate);
Context::Scope context_scope(isolate, utility_context_);
Handle<Object> global = utility_context_->Global();
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Context::Scope context_scope(context);
Handle<Object> global = context->Global();
Handle<Value> fun = global->Get(String::New("DebugCommandToJSONRequest"));
static const int kArgc = 1;
Handle<Value> argv[kArgc] = { command };
@ -632,7 +652,9 @@ Handle<Value> Shell::DebugCommandToJSONRequest(Isolate* isolate,
void Shell::DispatchDebugMessages() {
Isolate* isolate = v8::Isolate::GetCurrent();
HandleScope handle_scope(isolate);
v8::Context::Scope scope(isolate, Shell::evaluation_context_);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, Shell::evaluation_context_);
v8::Context::Scope context_scope(context);
v8::Debug::ProcessDebugMessages();
}
#endif // ENABLE_DEBUGGER_SUPPORT
@ -743,9 +765,13 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
HandleScope scope(isolate);
// If we use the utility context, we have to set the security tokens so that
// utility, evaluation and debug context can all access each other.
utility_context_->SetSecurityToken(Undefined(isolate));
evaluation_context_->SetSecurityToken(Undefined(isolate));
Context::Scope utility_scope(isolate, utility_context_);
v8::Local<v8::Context> utility_context =
v8::Local<v8::Context>::New(isolate, utility_context_);
v8::Local<v8::Context> evaluation_context =
v8::Local<v8::Context>::New(isolate, evaluation_context_);
utility_context->SetSecurityToken(Undefined(isolate));
evaluation_context->SetSecurityToken(Undefined(isolate));
v8::Context::Scope context_scope(utility_context);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
@ -754,7 +780,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global_object());
utility_context_->Global()->Set(String::New("$debug"),
utility_context->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
debug->debug_context()->set_security_token(HEAP->undefined_value());
#endif // ENABLE_DEBUGGER_SUPPORT
@ -923,16 +949,17 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Context::Scope scope(context);
#ifndef V8_SHARED
i::Factory* factory = i::Isolate::Current()->factory();
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
FACTORY->NewFixedArray(js_args.argc());
factory->NewFixedArray(js_args.argc());
for (int j = 0; j < js_args.argc(); j++) {
i::Handle<i::String> arg =
FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
factory->NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
}
i::Handle<i::JSArray> arguments_jsarray =
FACTORY->NewJSArrayWithElements(arguments_array);
factory->NewJSArrayWithElements(arguments_array);
context->Global()->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
#endif // V8_SHARED
@ -1048,24 +1075,40 @@ static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
return chars;
}
static void ReadBufferWeakCallback(v8::Isolate* isolate,
Persistent<Value>* object,
uint8_t* data) {
size_t byte_length = ArrayBuffer::Cast(**object)->ByteLength();
isolate->AdjustAmountOfExternalAllocatedMemory(
-static_cast<intptr_t>(byte_length));
delete[] data;
object->Dispose(isolate);
}
Handle<Value> Shell::ReadBuffer(const Arguments& args) {
void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
ASSERT(sizeof(char) == sizeof(uint8_t)); // NOLINT
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
return Throw("Error loading file");
Throw("Error loading file");
return;
}
Isolate* isolate = args.GetIsolate();
uint8_t* data = reinterpret_cast<uint8_t*>(
ReadChars(args.GetIsolate(), *filename, &length));
if (data == NULL) {
return Throw("Error reading file");
Throw("Error reading file");
return;
}
Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(length);
memcpy(buffer->Data(), data, length);
delete[] data;
return buffer;
Handle<v8::ArrayBuffer> buffer = ArrayBuffer::New(data, length);
v8::Persistent<v8::Value> weak_handle(isolate, buffer);
weak_handle.MakeWeak(isolate, data, ReadBufferWeakCallback);
weak_handle.MarkIndependent();
isolate->AdjustAmountOfExternalAllocatedMemory(length);
args.GetReturnValue().Set(buffer);
}
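Note: ReadBuffer above also demonstrates the new weak-handle protocol: the callback now receives a pointer to the Persistent plus a typed parameter, and must dispose the handle itself. A pared-down version of the same idiom (TrackBuffer and the char* payload are illustrative):

#include <v8.h>
#include <cstdlib>
using namespace v8;

// Frees a malloc'ed backing store once the GC drops the last reference.
static void WeakCallback(Isolate* isolate,
                         Persistent<Value>* handle,
                         char* data) {  // the parameter given to MakeWeak
  std::free(data);
  handle->Dispose(isolate);  // the callback must dispose the handle itself
}

void TrackBuffer(Isolate* isolate, Handle<Value> value, char* data) {
  Persistent<Value> weak(isolate, value);
  weak.MakeWeak(isolate, data, WeakCallback);
  weak.MarkIndependent();    // may be reclaimed by scavenges as well
}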
@ -1106,7 +1149,9 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
void Shell::RunShell(Isolate* isolate) {
Locker locker(isolate);
HandleScope outer_scope(isolate);
Context::Scope context_scope(isolate, evaluation_context_);
v8::Local<v8::Context> context =
v8::Local<v8::Context>::New(isolate, evaluation_context_);
v8::Context::Scope context_scope(context);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Handle<String> name = String::New("(d8)");
LineEditor* console = LineEditor::Get();
@ -1526,6 +1571,13 @@ static void EnableHarmonyTypedArraysViaCommandLine() {
#endif
class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) { return malloc(length); }
virtual void Free(void* data) { free(data); }
};
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
#ifndef V8_SHARED
@ -1534,6 +1586,8 @@ int Shell::Main(int argc, char* argv[]) {
#else
EnableHarmonyTypedArraysViaCommandLine();
#endif
ShellArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
int result = 0;
Isolate* isolate = Isolate::GetCurrent();
DumbLineEditor dumb_line_editor(isolate);
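Note: alongside the callback rewrite, d8.cc adopts the stricter Persistent<> rules: Persistent::New and direct dereference (e.g. utility_context_->Global()) are gone; a handle is Reset() against an isolate and turned into a Local before use. A condensed sketch of the pattern (Save/GlobalOf are made-up names):

#include <v8.h>
using namespace v8;

Persistent<Context> saved;  // empty until Reset()

void Save(Isolate* isolate, Handle<Context> context) {
  saved.Reset(isolate, context);  // replaces Persistent<Context>::New(...)
}

// Caller is assumed to hold a HandleScope.
Handle<Object> GlobalOf(Isolate* isolate) {
  // Persistents can no longer be dereferenced directly; go through a Local.
  Local<Context> context = Local<Context>::New(isolate, saved);
  return context->Global();
}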

94
deps/v8/src/d8.h

@ -300,45 +300,46 @@ class Shell : public i::AllStatic {
#endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_SHARED
static Handle<Value> RealmCurrent(const Arguments& args);
static Handle<Value> RealmOwner(const Arguments& args);
static Handle<Value> RealmGlobal(const Arguments& args);
static Handle<Value> RealmCreate(const Arguments& args);
static Handle<Value> RealmDispose(const Arguments& args);
static Handle<Value> RealmSwitch(const Arguments& args);
static Handle<Value> RealmEval(const Arguments& args);
static Handle<Value> RealmSharedGet(Local<String> property,
const AccessorInfo& info);
static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmSharedGet(Local<String> property,
const PropertyCallbackInfo<Value>& info);
static void RealmSharedSet(Local<String> property,
Local<Value> value,
const AccessorInfo& info);
static Handle<Value> Print(const Arguments& args);
static Handle<Value> Write(const Arguments& args);
static Handle<Value> Quit(const Arguments& args);
static Handle<Value> Version(const Arguments& args);
static Handle<Value> EnableProfiler(const Arguments& args);
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args);
const PropertyCallbackInfo<void>& info);
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
static void EnableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args);
static void DisableProfiler(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Read(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
static Handle<String> ReadFromStdin(Isolate* isolate);
static Handle<Value> ReadLine(const Arguments& args) {
return ReadFromStdin(args.GetIsolate());
static void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(ReadFromStdin(args.GetIsolate()));
}
static Handle<Value> Load(const Arguments& args);
static Handle<Value> ArrayBuffer(const Arguments& args);
static Handle<Value> Int8Array(const Arguments& args);
static Handle<Value> Uint8Array(const Arguments& args);
static Handle<Value> Int16Array(const Arguments& args);
static Handle<Value> Uint16Array(const Arguments& args);
static Handle<Value> Int32Array(const Arguments& args);
static Handle<Value> Uint32Array(const Arguments& args);
static Handle<Value> Float32Array(const Arguments& args);
static Handle<Value> Float64Array(const Arguments& args);
static Handle<Value> Uint8ClampedArray(const Arguments& args);
static Handle<Value> ArrayBufferSlice(const Arguments& args);
static Handle<Value> ArraySubArray(const Arguments& args);
static Handle<Value> ArraySet(const Arguments& args);
static void Load(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ArrayBuffer(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Int8Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Uint8Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Int16Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Uint16Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Int32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Uint32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Float32Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Float64Array(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Uint8ClampedArray(
const v8::FunctionCallbackInfo<v8::Value>& args);
static void ArrayBufferSlice(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ArraySubArray(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ArraySet(const v8::FunctionCallbackInfo<v8::Value>& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@ -365,14 +366,14 @@ class Shell : public i::AllStatic {
// with the current umask. Intermediate directories are created if necessary.
// An exception is not thrown if the directory already exists. Analogous to
// the "mkdir -p" command.
static Handle<Value> OSObject(const Arguments& args);
static Handle<Value> System(const Arguments& args);
static Handle<Value> ChangeDirectory(const Arguments& args);
static Handle<Value> SetEnvironment(const Arguments& args);
static Handle<Value> UnsetEnvironment(const Arguments& args);
static Handle<Value> SetUMask(const Arguments& args);
static Handle<Value> MakeDirectory(const Arguments& args);
static Handle<Value> RemoveDirectory(const Arguments& args);
static void OSObject(const v8::FunctionCallbackInfo<v8::Value>& args);
static void System(const v8::FunctionCallbackInfo<v8::Value>& args);
static void ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args);
static void UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args);
static void SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args);
static void MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args);
static void AddOSMethods(Handle<ObjectTemplate> os_template);
@ -412,9 +413,10 @@ class Shell : public i::AllStatic {
int32_t byteLength,
int32_t byteOffset,
int32_t element_size);
static Handle<Value> CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size);
static void CreateExternalArray(
const v8::FunctionCallbackInfo<v8::Value>& args,
ExternalArrayType type,
int32_t element_size);
static void ExternalArrayWeakCallback(Isolate* isolate,
Persistent<Object>* object,
uint8_t* data);

49
deps/v8/src/debug.cc

@ -121,7 +121,7 @@ BreakLocationIterator::~BreakLocationIterator() {
void BreakLocationIterator::Next() {
AssertNoAllocation nogc;
DisallowHeapAllocation no_gc;
ASSERT(!RinfoDone());
// Iterate through reloc info for code and original code stopping at each
@ -211,14 +211,15 @@ void BreakLocationIterator::Next(int count) {
}
// Find the break point closest to the supplied address.
// Find the break point at the supplied address, or the closest one before
// the address.
void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
// Run through all break points to locate the one closest to the address.
int closest_break_point = 0;
int distance = kMaxInt;
while (!Done()) {
// Check if this break point is closer than what was previously found.
if (this->pc() < pc && pc - this->pc() < distance) {
if (this->pc() <= pc && pc - this->pc() < distance) {
closest_break_point = break_point();
distance = static_cast<int>(pc - this->pc());
// Check whether we can't get any closer.
@ -619,14 +620,14 @@ void ScriptCache::Add(Handle<Script> script) {
(global_handles->Create(*script)));
global_handles->MakeWeak(reinterpret_cast<Object**>(script_.location()),
this,
NULL,
ScriptCache::HandleWeakScript);
entry->value = script_.location();
}
Handle<FixedArray> ScriptCache::GetScripts() {
Handle<FixedArray> instances = FACTORY->NewFixedArray(occupancy());
Factory* factory = Isolate::Current()->factory();
Handle<FixedArray> instances = factory->NewFixedArray(occupancy());
int count = 0;
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry->value != NULL);
@ -664,12 +665,12 @@ void ScriptCache::Clear() {
void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
v8::Persistent<v8::Value>* obj,
void* data) {
ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
// Find the location of the global handle.
Script** location =
reinterpret_cast<Script**>(Utils::OpenHandle(*obj).location());
reinterpret_cast<Script**>(Utils::OpenHandle(**obj).location());
ASSERT((*location)->IsScript());
// Remove the entry from the cache.
@ -678,8 +679,7 @@ void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
obj.Dispose(isolate);
obj.Clear();
obj->Dispose(isolate);
}
@ -699,7 +699,7 @@ void Debug::SetUp(bool create_heap_objects) {
void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
v8::Persistent<v8::Value>* obj,
void* data) {
Debug* debug = reinterpret_cast<Isolate*>(isolate)->debug();
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
@ -727,7 +727,6 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
(global_handles->Create(debug_info)));
global_handles->MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
this,
NULL,
Debug::HandleWeakDebugInfo);
}
@ -790,7 +789,7 @@ bool Debug::CompileDebuggerScript(int index) {
MessageLocation computed_location;
isolate->ComputeLocation(&computed_location);
Handle<Object> message = MessageHandler::MakeMessageObject(
"error_loading_debugger", &computed_location,
isolate, "error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
@ -945,7 +944,9 @@ Object* Debug::Break(Arguments args) {
// Find the break point where execution has stopped.
BreakLocationIterator break_location_iterator(debug_info,
ALL_BREAK_LOCATIONS);
break_location_iterator.FindBreakLocationFromAddress(frame->pc());
// pc points to the instruction after the current one, possibly a break
// location as well, so we subtract one to exclude it from the search.
break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
// Check whether step next reached a new statement.
if (!StepNextContinue(&break_location_iterator, frame)) {
@ -1240,15 +1241,11 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
// Get information in the break point.
BreakPointInfo* break_point_info = BreakPointInfo::cast(result);
Handle<DebugInfo> debug_info = node->debug_info();
Handle<SharedFunctionInfo> shared(debug_info->shared());
int source_position = break_point_info->statement_position()->value();
// Source positions start with zero.
ASSERT(source_position >= 0);
// Find the break point and clear it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
it.FindBreakLocationFromPosition(source_position);
it.FindBreakLocationFromAddress(debug_info->code()->entry() +
break_point_info->code_position()->value());
it.ClearBreakPoint(break_point_object);
// If there are no more break points left remove the debug info for this
@ -1406,7 +1403,9 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// Find the break location where execution has stopped.
BreakLocationIterator it(debug_info, ALL_BREAK_LOCATIONS);
it.FindBreakLocationFromAddress(frame->pc());
// pc points to the instruction after the current one, possibly a break
// location as well, so we subtract one to exclude it from the search.
it.FindBreakLocationFromAddress(frame->pc() - 1);
// Compute whether or not the target is a call target.
bool is_load_or_store = false;
@ -2025,7 +2024,7 @@ void Debug::PrepareForBreakPoints() {
// Ensure no GC in this scope as we are going to use gc_metadata
// field in the Code object to mark active functions.
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
Object* active_code_marker = heap->the_hole_value();
@ -2140,7 +2139,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
while (!done) {
{ // Extra scope for iterator and no-allocation.
heap->EnsureHeapIsIterable();
AssertNoAllocation no_alloc_during_heap_iteration;
DisallowHeapAllocation no_alloc_during_heap_iteration;
HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next();
obj != NULL; obj = iterator.next()) {
@ -2229,6 +2228,8 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// Ensures the debug information is present for shared.
bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
Handle<JSFunction> function) {
Isolate* isolate = shared->GetIsolate();
// Return if we already have the debug info for shared.
if (HasDebugInfo(shared)) {
ASSERT(shared->is_compiled());
@ -2245,7 +2246,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
}
// Create the debug info object.
Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
Handle<DebugInfo> debug_info = isolate->factory()->NewDebugInfo(shared);
// Add debug info to the list.
DebugInfoListNode* node = new DebugInfoListNode(*debug_info);
@ -2476,7 +2477,7 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
HeapIterator iterator(heap);
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
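Note: both FindBreakLocationFromAddress call sites now search from frame->pc() - 1, because the saved pc is a return address pointing one past the break; combined with the "&lt;=" fix in the iterator above, an exact hit now wins. A toy model of the search rule (offsets invented):

#include <cstdio>

// Break locations in a code object, sorted by offset (made-up values).
static const int kBreaks[] = {10, 20, 30};

// Mirrors FindBreakLocationFromAddress: the break at pc, or the closest
// one before it; "<=" is the comparison fixed in the hunk above.
static int FindBreak(int pc) {
  int best = kBreaks[0];
  for (int b : kBreaks) {
    if (b <= pc) best = b;  // the old "<" missed exact hits
  }
  return best;
}

int main() {
  int return_pc = 21;  // frame->pc(): one past the break at offset 20
  std::printf("%d\n", FindBreak(return_pc - 1));  // "- 1" lands on 20
  return 0;
}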

4
deps/v8/src/debug.h

@ -190,7 +190,7 @@ class ScriptCache : private HashMap {
// Weak handle callback for scripts in the cache.
static void HandleWeakScript(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
v8::Persistent<v8::Value>* obj,
void* data);
// List used during GC to temporarily store id's of collected scripts.
@ -387,7 +387,7 @@ class Debug {
// Passed to MakeWeak.
static void HandleWeakDebugInfo(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
v8::Persistent<v8::Value>* obj,
void* data);
friend class Debugger;

88
deps/v8/src/deoptimizer.cc

@ -285,7 +285,7 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
Isolate* isolate = context->GetIsolate();
ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
ASSERT(context->IsNativeContext());
@ -313,7 +313,7 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
void Deoptimizer::VisitAllOptimizedFunctions(
Isolate* isolate,
OptimizedFunctionVisitor* visitor) {
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
Object* context = isolate->heap()->native_contexts_list();
@ -335,7 +335,7 @@ static void PartitionOptimizedFunctions(Context* context,
ZoneList<Code*>* partitions,
Zone* zone,
Object* undefined) {
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
Object* remainder_head = undefined;
Object* remainder_tail = undefined;
@ -388,7 +388,7 @@ class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter {
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
if (FLAG_trace_deopt) {
PrintF("[deoptimize all contexts]\n");
@ -400,7 +400,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
DeoptimizeAllFilter filter;
if (object->IsJSGlobalProxy()) {
Object* proto = object->GetPrototype();
@ -451,7 +451,7 @@ void Deoptimizer::DeoptimizeAllFunctionsForContext(
void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
OptimizedFunctionFilter* filter) {
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
Object* context = isolate->heap()->native_contexts_list();
@ -463,7 +463,7 @@ void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate,
void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
v8::Persistent<v8::Value>* obj,
void* parameter) {
DeoptimizingCodeListNode* node =
reinterpret_cast<DeoptimizingCodeListNode*>(parameter);
@ -558,7 +558,10 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
trace_ = TraceEnabledFor(type, frame_type);
ASSERT(HEAP->allow_allocation(false));
#ifdef DEBUG
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
input_->SetFrameType(frame_type);
@ -608,6 +611,7 @@ void Deoptimizer::PrintFunctionName() {
Deoptimizer::~Deoptimizer() {
ASSERT(input_ == NULL && output_ == NULL);
ASSERT(disallow_heap_allocation_ == NULL);
}
@ -619,7 +623,12 @@ void Deoptimizer::DeleteFrameDescriptions() {
delete[] output_;
input_ = NULL;
output_ = NULL;
ASSERT(!HEAP->allow_allocation(true));
#ifdef DEBUG
CHECK(!AllowHeapAllocation::IsAllowed());
CHECK(disallow_heap_allocation_ != NULL);
delete disallow_heap_allocation_;
disallow_heap_allocation_ = NULL;
#endif // DEBUG
}
@ -1977,56 +1986,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
static bool ObjectToInt32(Object* obj, int32_t* value) {
if (obj->IsSmi()) {
*value = Smi::cast(obj)->value();
return true;
}
if (obj->IsHeapNumber()) {
double num = HeapNumber::cast(obj)->value();
if (FastI2D(FastD2I(num)) != num) {
if (FLAG_trace_osr) {
PrintF("**** %g could not be converted to int32 ****\n",
HeapNumber::cast(obj)->value());
}
return false;
}
*value = FastD2I(num);
return true;
}
return false;
}
static bool ObjectToUint32(Object* obj, uint32_t* value) {
if (obj->IsSmi()) {
if (Smi::cast(obj)->value() < 0) return false;
*value = static_cast<uint32_t>(Smi::cast(obj)->value());
return true;
}
if (obj->IsHeapNumber()) {
double num = HeapNumber::cast(obj)->value();
if ((num < 0) || (FastUI2D(FastD2UI(num)) != num)) {
if (FLAG_trace_osr) {
PrintF("**** %g could not be converted to uint32 ****\n",
HeapNumber::cast(obj)->value());
}
return false;
}
*value = FastD2UI(num);
return true;
}
return false;
}
bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int* input_offset) {
disasm::NameConverter converter;
@ -2070,7 +2029,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_REGISTER: {
int32_t int32_value = 0;
if (!ObjectToInt32(input_object, &int32_value)) return false;
if (!input_object->ToInt32(&int32_value)) return false;
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
@ -2085,7 +2044,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_REGISTER: {
uint32_t uint32_value = 0;
if (!ObjectToUint32(input_object, &uint32_value)) return false;
if (!input_object->ToUint32(&uint32_value)) return false;
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
@ -2132,7 +2091,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_STACK_SLOT: {
int32_t int32_value = 0;
if (!ObjectToInt32(input_object, &int32_value)) return false;
if (!input_object->ToInt32(&int32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
@ -2149,7 +2108,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::UINT32_STACK_SLOT: {
uint32_t uint32_value = 0;
if (!ObjectToUint32(input_object, &uint32_value)) return false;
if (!input_object->ToUint32(&uint32_value)) return false;
int output_index = iterator->Next();
unsigned output_offset =
@ -2750,7 +2709,6 @@ DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
code_ = Handle<Code>::cast(global_handles->Create(code));
global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
this,
NULL,
Deoptimizer::HandleWeakDeoptimizedCode);
}
@ -2855,7 +2813,7 @@ Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
JavaScriptFrame* frame,
int inlined_jsframe_index,
int formal_parameter_count) {
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
int deopt_index = Safepoint::kNoDeoptimizationIndex;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
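Note: the deleted ObjectToInt32/ObjectToUint32 helpers are replaced by calls to input_object->ToInt32()/ToUint32(), presumably new Object methods introduced elsewhere in this patch. Their essential test is a lossless round-trip through truncation; roughly, in standalone form:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Core of the deleted ObjectToInt32 helper: a double holds a valid int32
// only if truncating and widening round-trips losslessly.
static bool DoubleToInt32(double num, std::int32_t* value) {
  if (std::isnan(num) || num < INT32_MIN || num > INT32_MAX) return false;
  std::int32_t truncated = static_cast<std::int32_t>(num);
  if (static_cast<double>(truncated) != num) return false;  // lossy
  *value = truncated;
  return true;
}

int main() {
  std::int32_t v;
  std::printf("%d\n", DoubleToInt32(42.0, &v));  // 1: exact
  std::printf("%d\n", DoubleToInt32(42.5, &v));  // 0: fraction would be lost
  return 0;
}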

39
deps/v8/src/deoptimizer.h

@ -38,6 +38,24 @@
namespace v8 {
namespace internal {
static inline double read_double_value(Address p) {
#ifdef V8_HOST_CAN_READ_UNALIGNED
return Memory::double_at(p);
#else // V8_HOST_CAN_READ_UNALIGNED
// Prevent gcc from using load-double (mips ldc1) on (possibly)
// non-64-bit aligned address.
union conversion {
double d;
uint32_t u[2];
} c;
c.u[0] = *reinterpret_cast<uint32_t*>(p);
c.u[1] = *reinterpret_cast<uint32_t*>(p + 4);
return c.d;
#endif // V8_HOST_CAN_READ_UNALIGNED
}
class FrameDescription;
class TranslationIterator;
class DeoptimizingCodeListNode;
@@ -385,7 +403,7 @@ class Deoptimizer : public Malloced {
// Weak handle callback for deoptimizing code objects.
static void HandleWeakDeoptimizedCode(v8::Isolate* isolate,
v8::Persistent<v8::Value> obj,
v8::Persistent<v8::Value>* obj,
void* data);
// Deoptimize function assuming that function->next_function_link() points
@@ -431,6 +449,9 @@
List<Object*> deferred_arguments_objects_values_;
List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
#ifdef DEBUG
DisallowHeapAllocation* disallow_heap_allocation_;
#endif // DEBUG
bool trace_;
@@ -476,19 +497,7 @@ class FrameDescription {
double GetDoubleFrameSlot(unsigned offset) {
intptr_t* ptr = GetFrameSlotPointer(offset);
#if V8_TARGET_ARCH_MIPS
// Prevent gcc from using load-double (mips ldc1) on (possibly)
// non-64-bit aligned double. Uses two lwc1 instructions.
union conversion {
double d;
uint32_t u[2];
} c;
c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
return c.d;
#else
return *reinterpret_cast<double*>(ptr);
#endif
return read_double_value(reinterpret_cast<Address>(ptr));
}
void SetFrameSlot(unsigned offset, intptr_t value) {
@@ -818,7 +827,7 @@ class SlotRef BASE_EMBEDDED {
}
case DOUBLE: {
double value = Memory::double_at(addr_);
double value = read_double_value(addr_);
return isolate->factory()->NewNumber(value);
}

4
deps/v8/src/disassembler.cc

@@ -115,8 +115,8 @@ static int DecodeIt(Isolate* isolate,
const V8NameConverter& converter,
byte* begin,
byte* end) {
NoHandleAllocation ha(isolate);
AssertNoAllocation no_alloc;
SealHandleScope shs(isolate);
DisallowHeapAllocation no_alloc;
ExternalReferenceEncoder ref_encoder;
Heap* heap = HEAP;
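
The SealHandleScope/DisallowHeapAllocation pair seen here replaces the older NoHandleAllocation/AssertNoAllocation classes throughout this commit. Both are debug-only RAII scopes over per-thread state that the allocation paths assert on. A simplified standalone model of the mechanism (not the real assert-scope.h, which keeps the state per isolate rather than in a C++11 thread_local):

    #include <cassert>

    // Model: a per-thread flag, saved and restored RAII-style, that
    // allocation code checks via IsAllowed() (cf. AllowHeapAllocation).
    class DisallowHeapAllocationModel {
     public:
      DisallowHeapAllocationModel() : old_(allowed_) { allowed_ = false; }
      ~DisallowHeapAllocationModel() { allowed_ = old_; }
      static bool IsAllowed() { return allowed_; }
     private:
      static thread_local bool allowed_;
      bool old_;
    };
    thread_local bool DisallowHeapAllocationModel::allowed_ = true;

    void AllocateSomething() {
      // Fires in debug builds when called inside such a scope.
      assert(DisallowHeapAllocationModel::IsAllowed());
    }
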

8
deps/v8/src/elements.cc

@@ -155,7 +155,7 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
@@ -204,7 +204,7 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
uint32_t to_start,
int raw_copy_size) {
SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
@@ -840,7 +840,7 @@ class ElementsAccessorBase : public ElementsAccessor {
// Fill in the content
{
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len0; i++) {
Object* e = to->get(i);
@@ -2044,7 +2044,7 @@ MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
}
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS: {
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
FixedArray* object_elms = FixedArray::cast(elms);
for (int index = 0; index < number_of_elements; index++) {
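
Note how each converted site passes the DisallowHeapAllocation object into GetWriteBarrierMode: skipping write barriers is only sound while no GC can run, so the scope doubles as evidence at the call site. A sketch of the pattern as used above (V8-internal API, not runnable outside the tree):

    {
      DisallowHeapAllocation no_gc;
      // Taking no_gc by reference ties SKIP_WRITE_BARRIER eligibility
      // to the no-GC region.
      WriteBarrierMode mode = object_elms->GetWriteBarrierMode(no_gc);
      for (int index = 0; index < number_of_elements; index++) {
        object_elms->set(index, (*args)[index], mode);
      }
    }
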

5
deps/v8/src/execution.cc

@@ -107,7 +107,7 @@ static Handle<Object> Invoke(bool is_construct,
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
NoHandleAllocation na(isolate);
SealHandleScope shs(isolate);
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
@@ -641,7 +641,8 @@ Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
Handle<Object> Execution::NewDate(double time, bool* exc) {
Handle<Object> time_obj = FACTORY->NewNumber(time);
Isolate* isolate = Isolate::Current();
Handle<Object> time_obj = isolate->factory()->NewNumber(time);
RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
}

32
deps/v8/src/extensions/externalize-string-extension.cc

@@ -72,26 +72,29 @@ v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
}
v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
const v8::Arguments& args) {
void ExternalizeStringExtension::Externalize(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsString()) {
return v8::ThrowException(v8::String::New(
v8::ThrowException(v8::String::New(
"First parameter to externalizeString() must be a string."));
return;
}
bool force_two_byte = false;
if (args.Length() >= 2) {
if (args[1]->IsBoolean()) {
force_two_byte = args[1]->BooleanValue();
} else {
return v8::ThrowException(v8::String::New(
"Second parameter to externalizeString() must be a boolean."));
v8::ThrowException(v8::String::New(
"Second parameter to externalizeString() must be a boolean."));
return;
}
}
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
if (string->IsExternalString()) {
return v8::ThrowException(v8::String::New(
v8::ThrowException(v8::String::New(
"externalizeString() can't externalize twice."));
return;
}
if (string->IsOneByteRepresentation() && !force_two_byte) {
uint8_t* data = new uint8_t[string->length()];
@@ -115,21 +118,22 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
if (!result) delete resource;
}
if (!result) {
return v8::ThrowException(v8::String::New("externalizeString() failed."));
v8::ThrowException(v8::String::New("externalizeString() failed."));
return;
}
return v8::Undefined();
}
v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
const v8::Arguments& args) {
void ExternalizeStringExtension::IsAscii(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
return v8::ThrowException(v8::String::New(
v8::ThrowException(v8::String::New(
"isAsciiString() requires a single string argument."));
return;
}
return
Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation() ?
v8::True() : v8::False();
bool is_one_byte =
Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation();
args.GetReturnValue().Set(is_one_byte);
}
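
Every extension callback in this commit gets the same mechanical rewrite for the 3.19 API: the v8::Arguments signature returning a Handle<Value> becomes a void function over FunctionCallbackInfo, results go through GetReturnValue(), and each ThrowException is followed by a bare return. A minimal sketch with a hypothetical callback (Echo is not part of this patch):

    #include <v8.h>

    static void Echo(const v8::FunctionCallbackInfo<v8::Value>& args) {
      if (args.Length() < 1 || !args[0]->IsString()) {
        v8::ThrowException(v8::String::New("echo() requires a string."));
        return;  // old style: return v8::ThrowException(...);
      }
      args.GetReturnValue().Set(args[0]);  // old style: return args[0];
    }
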

4
deps/v8/src/extensions/externalize-string-extension.h

@@ -38,8 +38,8 @@ class ExternalizeStringExtension : public v8::Extension {
ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
static v8::Handle<v8::Value> Externalize(const v8::Arguments& args);
static v8::Handle<v8::Value> IsAscii(const v8::Arguments& args);
static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
static void IsAscii(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
private:
static const char* const kSource;

3
deps/v8/src/extensions/gc-extension.cc

@@ -38,13 +38,12 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
}
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
void GCExtension::GC(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args[0]->BooleanValue()) {
HEAP->CollectGarbage(NEW_SPACE, "gc extension");
} else {
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
}
return v8::Undefined();
}

2
deps/v8/src/extensions/gc-extension.h

@@ -38,7 +38,7 @@ class GCExtension : public v8::Extension {
explicit GCExtension(const char* source) : v8::Extension("v8/gc", source) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
static v8::Handle<v8::Value> GC(const v8::Arguments& args);
static void GC(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
};

6
deps/v8/src/extensions/statistics-extension.cc

@@ -58,8 +58,8 @@ static void AddNumber(v8::Local<v8::Object> object,
}
v8::Handle<v8::Value> StatisticsExtension::GetCounters(
const v8::Arguments& args) {
void StatisticsExtension::GetCounters(
const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
@@ -141,7 +141,7 @@ v8::Handle<v8::Value> StatisticsExtension::GetCounters(
"lo_space_commited_bytes");
AddNumber(result, heap->amount_of_external_allocated_memory(),
"amount_of_external_allocated_memory");
return result;
args.GetReturnValue().Set(result);
}

2
deps/v8/src/extensions/statistics-extension.h

@@ -38,7 +38,7 @@ class StatisticsExtension : public v8::Extension {
StatisticsExtension() : v8::Extension("v8/statistics", kSource) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
static v8::Handle<v8::Value> GetCounters(const v8::Arguments& args);
static void GetCounters(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Register();
private:
static const char* const kSource;

54
deps/v8/src/factory.cc

@@ -41,6 +41,14 @@ namespace v8 {
namespace internal {
Handle<Box> Factory::NewBox(Handle<Object> value, PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateBox(*value, pretenure),
Box);
}
Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
ASSERT(0 <= size);
CALL_HEAP_FUNCTION(
@@ -476,8 +484,7 @@ Handle<ExternalArray> Factory::NewExternalArray(int length,
Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
Handle<Object> value) {
ALLOW_HANDLE_DEREF(isolate(),
"converting a handle into a global property cell");
AllowDeferredHandleDereference convert_to_cell;
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateJSGlobalPropertyCell(*value),
@@ -677,9 +684,9 @@ Handle<JSObject> Factory::NewNeanderObject() {
}
Handle<Object> Factory::NewTypeError(const char* type,
Handle<Object> Factory::NewTypeError(const char* message,
Vector< Handle<Object> > args) {
return NewError("MakeTypeError", type, args);
return NewError("MakeTypeError", message, args);
}
@@ -688,9 +695,9 @@ Handle<Object> Factory::NewTypeError(Handle<String> message) {
}
Handle<Object> Factory::NewRangeError(const char* type,
Handle<Object> Factory::NewRangeError(const char* message,
Vector< Handle<Object> > args) {
return NewError("MakeRangeError", type, args);
return NewError("MakeRangeError", message, args);
}
@@ -699,8 +706,9 @@ Handle<Object> Factory::NewRangeError(Handle<String> message) {
}
Handle<Object> Factory::NewSyntaxError(const char* type, Handle<JSArray> args) {
return NewError("MakeSyntaxError", type, args);
Handle<Object> Factory::NewSyntaxError(const char* message,
Handle<JSArray> args) {
return NewError("MakeSyntaxError", message, args);
}
@@ -709,9 +717,9 @@ Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
}
Handle<Object> Factory::NewReferenceError(const char* type,
Handle<Object> Factory::NewReferenceError(const char* message,
Vector< Handle<Object> > args) {
return NewError("MakeReferenceError", type, args);
return NewError("MakeReferenceError", message, args);
}
@@ -721,7 +729,7 @@ Handle<Object> Factory::NewReferenceError(Handle<String> message) {
Handle<Object> Factory::NewError(const char* maker,
const char* type,
const char* message,
Vector< Handle<Object> > args) {
// Instantiate a closeable HandleScope for EscapeFrom.
v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
@@ -730,24 +738,24 @@ Handle<Object> Factory::NewError(const char* maker,
array->set(i, *args[i]);
}
Handle<JSArray> object = NewJSArrayWithElements(array);
Handle<Object> result = NewError(maker, type, object);
Handle<Object> result = NewError(maker, message, object);
return result.EscapeFrom(&scope);
}
Handle<Object> Factory::NewEvalError(const char* type,
Handle<Object> Factory::NewEvalError(const char* message,
Vector< Handle<Object> > args) {
return NewError("MakeEvalError", type, args);
return NewError("MakeEvalError", message, args);
}
Handle<Object> Factory::NewError(const char* type,
Handle<Object> Factory::NewError(const char* message,
Vector< Handle<Object> > args) {
return NewError("MakeError", type, args);
return NewError("MakeError", message, args);
}
Handle<String> Factory::EmergencyNewError(const char* type,
Handle<String> Factory::EmergencyNewError(const char* message,
Handle<JSArray> args) {
const int kBufferSize = 1000;
char buffer[kBufferSize];
@@ -755,8 +763,8 @@ Handle<String> Factory::EmergencyNewError(const char* type,
char* p = &buffer[0];
Vector<char> v(buffer, kBufferSize);
OS::StrNCpy(v, type, space);
space -= Min(space, strlen(type));
OS::StrNCpy(v, message, space);
space -= Min(space, strlen(message));
p = &buffer[kBufferSize] - space;
for (unsigned i = 0; i < ARRAY_SIZE(args); i++) {
@@ -785,7 +793,7 @@ Handle<String> Factory::EmergencyNewError(const char* type,
Handle<Object> Factory::NewError(const char* maker,
const char* type,
const char* message,
Handle<JSArray> args) {
Handle<String> make_str = InternalizeUtf8String(maker);
Handle<Object> fun_obj(
@@ -794,11 +802,11 @@ Handle<Object> Factory::NewError(const char* maker,
// If the builtins haven't been properly configured yet this error
// constructor may not have been defined. Bail out.
if (!fun_obj->IsJSFunction()) {
return EmergencyNewError(type, args);
return EmergencyNewError(message, args);
}
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
Handle<Object> type_obj = InternalizeUtf8String(type);
Handle<Object> argv[] = { type_obj, args };
Handle<Object> message_obj = InternalizeUtf8String(message);
Handle<Object> argv[] = { message_obj, args };
// Invoke the JavaScript factory method. If an exception is thrown while
// running the factory method, use the exception as the result.
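
The type -> message rename reflects what the first argument has always been: the key of a message template in messages.js, which the JavaScript MakeTypeError/MakeRangeError factories expand with args. A hypothetical call site (HandleVector assumed from the internal handles API; the operands are illustrative):

    // 'message' names a template in messages.js; args fill its holes.
    Handle<Object> args[] = { receiver };  // hypothetical operand
    Handle<Object> error = isolate->factory()->NewTypeError(
        "incompatible_method_receiver", HandleVector<Object>(args, 1));
    isolate->Throw(*error);
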

23
deps/v8/src/factory.h

@@ -39,6 +39,11 @@ namespace internal {
class Factory {
public:
// Allocate a new boxed value.
Handle<Box> NewBox(
Handle<Object> value,
PretenureFlag pretenure = NOT_TENURED);
// Allocate a new uninitialized fixed array.
Handle<FixedArray> NewFixedArray(
int size,
@@ -369,33 +374,33 @@ class Factory {
// Interface for creating error objects.
Handle<Object> NewError(const char* maker, const char* type,
Handle<Object> NewError(const char* maker, const char* message,
Handle<JSArray> args);
Handle<String> EmergencyNewError(const char* type, Handle<JSArray> args);
Handle<Object> NewError(const char* maker, const char* type,
Handle<String> EmergencyNewError(const char* message, Handle<JSArray> args);
Handle<Object> NewError(const char* maker, const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewError(const char* type,
Handle<Object> NewError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewError(Handle<String> message);
Handle<Object> NewError(const char* constructor,
Handle<String> message);
Handle<Object> NewTypeError(const char* type,
Handle<Object> NewTypeError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewTypeError(Handle<String> message);
Handle<Object> NewRangeError(const char* type,
Handle<Object> NewRangeError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewRangeError(Handle<String> message);
Handle<Object> NewSyntaxError(const char* type, Handle<JSArray> args);
Handle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
Handle<Object> NewSyntaxError(Handle<String> message);
Handle<Object> NewReferenceError(const char* type,
Handle<Object> NewReferenceError(const char* message,
Vector< Handle<Object> > args);
Handle<Object> NewReferenceError(Handle<String> message);
Handle<Object> NewEvalError(const char* type,
Handle<Object> NewEvalError(const char* message,
Vector< Handle<Object> > args);

20
deps/v8/src/flag-definitions.h

@@ -170,6 +170,7 @@ DEFINE_bool(harmony_array_buffer, false,
"enable harmony array buffer")
DEFINE_implication(harmony_typed_arrays, harmony_array_buffer)
DEFINE_bool(harmony_generators, false, "enable harmony generators")
DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
@@ -177,7 +178,9 @@ DEFINE_implication(harmony, harmony_symbols)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony, harmony_generators)
// TODO(wingo): Re-enable when GC bug that appeared in r15060 is gone.
// DEFINE_implication(harmony, harmony_generators)
DEFINE_implication(harmony, harmony_iteration)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
// TODO[dslomov] add harmony => harmony_typed_arrays
@@ -192,12 +195,17 @@ DEFINE_bool(compiled_keyed_stores, true, "use optimizing compiler to "
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
DEFINE_bool(pretenuring, true, "allocate objects in old space")
// TODO(hpayer): We will remove this flag as soon as we have pretenuring
// support for specific allocation sites.
DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
DEFINE_bool(track_fields, true, "track fields with only smi values")
DEFINE_bool(track_double_fields, true, "track fields with double values")
DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
DEFINE_bool(track_computed_fields, true, "track computed boilerplate fields")
DEFINE_implication(track_double_fields, track_fields)
DEFINE_implication(track_heap_object_fields, track_fields)
DEFINE_implication(track_computed_fields, track_fields)
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -251,6 +259,8 @@ DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
DEFINE_bool(array_index_dehoisting, true,
"perform array index dehoisting")
DEFINE_bool(analyze_environment_liveness, true,
"analyze liveness of environment slots and zap dead values")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
@@ -258,7 +268,7 @@ DEFINE_bool(unreachable_code_elimination, false,
"eliminate unreachable code (hidden behind soft deopts)")
DEFINE_bool(track_allocation_sites, true,
"Use allocation site info to reduce transitions")
DEFINE_bool(optimize_constructed_arrays, false,
DEFINE_bool(optimize_constructed_arrays, true,
"Use allocation site info on constructed arrays")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
@@ -377,6 +387,8 @@ DEFINE_bool(stack_trace_on_abort, true,
"print a stack trace if an assertion failure occurs")
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
DEFINE_bool(trace, false, "trace function calls")
DEFINE_bool(mask_constants_with_cookie,
true,
@@ -636,8 +648,6 @@ DEFINE_bool(enable_slow_asserts, false,
"enable asserts that are slow to execute")
// codegen-ia32.cc / codegen-arm.cc
DEFINE_bool(trace_codegen, false,
"print name of functions for which code is generated")
DEFINE_bool(print_source, false, "pretty print source code")
DEFINE_bool(print_builtin_source, false,
"pretty print source code for builtins")

2
deps/v8/src/frames.cc

@@ -777,7 +777,7 @@ void JavaScriptFrame::PrintTop(Isolate* isolate,
bool print_line_number) {
// constructor calls
HandleScope scope(isolate);
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
JavaScriptFrameIterator it(isolate);
while (!it.done()) {
if (it.frame()->is_java_script()) {

15
deps/v8/src/full-codegen.cc

@@ -163,6 +163,12 @@ void BreakableStatementChecker::VisitForInStatement(ForInStatement* stmt) {
}
void BreakableStatementChecker::VisitForOfStatement(ForOfStatement* stmt) {
// For-of is breakable because of the next() call.
is_breakable_ = true;
}
void BreakableStatementChecker::VisitTryCatchStatement(
TryCatchStatement* stmt) {
// Mark try catch as breakable to avoid adding a break slot in front of it.
@@ -304,10 +310,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
int len = String::cast(script->source())->length();
isolate->counters()->total_full_codegen_source_size()->Increment(len);
}
if (FLAG_trace_codegen) {
PrintF("Full Compiler - ");
}
CodeGenerator::MakeCodePrologue(info);
CodeGenerator::MakeCodePrologue(info, "full");
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
#ifdef ENABLE_GDB_JIT_INTERFACE
@@ -923,10 +926,10 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
}
void FullCodeGenerator::EmitGeneratorSend(CallRuntime* expr) {
void FullCodeGenerator::EmitGeneratorNext(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 2);
EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::SEND);
EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::NEXT);
}

5
deps/v8/src/full-codegen.h

@@ -491,6 +491,11 @@ class FullCodeGenerator: public AstVisitor {
INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL
void EmitSeqStringSetCharCheck(Register string,
Register index,
Register value,
uint32_t encoding_mask);
// Platform-specific code for resuming generators.
void EmitGeneratorResume(Expression *generator,
Expression *value,

2
deps/v8/src/gdb-jit.cc

@@ -2062,7 +2062,7 @@ void GDBJITInterface::AddCode(const char* name,
if (!FLAG_gdbjit) return;
ScopedLock lock(mutex.Pointer());
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
if (e->value != NULL && !IsLineInfoTagged(e->value)) return;

19
deps/v8/src/generator.js

@@ -34,26 +34,16 @@
// ----------------------------------------------------------------------------
// TODO(wingo): Give link to specification. For now, the following diagram is
// the spec:
// http://wiki.ecmascript.org/lib/exe/fetch.php?cache=cache&media=harmony:es6_generator_object_model_3-29-13.png
// Generator functions and objects are specified by ES6, sections 15.19.3 and
// 15.19.4.
function GeneratorObjectNext() {
function GeneratorObjectNext(value) {
if (!IS_GENERATOR(this)) {
throw MakeTypeError('incompatible_method_receiver',
['[Generator].prototype.next', this]);
}
return %_GeneratorSend(this, void 0);
}
function GeneratorObjectSend(value) {
if (!IS_GENERATOR(this)) {
throw MakeTypeError('incompatible_method_receiver',
['[Generator].prototype.send', this]);
}
return %_GeneratorSend(this, value);
return %_GeneratorNext(this, value);
}
function GeneratorObjectThrow(exn) {
@@ -71,7 +61,6 @@ function SetUpGenerators() {
InstallFunctions(GeneratorObjectPrototype,
DONT_ENUM | DONT_DELETE | READ_ONLY,
["next", GeneratorObjectNext,
"send", GeneratorObjectSend,
"throw", GeneratorObjectThrow]);
%SetProperty(GeneratorObjectPrototype, "constructor",
GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);

146
deps/v8/src/global-handles.cc

@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// TODO(dcarney): remove
#define V8_ALLOW_ACCESS_TO_PERSISTENT_IMPLICIT
#include "v8.h"
#include "api.h"
@@ -92,7 +89,7 @@ class GlobalHandles::Node {
set_partially_dependent(false);
set_in_new_space_list(false);
parameter_or_next_free_.next_free = NULL;
near_death_callback_ = NULL;
weak_reference_callback_ = NULL;
}
#endif
@@ -105,7 +102,7 @@ class GlobalHandles::Node {
*first_free = this;
}
void Acquire(Object* object, GlobalHandles* global_handles) {
void Acquire(Object* object) {
ASSERT(state() == FREE);
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
@@ -113,11 +110,11 @@ class GlobalHandles::Node {
set_partially_dependent(false);
set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
near_death_callback_ = NULL;
IncreaseBlockUses(global_handles);
weak_reference_callback_ = NULL;
IncreaseBlockUses();
}
void Release(GlobalHandles* global_handles) {
void Release() {
ASSERT(state() != FREE);
set_state(FREE);
#ifdef ENABLE_EXTRA_CHECKS
@@ -126,11 +123,9 @@
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
set_partially_dependent(false);
near_death_callback_ = NULL;
weak_reference_callback_ = NULL;
#endif
parameter_or_next_free_.next_free = global_handles->first_free_;
global_handles->first_free_ = this;
DecreaseBlockUses(global_handles);
DecreaseBlockUses();
}
// Object slot accessors.
@@ -201,9 +196,9 @@ class GlobalHandles::Node {
set_independent(true);
}
void MarkPartiallyDependent(GlobalHandles* global_handles) {
void MarkPartiallyDependent() {
ASSERT(state() != FREE);
if (global_handles->isolate()->heap()->InNewSpace(object_)) {
if (GetGlobalHandles()->isolate()->heap()->InNewSpace(object_)) {
set_partially_dependent(true);
}
}
@@ -233,41 +228,31 @@ class GlobalHandles::Node {
parameter_or_next_free_.next_free = value;
}
void MakeWeak(GlobalHandles* global_handles,
void* parameter,
RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
void MakeWeak(void* parameter,
RevivableCallback weak_reference_callback) {
ASSERT(state() != FREE);
set_state(WEAK);
set_parameter(parameter);
if (weak_reference_callback != NULL) {
flags_ = IsWeakCallback::update(flags_, true);
near_death_callback_ =
reinterpret_cast<NearDeathCallback>(weak_reference_callback);
} else {
flags_ = IsWeakCallback::update(flags_, false);
near_death_callback_ = near_death_callback;
}
weak_reference_callback_ = weak_reference_callback;
}
void ClearWeakness(GlobalHandles* global_handles) {
void ClearWeakness() {
ASSERT(state() != FREE);
set_state(NORMAL);
set_parameter(NULL);
}
bool PostGarbageCollectionProcessing(Isolate* isolate,
GlobalHandles* global_handles) {
bool PostGarbageCollectionProcessing(Isolate* isolate) {
if (state() != Node::PENDING) return false;
if (near_death_callback_ == NULL) {
Release(global_handles);
if (weak_reference_callback_ == NULL) {
Release();
return false;
}
void* par = parameter();
set_state(NEAR_DEATH);
set_parameter(NULL);
v8::Persistent<v8::Value> object = ToApi<v8::Value>(handle());
Object** object = location();
{
// Check that we are not passing a finalized external string to
// the callback.
@@ -277,19 +262,9 @@ class GlobalHandles::Node {
ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
VMState<EXTERNAL> state(isolate);
if (near_death_callback_ != NULL) {
if (IsWeakCallback::decode(flags_)) {
RevivableCallback callback =
reinterpret_cast<RevivableCallback>(near_death_callback_);
callback(reinterpret_cast<v8::Isolate*>(isolate),
&object,
par);
} else {
near_death_callback_(reinterpret_cast<v8::Isolate*>(isolate),
object,
weak_reference_callback_(reinterpret_cast<v8::Isolate*>(isolate),
reinterpret_cast<Persistent<Value>*>(&object),
par);
}
}
}
// Absence of explicit cleanup or revival of weak handle
// in most of the cases would lead to memory leak.
@@ -299,8 +274,9 @@
private:
inline NodeBlock* FindBlock();
inline void IncreaseBlockUses(GlobalHandles* global_handles);
inline void DecreaseBlockUses(GlobalHandles* global_handles);
inline GlobalHandles* GetGlobalHandles();
inline void IncreaseBlockUses();
inline void DecreaseBlockUses();
// Storage for object pointer.
// Placed first to avoid offset computation.
@@ -321,12 +297,11 @@
class IsIndependent: public BitField<bool, 4, 1> {};
class IsPartiallyDependent: public BitField<bool, 5, 1> {};
class IsInNewSpaceList: public BitField<bool, 6, 1> {};
class IsWeakCallback: public BitField<bool, 7, 1> {};
uint8_t flags_;
// Handle specific callback - might be a weak reference in disguise.
NearDeathCallback near_death_callback_;
RevivableCallback weak_reference_callback_;
// Provided data for callback. In FREE state, this is used for
// the free list link.
@@ -343,8 +318,12 @@ class GlobalHandles::NodeBlock {
public:
static const int kSize = 256;
explicit NodeBlock(NodeBlock* next)
: next_(next), used_nodes_(0), next_used_(NULL), prev_used_(NULL) {}
explicit NodeBlock(GlobalHandles* global_handles, NodeBlock* next)
: next_(next),
used_nodes_(0),
next_used_(NULL),
prev_used_(NULL),
global_handles_(global_handles) {}
void PutNodesOnFreeList(Node** first_free) {
for (int i = kSize - 1; i >= 0; --i) {
@@ -357,11 +336,11 @@
return &nodes_[index];
}
void IncreaseUses(GlobalHandles* global_handles) {
void IncreaseUses() {
ASSERT(used_nodes_ < kSize);
if (used_nodes_++ == 0) {
NodeBlock* old_first = global_handles->first_used_block_;
global_handles->first_used_block_ = this;
NodeBlock* old_first = global_handles_->first_used_block_;
global_handles_->first_used_block_ = this;
next_used_ = old_first;
prev_used_ = NULL;
if (old_first == NULL) return;
@@ -369,17 +348,19 @@
}
}
void DecreaseUses(GlobalHandles* global_handles) {
void DecreaseUses() {
ASSERT(used_nodes_ > 0);
if (--used_nodes_ == 0) {
if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
if (this == global_handles->first_used_block_) {
global_handles->first_used_block_ = next_used_;
if (this == global_handles_->first_used_block_) {
global_handles_->first_used_block_ = next_used_;
}
}
}
GlobalHandles* global_handles() { return global_handles_; }
// Next block in the list of all blocks.
NodeBlock* next() const { return next_; }
@@ -393,9 +374,15 @@
int used_nodes_;
NodeBlock* next_used_;
NodeBlock* prev_used_;
GlobalHandles* global_handles_;
};
GlobalHandles* GlobalHandles::Node::GetGlobalHandles() {
return FindBlock()->global_handles();
}
GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
intptr_t ptr = reinterpret_cast<intptr_t>(this);
ptr = ptr - index_ * sizeof(Node);
@@ -405,13 +392,23 @@ GlobalHandles::NodeBlock* GlobalHandles::Node::FindBlock() {
}
void GlobalHandles::Node::IncreaseBlockUses(GlobalHandles* global_handles) {
FindBlock()->IncreaseUses(global_handles);
void GlobalHandles::Node::IncreaseBlockUses() {
NodeBlock* node_block = FindBlock();
node_block->IncreaseUses();
GlobalHandles* global_handles = node_block->global_handles();
global_handles->isolate()->counters()->global_handles()->Increment();
global_handles->number_of_global_handles_++;
}
void GlobalHandles::Node::DecreaseBlockUses(GlobalHandles* global_handles) {
FindBlock()->DecreaseUses(global_handles);
void GlobalHandles::Node::DecreaseBlockUses() {
NodeBlock* node_block = FindBlock();
GlobalHandles* global_handles = node_block->global_handles();
parameter_or_next_free_.next_free = global_handles->first_free_;
global_handles->first_free_ = this;
node_block->DecreaseUses();
global_handles->isolate()->counters()->global_handles()->Decrement();
global_handles->number_of_global_handles_--;
}
@@ -465,17 +462,15 @@ GlobalHandles::~GlobalHandles() {
Handle<Object> GlobalHandles::Create(Object* value) {
isolate_->counters()->global_handles()->Increment();
number_of_global_handles_++;
if (first_free_ == NULL) {
first_block_ = new NodeBlock(first_block_);
first_block_ = new NodeBlock(this, first_block_);
first_block_->PutNodesOnFreeList(&first_free_);
}
ASSERT(first_free_ != NULL);
// Take the first node in the free list.
Node* result = first_free_;
first_free_ = result->next_free();
result->Acquire(value, this);
result->Acquire(value);
if (isolate_->heap()->InNewSpace(value) &&
!result->is_in_new_space_list()) {
new_space_nodes_.Add(result);
@@ -486,27 +481,20 @@ Handle<Object> GlobalHandles::Create(Object* value) {
void GlobalHandles::Destroy(Object** location) {
isolate_->counters()->global_handles()->Decrement();
number_of_global_handles_--;
if (location == NULL) return;
Node::FromLocation(location)->Release(this);
if (location != NULL) Node::FromLocation(location)->Release();
}
void GlobalHandles::MakeWeak(Object** location,
void* parameter,
RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback) {
ASSERT((weak_reference_callback == NULL) != (near_death_callback == NULL));
Node::FromLocation(location)->MakeWeak(this,
parameter,
weak_reference_callback,
near_death_callback);
RevivableCallback weak_reference_callback) {
ASSERT(weak_reference_callback != NULL);
Node::FromLocation(location)->MakeWeak(parameter, weak_reference_callback);
}
void GlobalHandles::ClearWeakness(Object** location) {
Node::FromLocation(location)->ClearWeakness(this);
Node::FromLocation(location)->ClearWeakness();
}
@@ -516,7 +504,7 @@ void GlobalHandles::MarkIndependent(Object** location) {
void GlobalHandles::MarkPartiallyDependent(Object** location) {
Node::FromLocation(location)->MarkPartiallyDependent(this);
Node::FromLocation(location)->MarkPartiallyDependent();
}
@@ -653,7 +641,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
continue;
}
node->clear_partially_dependent();
if (node->PostGarbageCollectionProcessing(isolate_, this)) {
if (node->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of
// PostGarbageCollection processing. The current node might
@@ -669,7 +657,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
return next_gc_likely_to_collect_more;
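
With the NearDeathCallback variant gone, every weak reference funnels through the single RevivableCallback shape, which receives a pointer to the Persistent rather than a copy. A sketch of registration against the new internal signatures in this diff (OnWeak is hypothetical):

    // New callback shape: the Persistent arrives by pointer.
    static void OnWeak(v8::Isolate* isolate,
                       v8::Persistent<v8::Value>* object,
                       void* parameter) {
      // Revive or dispose here; doing neither usually leaks
      // (cf. the comment in Node::PostGarbageCollectionProcessing).
    }

    // MakeWeak is now static and requires a non-NULL callback.
    GlobalHandles::MakeWeak(handle.location(), NULL, &OnWeak);
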

15
deps/v8/src/global-handles.h

@@ -128,7 +128,7 @@ class GlobalHandles {
Handle<Object> Create(Object* value);
// Destroy a global handle.
void Destroy(Object** location);
static void Destroy(Object** location);
typedef WeakReferenceCallbacks<v8::Value, void>::Revivable RevivableCallback;
@@ -138,10 +138,9 @@
// function is invoked (for each handle) with the handle and corresponding
// parameter as arguments. Note: cleared means set to Smi::FromInt(0). The
// reason is that Smi::FromInt(0) does not change during garbage collection.
void MakeWeak(Object** location,
void* parameter,
RevivableCallback weak_reference_callback,
NearDeathCallback near_death_callback);
static void MakeWeak(Object** location,
void* parameter,
RevivableCallback weak_reference_callback);
void RecordStats(HeapStats* stats);
@@ -158,13 +157,13 @@
}
// Clear the weakness of a global handle.
void ClearWeakness(Object** location);
static void ClearWeakness(Object** location);
// Mark a global handle as independent.
void MarkIndependent(Object** location);
static void MarkIndependent(Object** location);
// Mark the reference to this object externally unreachable.
void MarkPartiallyDependent(Object** location);
static void MarkPartiallyDependent(Object** location);
static bool IsIndependent(Object** location);

113
deps/v8/src/handles-inl.h

@@ -57,7 +57,8 @@ inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
if (location_ == other.location_) return true;
if (location_ == NULL || other.location_ == NULL) return false;
// Dereferencing deferred handles to check object equality is safe.
SLOW_ASSERT(IsDereferenceAllowed(true) && other.IsDereferenceAllowed(true));
SLOW_ASSERT(IsDereferenceAllowed(NO_DEFERRED_CHECK) &&
other.IsDereferenceAllowed(NO_DEFERRED_CHECK));
return *location_ == *other.location_;
}
@@ -65,20 +66,21 @@
template <typename T>
inline T* Handle<T>::operator*() const {
ASSERT(location_ != NULL && !(*location_)->IsFailure());
SLOW_ASSERT(IsDereferenceAllowed(false));
SLOW_ASSERT(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
return *BitCast<T**>(location_);
}
template <typename T>
inline T** Handle<T>::location() const {
ASSERT(location_ == NULL || !(*location_)->IsFailure());
SLOW_ASSERT(location_ == NULL || IsDereferenceAllowed(false));
SLOW_ASSERT(location_ == NULL ||
IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
return location_;
}
#ifdef DEBUG
template <typename T>
bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
ASSERT(location_ != NULL);
Object* object = *BitCast<T**>(location_);
if (object->IsSmi()) return true;
@@ -90,22 +92,15 @@ bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
handle < roots_array_start + Heap::kStrongRootListLength) {
return true;
}
if (isolate->optimizing_compiler_thread()->IsOptimizerThread() &&
!Heap::RelocationLock::IsLockedByOptimizerThread(isolate->heap())) {
return false;
if (!AllowHandleDereference::IsAllowed()) return false;
if (mode == INCLUDE_DEFERRED_CHECK &&
!AllowDeferredHandleDereference::IsAllowed()) {
// Accessing maps and internalized strings is safe.
if (heap_object->IsMap()) return true;
if (heap_object->IsInternalizedString()) return true;
return !isolate->IsDeferredHandle(handle);
}
switch (isolate->HandleDereferenceGuardState()) {
case HandleDereferenceGuard::ALLOW:
return true;
case HandleDereferenceGuard::DISALLOW:
return false;
case HandleDereferenceGuard::DISALLOW_DEFERRED:
// Accessing maps and internalized strings is safe.
if (heap_object->IsMap()) return true;
if (heap_object->IsInternalizedString()) return true;
return allow_deferred || !isolate->IsDeferredHandle(handle);
}
return false;
return true;
}
#endif
@@ -122,31 +117,37 @@ HandleScope::HandleScope(Isolate* isolate) {
HandleScope::~HandleScope() {
CloseScope();
CloseScope(isolate_, prev_next_, prev_limit_);
}
void HandleScope::CloseScope() {
void HandleScope::CloseScope(Isolate* isolate,
Object** prev_next,
Object** prev_limit) {
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
current->next = prev_next_;
isolate->handle_scope_data();
current->next = prev_next;
current->level--;
if (current->limit != prev_limit_) {
current->limit = prev_limit_;
DeleteExtensions(isolate_);
if (current->limit != prev_limit) {
current->limit = prev_limit;
DeleteExtensions(isolate);
}
#ifdef ENABLE_EXTRA_CHECKS
ZapRange(prev_next_, prev_limit_);
ZapRange(prev_next, prev_limit);
#endif
}
template <typename T>
Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
T* value = *handle_value;
// Throw away all handles in the current scope.
CloseScope();
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
T* value = *handle_value;
// Throw away all handles in the current scope.
CloseScope(isolate_, prev_next_, prev_limit_);
// Allocate one handle in the parent scope.
ASSERT(current->level > 0);
Handle<T> result(CreateHandle<T>(isolate_, value));
@@ -161,6 +162,7 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
template <typename T>
T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
ASSERT(AllowHandleAllocation::IsAllowed());
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
@@ -178,44 +180,29 @@ T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
#ifdef DEBUG
inline NoHandleAllocation::NoHandleAllocation(Isolate* isolate)
: isolate_(isolate) {
inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
// Make sure the current thread is allowed to create handles to begin with.
CHECK(AllowHandleAllocation::IsAllowed());
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
if (active_) {
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
current->limit = current->next;
level_ = current->level;
current->level = 0;
}
}
inline NoHandleAllocation::~NoHandleAllocation() {
if (active_) {
// Restore state in current handle scope to re-enable handle
// allocations.
v8::ImplementationUtilities::HandleScopeData* data =
isolate_->handle_scope_data();
ASSERT_EQ(0, data->level);
data->level = level_;
}
// Shrink the current handle scope to make it impossible to do
// handle allocations without an explicit handle scope.
limit_ = current->limit;
current->limit = current->next;
level_ = current->level;
current->level = 0;
}
HandleDereferenceGuard::HandleDereferenceGuard(Isolate* isolate, State state)
: isolate_(isolate) {
old_state_ = isolate_->HandleDereferenceGuardState();
isolate_->SetHandleDereferenceGuardState(state);
}
HandleDereferenceGuard::~HandleDereferenceGuard() {
isolate_->SetHandleDereferenceGuardState(old_state_);
inline SealHandleScope::~SealHandleScope() {
// Restore state in current handle scope to re-enable handle
// allocations.
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
ASSERT_EQ(0, current->level);
current->level = level_;
ASSERT_EQ(current->next, current->limit);
current->limit = limit_;
}
#endif
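
SealHandleScope's constructor saves limit_ and level_, then sets limit to next and level to zero, so a CreateHandle issued under the seal finds no room and the Extend path asserts that a real scope is open; entering an inner HandleScope is the sanctioned escape. A usage sketch (V8-internal, debug-build behavior only):

    {
      SealHandleScope seal(isolate);
      // Handle<Object> bad = isolate->factory()->NewNumber(1.0);  // asserts
      {
        HandleScope inner(isolate);  // re-opens room for handles
        Handle<Object> ok = isolate->factory()->NewNumber(1.0);
        // ok dies with inner; nothing escapes into the sealed scope.
      }
    }
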

21
deps/v8/src/handles.cc

@@ -345,9 +345,9 @@ Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
// associated with the wrapper and get rid of both the wrapper and the
// handle.
static void ClearWrapperCache(v8::Isolate* v8_isolate,
Persistent<v8::Value> handle,
Persistent<v8::Value>* handle,
void*) {
Handle<Object> cache = Utils::OpenHandle(*handle);
Handle<Object> cache = Utils::OpenHandle(**handle);
JSValue* wrapper = JSValue::cast(*cache);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();
ASSERT(foreign->foreign_address() ==
@@ -387,7 +387,6 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
// garbage collector when it is not used anymore.
Handle<Object> handle = isolate->global_handles()->Create(*result);
isolate->global_handles()->MakeWeak(handle.location(),
NULL,
NULL,
&ClearWrapperCache);
script->wrapper()->set_foreign_address(
@@ -457,7 +456,7 @@ Handle<FixedArray> CalculateLineEnds(Handle<String> src,
List<int> line_ends(line_count_estimate);
Isolate* isolate = src->GetIsolate();
{
AssertNoAllocation no_heap_allocation; // ensure vectors stay valid.
DisallowHeapAllocation no_allocation; // ensure vectors stay valid.
// Dispatch on type of strings.
String::FlatContent content = src->GetFlatContent();
ASSERT(content.IsFlat());
@@ -485,7 +484,7 @@
// Convert code position into line number.
int GetScriptLineNumber(Handle<Script> script, int code_pos) {
InitScriptLineEnds(script);
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
const int line_ends_len = line_ends_array->length();
@@ -512,7 +511,7 @@ int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
int line_number = GetScriptLineNumber(script, code_pos);
if (line_number == -1) return -1;
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
line_number = line_number - script->line_offset()->value();
if (line_number == 0) return code_pos + script->column_offset()->value();
@@ -522,7 +521,7 @@ }
}
int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
if (!script->line_ends()->IsUndefined()) {
return GetScriptLineNumber(script, code_pos);
}
@@ -567,7 +566,8 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
#if ENABLE_EXTRA_CHECKS
CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
#endif
return result;
return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
result);
}
@@ -592,7 +592,8 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
#endif
}
}
return result;
return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
result);
}
@@ -802,7 +803,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
if (details.type() != FIELD) {
indices = Handle<FixedArray>();
} else {
int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
int field_index = descs->GetFieldIndex(i);
if (field_index >= map->inobject_properties()) {
field_index = -(field_index - map->inobject_properties() + 1);
}

54
deps/v8/src/handles.h

@@ -61,7 +61,7 @@ class Handle {
location_ = reinterpret_cast<T**>(handle.location_);
}
INLINE(T* operator ->() const) { return operator*(); }
INLINE(T* operator->() const) { return operator*(); }
// Check if this handle refers to the exact same object as the other handle.
INLINE(bool is_identical_to(const Handle<T> other) const);
@@ -85,7 +85,9 @@ class Handle {
inline Handle<T> EscapeFrom(v8::HandleScope* scope);
#ifdef DEBUG
bool IsDereferenceAllowed(bool allow_deferred) const;
enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
bool IsDereferenceAllowed(DereferenceCheckMode mode) const;
#endif // DEBUG
private:
@@ -155,18 +157,21 @@ class HandleScope {
void* operator new(size_t size);
void operator delete(void* size_t);
inline void CloseScope();
Isolate* isolate_;
Object** prev_next_;
Object** prev_limit_;
// Close the handle scope resetting limits to a previous state.
static inline void CloseScope(Isolate* isolate,
Object** prev_next,
Object** prev_limit);
// Extend the handle scope making room for more handles.
static internal::Object** Extend(Isolate* isolate);
#ifdef ENABLE_EXTRA_CHECKS
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
static void ZapRange(Object** start, Object** end);
#endif
friend class v8::HandleScope;
@@ -327,45 +332,24 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
Handle<Object> key,
Handle<Object> value);
class NoHandleAllocation BASE_EMBEDDED {
public:
#ifndef DEBUG
explicit NoHandleAllocation(Isolate* isolate) {}
~NoHandleAllocation() {}
#else
explicit inline NoHandleAllocation(Isolate* isolate);
inline ~NoHandleAllocation();
private:
Isolate* isolate_;
int level_;
bool active_;
#endif
};
class HandleDereferenceGuard BASE_EMBEDDED {
// Seal off the current HandleScope so that new handles can only be created
// if a new HandleScope is entered.
class SealHandleScope BASE_EMBEDDED {
public:
enum State { ALLOW, DISALLOW, DISALLOW_DEFERRED };
#ifndef DEBUG
HandleDereferenceGuard(Isolate* isolate, State state) { }
~HandleDereferenceGuard() { }
explicit SealHandleScope(Isolate* isolate) {}
~SealHandleScope() {}
#else
inline HandleDereferenceGuard(Isolate* isolate, State state);
inline ~HandleDereferenceGuard();
explicit inline SealHandleScope(Isolate* isolate);
inline ~SealHandleScope();
private:
Isolate* isolate_;
State old_state_;
Object** limit_;
int level_;
#endif
};
#ifdef DEBUG
#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe) \
HandleDereferenceGuard allow_deref(isolate, \
HandleDereferenceGuard::ALLOW);
#else
#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe)
#endif // DEBUG
} } // namespace v8::internal
#endif // V8_HANDLES_H_

64
deps/v8/src/heap-inl.h

@@ -211,8 +211,7 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
SLOW_ASSERT(!isolate_->optimizing_compiler_thread()->IsOptimizerThread());
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
ASSERT(AllowHandleAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
retry_space == OLD_DATA_SPACE ||
@@ -642,21 +641,6 @@ Isolate* Heap::isolate() {
return __maybe_object__)
#ifdef DEBUG
inline bool Heap::allow_allocation(bool new_state) {
bool old = allocation_allowed_;
allocation_allowed_ = new_state;
return old;
}
inline void Heap::set_allow_allocation(bool allocation_allowed) {
allocation_allowed_ = allocation_allowed;
}
#endif
void ExternalStringTable::AddString(String* string) {
ASSERT(string->IsExternalString());
if (heap_->InNewSpace(string)) {
@@ -867,52 +851,6 @@ DisallowAllocationFailure::~DisallowAllocationFailure() {
}
#ifdef DEBUG
bool EnterAllocationScope(Isolate* isolate, bool allow_allocation) {
bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
bool last_state = isolate->heap()->IsAllocationAllowed();
if (active) {
// TODO(yangguo): Make HandleDereferenceGuard avoid isolate mutation in the
// same way if running on the optimizer thread.
isolate->heap()->set_allow_allocation(allow_allocation);
}
return last_state;
}
void ExitAllocationScope(Isolate* isolate, bool last_state) {
bool active = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
if (active) {
isolate->heap()->set_allow_allocation(last_state);
}
}
AssertNoAllocation::AssertNoAllocation()
: last_state_(EnterAllocationScope(ISOLATE, false)) {
}
AssertNoAllocation::~AssertNoAllocation() {
ExitAllocationScope(ISOLATE, last_state_);
}
DisableAssertNoAllocation::DisableAssertNoAllocation()
: last_state_(EnterAllocationScope(ISOLATE, true)) {
}
DisableAssertNoAllocation::~DisableAssertNoAllocation() {
ExitAllocationScope(ISOLATE, last_state_);
}
#else
AssertNoAllocation::AssertNoAllocation() { }
AssertNoAllocation::~AssertNoAllocation() { }
DisableAssertNoAllocation::DisableAssertNoAllocation() { }
DisableAssertNoAllocation::~DisableAssertNoAllocation() { }
#endif
} } // namespace v8::internal
#endif // V8_HEAP_INL_H_

10
deps/v8/src/heap-snapshot-generator.cc

@@ -637,7 +637,7 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
// First perform a full GC in order to avoid dead objects.
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"HeapSnapshotsCollection::FindHeapObjectById");
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
HeapObject* object = NULL;
HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
// Make sure that object with the given id is still reachable.
@@ -1210,10 +1210,6 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry,
"inferred_name", shared->inferred_name(),
SharedFunctionInfo::kInferredNameOffset);
SetInternalReference(obj, entry,
"this_property_assignments",
shared->this_property_assignments(),
SharedFunctionInfo::kThisPropertyAssignmentsOffset);
SetWeakReference(obj, entry,
1, shared->initial_map(),
SharedFunctionInfo::kInitialMapOffset);
@@ -1827,7 +1823,7 @@ void V8HeapExplorer::TagGlobalObjects() {
}
}
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
for (int i = 0, l = enumerator.count(); i < l; ++i) {
objects_tags_.SetTag(*enumerator.at(i), urls[i]);
}
@@ -2219,7 +2215,7 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
// The following code uses heap iterators, so we want the heap to be
// stable. It should follow TagGlobalObjects as that can allocate.
AssertNoAllocation no_alloc;
DisallowHeapAllocation no_alloc;
#ifdef VERIFY_HEAP
debug_heap->Verify();

293
deps/v8/src/heap.cc

@@ -113,13 +113,11 @@ Heap::Heap()
remembered_unmapped_pages_index_(0),
unflattened_strings_length_(0),
#ifdef DEBUG
allocation_allowed_(true),
allocation_timeout_(0),
disallow_allocation_failure_(false),
#endif // DEBUG
new_space_high_promotion_mode_active_(false),
old_gen_promotion_limit_(kMinimumPromotionLimit),
old_gen_allocation_limit_(kMinimumAllocationLimit),
old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
size_of_old_gen_at_last_old_space_gc_(0),
external_allocation_limit_(0),
amount_of_external_allocated_memory_(0),
@@ -182,6 +180,7 @@ Heap::Heap()
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
native_contexts_list_ = NULL;
array_buffers_list_ = Smi::FromInt(0);
mark_compact_collector_.heap_ = this;
external_string_table_.heap_ = this;
// Put a dummy entry in the remembered pages so we can find the list the
@@ -282,7 +281,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
}
// Is enough data promoted to justify a global GC?
if (OldGenerationPromotionLimitReached()) {
if (OldGenerationAllocationLimitReached()) {
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
*reason = "promotion limit reached";
return MARK_COMPACTOR;
@@ -419,24 +418,25 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
isolate_->transcendental_cache()->Clear();
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
{ AllowHeapAllocation for_the_first_part_of_prologue;
isolate_->transcendental_cache()->Clear();
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
if (FLAG_flush_code && FLAG_flush_code_incrementally) {
mark_compact_collector()->EnableCodeFlushing(true);
}
if (FLAG_flush_code && FLAG_flush_code_incrementally) {
mark_compact_collector()->EnableCodeFlushing(true);
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
if (FLAG_verify_heap) {
Verify();
}
#endif
}
#ifdef DEBUG
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
allow_allocation(false);
ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
if (FLAG_gc_verbose) Print();
@@ -481,8 +481,9 @@ void Heap::GarbageCollectionEpilogue() {
}
#endif
AllowHeapAllocation for_the_rest_of_the_epilogue;
#ifdef DEBUG
allow_allocation(true);
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
@@ -644,6 +645,8 @@ bool Heap::CollectGarbage(AllocationSpace space,
bool next_gc_likely_to_collect_more = false;
{ GCTracer tracer(this, gc_reason, collector_reason);
ASSERT(AllowHeapAllocation::IsAllowed());
DisallowHeapAllocation no_allocation_during_gc;
GarbageCollectionPrologue();
// The GC count was incremented in the prologue. Tell the tracer about
// it.
@@ -916,10 +919,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
old_gen_promotion_limit_ =
OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_allocation_limit_ =
OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
old_generation_allocation_limit_ =
OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
old_gen_exhausted_ = false;
} else {
@@ -938,7 +939,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// maximum capacity indicates that most objects will be promoted.
// To decrease scavenger pauses and final mark-sweep pauses, we
// have to limit maximal capacity of the young generation.
new_space_high_promotion_mode_active_ = true;
SetNewSpaceHighPromotionModeActive(true);
if (FLAG_trace_gc) {
PrintPID("Limited new space size due to high promotion rate: %d MB\n",
new_space_.InitialCapacity() / MB);
@@ -947,7 +948,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// heuristic indicator of whether to pretenure or not, we trigger
// deoptimization here to take advantage of pre-tenuring as soon as
// possible.
if (FLAG_pretenure_literals) {
if (FLAG_pretenuring) {
isolate_->stack_guard()->FullDeopt();
}
} else if (new_space_high_promotion_mode_active_ &&
@@ -956,14 +957,14 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
// Decreasing low survival rates might indicate that the above high
// promotion mode is over and we should allow the young generation
// to grow again.
new_space_high_promotion_mode_active_ = false;
SetNewSpaceHighPromotionModeActive(false);
if (FLAG_trace_gc) {
PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
new_space_.MaximumCapacity() / MB);
}
// Trigger deoptimization here to turn off pre-tenuring as soon as
// possible.
if (FLAG_pretenure_literals) {
if (FLAG_pretenuring) {
isolate_->stack_guard()->FullDeopt();
}
}
@@ -980,7 +981,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
gc_post_processing_depth_++;
{ DisableAssertNoAllocation allow_allocation;
{ AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
isolate_->global_handles()->PostGarbageCollectionProcessing(
@@ -1539,11 +1540,6 @@ static Object* ProcessFunctionWeakReferences(Heap* heap,
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
Object* undefined = undefined_value();
Object* head = undefined;
Context* tail = NULL;
Object* candidate = native_contexts_list_;
// We don't record weak slots during marking or scavenges.
// Instead we do it once when we complete mark-compact cycle.
// Note that write barrier has no effect if we are already in the middle of
@@ -1551,6 +1547,16 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
bool record_slots =
gc_state() == MARK_COMPACT &&
mark_compact_collector()->is_compacting();
ProcessArrayBuffers(retainer, record_slots);
ProcessNativeContexts(retainer, record_slots);
}
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
bool record_slots) {
Object* undefined = undefined_value();
Object* head = undefined;
Context* tail = NULL;
Object* candidate = native_contexts_list_;
while (candidate != undefined) {
// Check whether to keep the candidate in the list.
@@ -1619,8 +1625,103 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
}
template <class T>
struct WeakListVisitor;
template <class T>
static Object* VisitWeakList(Object* list,
MarkCompactCollector* collector,
WeakObjectRetainer* retainer, bool record_slots) {
Object* head = Smi::FromInt(0);
T* tail = NULL;
while (list != Smi::FromInt(0)) {
Object* retained = retainer->RetainAs(list);
if (retained != NULL) {
if (head == Smi::FromInt(0)) {
head = retained;
} else {
ASSERT(tail != NULL);
WeakListVisitor<T>::set_weak_next(tail, retained);
if (record_slots) {
Object** next_slot =
HeapObject::RawField(tail, WeakListVisitor<T>::kWeakNextOffset);
collector->RecordSlot(next_slot, next_slot, retained);
}
}
tail = reinterpret_cast<T*>(retained);
WeakListVisitor<T>::VisitLiveObject(
tail, collector, retainer, record_slots);
}
list = WeakListVisitor<T>::get_weak_next(reinterpret_cast<T*>(list));
}
if (tail != NULL) {
tail->set_weak_next(Smi::FromInt(0));
}
return head;
}
template<>
struct WeakListVisitor<JSTypedArray> {
static void set_weak_next(JSTypedArray* obj, Object* next) {
obj->set_weak_next(next);
}
static Object* get_weak_next(JSTypedArray* obj) {
return obj->weak_next();
}
static void VisitLiveObject(JSTypedArray* obj,
MarkCompactCollector* collector,
WeakObjectRetainer* retainer,
bool record_slots) {}
static const int kWeakNextOffset = JSTypedArray::kWeakNextOffset;
};
template<>
struct WeakListVisitor<JSArrayBuffer> {
static void set_weak_next(JSArrayBuffer* obj, Object* next) {
obj->set_weak_next(next);
}
static Object* get_weak_next(JSArrayBuffer* obj) {
return obj->weak_next();
}
static void VisitLiveObject(JSArrayBuffer* array_buffer,
MarkCompactCollector* collector,
WeakObjectRetainer* retainer,
bool record_slots) {
Object* typed_array_obj =
VisitWeakList<JSTypedArray>(array_buffer->weak_first_array(),
collector, retainer, record_slots);
array_buffer->set_weak_first_array(typed_array_obj);
if (typed_array_obj != Smi::FromInt(0) && record_slots) {
Object** slot = HeapObject::RawField(
array_buffer, JSArrayBuffer::kWeakFirstArrayOffset);
collector->RecordSlot(slot, slot, typed_array_obj);
}
}
static const int kWeakNextOffset = JSArrayBuffer::kWeakNextOffset;
};
void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
bool record_slots) {
Object* array_buffer_obj =
VisitWeakList<JSArrayBuffer>(array_buffers_list(),
mark_compact_collector(),
retainer, record_slots);
set_array_buffers_list(array_buffer_obj);
}
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
AssertNoAllocation no_allocation;
DisallowHeapAllocation no_allocation;
// Both the external string table and the string table may contain
// external strings, but neither lists them exhaustively, nor is the
@@ -1794,6 +1895,14 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
table_.Register(kVisitJSArrayBuffer,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
table_.Register(kVisitJSTypedArray,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
table_.Register(kVisitJSRegExp,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
Visit);
@@ -2679,7 +2788,6 @@ MaybeObject* Heap::AllocateHeapNumber(double value) {
// This version of AllocateHeapNumber is optimized for
// allocation in new space.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
Object* result;
{ MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2702,6 +2810,15 @@ MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
}
MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
Box* result;
MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
if (!maybe_result->To(&result)) return maybe_result;
result->set_value(value);
return result;
}
MaybeObject* Heap::CreateOddball(const char* to_string,
Object* to_number,
byte kind) {
@@ -2846,6 +2963,13 @@ bool Heap::CreateInitialObjects() {
}
set_the_hole_value(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("uninitialized",
Smi::FromInt(-1),
Oddball::kUninitialized);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_uninitialized_value(Oddball::cast(obj));
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Smi::FromInt(-4),
Oddball::kArgumentMarker);
@@ -2964,7 +3088,18 @@ bool Heap::CreateInitialObjects() {
}
set_observation_state(JSObject::cast(obj));
// Handling of script id generation is in FACTORY->NewScript.
{ MaybeObject* maybe_obj = AllocateSymbol();
if (!maybe_obj->ToObject(&obj)) return false;
}
set_frozen_symbol(Symbol::cast(obj));
{ MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
// Handling of script id generation is in Factory::NewScript.
set_last_script_id(undefined_value());
// Initialize keyed lookup cache.
@@ -3368,7 +3503,6 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
share->set_ast_node_count(0);
share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
share->set_counters(0);
@@ -3383,7 +3517,6 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_function_token_position(0);
// All compiler hints default to false or 0.
share->set_compiler_hints(0);
share->set_this_property_assignments_count(0);
share->set_opt_count(0);
return share;
@@ -3567,7 +3700,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
ConsString* cons_string = ConsString::cast(result);
WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
cons_string->set_length(length);
@@ -3648,7 +3781,7 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
SlicedString* sliced_string = SlicedString::cast(result);
sliced_string->set_length(length);
sliced_string->set_hash_field(String::kEmptyHashField);
@@ -4113,7 +4246,7 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
// This calls Copy directly rather than using Heap::AllocateRaw so we
// duplicate the check here.
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
// Check that the size of the boilerplate matches our
// expectations. The ArgumentsAccessStub::GenerateNewObject relies
@@ -4152,20 +4285,6 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
}
static bool HasDuplicates(DescriptorArray* descriptors) {
int count = descriptors->number_of_descriptors();
if (count > 1) {
Name* prev_key = descriptors->GetKey(0);
for (int i = 1; i != count; i++) {
Name* current_key = descriptors->GetKey(i);
if (prev_key == current_key) return true;
prev_key = current_key;
}
}
return false;
}
MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
ASSERT(!fun->has_initial_map());
@@ -4200,48 +4319,6 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
map->set_prototype(prototype);
ASSERT(map->has_fast_object_elements());
// If the function has only simple this property assignments add
// field descriptors for these to the initial map as the object
// cannot be constructed without having these properties. Guard by
// the inline_new flag so we only change the map if we generate a
// specialized construct stub.
ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
if (!fun->shared()->is_generator() &&
fun->shared()->CanGenerateInlineConstructor(prototype)) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
// Inline constructor can only handle inobject properties.
fun->shared()->ForbidInlineConstructor();
} else {
DescriptorArray* descriptors;
MaybeObject* maybe_descriptors = DescriptorArray::Allocate(count);
if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
DescriptorArray::WhitenessWitness witness(descriptors);
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsInternalizedString());
// TODO(verwaest): Since we cannot update the boilerplate's map yet,
// initialize to the worst case.
FieldDescriptor field(name, i, NONE, Representation::Tagged());
descriptors->Set(i, &field, witness);
}
descriptors->Sort();
// The descriptors may contain duplicates because the compiler does not
// guarantee the uniqueness of property names (it would have required
// quadratic time). Once the descriptors are sorted we can check for
// duplicates in linear time.
if (HasDuplicates(descriptors)) {
fun->shared()->ForbidInlineConstructor();
} else {
map->InitializeDescriptors(descriptors);
map->set_pre_allocated_property_fields(count);
map->set_unused_property_fields(in_object_properties - count);
}
}
}
if (!fun->shared()->is_generator()) {
fun->shared()->StartInobjectSlackTracking(map);
}
@@ -4293,10 +4370,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
int prop_size =
map->pre_allocated_property_fields() +
map->unused_property_fields() -
map->inobject_properties();
int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
Object* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
@@ -4333,10 +4407,7 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
// Allocate the backing storage for the properties.
int prop_size =
map->pre_allocated_property_fields() +
map->unused_property_fields() -
map->inobject_properties();
int prop_size = map->InitialPropertiesLength();
ASSERT(prop_size >= 0);
Object* properties;
{ MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
@@ -5319,7 +5390,7 @@ MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
result->set_length(len);
// Copy the content
AssertNoAllocation no_gc;
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
return result;
@@ -5742,7 +5813,7 @@ bool Heap::IsHeapIterable() {
void Heap::EnsureHeapIsIterable() {
ASSERT(IsAllocationAllowed());
ASSERT(AllowHeapAllocation::IsAllowed());
if (!IsHeapIterable()) {
CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
}
@@ -5959,10 +6030,8 @@ void Heap::ReportHeapStatistics(const char* title) {
USE(title);
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
title, gc_count_);
PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_promotion_limit_);
PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
old_gen_allocation_limit_);
PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
old_generation_allocation_limit_);
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
@@ -7063,7 +7132,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
visitor.TransitiveClosure();
}
AssertNoAllocation no_alloc;
DisallowHeapAllocation no_allocation_;
};
@@ -7512,6 +7581,8 @@ GCTracer::~GCTracer() {
PrintF("intracompaction_ptrs=%.1f ",
scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
PrintF("weakmap_process=%.1f ", scopes_[Scope::MC_WEAKMAP_PROCESS]);
PrintF("weakmap_clear=%.1f ", scopes_[Scope::MC_WEAKMAP_CLEAR]);
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
@@ -7749,7 +7820,7 @@ void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
Object* object = list_[i];
JSFunction* getter_fun;
{ AssertNoAllocation assert;
{ DisallowHeapAllocation no_gc;
// Skip possible holes in the list.
if (object->IsTheHole()) continue;
if (isolate->heap()->InNewSpace(object) || budget == 0) {
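The ProcessWeakReferences split above introduces a generic pattern: VisitWeakList walks an intrusive weak list, and a WeakListVisitor<T> trait supplies the per-type next-pointer accessors, so JSArrayBuffer and JSTypedArray chains can share one traversal. Below is a minimal standalone sketch of that trait-based traversal, with an explicit "alive" flag standing in for the retainer callback; all names in it are illustrative, not V8's:

#include <cstddef>

template <class T> struct WeakListTraits;  // Per-type accessors for the link.

struct Node {
  Node* weak_next;
  bool alive;  // Stand-in for "retainer keeps this object".
};

template <>
struct WeakListTraits<Node> {
  static Node* GetNext(Node* n) { return n->weak_next; }
  static void SetNext(Node* n, Node* next) { n->weak_next = next; }
};

// Walks the list, drops dead elements, and relinks the survivors,
// mirroring how VisitWeakList rebuilds the list around retained objects.
template <class T>
T* PruneWeakList(T* list) {
  T* head = NULL;
  T* tail = NULL;
  for (T* current = list; current != NULL;
       current = WeakListTraits<T>::GetNext(current)) {
    if (!current->alive) continue;  // The retainer dropped this element.
    if (head == NULL) {
      head = current;
    } else {
      WeakListTraits<T>::SetNext(tail, current);
    }
    tail = current;
  }
  if (tail != NULL) WeakListTraits<T>::SetNext(tail, NULL);
  return head;
}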

139
deps/v8/src/heap.h

@@ -31,6 +31,7 @@
#include <cmath>
#include "allocation.h"
#include "assert-scope.h"
#include "globals.h"
#include "incremental-marking.h"
#include "list.h"
@@ -58,6 +59,7 @@ namespace internal {
V(Oddball, null_value, NullValue) \
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(Oddball, uninitialized_value, UninitializedValue) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, meta_map, MetaMap) \
@@ -181,7 +183,10 @@ namespace internal {
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
V(JSObject, observation_state, ObservationState) \
V(Map, external_map, ExternalMap)
V(Map, external_map, ExternalMap) \
V(Symbol, frozen_symbol, FrozenSymbol) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -288,10 +293,10 @@ namespace internal {
V(hidden_stack_trace_string, "v8::hidden_stack_trace") \
V(query_colon_string, "(?:)") \
V(Generator_string, "Generator") \
V(send_string, "send") \
V(throw_string, "throw") \
V(done_string, "done") \
V(value_string, "value")
V(value_string, "value") \
V(next_string, "next")
// Forward declarations.
class GCTracer;
@@ -547,7 +552,7 @@ class Heap {
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
int MaxNewSpaceAllocationSize() { return InitialSemiSpaceSize() * 3/4; }
int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 3/4; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -934,6 +939,10 @@ class Heap {
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSGlobalPropertyCell(Object* value);
// Allocate Box.
MUST_USE_RESULT MaybeObject* AllocateBox(Object* value,
PretenureFlag pretenure);
// Allocates a fixed array initialized with undefined values
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -1343,6 +1352,12 @@ class Heap {
}
Object* native_contexts_list() { return native_contexts_list_; }
void set_array_buffers_list(Object* object) {
array_buffers_list_ = object;
}
Object* array_buffers_list() { return array_buffers_list_; }
// Number of mark-sweeps.
unsigned int ms_count() { return ms_count_; }
@@ -1493,10 +1508,6 @@ class Heap {
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
#ifdef DEBUG
bool IsAllocationAllowed() { return allocation_allowed_; }
inline void set_allow_allocation(bool allocation_allowed);
inline bool allow_allocation(bool enable);
bool disallow_allocation_failure() {
return disallow_allocation_failure_;
}
@@ -1546,7 +1557,12 @@ class Heap {
// Predicate that governs global pre-tenuring decisions based on observed
// promotion rates of previous collections.
inline bool ShouldGloballyPretenure() {
return new_space_high_promotion_mode_active_;
return FLAG_pretenuring && new_space_high_promotion_mode_active_;
}
// This is only needed for testing high promotion mode.
void SetNewSpaceHighPromotionModeActive(bool mode) {
new_space_high_promotion_mode_active_ = mode;
}
inline PretenureFlag GetPretenureMode() {
@@ -1561,44 +1577,23 @@ class Heap {
return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
// True if we have reached the allocation limit in the old generation that
// should force the next GC (caused normally) to be a full one.
inline bool OldGenerationPromotionLimitReached() {
return PromotedTotalSize() > old_gen_promotion_limit_;
}
inline intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ - PromotedTotalSize();
return old_generation_allocation_limit_ - PromotedTotalSize();
}
inline intptr_t OldGenerationCapacityAvailable() {
return max_old_generation_size_ - PromotedTotalSize();
}
static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
static const intptr_t kMinimumAllocationLimit =
static const intptr_t kMinimumOldGenerationAllocationLimit =
8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size) {
const int divisor = FLAG_stress_compaction ? 10 :
new_space_high_promotion_mode_active_ ? 1 : 3;
intptr_t limit =
Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
limit += new_space_.Capacity();
// TODO(hpayer): Can be removed when pretenuring is supported for all
// allocation sites.
if (IsHighSurvivalRate() && IsStableOrIncreasingSurvivalTrend()) {
limit *= 2;
}
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
}
intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
const int divisor = FLAG_stress_compaction ? 8 :
new_space_high_promotion_mode_active_ ? 1 : 2;
intptr_t limit =
Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
Max(old_gen_size + old_gen_size / divisor,
kMinimumOldGenerationAllocationLimit);
limit += new_space_.Capacity();
// TODO(hpayer): Can be removed when pretenuring is supported for all
// allocation sites.
@@ -1679,22 +1674,14 @@ class Heap {
if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
intptr_t total_promoted = PromotedTotalSize();
intptr_t adjusted_promotion_limit =
old_gen_promotion_limit_ - new_space_.Capacity();
if (total_promoted >= adjusted_promotion_limit) return true;
intptr_t adjusted_allocation_limit =
old_gen_allocation_limit_ - new_space_.Capacity() / 5;
old_generation_allocation_limit_ - new_space_.Capacity();
if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
return false;
}
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
@@ -2000,8 +1987,6 @@ class Heap {
#undef ROOT_ACCESSOR
#ifdef DEBUG
bool allocation_allowed_;
// If the --gc-interval flag is set to a positive value, this
// variable holds the value indicating the number of allocations
// remain until the next failure and garbage collection.
@@ -2019,13 +2004,9 @@ class Heap {
// Limit that triggers a global GC on the next (normally caused) GC. This
// is checked when we have already decided to do a GC to help determine
// which collector to invoke.
intptr_t old_gen_promotion_limit_;
// Limit that triggers a global GC as soon as is reasonable. This is
// checked before expanding a paged space in the old generation and on
// every allocation in large object space.
intptr_t old_gen_allocation_limit_;
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
// Used to adjust the limits that control the timing of the next GC.
intptr_t size_of_old_gen_at_last_old_space_gc_;
@@ -2043,10 +2024,12 @@ class Heap {
// Indicates that an allocation has failed in the old generation since the
// last GC.
int old_gen_exhausted_;
bool old_gen_exhausted_;
Object* native_contexts_list_;
Object* array_buffers_list_;
StoreBufferRebuilder store_buffer_rebuilder_;
struct StringTypeTable {
@@ -2190,6 +2173,9 @@ class Heap {
// Code to be run before and after mark-compact.
void MarkCompactPrologue();
void ProcessNativeContexts(WeakObjectRetainer* retainer, bool record_slots);
void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool record_slots);
// Record statistics before and after garbage collection.
void ReportStatisticsBeforeGC();
void ReportStatisticsAfterGC();
@@ -2724,43 +2710,6 @@ class DescriptorLookupCache {
};
// A helper class to document/test C++ scopes where we do not
// expect a GC. Usage:
//
// /* Allocation not allowed: we cannot handle a GC in this scope. */
// { AssertNoAllocation nogc;
// ...
// }
#ifdef DEBUG
inline bool EnterAllocationScope(Isolate* isolate, bool allow_allocation);
inline void ExitAllocationScope(Isolate* isolate, bool last_state);
#endif
class AssertNoAllocation {
public:
inline AssertNoAllocation();
inline ~AssertNoAllocation();
#ifdef DEBUG
private:
bool last_state_;
#endif
};
class DisableAssertNoAllocation {
public:
inline DisableAssertNoAllocation();
inline ~DisableAssertNoAllocation();
#ifdef DEBUG
private:
bool last_state_;
#endif
};
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
@@ -2780,6 +2729,8 @@ class GCTracer BASE_EMBEDDED {
MC_UPDATE_POINTERS_TO_EVACUATED,
MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
MC_UPDATE_MISC_POINTERS,
MC_WEAKMAP_PROCESS,
MC_WEAKMAP_CLEAR,
MC_FLUSH_CODE,
kNumberOfScopes
};
@@ -3075,7 +3026,7 @@ class PathTracer : public ObjectVisitor {
what_to_find_(what_to_find),
visit_mode_(visit_mode),
object_stack_(20),
no_alloc() {}
no_allocation() {}
virtual void VisitPointers(Object** start, Object** end);
@@ -3104,7 +3055,7 @@ class PathTracer : public ObjectVisitor {
VisitMode visit_mode_;
List<Object*> object_stack_;
AssertNoAllocation no_alloc; // i.e. no gc allowed.
DisallowHeapAllocation no_allocation; // i.e. no gc allowed.
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
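A recurring change in heap.h and heap.cc above is the retirement of AssertNoAllocation/DisableAssertNoAllocation in favor of the assert-scope types (DisallowHeapAllocation, AllowHeapAllocation) from the newly included assert-scope.h. Here is a sketch of the underlying RAII idea, under the simplifying assumption of a single global flag (real V8 keeps this state per thread); the names below are hypothetical:

#include <cassert>

// Hypothetical stand-in for V8's per-thread assert state.
static bool allocation_allowed = true;

// Saves the current state on entry and restores it on exit, so scopes nest.
class DisallowAllocationScope {
 public:
  DisallowAllocationScope() : last_state_(allocation_allowed) {
    allocation_allowed = false;
  }
  ~DisallowAllocationScope() { allocation_allowed = last_state_; }
 private:
  bool last_state_;
};

class AllowAllocationScope {
 public:
  AllowAllocationScope() : last_state_(allocation_allowed) {
    allocation_allowed = true;
  }
  ~AllowAllocationScope() { allocation_allowed = last_state_; }
 private:
  bool last_state_;
};

void AllocateSomething() {
  // Mirrors ASSERT(AllowHeapAllocation::IsAllowed()) in the hunks above.
  assert(allocation_allowed);
  // ... allocation work ...
}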

267
deps/v8/src/hydrogen-environment-liveness.cc

@@ -0,0 +1,267 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "hydrogen-environment-liveness.h"
namespace v8 {
namespace internal {
EnvironmentSlotLivenessAnalyzer::EnvironmentSlotLivenessAnalyzer(
HGraph* graph)
: graph_(graph),
zone_(graph->isolate()),
zone_scope_(&zone_, DELETE_ON_EXIT),
block_count_(graph->blocks()->length()),
maximum_environment_size_(graph->maximum_environment_size()),
collect_markers_(true),
last_simulate_(NULL) {
if (maximum_environment_size_ == 0) return;
live_at_block_start_ =
new(zone()) ZoneList<BitVector*>(block_count_, zone());
first_simulate_ = new(zone()) ZoneList<HSimulate*>(block_count_, zone());
first_simulate_invalid_for_index_ =
new(zone()) ZoneList<BitVector*>(block_count_, zone());
markers_ = new(zone())
ZoneList<HEnvironmentMarker*>(maximum_environment_size_, zone());
went_live_since_last_simulate_ =
new(zone()) BitVector(maximum_environment_size_, zone());
for (int i = 0; i < block_count_; ++i) {
live_at_block_start_->Add(
new(zone()) BitVector(maximum_environment_size_, zone()), zone());
first_simulate_->Add(NULL, zone());
first_simulate_invalid_for_index_->Add(
new(zone()) BitVector(maximum_environment_size_, zone()), zone());
}
}
void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlot(int index,
HSimulate* simulate) {
int operand_index = simulate->ToOperandIndex(index);
if (operand_index == -1) {
simulate->AddAssignedValue(index, graph_->GetConstantUndefined());
} else {
simulate->SetOperandAt(operand_index, graph_->GetConstantUndefined());
}
}
void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsInSuccessors(
HBasicBlock* block,
BitVector* live) {
// When a value is live in successor A but dead in B, we must
// explicitly zap it in B.
for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
HBasicBlock* successor = it.Current();
int successor_id = successor->block_id();
BitVector* live_in_successor = live_at_block_start_->at(successor_id);
if (live_in_successor->Equals(*live)) continue;
for (int i = 0; i < live->length(); ++i) {
if (!live->Contains(i)) continue;
if (live_in_successor->Contains(i)) continue;
if (first_simulate_invalid_for_index_->at(successor_id)->Contains(i)) {
continue;
}
HSimulate* simulate = first_simulate_->at(successor_id);
if (simulate == NULL) continue;
ASSERT(simulate->closure().is_identical_to(
block->last_environment()->closure()));
ZapEnvironmentSlot(i, simulate);
}
}
}
void EnvironmentSlotLivenessAnalyzer::ZapEnvironmentSlotsForInstruction(
HEnvironmentMarker* marker) {
if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
HSimulate* simulate = marker->next_simulate();
if (simulate != NULL) {
ASSERT(simulate->closure().is_identical_to(marker->closure()));
ZapEnvironmentSlot(marker->index(), simulate);
}
}
void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtBlockEnd(
HBasicBlock* block,
BitVector* live) {
// Liveness at the end of each block: union of liveness in successors.
live->Clear();
for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
live->Union(*live_at_block_start_->at(it.Current()->block_id()));
}
}
void EnvironmentSlotLivenessAnalyzer::UpdateLivenessAtInstruction(
HInstruction* instr,
BitVector* live) {
switch (instr->opcode()) {
case HValue::kEnvironmentMarker: {
HEnvironmentMarker* marker = HEnvironmentMarker::cast(instr);
int index = marker->index();
if (!live->Contains(index)) {
marker->SetFlag(HValue::kEndsLiveRange);
} else {
marker->ClearFlag(HValue::kEndsLiveRange);
}
if (!went_live_since_last_simulate_->Contains(index)) {
marker->set_next_simulate(last_simulate_);
}
if (marker->kind() == HEnvironmentMarker::LOOKUP) {
live->Add(index);
} else {
ASSERT(marker->kind() == HEnvironmentMarker::BIND);
live->Remove(index);
went_live_since_last_simulate_->Add(index);
}
if (collect_markers_) {
// Populate |markers_| list during the first pass.
markers_->Add(marker, &zone_);
}
break;
}
case HValue::kLeaveInlined:
// No environment values are live at the end of an inlined section.
live->Clear();
last_simulate_ = NULL;
// The following ASSERTs guard the assumption used in case
// kEnterInlined below:
ASSERT(instr->next()->IsSimulate());
ASSERT(instr->next()->next()->IsGoto());
break;
case HValue::kEnterInlined: {
// An environment value is live here iff it is live at any return
// target block. Here we make use of the fact that the end of an
// inline sequence always looks like this: HLeaveInlined, HSimulate,
// HGoto (to return_target block), with no environment lookups in
// between (see ASSERTs above).
HEnterInlined* enter = HEnterInlined::cast(instr);
live->Clear();
for (int i = 0; i < enter->return_targets()->length(); ++i) {
int return_id = enter->return_targets()->at(i)->block_id();
// When an AbnormalExit is involved, it can happen that the return
// target block doesn't actually exist.
if (return_id < live_at_block_start_->length()) {
live->Union(*live_at_block_start_->at(return_id));
}
}
last_simulate_ = NULL;
break;
}
case HValue::kDeoptimize: {
// Keep all environment slots alive.
HDeoptimize* deopt = HDeoptimize::cast(instr);
for (int i = deopt->first_local_index();
i < deopt->first_expression_index(); ++i) {
live->Add(i);
}
break;
}
case HValue::kSimulate:
last_simulate_ = HSimulate::cast(instr);
went_live_since_last_simulate_->Clear();
break;
default:
break;
}
}
void EnvironmentSlotLivenessAnalyzer::AnalyzeAndTrim() {
HPhase phase("H_EnvironmentLivenessAnalysis", graph_);
if (maximum_environment_size_ == 0) return;
// Main iteration. Compute liveness of environment slots, and store it
// for each block until it doesn't change any more. For efficiency, visit
// blocks in reverse order and walk backwards through each block. We
// need several iterations to propagate liveness through nested loops.
BitVector* live = new(zone()) BitVector(maximum_environment_size_, zone());
BitVector* worklist = new(zone()) BitVector(block_count_, zone());
for (int i = 0; i < block_count_; ++i) {
worklist->Add(i);
}
while (!worklist->IsEmpty()) {
for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
if (!worklist->Contains(block_id)) {
continue;
}
worklist->Remove(block_id);
last_simulate_ = NULL;
HBasicBlock* block = graph_->blocks()->at(block_id);
UpdateLivenessAtBlockEnd(block, live);
for (HInstruction* instr = block->last(); instr != NULL;
instr = instr->previous()) {
UpdateLivenessAtInstruction(instr, live);
}
// Reached the start of the block, do necessary bookkeeping:
// store computed information for this block and add predecessors
// to the work list as necessary.
first_simulate_->Set(block_id, last_simulate_);
first_simulate_invalid_for_index_->at(block_id)->CopyFrom(
*went_live_since_last_simulate_);
if (live_at_block_start_->at(block_id)->UnionIsChanged(*live)) {
for (int i = 0; i < block->predecessors()->length(); ++i) {
worklist->Add(block->predecessors()->at(i)->block_id());
}
if (block->IsInlineReturnTarget()) {
worklist->Add(block->inlined_entry_block()->block_id());
}
}
}
// Only collect bind/lookup instructions during the first pass.
collect_markers_ = false;
}
// Analysis finished. Zap dead environment slots.
for (int i = 0; i < markers_->length(); ++i) {
ZapEnvironmentSlotsForInstruction(markers_->at(i));
}
for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
HBasicBlock* block = graph_->blocks()->at(block_id);
UpdateLivenessAtBlockEnd(block, live);
ZapEnvironmentSlotsInSuccessors(block, live);
}
// Finally, remove the HEnvironment{Bind,Lookup} markers.
for (int i = 0; i < markers_->length(); ++i) {
markers_->at(i)->DeleteAndReplaceWith(NULL);
}
}
} } // namespace v8::internal

94
deps/v8/src/hydrogen-environment-liveness.h

@@ -0,0 +1,94 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
#define V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_
#include "hydrogen.h"
namespace v8 {
namespace internal {
// Trims live ranges of environment slots by doing explicit liveness analysis.
// Values in the environment are kept alive by every subsequent LInstruction
// that is assigned an LEnvironment, which creates register pressure and
// unnecessary spill slot moves. Therefore it is beneficial to trim the
// live ranges of environment slots by zapping them with a constant after
// the last lookup that refers to them.
// Slots are identified by their index and only affected if whitelisted in
// HOptimizedGraphBuilder::IsEligibleForEnvironmentLivenessAnalysis().
class EnvironmentSlotLivenessAnalyzer {
public:
explicit EnvironmentSlotLivenessAnalyzer(HGraph* graph);
void AnalyzeAndTrim();
private:
void ZapEnvironmentSlot(int index, HSimulate* simulate);
void ZapEnvironmentSlotsInSuccessors(HBasicBlock* block, BitVector* live);
void ZapEnvironmentSlotsForInstruction(HEnvironmentMarker* marker);
void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live);
void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live);
Zone* zone() { return &zone_; }
HGraph* graph_;
// Use a dedicated Zone for this phase, with a ZoneScope to ensure it
// gets freed.
Zone zone_;
ZoneScope zone_scope_;
int block_count_;
// Largest number of local variables in any environment in the graph
// (including inlined environments).
int maximum_environment_size_;
// Per-block data. All these lists are indexed by block_id.
ZoneList<BitVector*>* live_at_block_start_;
ZoneList<HSimulate*>* first_simulate_;
ZoneList<BitVector*>* first_simulate_invalid_for_index_;
// List of all HEnvironmentMarker instructions for quick iteration/deletion.
// It is populated during the first pass over the graph, controlled by
// |collect_markers_|.
ZoneList<HEnvironmentMarker*>* markers_;
bool collect_markers_;
// Keeps track of the last simulate seen, as well as the environment slots
// for which a new live range has started since (so they must not be zapped
// in that simulate when the end of another live range of theirs is found).
HSimulate* last_simulate_;
BitVector* went_live_since_last_simulate_;
};
} } // namespace v8::internal
#endif /* V8_HYDROGEN_ENVIRONMENT_LIVENESS_H_ */
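The analyzer in the two files above is a standard backwards liveness fixpoint: liveness at the end of a block is the union of liveness at its successors' starts, and blocks are re-examined until the sets stop changing. The following compact sketch shows that fixpoint with block-level use/def sets, a simplification of the per-marker bookkeeping above; std::bitset stands in for V8's BitVector and every name is illustrative:

#include <bitset>
#include <vector>

const int kSlots = 4;  // Analogue of maximum_environment_size_.

struct Block {
  std::vector<int> successors;
  std::bitset<kSlots> use;      // Slots read before being written here.
  std::bitset<kSlots> def;      // Slots written here.
  std::bitset<kSlots> live_in;  // Computed: live at block start.
};

// live_out(b) = union over successors s of live_in(s);
// live_in(b)  = use(b) | (live_out(b) & ~def(b)).
// Sweeps blocks in reverse order, like AnalyzeAndTrim, until a fixpoint;
// several sweeps are needed to propagate liveness through nested loops.
void ComputeLiveness(std::vector<Block>* blocks) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (int id = static_cast<int>(blocks->size()) - 1; id >= 0; --id) {
      Block* b = &(*blocks)[id];
      std::bitset<kSlots> live_out;
      for (size_t i = 0; i < b->successors.size(); ++i) {
        live_out |= (*blocks)[b->successors[i]].live_in;
      }
      std::bitset<kSlots> live_in = b->use | (live_out & ~b->def);
      if (live_in != b->live_in) {
        b->live_in = live_in;
        changed = true;  // A predecessor may now see a larger set.
      }
    }
  }
}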

855
deps/v8/src/hydrogen-gvn.cc

@@ -0,0 +1,855 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "hydrogen.h"
#include "hydrogen-gvn.h"
#include "v8.h"
namespace v8 {
namespace internal {
class HValueMap: public ZoneObject {
public:
explicit HValueMap(Zone* zone)
: array_size_(0),
lists_size_(0),
count_(0),
present_flags_(0),
array_(NULL),
lists_(NULL),
free_list_head_(kNil) {
ResizeLists(kInitialSize, zone);
Resize(kInitialSize, zone);
}
void Kill(GVNFlagSet flags);
void Add(HValue* value, Zone* zone) {
present_flags_.Add(value->gvn_flags());
Insert(value, zone);
}
HValue* Lookup(HValue* value) const;
HValueMap* Copy(Zone* zone) const {
return new(zone) HValueMap(zone, this);
}
bool IsEmpty() const { return count_ == 0; }
private:
// A linked list of HValue* values. Stored in arrays.
struct HValueMapListElement {
HValue* value;
int next; // Index in the array of the next list element.
};
static const int kNil = -1; // The end of a linked list
// Must be a power of 2.
static const int kInitialSize = 16;
HValueMap(Zone* zone, const HValueMap* other);
void Resize(int new_size, Zone* zone);
void ResizeLists(int new_size, Zone* zone);
void Insert(HValue* value, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
int lists_size_;
int count_; // The number of values stored in the HValueMap.
GVNFlagSet present_flags_; // All flags that are in any value in the
// HValueMap.
HValueMapListElement* array_; // Primary store - contains the first value
// with a given hash. Colliding elements are stored in linked lists.
HValueMapListElement* lists_; // The linked lists containing hash collisions.
int free_list_head_; // Unused elements in lists_ are on the free list.
};
class HSideEffectMap BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
HSideEffectMap& operator= (const HSideEffectMap& other);
void Kill(GVNFlagSet flags);
void Store(GVNFlagSet flags, HInstruction* instr);
bool IsEmpty() const { return count_ == 0; }
inline HInstruction* operator[](int i) const {
ASSERT(0 <= i);
ASSERT(i < kNumberOfTrackedSideEffects);
return data_[i];
}
inline HInstruction* at(int i) const { return operator[](i); }
private:
int count_;
HInstruction* data_[kNumberOfTrackedSideEffects];
};
void TraceGVN(const char* msg, ...) {
va_list arguments;
va_start(arguments, msg);
OS::VPrint(msg, arguments);
va_end(arguments);
}
// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments when
// --trace-gvn is off.
#define TRACE_GVN_1(msg, a1) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1); \
}
#define TRACE_GVN_2(msg, a1, a2) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1, a2); \
}
#define TRACE_GVN_3(msg, a1, a2, a3) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1, a2, a3); \
}
#define TRACE_GVN_4(msg, a1, a2, a3, a4) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1, a2, a3, a4); \
}
#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5) \
if (FLAG_trace_gvn) { \
TraceGVN(msg, a1, a2, a3, a4, a5); \
}
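// The guard is what makes these macros cheap: arguments to a plain call such
// as TraceGVN("%s\n", *GetGVNFlagsString(flags)) are evaluated even when
// tracing is off, and GetGVNFlagsString() below builds and heap-allocates a
// string. Expanding to "if (FLAG_trace_gvn) { TraceGVN(...); }" evaluates the
// arguments only when the flag is set.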
HValueMap::HValueMap(Zone* zone, const HValueMap* other)
: array_size_(other->array_size_),
lists_size_(other->lists_size_),
count_(other->count_),
present_flags_(other->present_flags_),
array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
free_list_head_(other->free_list_head_) {
OS::MemCopy(
array_, other->array_, array_size_ * sizeof(HValueMapListElement));
OS::MemCopy(
lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
}
void HValueMap::Kill(GVNFlagSet flags) {
GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
if (!present_flags_.ContainsAnyOf(depends_flags)) return;
present_flags_.RemoveAll();
for (int i = 0; i < array_size_; ++i) {
HValue* value = array_[i].value;
if (value != NULL) {
// Clear list of collisions first, so we know if it becomes empty.
int kept = kNil; // List of kept elements.
int next;
for (int current = array_[i].next; current != kNil; current = next) {
next = lists_[current].next;
HValue* value = lists_[current].value;
if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
// Drop it.
count_--;
lists_[current].next = free_list_head_;
free_list_head_ = current;
} else {
// Keep it.
lists_[current].next = kept;
kept = current;
present_flags_.Add(value->gvn_flags());
}
}
array_[i].next = kept;
// Now possibly drop directly indexed element.
value = array_[i].value;
if (value->gvn_flags().ContainsAnyOf(depends_flags)) { // Drop it.
count_--;
int head = array_[i].next;
if (head == kNil) {
array_[i].value = NULL;
} else {
array_[i].value = lists_[head].value;
array_[i].next = lists_[head].next;
lists_[head].next = free_list_head_;
free_list_head_ = head;
}
} else {
present_flags_.Add(value->gvn_flags()); // Keep it.
}
}
}
}
HValue* HValueMap::Lookup(HValue* value) const {
uint32_t hash = static_cast<uint32_t>(value->Hashcode());
uint32_t pos = Bound(hash);
if (array_[pos].value != NULL) {
if (array_[pos].value->Equals(value)) return array_[pos].value;
int next = array_[pos].next;
while (next != kNil) {
if (lists_[next].value->Equals(value)) return lists_[next].value;
next = lists_[next].next;
}
}
return NULL;
}
void HValueMap::Resize(int new_size, Zone* zone) {
ASSERT(new_size > count_);
// Hashing the values into the new array has no more collisions than in the
// old hash map, so we can use the existing lists_ array, if we are careful.
// Make sure we have at least one free element.
if (free_list_head_ == kNil) {
ResizeLists(lists_size_ << 1, zone);
}
HValueMapListElement* new_array =
zone->NewArray<HValueMapListElement>(new_size);
memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_array = array_;
int old_size = array_size_;
int old_count = count_;
count_ = 0;
// Do not modify present_flags_. It is currently correct.
array_size_ = new_size;
array_ = new_array;
if (old_array != NULL) {
// Iterate over all the elements in lists, rehashing them.
for (int i = 0; i < old_size; ++i) {
if (old_array[i].value != NULL) {
int current = old_array[i].next;
while (current != kNil) {
Insert(lists_[current].value, zone);
int next = lists_[current].next;
lists_[current].next = free_list_head_;
free_list_head_ = current;
current = next;
}
// Rehash the directly stored value.
Insert(old_array[i].value, zone);
}
}
}
USE(old_count);
ASSERT(count_ == old_count);
}
void HValueMap::ResizeLists(int new_size, Zone* zone) {
ASSERT(new_size > lists_size_);
HValueMapListElement* new_lists =
zone->NewArray<HValueMapListElement>(new_size);
memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
HValueMapListElement* old_lists = lists_;
int old_size = lists_size_;
lists_size_ = new_size;
lists_ = new_lists;
if (old_lists != NULL) {
OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
free_list_head_ = i;
}
}
void HValueMap::Insert(HValue* value, Zone* zone) {
ASSERT(value != NULL);
// Resizing when half of the hashtable is filled up.
if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
ASSERT(count_ < array_size_);
count_++;
uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
if (array_[pos].value == NULL) {
array_[pos].value = value;
array_[pos].next = kNil;
} else {
if (free_list_head_ == kNil) {
ResizeLists(lists_size_ << 1, zone);
}
int new_element_pos = free_list_head_;
ASSERT(new_element_pos != kNil);
free_list_head_ = lists_[free_list_head_].next;
lists_[new_element_pos].value = value;
lists_[new_element_pos].next = array_[pos].next;
ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
array_[pos].next = new_element_pos;
}
}
HSideEffectMap::HSideEffectMap() : count_(0) {
memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
}
HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
*this = *other; // Calls operator=.
}
HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
if (this != &other) {
OS::MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
}
return *this;
}
void HSideEffectMap::Kill(GVNFlagSet flags) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
if (flags.Contains(changes_flag)) {
if (data_[i] != NULL) count_--;
data_[i] = NULL;
}
}
}
void HSideEffectMap::Store(GVNFlagSet flags, HInstruction* instr) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
if (flags.Contains(changes_flag)) {
if (data_[i] == NULL) count_++;
data_[i] = instr;
}
}
}
HGlobalValueNumberer::HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
: graph_(graph),
info_(info),
removed_side_effects_(false),
block_side_effects_(graph->blocks()->length(), graph->zone()),
loop_side_effects_(graph->blocks()->length(), graph->zone()),
visited_on_paths_(graph->zone(), graph->blocks()->length()) {
ASSERT(!AllowHandleAllocation::IsAllowed());
block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
graph_->zone());
loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length(),
graph_->zone());
}
bool HGlobalValueNumberer::Analyze() {
removed_side_effects_ = false;
ComputeBlockSideEffects();
if (FLAG_loop_invariant_code_motion) {
LoopInvariantCodeMotion();
}
AnalyzeGraph();
return removed_side_effects_;
}
void HGlobalValueNumberer::ComputeBlockSideEffects() {
// The Analyze phase of GVN can be called multiple times. Clear loop side
// effects before computing them to erase the contents from previous Analyze
// passes.
for (int i = 0; i < loop_side_effects_.length(); ++i) {
loop_side_effects_[i].RemoveAll();
}
for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
// Compute side effects for the block.
HBasicBlock* block = graph_->blocks()->at(i);
HInstruction* instr = block->first();
int id = block->block_id();
GVNFlagSet side_effects;
while (instr != NULL) {
side_effects.Add(instr->ChangesFlags());
if (instr->IsSoftDeoptimize()) {
block_side_effects_[id].RemoveAll();
side_effects.RemoveAll();
break;
}
instr = instr->next();
}
block_side_effects_[id].Add(side_effects);
// Loop headers are part of their loop.
if (block->IsLoopHeader()) {
loop_side_effects_[id].Add(side_effects);
}
// Propagate loop side effects upwards.
if (block->HasParentLoopHeader()) {
int header_id = block->parent_loop_header()->block_id();
loop_side_effects_[header_id].Add(block->IsLoopHeader()
? loop_side_effects_[id]
: side_effects);
}
}
}
SmartArrayPointer<char> GetGVNFlagsString(GVNFlagSet flags) {
char underlying_buffer[kLastFlag * 128];
Vector<char> buffer(underlying_buffer, sizeof(underlying_buffer));
#if DEBUG
int offset = 0;
const char* separator = "";
const char* comma = ", ";
buffer[0] = 0;
uint32_t set_depends_on = 0;
uint32_t set_changes = 0;
for (int bit = 0; bit < kLastFlag; ++bit) {
if ((flags.ToIntegral() & (1 << bit)) != 0) {
if (bit % 2 == 0) {
set_changes++;
} else {
set_depends_on++;
}
}
}
bool positive_changes = set_changes < (kLastFlag / 2);
bool positive_depends_on = set_depends_on < (kLastFlag / 2);
if (set_changes > 0) {
if (positive_changes) {
offset += OS::SNPrintF(buffer + offset, "changes [");
} else {
offset += OS::SNPrintF(buffer + offset, "changes all except [");
}
for (int bit = 0; bit < kLastFlag; ++bit) {
if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_changes) {
switch (static_cast<GVNFlag>(bit)) {
#define DECLARE_FLAG(type) \
case kChanges##type: \
offset += OS::SNPrintF(buffer + offset, separator); \
offset += OS::SNPrintF(buffer + offset, #type); \
separator = comma; \
break;
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
default:
break;
}
}
}
offset += OS::SNPrintF(buffer + offset, "]");
}
if (set_depends_on > 0) {
separator = "";
if (set_changes > 0) {
offset += OS::SNPrintF(buffer + offset, ", ");
}
if (positive_depends_on) {
offset += OS::SNPrintF(buffer + offset, "depends on [");
} else {
offset += OS::SNPrintF(buffer + offset, "depends on all except [");
}
for (int bit = 0; bit < kLastFlag; ++bit) {
if (((flags.ToIntegral() & (1 << bit)) != 0) == positive_depends_on) {
switch (static_cast<GVNFlag>(bit)) {
#define DECLARE_FLAG(type) \
case kDependsOn##type: \
offset += OS::SNPrintF(buffer + offset, separator); \
offset += OS::SNPrintF(buffer + offset, #type); \
separator = comma; \
break;
GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
default:
break;
}
}
}
offset += OS::SNPrintF(buffer + offset, "]");
}
#else
OS::SNPrintF(buffer, "0x%08X", flags.ToIntegral());
#endif
size_t string_len = strlen(underlying_buffer) + 1;
ASSERT(string_len <= sizeof(underlying_buffer));
char* result = new char[strlen(underlying_buffer) + 1];
OS::MemCopy(result, underlying_buffer, string_len);
return SmartArrayPointer<char>(result);
}
void HGlobalValueNumberer::LoopInvariantCodeMotion() {
TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
graph_->use_optimistic_licm() ? "yes" : "no");
for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
HBasicBlock* block = graph_->blocks()->at(i);
if (block->IsLoopHeader()) {
GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
block->block_id(),
*GetGVNFlagsString(side_effects));
GVNFlagSet accumulated_first_time_depends;
GVNFlagSet accumulated_first_time_changes;
HBasicBlock* last = block->loop_information()->GetLastBackEdge();
for (int j = block->block_id(); j <= last->block_id(); ++j) {
ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
&accumulated_first_time_depends,
&accumulated_first_time_changes);
}
}
}
}
void HGlobalValueNumberer::ProcessLoopBlock(
HBasicBlock* block,
HBasicBlock* loop_header,
GVNFlagSet loop_kills,
GVNFlagSet* first_time_depends,
GVNFlagSet* first_time_changes) {
HBasicBlock* pre_header = loop_header->predecessors()->at(0);
GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
TRACE_GVN_2("Loop invariant motion for B%d %s\n",
block->block_id(),
*GetGVNFlagsString(depends_flags));
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
bool hoisted = false;
if (instr->CheckFlag(HValue::kUseGVN)) {
TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
instr->id(),
instr->Mnemonic(),
*GetGVNFlagsString(instr->gvn_flags()),
*GetGVNFlagsString(loop_kills));
bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
if (can_hoist && !graph()->use_optimistic_licm()) {
can_hoist = block->IsLoopSuccessorDominator();
}
if (can_hoist) {
bool inputs_loop_invariant = true;
for (int i = 0; i < instr->OperandCount(); ++i) {
if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
inputs_loop_invariant = false;
}
}
if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
// Move the instruction out of the loop.
instr->Unlink();
instr->InsertBefore(pre_header->end());
if (instr->HasSideEffects()) removed_side_effects_ = true;
hoisted = true;
}
}
}
if (!hoisted) {
// If an instruction is not hoisted, we have to account for its side
// effects when hoisting later HTransitionElementsKind instructions.
GVNFlagSet previous_depends = *first_time_depends;
GVNFlagSet previous_changes = *first_time_changes;
first_time_depends->Add(instr->DependsOnFlags());
first_time_changes->Add(instr->ChangesFlags());
if (!(previous_depends == *first_time_depends)) {
TRACE_GVN_1("Updated first-time accumulated %s\n",
*GetGVNFlagsString(*first_time_depends));
}
if (!(previous_changes == *first_time_changes)) {
TRACE_GVN_1("Updated first-time accumulated %s\n",
*GetGVNFlagsString(*first_time_changes));
}
}
instr = next;
}
}
bool HGlobalValueNumberer::AllowCodeMotion() {
return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
}
bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
HBasicBlock* loop_header) {
// If we've disabled code motion or we're in a block that unconditionally
// deoptimizes, don't move any instructions.
return AllowCodeMotion() && !instr->block()->IsDeoptimizing();
}
GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
GVNFlagSet side_effects;
for (int i = 0; i < dominated->predecessors()->length(); ++i) {
HBasicBlock* block = dominated->predecessors()->at(i);
if (dominator->block_id() < block->block_id() &&
block->block_id() < dominated->block_id() &&
visited_on_paths_.Add(block->block_id())) {
side_effects.Add(block_side_effects_[block->block_id()]);
if (block->IsLoopHeader()) {
side_effects.Add(loop_side_effects_[block->block_id()]);
}
side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
dominator, block));
}
}
return side_effects;
}
// Each instance of this class is like a "stack frame" for the recursive
// traversal of the dominator tree done during GVN (the stack is handled
// as a doubly linked list).
// We reuse frames when possible so the list length is limited by the depth
// of the dominator tree, but this forces us to initialize each frame by
// calling an explicit "Initialize" method instead of using a constructor.
class GvnBasicBlockState: public ZoneObject {
public:
static GvnBasicBlockState* CreateEntry(Zone* zone,
HBasicBlock* entry_block,
HValueMap* entry_map) {
return new(zone)
GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
}
HBasicBlock* block() { return block_; }
HValueMap* map() { return map_; }
HSideEffectMap* dominators() { return &dominators_; }
GvnBasicBlockState* next_in_dominator_tree_traversal(
Zone* zone,
HBasicBlock** dominator) {
// This assignment needs to happen before calling next_dominated() because
// that call can reuse "this" if we are at the last dominated block.
*dominator = block();
GvnBasicBlockState* result = next_dominated(zone);
if (result == NULL) {
GvnBasicBlockState* dominator_state = pop();
if (dominator_state != NULL) {
// This branch is guaranteed not to return NULL because pop() never
// returns a state where "is_done() == true".
*dominator = dominator_state->block();
result = dominator_state->next_dominated(zone);
} else {
// Unnecessary (we are returning NULL) but done for clarity.
*dominator = NULL;
}
}
return result;
}
private:
void Initialize(HBasicBlock* block,
HValueMap* map,
HSideEffectMap* dominators,
bool copy_map,
Zone* zone) {
block_ = block;
map_ = copy_map ? map->Copy(zone) : map;
dominated_index_ = -1;
length_ = block->dominated_blocks()->length();
if (dominators != NULL) {
dominators_ = *dominators;
}
}
bool is_done() { return dominated_index_ >= length_; }
GvnBasicBlockState(GvnBasicBlockState* previous,
HBasicBlock* block,
HValueMap* map,
HSideEffectMap* dominators,
Zone* zone)
: previous_(previous), next_(NULL) {
Initialize(block, map, dominators, true, zone);
}
GvnBasicBlockState* next_dominated(Zone* zone) {
dominated_index_++;
if (dominated_index_ == length_ - 1) {
// No need to copy the map for the last child in the dominator tree.
Initialize(block_->dominated_blocks()->at(dominated_index_),
map(),
dominators(),
false,
zone);
return this;
} else if (dominated_index_ < length_) {
return push(zone,
block_->dominated_blocks()->at(dominated_index_),
dominators());
} else {
return NULL;
}
}
GvnBasicBlockState* push(Zone* zone,
HBasicBlock* block,
HSideEffectMap* dominators) {
if (next_ == NULL) {
next_ =
new(zone) GvnBasicBlockState(this, block, map(), dominators, zone);
} else {
next_->Initialize(block, map(), dominators, true, zone);
}
return next_;
}
GvnBasicBlockState* pop() {
GvnBasicBlockState* result = previous_;
while (result != NULL && result->is_done()) {
TRACE_GVN_2("Backtracking from block B%d to block b%d\n",
block()->block_id(),
previous_->block()->block_id())
result = result->previous_;
}
return result;
}
GvnBasicBlockState* previous_;
GvnBasicBlockState* next_;
HBasicBlock* block_;
HValueMap* map_;
HSideEffectMap dominators_;
int dominated_index_;
int length_;
};
// This is a recursive traversal of the dominator tree but it has been turned
// into a loop to avoid stack overflows.
// The logical "stack frames" of the recursion are kept in a list of
// GvnBasicBlockState instances.
void HGlobalValueNumberer::AnalyzeGraph() {
HBasicBlock* entry_block = graph_->entry_block();
HValueMap* entry_map = new(zone()) HValueMap(zone());
GvnBasicBlockState* current =
GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
while (current != NULL) {
HBasicBlock* block = current->block();
HValueMap* map = current->map();
HSideEffectMap* dominators = current->dominators();
TRACE_GVN_2("Analyzing block B%d%s\n",
block->block_id(),
block->IsLoopHeader() ? " (loop header)" : "");
// If this is a loop header kill everything killed by the loop.
if (block->IsLoopHeader()) {
map->Kill(loop_side_effects_[block->block_id()]);
}
// Go through all instructions of the current block.
HInstruction* instr = block->first();
while (instr != NULL) {
HInstruction* next = instr->next();
GVNFlagSet flags = instr->ChangesFlags();
if (!flags.IsEmpty()) {
// Clear all instructions in the map that are affected by side effects.
// Store instruction as the dominating one for tracked side effects.
map->Kill(flags);
dominators->Store(flags, instr);
TRACE_GVN_2("Instruction %d %s\n", instr->id(),
*GetGVNFlagsString(flags));
}
if (instr->CheckFlag(HValue::kUseGVN)) {
ASSERT(!instr->HasObservableSideEffects());
HValue* other = map->Lookup(instr);
if (other != NULL) {
ASSERT(instr->Equals(other) && other->Equals(instr));
TRACE_GVN_4("Replacing value %d (%s) with value %d (%s)\n",
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
if (instr->HasSideEffects()) removed_side_effects_ = true;
instr->DeleteAndReplaceWith(other);
} else {
map->Add(instr, zone());
}
}
if (instr->IsLinked() &&
instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
HValue* other = dominators->at(i);
GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
GVNFlag depends_on_flag = HValue::DependsOnFlagFromInt(i);
if (instr->DependsOnFlags().Contains(depends_on_flag) &&
(other != NULL)) {
TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
i,
instr->id(),
instr->Mnemonic(),
other->id(),
other->Mnemonic());
instr->SetSideEffectDominator(changes_flag, other);
}
}
}
instr = next;
}
HBasicBlock* dominator_block;
GvnBasicBlockState* next =
current->next_in_dominator_tree_traversal(zone(), &dominator_block);
if (next != NULL) {
HBasicBlock* dominated = next->block();
HValueMap* successor_map = next->map();
HSideEffectMap* successor_dominators = next->dominators();
// Kill everything killed on any path between this block and the
// dominated block. We don't have to traverse these paths if the
// value map and the dominators list are already empty. If the range
// of block ids (block_id, dominated_id) is empty there are no such
// paths.
if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
dominator_block->block_id() + 1 < dominated->block_id()) {
visited_on_paths_.Clear();
GVNFlagSet side_effects_on_all_paths =
CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
dominated);
successor_map->Kill(side_effects_on_all_paths);
successor_dominators->Kill(side_effects_on_all_paths);
}
}
current = next;
}
}
} } // namespace v8::internal
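
The AnalyzeGraph() loop above is the standard recursion-to-iteration rewrite: the implicit call stack becomes an explicit chain of reusable frames, each holding a block and a cursor over its dominated children. A minimal standalone sketch of the same transformation, with illustrative names rather than V8 code:

// Illustrative sketch of the recursion-to-loop idea used by AnalyzeGraph();
// names and types here are hypothetical, not from the V8 sources.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> children;  // plays the role of dominated_blocks()
};

struct Frame {
  Node* node;
  size_t next_child;  // plays the role of dominated_index_
};

void VisitTreeIteratively(Node* root) {
  std::vector<Frame> stack;  // stack depth is bounded by tree depth
  stack.push_back({root, 0});
  while (!stack.empty()) {
    Frame& top = stack.back();
    if (top.next_child == 0) {
      std::printf("Analyzing node %d\n", top.node->id);  // "enter" work
    }
    if (top.next_child < top.node->children.size()) {
      Node* child = top.node->children[top.next_child++];
      stack.push_back({child, 0});  // descend into the next dominated child
    } else {
      stack.pop_back();  // like pop(): backtrack to the dominating frame
    }
  }
}

int main() {
  Node leaf1{1, {}}, leaf2{2, {}};
  Node root{0, {&leaf1, &leaf2}};
  VisitTreeIteratively(&root);
  return 0;
}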

123
deps/v8/src/hydrogen-gvn.h

@ -0,0 +1,123 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_HYDROGEN_GVN_H_
#define V8_HYDROGEN_GVN_H_
#include "hydrogen.h"
#include "hydrogen-instructions.h"
#include "compiler.h"
#include "zone.h"
namespace v8 {
namespace internal {
// Simple sparse set with O(1) add, contains, and clear.
class SparseSet {
public:
SparseSet(Zone* zone, int capacity)
: capacity_(capacity),
length_(0),
dense_(zone->NewArray<int>(capacity)),
sparse_(zone->NewArray<int>(capacity)) {
#ifndef NVALGRIND
// Initialize the sparse array to make valgrind happy.
memset(sparse_, 0, sizeof(sparse_[0]) * capacity);
#endif
}
bool Contains(int n) const {
ASSERT(0 <= n && n < capacity_);
int d = sparse_[n];
return 0 <= d && d < length_ && dense_[d] == n;
}
bool Add(int n) {
if (Contains(n)) return false;
dense_[length_] = n;
sparse_[n] = length_;
++length_;
return true;
}
void Clear() { length_ = 0; }
private:
int capacity_;
int length_;
int* dense_;
int* sparse_;
DISALLOW_COPY_AND_ASSIGN(SparseSet);
};
class HGlobalValueNumberer BASE_EMBEDDED {
public:
HGlobalValueNumberer(HGraph* graph, CompilationInfo* info);
// Returns true if values with side effects are removed.
bool Analyze();
private:
GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator,
HBasicBlock* dominated);
void AnalyzeGraph();
void ComputeBlockSideEffects();
void LoopInvariantCodeMotion();
void ProcessLoopBlock(HBasicBlock* block,
HBasicBlock* before_loop,
GVNFlagSet loop_kills,
GVNFlagSet* accumulated_first_time_depends,
GVNFlagSet* accumulated_first_time_changes);
bool AllowCodeMotion();
bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
HGraph* graph() { return graph_; }
CompilationInfo* info() { return info_; }
Zone* zone() const { return graph_->zone(); }
HGraph* graph_;
CompilationInfo* info_;
bool removed_side_effects_;
// A map of block IDs to their side effects.
ZoneList<GVNFlagSet> block_side_effects_;
// A map of loop header block IDs to their loop's side effects.
ZoneList<GVNFlagSet> loop_side_effects_;
// Used when collecting side effects on paths from dominator to
// dominated.
SparseSet visited_on_paths_;
};
} } // namespace v8::internal
#endif // V8_HYDROGEN_GVN_H_
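
SparseSet above is the classic two-array sparse set: dense_ packs the current members, sparse_[n] records where n sits in dense_, and membership is the round trip dense_[sparse_[n]] == n, which is why Clear() can simply reset length_ without touching either array. A minimal standalone sketch of the same structure; the std::vector backing store is an assumption made for brevity, not how V8 allocates:

// Illustrative re-statement of the SparseSet technique, not V8 code.
#include <cassert>
#include <vector>

class SimpleSparseSet {
 public:
  explicit SimpleSparseSet(int capacity)
      : length_(0), dense_(capacity), sparse_(capacity) {}
  bool Contains(int n) const {
    int d = sparse_[n];
    // A stale or never-written sparse_[n] fails this validation.
    return 0 <= d && d < length_ && dense_[d] == n;
  }
  bool Add(int n) {
    if (Contains(n)) return false;
    dense_[length_] = n;
    sparse_[n] = length_;
    ++length_;
    return true;
  }
  void Clear() { length_ = 0; }  // O(1): stale entries no longer validate
 private:
  int length_;
  std::vector<int> dense_;
  std::vector<int> sparse_;
};

int main() {
  SimpleSparseSet set(8);
  assert(set.Add(3) && set.Contains(3));
  set.Clear();               // constant time, independent of capacity
  assert(!set.Contains(3));  // sparse_[3] still holds 0, but fails the check
  return 0;
}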

505
deps/v8/src/hydrogen-instructions.cc

@ -108,10 +108,12 @@ Representation HValue::RepresentationFromUses() {
int tagged_count = use_count[Representation::kTagged];
int double_count = use_count[Representation::kDouble];
int int32_count = use_count[Representation::kInteger32];
int smi_count = use_count[Representation::kSmi];
if (tagged_count > 0) return Representation::Tagged();
if (double_count > 0) return Representation::Double();
if (int32_count > 0) return Representation::Integer32();
if (smi_count > 0) return Representation::Smi();
return Representation::None();
}
@ -122,20 +124,9 @@ void HValue::UpdateRepresentation(Representation new_rep,
const char* reason) {
Representation r = representation();
if (new_rep.is_more_general_than(r)) {
// When an HConstant is marked "not convertible to integer", then
// never try to represent it as an integer.
if (new_rep.IsInteger32() && !IsConvertibleToInteger()) {
new_rep = Representation::Tagged();
if (FLAG_trace_representation) {
PrintF("Changing #%d %s representation %s -> %s because it's NCTI"
" (%s want i)\n",
id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
}
} else {
if (FLAG_trace_representation) {
PrintF("Changing #%d %s representation %s -> %s based on %s\n",
id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
}
if (FLAG_trace_representation) {
PrintF("Changing #%d %s representation %s -> %s based on %s\n",
id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
}
ChangeRepresentation(new_rep);
AddDependantsToWorklist(h_infer);
@ -537,6 +528,17 @@ bool HValue::CheckUsesForFlag(Flag f) {
}
bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) {
bool return_value = false;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false;
return_value = true;
}
return return_value;
}
HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
Advance();
}
@ -987,6 +989,11 @@ void HDummyUse::PrintDataTo(StringStream* stream) {
}
void HEnvironmentMarker::PrintDataTo(StringStream* stream) {
stream->Add("%s var[%d]", kind() == BIND ? "bind" : "lookup", index());
}
void HUnaryCall::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
@ -1062,6 +1069,7 @@ void HBoundsCheck::ApplyIndexChange() {
block()->graph()->GetInvalidContext(), current_index, add_offset);
add->InsertBefore(this);
add->AssumeRepresentation(index()->representation());
add->ClearFlag(kCanOverflow);
current_index = add;
}
@ -1140,19 +1148,17 @@ void HBoundsCheck::PrintDataTo(StringStream* stream) {
void HBoundsCheck::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation r;
HValue* actual_length = length()->ActualValue();
HValue* actual_index = index()->ActualValue();
if (key_mode_ == DONT_ALLOW_SMI_KEY ||
!actual_length->representation().IsTagged()) {
HValue* actual_length = length()->ActualValue();
Representation index_rep = actual_index->representation();
if (!actual_length->representation().IsSmiOrTagged()) {
r = Representation::Integer32();
} else if (actual_index->representation().IsTagged() ||
(actual_index->IsConstant() &&
HConstant::cast(actual_index)->HasSmiValue())) {
// If the index is tagged, or a constant that holds a Smi, allow the length
// to be tagged, since it is usually already tagged from loading it out of
// the length field of a JSArray. This allows for direct comparison without
// untagging.
r = Representation::Tagged();
} else if ((index_rep.IsTagged() && actual_index->type().IsSmi()) ||
index_rep.IsSmi()) {
// If the index is smi, allow the length to be smi, since it is usually
// already smi from loading it out of the length field of a JSArray. This
// allows for direct comparison without untagging.
r = Representation::Smi();
} else {
r = Representation::Integer32();
}
@ -1314,6 +1320,30 @@ const char* HUnaryMathOperation::OpName() const {
}
Range* HUnaryMathOperation::InferRange(Zone* zone) {
Representation r = representation();
if (r.IsSmiOrInteger32() && value()->HasRange()) {
if (op() == kMathAbs) {
int upper = value()->range()->upper();
int lower = value()->range()->lower();
bool spans_zero = value()->range()->CanBeZero();
// Math.abs(kMinInt) overflows its representation, in which case the
// instruction deopts. Hence clamp it to kMaxInt.
int abs_upper = upper == kMinInt ? kMaxInt : abs(upper);
int abs_lower = lower == kMinInt ? kMaxInt : abs(lower);
Range* result =
new(zone) Range(spans_zero ? 0 : Min(abs_lower, abs_upper),
Max(abs_lower, abs_upper));
// In case of Smi representation, clamp Math.abs(Smi::kMinValue) to
// Smi::kMaxValue.
if (r.IsSmi()) result->ClampToSmi();
return result;
}
}
return HValue::InferRange(zone);
}
void HUnaryMathOperation::PrintDataTo(StringStream* stream) {
const char* name = OpName();
stream->Add("%s ", name);
@ -1410,14 +1440,6 @@ HValue* HBitNot::Canonicalize() {
}
HValue* HArithmeticBinaryOperation::Canonicalize() {
if (representation().IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) {
ClearFlag(kCanOverflow);
}
return this;
}
static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
return arg1->representation().IsSpecialization() &&
arg2->EqualsInteger32Constant(identity);
@ -1427,13 +1449,13 @@ static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
HValue* HAdd::Canonicalize() {
if (IsIdentityOperation(left(), right(), 0)) return left();
if (IsIdentityOperation(right(), left(), 0)) return right();
return HArithmeticBinaryOperation::Canonicalize();
return this;
}
HValue* HSub::Canonicalize() {
if (IsIdentityOperation(left(), right(), 0)) return left();
return HArithmeticBinaryOperation::Canonicalize();
return this;
}
@ -1485,7 +1507,7 @@ void HChange::PrintDataTo(StringStream* stream) {
if (CanTruncateToInt32()) stream->Add(" truncating-int32");
if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
if (CheckFlag(kAllowUndefinedAsNaN)) stream->Add(" allow-undefined-as-nan");
}
@ -1494,6 +1516,11 @@ HValue* HUnaryMathOperation::Canonicalize() {
// If the input is integer32 then we replace the floor instruction
// with its input. This happens before the representation changes are
// introduced.
// TODO(2205): The above comment is lying. All of this happens
// *after* representation changes are introduced. We should check
// for value->IsChange() and react accordingly if yes.
if (value()->representation().IsInteger32()) return value();
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
@ -1687,7 +1714,7 @@ Range* HValue::InferRange(Zone* zone) {
Range* HChange::InferRange(Zone* zone) {
Range* input_range = value()->range();
if (from().IsInteger32() &&
to().IsTagged() &&
to().IsSmiOrTagged() &&
!value()->CheckFlag(HInstruction::kUint32) &&
input_range != NULL && input_range->IsInSmiRange()) {
set_type(HType::Smi());
@ -1734,11 +1761,13 @@ Range* HAdd::InferRange(Zone* zone) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
if (!res->AddAndCheckOverflow(b)) {
if (!res->AddAndCheckOverflow(b) ||
CheckFlag(kAllUsesTruncatingToInt32)) {
ClearFlag(kCanOverflow);
}
bool m0 = a->CanBeMinusZero() && b->CanBeMinusZero();
res->set_can_be_minus_zero(m0);
if (!CheckFlag(kAllUsesTruncatingToInt32)) {
res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeMinusZero());
}
return res;
} else {
return HValue::InferRange(zone);
@ -1751,10 +1780,13 @@ Range* HSub::InferRange(Zone* zone) {
Range* a = left()->range();
Range* b = right()->range();
Range* res = a->Copy(zone);
if (!res->SubAndCheckOverflow(b)) {
if (!res->SubAndCheckOverflow(b) ||
CheckFlag(kAllUsesTruncatingToInt32)) {
ClearFlag(kCanOverflow);
}
res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
if (!CheckFlag(kAllUsesTruncatingToInt32)) {
res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
}
return res;
} else {
return HValue::InferRange(zone);
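
The HAdd and HSub hunks above may clear kCanOverflow when every use truncates to int32 because two's-complement addition and subtraction are exact modulo 2^32: the truncated result is the same whether or not the wider computation overflowed. A standalone check, not V8 code:

// Illustrative only: truncation commutes with two's-complement addition.
#include <cassert>
#include <cstdint>

int main() {
  int64_t a = 0x7fffffff, b = 1;  // a + b overflows int32 addition
  uint32_t wrapped = static_cast<uint32_t>(a) + static_cast<uint32_t>(b);
  uint32_t truncated = static_cast<uint32_t>(a + b);  // exact in 64 bits
  assert(wrapped == truncated);  // both are 0x80000000
  return 0;
}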
@ -1768,11 +1800,16 @@ Range* HMul::InferRange(Zone* zone) {
Range* b = right()->range();
Range* res = a->Copy(zone);
if (!res->MulAndCheckOverflow(b)) {
// Clearing the kCanOverflow flag when kAllUsesTruncatingToInt32
// would be wrong, because truncated integer multiplication is too
// precise and therefore not the same as converting to Double and back.
ClearFlag(kCanOverflow);
}
bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
(a->CanBeNegative() && b->CanBeZero());
res->set_can_be_minus_zero(m0);
if (!CheckFlag(kAllUsesTruncatingToInt32)) {
bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
(a->CanBeNegative() && b->CanBeZero());
res->set_can_be_minus_zero(m0);
}
return res;
} else {
return HValue::InferRange(zone);
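
The multiplication comment above can be checked directly: the low 32 bits of an integer product are exact, while the same product routed through a double is rounded to 53 significant bits, so truncation does not commute with multiplication the way it does with addition. A standalone demonstration, not V8 code:

// Illustrative only: why truncated integer multiply differs from
// double-then-truncate, justifying the comment in HMul::InferRange.
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
  // (2^28 + 1)^2 needs 57 significant bits; doubles keep only 53.
  uint32_t wrapped = 0x10000001u * 0x10000001u;   // exact low 32 bits
  double via_double = 268435457.0 * 268435457.0;  // rounded product
  uint32_t truncated =
      static_cast<uint32_t>(std::fmod(via_double, 4294967296.0));
  assert(wrapped == 0x20000001u);
  assert(truncated == 0x20000000u);  // lost the low bit to rounding
  assert(wrapped != truncated);
  return 0;
}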
@ -1785,12 +1822,14 @@ Range* HDiv::InferRange(Zone* zone) {
Range* a = left()->range();
Range* b = right()->range();
Range* result = new(zone) Range();
if (a->CanBeMinusZero()) {
result->set_can_be_minus_zero(true);
}
if (!CheckFlag(kAllUsesTruncatingToInt32)) {
if (a->CanBeMinusZero()) {
result->set_can_be_minus_zero(true);
}
if (a->CanBeZero() && b->CanBeNegative()) {
result->set_can_be_minus_zero(true);
if (a->CanBeZero() && b->CanBeNegative()) {
result->set_can_be_minus_zero(true);
}
}
if (!a->Includes(kMinInt) || !b->Includes(-1)) {
@ -1811,8 +1850,18 @@ Range* HMod::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
Range* b = right()->range();
Range* result = new(zone) Range();
if (a->CanBeMinusZero() || a->CanBeNegative()) {
// The magnitude of the modulus is bounded by the right operand. Note that
// apart from the cases involving kMinInt, the calculation below is the same
// as Max(Abs(b->lower()), Abs(b->upper())) - 1.
int32_t positive_bound = -(Min(NegAbs(b->lower()), NegAbs(b->upper())) + 1);
// The result of the modulo operation has the sign of its left operand.
bool left_can_be_negative = a->CanBeMinusZero() || a->CanBeNegative();
Range* result = new(zone) Range(left_can_be_negative ? -positive_bound : 0,
a->CanBePositive() ? positive_bound : 0);
if (left_can_be_negative && !CheckFlag(kAllUsesTruncatingToInt32)) {
result->set_can_be_minus_zero(true);
}
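
The negated absolute value used above sidesteps the kMinInt problem: mapping into the negative range, where every int32 magnitude is representable, keeps the bound exact. A standalone check of the formula, with NegAbs written out locally; illustrative, not V8 code:

// Illustrative only: the bound computed by HMod::InferRange above.
#include <algorithm>
#include <cassert>
#include <climits>

static int NegAbs(int v) { return v < 0 ? v : -v; }  // never overflows

static int PositiveBound(int lower, int upper) {
  // Equals Max(Abs(lower), Abs(upper)) - 1 except when kMinInt is involved.
  return -(std::min(NegAbs(lower), NegAbs(upper)) + 1);
}

int main() {
  assert(PositiveBound(-3, 7) == 6);             // max(3, 7) - 1
  assert(PositiveBound(INT_MIN, 1) == INT_MAX);  // |INT_MIN| - 1, no overflow
  return 0;
}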
@ -1910,12 +1959,12 @@ void HPhi::PrintTo(StringStream* stream) {
value->PrintNameTo(stream);
stream->Add(" ");
}
stream->Add(" uses:%d_%di_%dd_%dt",
stream->Add(" uses:%d_%ds_%di_%dd_%dt",
UseCount(),
smi_non_phi_uses() + smi_indirect_uses(),
int32_non_phi_uses() + int32_indirect_uses(),
double_non_phi_uses() + double_indirect_uses(),
tagged_non_phi_uses() + tagged_indirect_uses());
if (!IsConvertibleToInteger()) stream->Add("_ncti");
PrintRangeTo(stream);
PrintTypeTo(stream);
stream->Add("]");
@ -1990,8 +2039,9 @@ void HPhi::InitRealUses(int phi_id) {
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n",
PrintF("adding to #%d Phi uses of #%d Phi: s%d i%d d%d t%d\n",
id(), other->id(),
other->non_phi_uses_[Representation::kSmi],
other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]);
@ -2016,8 +2066,9 @@ void HSimulate::MergeWith(ZoneList<HSimulate*>* list) {
ZoneList<HValue*>* from_values = &from->values_;
for (int i = 0; i < from_values->length(); ++i) {
if (from->HasAssignedIndexAt(i)) {
AddAssignedValue(from->GetAssignedIndexAt(i),
from_values->at(i));
int index = from->GetAssignedIndexAt(i);
if (HasValueForIndex(index)) continue;
AddAssignedValue(index, from_values->at(i));
} else {
if (pop_count_ > 0) {
pop_count_--;
@ -2038,13 +2089,13 @@ void HSimulate::PrintDataTo(StringStream* stream) {
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
for (int i = values_.length() - 1; i >= 0; --i) {
if (i > 0) stream->Add(",");
if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
} else {
stream->Add(" push ");
}
values_[i]->PrintNameTo(stream);
if (i > 0) stream->Add(",");
}
}
}
@ -2060,6 +2111,13 @@ void HDeoptimize::PrintDataTo(StringStream* stream) {
}
void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
Zone* zone) {
ASSERT(return_target->IsInlineReturnTarget());
return_targets_.Add(return_target, zone);
}
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartArrayPointer<char> name = function()->debug_name()->ToCString();
stream->Add("%s, id=%d", *name, function()->id().ToInt());
@ -2075,6 +2133,7 @@ static bool IsInteger32(double value) {
HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
unique_id_(),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(false),
@ -2088,21 +2147,13 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
double n = handle_->Number();
has_int32_value_ = IsInteger32(n);
int32_value_ = DoubleToInt32(n);
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
double_value_ = n;
has_double_value_ = true;
} else {
type_from_value_ = HType::TypeFromValue(handle_);
is_internalized_string_ = handle_->IsInternalizedString();
}
if (r.IsNone()) {
if (has_int32_value_) {
r = Representation::Integer32();
} else if (has_double_value_) {
r = Representation::Double();
} else {
r = Representation::Tagged();
}
}
Initialize(r);
}
@ -2116,6 +2167,7 @@ HConstant::HConstant(Handle<Object> handle,
bool boolean_value)
: handle_(handle),
unique_id_(unique_id),
has_smi_value_(false),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(is_internalize_string),
@ -2142,6 +2194,7 @@ HConstant::HConstant(int32_t integer_value,
boolean_value_(integer_value != 0),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
has_smi_value_ = Smi::IsValid(int32_value_);
Initialize(r);
}
@ -2159,11 +2212,23 @@ HConstant::HConstant(double double_value,
boolean_value_(double_value != 0 && !std::isnan(double_value)),
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
Initialize(r);
}
void HConstant::Initialize(Representation r) {
if (r.IsNone()) {
if (has_smi_value_) {
r = Representation::Smi();
} else if (has_int32_value_) {
r = Representation::Integer32();
} else if (has_double_value_) {
r = Representation::Double();
} else {
r = Representation::Tagged();
}
}
set_representation(r);
SetFlag(kUseGVN);
if (representation().IsInteger32()) {
@ -2173,6 +2238,7 @@ void HConstant::Initialize(Representation r) {
HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (r.IsSmi() && !has_smi_value_) return NULL;
if (r.IsInteger32() && !has_int32_value_) return NULL;
if (r.IsDouble() && !has_double_value_) return NULL;
if (has_int32_value_) {
@ -2247,10 +2313,6 @@ bool HBinaryOperation::IgnoreObservedOutputRepresentation(
current_rep.IsInteger32() &&
// Mul in Integer32 mode would be too precise.
!this->IsMul() &&
// TODO(jkummerow): Remove blacklisting of Div when the Div
// instruction has learned not to deopt when the remainder is
// non-zero but all uses are truncating.
!this->IsDiv() &&
CheckUsesForFlag(kTruncatingToInt32);
}
@ -2301,7 +2363,37 @@ void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) {
Range* HBitwise::InferRange(Zone* zone) {
if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
if (op() == Token::BIT_XOR) {
if (left()->HasRange() && right()->HasRange()) {
// The maximum value has the high bit, and all bits below, set:
// (1 << high) - 1.
// If the range can be negative, the minimum int is a negative number with
// the high bit, and all bits below, unset:
// -(1 << high).
// If it cannot be negative, conservatively choose 0 as the minimum.
int64_t left_upper = left()->range()->upper();
int64_t left_lower = left()->range()->lower();
int64_t right_upper = right()->range()->upper();
int64_t right_lower = right()->range()->lower();
if (left_upper < 0) left_upper = ~left_upper;
if (left_lower < 0) left_lower = ~left_lower;
if (right_upper < 0) right_upper = ~right_upper;
if (right_lower < 0) right_lower = ~right_lower;
int high = MostSignificantBit(
static_cast<uint32_t>(
left_upper | left_lower | right_upper | right_lower));
int64_t limit = 1;
limit <<= high;
int32_t min = (left()->range()->CanBeNegative() ||
right()->range()->CanBeNegative())
? static_cast<int32_t>(-limit) : 0;
return new(zone) Range(min, static_cast<int32_t>(limit - 1));
}
return HValue::InferRange(zone);
}
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
int32_t left_mask = (left()->range() != NULL)
? left()->range()->Mask()
@ -2442,18 +2534,22 @@ void HGoto::PrintDataTo(StringStream* stream) {
void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
Representation rep = Representation::None();
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
bool observed_integers =
observed_input_representation(0).IsInteger32() &&
observed_input_representation(1).IsInteger32();
bool inputs_are_not_doubles =
!left_rep.IsDouble() && !right_rep.IsDouble();
if (observed_integers && inputs_are_not_doubles) {
rep = Representation::Integer32();
Representation observed_left = observed_input_representation(0);
Representation observed_right = observed_input_representation(1);
Representation rep = Representation::None();
rep = rep.generalize(observed_left);
rep = rep.generalize(observed_right);
if (rep.IsNone() || rep.IsSmiOrInteger32()) {
if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
} else {
rep = Representation::Double();
}
if (rep.IsDouble()) {
// According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have a different semantic, first
@ -2467,8 +2563,8 @@ void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
// (false). Therefore, any comparisons other than ordered relational
// comparisons must cause a deopt when one of their arguments is undefined.
// See also v8:1434
if (!Token::IsOrderedRelationalCompareOp(token_)) {
SetFlag(kDeoptimizeOnUndefined);
if (Token::IsOrderedRelationalCompareOp(token_)) {
SetFlag(kAllowUndefinedAsNaN);
}
}
ChangeRepresentation(rep);
@ -2482,7 +2578,7 @@ void HParameter::PrintDataTo(StringStream* stream) {
void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
access_.PrintTo(stream);
if (HasTypeCheck()) {
stream->Add(" ");
typecheck()->PrintNameTo(stream);
@ -2710,11 +2806,14 @@ bool HLoadKeyed::UsesMustHandleHole() const {
return false;
}
// Holes are only returned as tagged values.
if (!representation().IsTagged()) {
return false;
}
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) {
return false;
}
if (!use->IsChange()) return false;
}
return true;
@ -2728,7 +2827,7 @@ bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (use->CheckFlag(HValue::kDeoptimizeOnUndefined)) {
if (!use->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
return false;
}
}
@ -2804,11 +2903,9 @@ void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
void HStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
access_.PrintTo(stream);
stream->Add(" = ");
value()->PrintNameTo(stream);
stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
if (NeedsWriteBarrier()) {
stream->Add(" (write-barrier)");
}
@ -2941,20 +3038,6 @@ HType HCheckNonSmi::CalculateInferredType() {
}
HType HCheckSmi::CalculateInferredType() {
return HType::Smi();
}
void HCheckSmiOrInt32::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
ASSERT(UseCount() == 1);
HUseIterator use = uses();
Representation r = use.value()->RequiredInputRepresentation(use.index());
UpdateRepresentation(r, h_infer, "checksmiorint32");
}
HType HPhi::CalculateInferredType() {
HType result = HType::Uninitialized();
for (int i = 0; i < OperandCount(); ++i) {
@ -3026,13 +3109,18 @@ HType HUnaryMathOperation::CalculateInferredType() {
}
HType HStringCharFromCode::CalculateInferredType() {
return HType::String();
Representation HUnaryMathOperation::RepresentationFromInputs() {
Representation rep = representation();
// If the actual input representation is more general than what we have
// so far, but not Tagged, use that representation instead.
Representation input_rep = value()->representation();
if (!input_rep.IsTagged()) rep = rep.generalize(input_rep);
return rep;
}
HType HAllocateObject::CalculateInferredType() {
return HType::JSObject();
HType HStringCharFromCode::CalculateInferredType() {
return HType::String();
}
@ -3216,7 +3304,8 @@ HInstruction* HStringAdd::New(
HConstant* c_right = HConstant::cast(right);
HConstant* c_left = HConstant::cast(left);
if (c_left->HasStringValue() && c_right->HasStringValue()) {
return new(zone) HConstant(FACTORY->NewConsString(c_left->StringValue(),
Factory* factory = Isolate::Current()->factory();
return new(zone) HConstant(factory->NewConsString(c_left->StringValue(),
c_right->StringValue()),
Representation::Tagged());
}
@ -3249,7 +3338,7 @@ HInstruction* HStringLength::New(Zone* zone, HValue* string) {
if (FLAG_fold_constants && string->IsConstant()) {
HConstant* c_string = HConstant::cast(string);
if (c_string->HasStringValue()) {
return H_CONSTANT_INT32(c_string->StringValue()->length());
return new(zone) HConstant(c_string->StringValue()->length());
}
}
return new(zone) HStringLength(string);
@ -3371,8 +3460,12 @@ HInstruction* HMathMinMax::New(
}
HInstruction* HMod::New(
Zone* zone, HValue* context, HValue* left, HValue* right) {
HInstruction* HMod::New(Zone* zone,
HValue* context,
HValue* left,
HValue* right,
bool has_fixed_right_arg,
int fixed_right_arg_value) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@ -3391,7 +3484,11 @@ HInstruction* HMod::New(
}
}
}
return new(zone) HMod(context, left, right);
return new(zone) HMod(context,
left,
right,
has_fixed_right_arg,
fixed_right_arg_value);
}
@ -3555,45 +3652,11 @@ void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
Representation HPhi::RepresentationFromInputs() {
bool double_occurred = false;
bool int32_occurred = false;
Representation r = Representation::None();
for (int i = 0; i < OperandCount(); ++i) {
HValue* value = OperandAt(i);
if (value->IsUnknownOSRValue()) {
HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
if (hint_value != NULL) {
Representation hint = hint_value->representation();
if (hint.IsTagged()) return hint;
if (hint.IsDouble()) double_occurred = true;
if (hint.IsInteger32()) int32_occurred = true;
}
continue;
}
if (value->representation().IsDouble()) double_occurred = true;
if (value->representation().IsInteger32()) int32_occurred = true;
if (value->representation().IsTagged()) {
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
if (constant->IsConvertibleToInteger()) {
int32_occurred = true;
} else if (constant->HasNumberValue()) {
double_occurred = true;
} else {
return Representation::Tagged();
}
} else {
if (value->IsPhi() && !IsConvertibleToInteger()) {
return Representation::Tagged();
}
}
}
r = r.generalize(OperandAt(i)->KnownOptimalRepresentation());
}
if (double_occurred) return Representation::Double();
if (int32_occurred) return Representation::Integer32();
return Representation::None();
return r;
}
@ -3649,12 +3712,6 @@ void HSimulate::Verify() {
}
void HCheckSmi::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
void HCheckNonSmi::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
@ -3668,4 +3725,140 @@ void HCheckFunction::Verify() {
#endif
HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
ASSERT(offset >= 0);
ASSERT(offset < FixedArray::kHeaderSize);
if (offset == FixedArray::kLengthOffset) return ForFixedArrayLength();
return HObjectAccess(kInobject, offset);
}
HObjectAccess HObjectAccess::ForJSObjectOffset(int offset) {
ASSERT(offset >= 0);
Portion portion = kInobject;
if (offset == JSObject::kElementsOffset) {
portion = kElementsPointer;
} else if (offset == JSObject::kMapOffset) {
portion = kMaps;
}
return HObjectAccess(portion, offset, Handle<String>::null());
}
HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
ASSERT(offset >= 0);
Portion portion = kInobject;
if (offset == JSObject::kElementsOffset) {
portion = kElementsPointer;
} else if (offset == JSArray::kLengthOffset) {
portion = kArrayLengths;
} else if (offset == JSObject::kMapOffset) {
portion = kMaps;
}
return HObjectAccess(portion, offset, Handle<String>::null());
}
HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset) {
ASSERT(offset >= 0);
return HObjectAccess(kBackingStore, offset, Handle<String>::null());
}
HObjectAccess HObjectAccess::ForField(Handle<Map> map,
LookupResult *lookup, Handle<String> name) {
ASSERT(lookup->IsField() || lookup->IsTransitionToField(*map));
int index;
if (lookup->IsField()) {
index = lookup->GetLocalFieldIndexFromMap(*map);
} else {
Map* transition = lookup->GetTransitionMapFromMap(*map);
int descriptor = transition->LastAdded();
index = transition->instance_descriptors()->GetFieldIndex(descriptor) -
map->inobject_properties();
}
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
int offset = (index * kPointerSize) + map->instance_size();
return HObjectAccess(kInobject, offset);
} else {
// Non-negative property indices are in the properties array.
int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
return HObjectAccess(kBackingStore, offset, name);
}
}
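
ForField() above encodes the property's location in the sign of its index: negative indices count back from the end of the object (in-object properties), non-negative indices address slots in the properties backing store past the FixedArray header. The arithmetic written out with assumed ia32-sized constants; illustrative, not V8 code:

// Illustrative only: the two offset computations in ForField() above.
#include <cassert>

const int kPointerSize = 4;           // assumption: ia32 word size
const int kFixedArrayHeaderSize = 8;  // assumption: map + length words

int FieldOffset(int index, int instance_size) {
  if (index < 0) {
    // In-object: index -1 is the last word of the object, -2 the one before.
    return index * kPointerSize + instance_size;
  }
  // Out-of-object: slot index into the properties backing store.
  return index * kPointerSize + kFixedArrayHeaderSize;
}

int main() {
  assert(FieldOffset(-1, /* instance_size */ 32) == 28);  // last in-object word
  assert(FieldOffset(0, 32) == 8);  // first slot after the array header
  return 0;
}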
void HObjectAccess::SetGVNFlags(HValue *instr, bool is_store) {
// set the appropriate GVN flags for a given load or store instruction
if (is_store) {
// track dominating allocations in order to eliminate write barriers
instr->SetGVNFlag(kDependsOnNewSpacePromotion);
instr->SetFlag(HValue::kTrackSideEffectDominators);
} else {
// try to GVN loads, but don't hoist above map changes
instr->SetFlag(HValue::kUseGVN);
instr->SetGVNFlag(kDependsOnMaps);
}
switch (portion()) {
case kArrayLengths:
instr->SetGVNFlag(is_store
? kChangesArrayLengths : kDependsOnArrayLengths);
break;
case kInobject:
instr->SetGVNFlag(is_store
? kChangesInobjectFields : kDependsOnInobjectFields);
break;
case kDouble:
instr->SetGVNFlag(is_store
? kChangesDoubleFields : kDependsOnDoubleFields);
break;
case kBackingStore:
instr->SetGVNFlag(is_store
? kChangesBackingStoreFields : kDependsOnBackingStoreFields);
break;
case kElementsPointer:
instr->SetGVNFlag(is_store
? kChangesElementsPointer : kDependsOnElementsPointer);
break;
case kMaps:
instr->SetGVNFlag(is_store
? kChangesMaps : kDependsOnMaps);
break;
}
}
void HObjectAccess::PrintTo(StringStream* stream) {
stream->Add(".");
switch (portion()) {
case kArrayLengths:
stream->Add("%length");
break;
case kElementsPointer:
stream->Add("%elements");
break;
case kMaps:
stream->Add("%map");
break;
case kDouble: // fall through
case kInobject:
if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
stream->Add("[in-object]");
break;
case kBackingStore:
if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
stream->Add("[backing-store]");
break;
}
stream->Add("@%d", offset());
}
} } // namespace v8::internal

611
deps/v8/src/hydrogen-instructions.h

File diff suppressed because it is too large

2264
deps/v8/src/hydrogen.cc

File diff suppressed because it is too large

301
deps/v8/src/hydrogen.h

@ -34,7 +34,6 @@
#include "ast.h"
#include "compiler.h"
#include "hydrogen-instructions.h"
#include "type-info.h"
#include "zone.h"
#include "scopes.h"
@ -67,7 +66,6 @@ class HBasicBlock: public ZoneObject {
HInstruction* first() const { return first_; }
HInstruction* last() const { return last_; }
void set_last(HInstruction* instr) { last_ = instr; }
HInstruction* GetLastInstruction();
HControlInstruction* end() const { return end_; }
HLoopInformation* loop_information() const { return loop_information_; }
const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
@ -110,9 +108,13 @@ class HBasicBlock: public ZoneObject {
int LoopNestingDepth() const;
void SetInitialEnvironment(HEnvironment* env);
void ClearEnvironment() { last_environment_ = NULL; }
void ClearEnvironment() {
ASSERT(IsFinished());
ASSERT(end()->SuccessorCount() == 0);
last_environment_ = NULL;
}
bool HasEnvironment() const { return last_environment_ != NULL; }
void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
void UpdateEnvironment(HEnvironment* env);
HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
void set_parent_loop_header(HBasicBlock* block) {
@ -156,7 +158,11 @@ class HBasicBlock: public ZoneObject {
// Simulate (caller's environment)
// Goto (target block)
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
void MarkAsInlineReturnTarget(HBasicBlock* inlined_entry_block) {
is_inline_return_target_ = true;
inlined_entry_block_ = inlined_entry_block;
}
HBasicBlock* inlined_entry_block() { return inlined_entry_block_; }
bool IsDeoptimizing() const { return is_deoptimizing_; }
void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
@ -199,10 +205,12 @@ class HBasicBlock: public ZoneObject {
int last_instruction_index_;
ZoneList<int> deleted_phis_;
HBasicBlock* parent_loop_header_;
bool is_inline_return_target_;
bool is_deoptimizing_;
bool dominates_loop_successors_;
bool is_osr_entry_;
// For blocks marked as inline return target: the block with HEnterInlined.
HBasicBlock* inlined_entry_block_;
bool is_inline_return_target_ : 1;
bool is_deoptimizing_ : 1;
bool dominates_loop_successors_ : 1;
bool is_osr_entry_ : 1;
};
@ -286,6 +294,7 @@ class HGraph: public ZoneObject {
void RestoreActualValues();
void DeadCodeElimination(const char *phase_name);
void PropagateDeoptimizingMark();
void AnalyzeAndPruneEnvironmentLiveness();
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
@ -303,8 +312,6 @@ class HGraph: public ZoneObject {
HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
HConstant* GetConstant0();
HConstant* GetConstant1();
HConstant* GetConstantSmi0();
HConstant* GetConstantSmi1();
HConstant* GetConstantMinus1();
HConstant* GetConstantTrue();
HConstant* GetConstantFalse();
@ -363,6 +370,13 @@ class HGraph: public ZoneObject {
return type_change_checksum_;
}
void update_maximum_environment_size(int environment_size) {
if (environment_size > maximum_environment_size_) {
maximum_environment_size_ = environment_size;
}
}
int maximum_environment_size() { return maximum_environment_size_; }
bool use_optimistic_licm() {
return use_optimistic_licm_;
}
@ -403,10 +417,8 @@ class HGraph: public ZoneObject {
}
private:
HConstant* GetConstantInt32(SetOncePointer<HConstant>* pointer,
int32_t integer_value);
HConstant* GetConstantSmi(SetOncePointer<HConstant>* pointer,
int32_t integer_value);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
int32_t integer_value);
void MarkLive(HValue* ref, HValue* instr, ZoneList<HValue*>* worklist);
void MarkLiveInstructions();
@ -439,8 +451,6 @@ class HGraph: public ZoneObject {
SetOncePointer<HConstant> undefined_constant_;
SetOncePointer<HConstant> constant_0_;
SetOncePointer<HConstant> constant_1_;
SetOncePointer<HConstant> constant_smi_0_;
SetOncePointer<HConstant> constant_smi_1_;
SetOncePointer<HConstant> constant_minus1_;
SetOncePointer<HConstant> constant_true_;
SetOncePointer<HConstant> constant_false_;
@ -460,6 +470,7 @@ class HGraph: public ZoneObject {
bool has_soft_deoptimize_;
bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
int maximum_environment_size_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@ -521,6 +532,10 @@ class HEnvironment: public ZoneObject {
return parameter_count() + specials_count() + local_count();
}
int first_local_index() const {
return parameter_count() + specials_count();
}
void Bind(Variable* variable, HValue* value) {
Bind(IndexFor(variable), value);
}
@ -618,6 +633,22 @@ class HEnvironment: public ZoneObject {
values_[index] = value;
}
// Map a variable to an environment index. Parameter indices are shifted
// by 1 (receiver is parameter index -1 but environment index 0).
// Stack-allocated local indices are shifted by the number of parameters.
int IndexFor(Variable* variable) const {
ASSERT(variable->IsStackAllocated());
int shift = variable->IsParameter()
? 1
: parameter_count_ + specials_count_;
return variable->index() + shift;
}
bool is_local_index(int i) const {
return i >= first_local_index() &&
i < first_expression_index();
}
void PrintTo(StringStream* stream);
void PrintToStd();
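
A worked example of the IndexFor() mapping above, under the assumption that parameter_count counts the receiver slot too, matching the value-array layout [parameters][specials][locals][temporaries]; illustrative, not V8 code:

// Illustrative only: the shifts applied by HEnvironment::IndexFor.
#include <cassert>

int IndexForParameter(int parameter_index) {
  return parameter_index + 1;  // receiver is parameter -1, environment 0
}

int IndexForLocal(int local_index, int parameter_count, int specials_count) {
  return local_index + parameter_count + specials_count;
}

int main() {
  // Assumed frame: receiver + 2 declared parameters, 1 specials slot.
  assert(IndexForParameter(-1) == 0);  // receiver
  assert(IndexForParameter(1) == 2);   // second declared parameter
  assert(IndexForLocal(0, /* parameter_count */ 3, /* specials */ 1) == 4);
  return 0;
}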
@ -645,17 +676,6 @@ class HEnvironment: public ZoneObject {
void Initialize(int parameter_count, int local_count, int stack_height);
void Initialize(const HEnvironment* other);
// Map a variable to an environment index. Parameter indices are shifted
// by 1 (receiver is parameter index -1 but environment index 0).
// Stack-allocated local indices are shifted by the number of parameters.
int IndexFor(Variable* variable) const {
ASSERT(variable->IsStackAllocated());
int shift = variable->IsParameter()
? 1
: parameter_count_ + specials_count_;
return variable->index() + shift;
}
Handle<JSFunction> closure_;
// Value array [parameters] [specials] [locals] [temporaries].
ZoneList<HValue*> values_;
@ -798,12 +818,10 @@ class TestContext: public AstContext {
public:
TestContext(HOptimizedGraphBuilder* owner,
Expression* condition,
TypeFeedbackOracle* oracle,
HBasicBlock* if_true,
HBasicBlock* if_false)
: AstContext(owner, Expression::kTest),
condition_(condition),
oracle_(oracle),
if_true_(if_true),
if_false_(if_false) {
}
@ -820,7 +838,6 @@ class TestContext: public AstContext {
}
Expression* condition() const { return condition_; }
TypeFeedbackOracle* oracle() const { return oracle_; }
HBasicBlock* if_true() const { return if_true_; }
HBasicBlock* if_false() const { return if_false_; }
@ -830,7 +847,6 @@ class TestContext: public AstContext {
void BuildBranch(HValue* value);
Expression* condition_;
TypeFeedbackOracle* oracle_;
HBasicBlock* if_true_;
HBasicBlock* if_false_;
};
@ -840,12 +856,10 @@ class FunctionState {
public:
FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
InliningKind inlining_kind);
~FunctionState();
CompilationInfo* compilation_info() { return compilation_info_; }
TypeFeedbackOracle* oracle() { return oracle_; }
AstContext* call_context() { return call_context_; }
InliningKind inlining_kind() const { return inlining_kind_; }
HBasicBlock* function_return() { return function_return_; }
@ -871,7 +885,6 @@ class FunctionState {
HOptimizedGraphBuilder* owner_;
CompilationInfo* compilation_info_;
TypeFeedbackOracle* oracle_;
// During function inlining, expression context of the call being
// inlined. NULL when not inlining.
@ -966,11 +979,7 @@ class HGraphBuilder {
HInstruction* AddInstruction(HInstruction* instr);
void AddSimulate(BailoutId id,
RemovableSimulate removable = FIXED_SIMULATE);
HBoundsCheck* AddBoundsCheck(
HValue* index,
HValue* length,
BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY,
Representation r = Representation::None());
HBoundsCheck* AddBoundsCheck(HValue* index, HValue* length);
HReturn* AddReturn(HValue* value);
@ -992,11 +1001,6 @@ class HGraphBuilder {
HValue* BuildCheckMap(HValue* obj, Handle<Map> map);
// Building common constructs
HLoadNamedField* DoBuildLoadNamedField(HValue* object,
bool inobject,
Representation representation,
int offset);
HInstruction* BuildExternalArrayElementAccess(
HValue* external_elements,
HValue* checked_key,
@ -1036,11 +1040,26 @@ class HGraphBuilder {
ElementsKind elements_kind,
bool is_store,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode,
Representation checked_index_representation = Representation::None());
KeyedAccessStoreMode store_mode);
HLoadNamedField* AddLoad(
HValue *object,
HObjectAccess access,
HValue *typecheck = NULL,
Representation representation = Representation::Tagged());
HLoadNamedField* BuildLoadNamedField(
HValue* object,
HObjectAccess access,
Representation representation);
HStoreNamedField* AddStore(
HValue *object,
HObjectAccess access,
HValue *val,
Representation representation = Representation::Tagged());
HInstruction* BuildStoreMap(HValue* object, HValue* map);
HInstruction* BuildStoreMap(HValue* object, Handle<Map> map);
HStoreNamedField* AddStoreMapConstant(HValue *object, Handle<Map>);
HLoadNamedField* AddLoadElements(HValue *object, HValue *typecheck = NULL);
@ -1198,8 +1217,7 @@ class HGraphBuilder {
HValue* BeginBody(
HValue* initial,
HValue* terminating,
Token::Value token,
Representation input_representation = Representation::Integer32());
Token::Value token);
void EndBody();
private:
@ -1241,7 +1259,11 @@ class HGraphBuilder {
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* allocation_site_payload,
AllocationSiteMode mode);
bool disable_allocation_sites);
JSArrayBuilder(HGraphBuilder* builder,
ElementsKind kind,
HValue* constructor_function);
HValue* AllocateEmptyArray();
HValue* AllocateArray(HValue* capacity, HValue* length_field,
@ -1264,6 +1286,7 @@ class HGraphBuilder {
}
HValue* EmitMapCode(HValue* context);
HValue* EmitInternalMapCode();
HValue* EstablishEmptyArrayAllocationSize();
HValue* EstablishAllocationSize(HValue* length_node);
HValue* AllocateArray(HValue* size_in_bytes, HValue* capacity,
@ -1273,6 +1296,7 @@ class HGraphBuilder {
ElementsKind kind_;
AllocationSiteMode mode_;
HValue* allocation_site_payload_;
HValue* constructor_function_;
HInnerAllocatedObject* elements_location_;
};
@ -1280,13 +1304,13 @@ class HGraphBuilder {
ElementsKind kind,
HValue* capacity);
void BuildInitializeElements(HValue* elements,
ElementsKind kind,
HValue* capacity);
void BuildInitializeElementsHeader(HValue* elements,
ElementsKind kind,
HValue* capacity);
HValue* BuildAllocateAndInitializeElements(HValue* context,
ElementsKind kind,
HValue* capacity);
HValue* BuildAllocateElementsAndInitializeElementsHeader(HValue* context,
ElementsKind kind,
HValue* capacity);
// array must have been allocated with enough room for
// 1) the JSArray, 2) a AllocationSiteInfo if mode requires it,
@ -1326,7 +1350,6 @@ class HGraphBuilder {
void BuildCompareNil(
HValue* value,
EqualityKind kind,
CompareNilICStub::Types types,
Handle<Map> map,
int position,
@ -1350,9 +1373,6 @@ class HGraphBuilder {
class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
public:
enum BreakType { BREAK, CONTINUE };
enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
// can have a separate lifetime.
@ -1397,6 +1417,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
BreakAndContinueScope* next() { return next_; }
// Search the break stack for a break or continue target.
enum BreakType { BREAK, CONTINUE };
HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra);
private:
@ -1405,7 +1426,7 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
BreakAndContinueScope* next_;
};
HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
explicit HOptimizedGraphBuilder(CompilationInfo* info);
virtual bool BuildGraph();
@ -1423,8 +1444,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HBasicBlock* second,
BailoutId join_id);
TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
FunctionState* function_state() const { return function_state_; }
void VisitDeclarations(ZoneList<Declaration*>* declarations);
@ -1535,6 +1554,45 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HValue* Top() const { return environment()->Top(); }
void Drop(int n) { environment()->Drop(n); }
void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
bool IsEligibleForEnvironmentLivenessAnalysis(Variable* var,
int index,
HValue* value,
HEnvironment* env) {
if (!FLAG_analyze_environment_liveness) return false;
// |this| and |arguments| are always live; zapping parameters isn't
// safe because function.arguments can inspect them at any time.
return !var->is_this() &&
!var->is_arguments() &&
!value->IsArgumentsObject() &&
env->is_local_index(index);
}
void BindIfLive(Variable* var, HValue* value) {
HEnvironment* env = environment();
int index = env->IndexFor(var);
env->Bind(index, value);
if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
HEnvironmentMarker* bind =
new(zone()) HEnvironmentMarker(HEnvironmentMarker::BIND, index);
AddInstruction(bind);
#ifdef DEBUG
bind->set_closure(env->closure());
#endif
}
}
HValue* LookupAndMakeLive(Variable* var) {
HEnvironment* env = environment();
int index = env->IndexFor(var);
HValue* value = env->Lookup(index);
if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
HEnvironmentMarker* lookup =
new(zone()) HEnvironmentMarker(HEnvironmentMarker::LOOKUP, index);
AddInstruction(lookup);
#ifdef DEBUG
lookup->set_closure(env->closure());
#endif
}
return value;
}
// The value of the arguments object is allowed in some but not most value
// contexts. (It's allowed in all effect contexts and disallowed in all
@ -1692,9 +1750,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
bool is_store,
bool* has_side_effects);
HLoadNamedField* BuildLoadNamedField(HValue* object,
Handle<Map> map,
LookupResult* result);
HInstruction* BuildLoadNamedGeneric(HValue* object,
Handle<String> name,
Property* expr);
@ -1750,13 +1805,37 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
int* offset,
AllocationSiteMode mode);
MUST_USE_RESULT HValue* BuildCopyObjectHeader(
MUST_USE_RESULT HValue* BuildEmitObjectHeader(
Handle<JSObject> boilerplate_object,
HInstruction* target,
int object_offset,
int elements_offset,
int elements_size);
void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
Handle<JSObject> original_boilerplate_object,
HValue* object_properties,
HInstruction* target,
int* offset);
void BuildEmitElements(Handle<FixedArrayBase> elements,
Handle<FixedArrayBase> original_elements,
ElementsKind kind,
HValue* object_elements,
HInstruction* target,
int* offset);
void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
HValue* object_elements);
void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
Handle<FixedArrayBase> original_elements,
ElementsKind kind,
HValue* object_elements,
HInstruction* target,
int* offset);
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
@ -1798,90 +1877,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
Zone* AstContext::zone() const { return owner_->zone(); }
class HValueMap: public ZoneObject {
public:
explicit HValueMap(Zone* zone)
: array_size_(0),
lists_size_(0),
count_(0),
present_flags_(0),
array_(NULL),
lists_(NULL),
free_list_head_(kNil) {
ResizeLists(kInitialSize, zone);
Resize(kInitialSize, zone);
}
void Kill(GVNFlagSet flags);
void Add(HValue* value, Zone* zone) {
present_flags_.Add(value->gvn_flags());
Insert(value, zone);
}
HValue* Lookup(HValue* value) const;
HValueMap* Copy(Zone* zone) const {
return new(zone) HValueMap(zone, this);
}
bool IsEmpty() const { return count_ == 0; }
private:
// A linked list of HValue* values. Stored in arrays.
struct HValueMapListElement {
HValue* value;
int next; // Index in the array of the next list element.
};
static const int kNil = -1; // The end of a linked list
// Must be a power of 2.
static const int kInitialSize = 16;
HValueMap(Zone* zone, const HValueMap* other);
void Resize(int new_size, Zone* zone);
void ResizeLists(int new_size, Zone* zone);
void Insert(HValue* value, Zone* zone);
uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
int array_size_;
int lists_size_;
int count_; // The number of values stored in the HValueMap.
GVNFlagSet present_flags_; // All flags that are in any value in the
// HValueMap.
HValueMapListElement* array_; // Primary store - contains the first value
// with a given hash. Colliding elements are stored in linked lists.
HValueMapListElement* lists_; // The linked lists containing hash collisions.
int free_list_head_; // Unused elements in lists_ are on the free list.
};
class HSideEffectMap BASE_EMBEDDED {
public:
HSideEffectMap();
explicit HSideEffectMap(HSideEffectMap* other);
HSideEffectMap& operator= (const HSideEffectMap& other);
void Kill(GVNFlagSet flags);
void Store(GVNFlagSet flags, HInstruction* instr);
bool IsEmpty() const { return count_ == 0; }
inline HInstruction* operator[](int i) const {
ASSERT(0 <= i);
ASSERT(i < kNumberOfTrackedSideEffects);
return data_[i];
}
inline HInstruction* at(int i) const { return operator[](i); }
private:
int count_;
HInstruction* data_[kNumberOfTrackedSideEffects];
};
class HStatistics: public Malloced {
public:
HStatistics()

7
deps/v8/src/ia32/assembler-ia32-inl.h

@ -333,8 +333,7 @@ Immediate::Immediate(Handle<Object> handle) {
#ifdef DEBUG
Isolate* isolate = Isolate::Current();
#endif
ALLOW_HANDLE_DEREF(isolate,
"using and embedding raw address, heap object check");
AllowDeferredHandleDereference using_raw_address;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
ASSERT(!isolate->heap()->InNewSpace(obj));
@ -368,7 +367,7 @@ void Assembler::emit(uint32_t x) {
void Assembler::emit(Handle<Object> handle) {
ALLOW_HANDLE_DEREF(isolate(), "heap object check");
AllowDeferredHandleDereference heap_object_check;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
ASSERT(!isolate()->heap()->InNewSpace(obj));
@ -395,7 +394,7 @@ void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
void Assembler::emit(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId id) {
ALLOW_HANDLE_DEREF(isolate(), "embedding raw address");
AllowDeferredHandleDereference embedding_raw_address;
emit(reinterpret_cast<intptr_t>(code.location()), rmode, id);
}

2
deps/v8/src/ia32/assembler-ia32.cc

@ -2351,7 +2351,7 @@ void Assembler::movd(const Operand& dst, XMMRegister src) {
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(CpuFeatures::IsSupported(SSE4_1));
ASSERT(IsEnabled(SSE4_1));
ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
EMIT(0x66);

2
deps/v8/src/ia32/assembler-ia32.h

@ -411,7 +411,7 @@ class Operand BASE_EMBEDDED {
}
static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
ALLOW_HANDLE_DEREF(Isolate::Current(), "embedding raw address");
AllowDeferredHandleDereference embedding_raw_address;
return Operand(reinterpret_cast<int32_t>(cell.location()),
RelocInfo::GLOBAL_PROPERTY_CELL);
}

52
deps/v8/src/ia32/builtins-ia32.cc

@ -486,6 +486,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
// No type feedback cell is available
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(), masm->isolate());
__ mov(ebx, Immediate(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@ -1455,14 +1459,20 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
ArrayNativeCode(masm, false, &generic_array_code);
// Jump to the generic internal array code in case the specialized code cannot
// handle the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->InternalArrayCodeGeneric();
__ jmp(array_code, RelocInfo::CODE_TARGET);
if (FLAG_optimize_constructed_arrays) {
// tail call a stub
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, false, &generic_array_code);
// Jump to the generic internal array code in case the specialized code
// cannot handle the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->InternalArrayCodeGeneric();
__ jmp(array_code, RelocInfo::CODE_TARGET);
}
}
@ -1488,14 +1498,24 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
// Run the native code for the Array function called as a normal function.
ArrayNativeCode(masm, false, &generic_array_code);
// Jump to the generic array code in case the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->ArrayCodeGeneric();
__ jmp(array_code, RelocInfo::CODE_TARGET);
if (FLAG_optimize_constructed_arrays) {
// tail call a stub
Handle<Object> undefined_sentinel(
masm->isolate()->heap()->undefined_value(),
masm->isolate());
__ mov(ebx, Immediate(undefined_sentinel));
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
} else {
ArrayNativeCode(masm, false, &generic_array_code);
// Jump to the generic array code in case the specialized code
// cannot handle the construction.
__ bind(&generic_array_code);
Handle<Code> array_code =
masm->isolate()->builtins()->ArrayCodeGeneric();
__ jmp(array_code, RelocInfo::CODE_TARGET);
}
}
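Both builtins now share one shape: when FLAG_optimize_constructed_arrays is set, tail call the new feedback-aware constructor stub (for the public Array builtin, ebx is first loaded with the undefined sentinel, meaning "no type feedback cell available"); otherwise keep the old path of specialized native code with a jump to the generic builtin. A compilable sketch of that gating, with stand-in helpers since the real code emits ia32 assembly:

    #include <cstdio>

    // Stand-ins for the code the builtin would emit; purely illustrative.
    static void TailCallConstructorStub(const char* which) {
      std::printf("tail call %s constructor stub\n", which);
    }
    static bool ArrayNativeCodeHandled() { return false; }
    static void JumpToGenericCode(const char* which) {
      std::printf("jump to generic %s code\n", which);
    }

    void GenerateArrayBuiltin(bool optimize_constructed_arrays,
                              const char* which) {
      if (optimize_constructed_arrays) {
        TailCallConstructorStub(which);      // new, stub-based path
      } else if (!ArrayNativeCodeHandled()) {
        JumpToGenericCode(which);            // old fallback path
      }
    }

    int main() {
      GenerateArrayBuiltin(true, "InternalArray");
      GenerateArrayBuiltin(false, "Array");
    }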

397
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -30,7 +30,6 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
#include "builtins-decls.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
@@ -50,7 +49,6 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
static Register registers[] = { eax, ebx, ecx };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}
@@ -62,7 +60,6 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
static Register registers[] = { eax, ebx, ecx, edx };
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
}
@@ -74,7 +71,6 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
static Register registers[] = { edx, ecx };
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}
@@ -86,7 +82,6 @@ void LoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { edx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -97,7 +92,6 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
static Register registers[] = { edx };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ = NULL;
}
@@ -143,7 +137,29 @@ static void InitializeArrayConstructorDescriptor(
descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ArrayConstructor_StubFailure);
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
}
static void InitializeInternalArrayConstructorDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor,
int constant_stack_parameter_count) {
// register state
// eax -- number of arguments
// edi -- constructor function
static Register registers[] = { edi };
descriptor->register_param_count_ = 1;
if (constant_stack_parameter_count != 0) {
// stack param count needs (constructor pointer, and single argument)
descriptor->stack_parameter_count_ = &eax;
}
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}
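The new descriptor initializer records a single register parameter (edi, the constructor) and, only in the variable-argument case, points stack_parameter_count_ at eax, so consumers can tell a fixed-size frame from one whose argument count is dynamic. That also explains the deleted "stack_parameter_count_ = NULL" lines above: a null default now covers the common case. A simplified sketch of the fields involved (types are stand-ins, field names follow the diff):

    // Simplified sketch of what a CodeStubInterfaceDescriptor carries;
    // this is illustrative, not V8's actual declaration.
    struct Register { int code; };
    using Address = void (*)();

    struct CodeStubInterfaceDescriptorSketch {
      int register_param_count_ = 0;
      Register* register_params_ = nullptr;
      // Null when the stack parameter count is a compile-time constant;
      // otherwise names the register (eax here) holding the dynamic count.
      Register* stack_parameter_count_ = nullptr;
      int hint_stack_parameter_count_ = 0;
      Address deoptimization_handler_ = nullptr;
    };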
@@ -168,6 +184,27 @@ void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
}
void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
}
void CompareNilICStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -176,8 +213,20 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(CompareNilIC_Miss);
descriptor->miss_handler_ =
ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate);
descriptor->SetMissHandler(
ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}
void ToBooleanStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax };
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
FUNCTION_ADDR(ToBooleanIC_Miss);
descriptor->SetMissHandler(
ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}
@@ -200,7 +249,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
for (int i = 0; i < param_count; ++i) {
__ push(descriptor->register_params_[i]);
}
ExternalReference miss = descriptor->miss_handler_;
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
}
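The raw miss_handler_ field becomes a SetMissHandler()/miss_handler() pair, as in the CompareNilICStub and ToBooleanStub hunks above. A plausible motivation, assumed here rather than verified against the V8 sources, is to let the getter assert that a handler was actually registered before GenerateLightweightMiss uses it:

    #include <cassert>

    struct ExternalReference { const void* address = nullptr; };

    // Sketch of the accessor pair; the has_miss_handler_ guard is an
    // assumed rationale, not V8's verified implementation.
    class DescriptorSketch {
     public:
      void SetMissHandler(ExternalReference handler) {
        miss_handler_ = handler;
        has_miss_handler_ = true;
      }
      ExternalReference miss_handler() const {
        assert(has_miss_handler_);  // catch stubs that never set one
        return miss_handler_;
      }
     private:
      ExternalReference miss_handler_;
      bool has_miss_handler_ = false;
    };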
@@ -469,116 +518,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
Label patch;
Factory* factory = masm->isolate()->factory();
const Register argument = eax;
const Register map = edx;
if (!types_.IsEmpty()) {
__ mov(argument, Operand(esp, 1 * kPointerSize));
}
// undefined -> false
CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
// Boolean -> its value
CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
// 'null' -> false.
CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
if (types_.Contains(SMI)) {
// Smis: 0 -> false, all other -> true
Label not_smi;
__ JumpIfNotSmi(argument, &not_smi, Label::kNear);
// argument contains the correct return value already.
if (!tos_.is(argument)) {
__ mov(tos_, argument);
}
__ ret(1 * kPointerSize);
__ bind(&not_smi);
} else if (types_.NeedsMap()) {
// If we need a map later and have a Smi -> patch.
__ JumpIfSmi(argument, &patch, Label::kNear);
}
if (types_.NeedsMap()) {
__ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
if (types_.CanBeUndetectable()) {
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
// Undetectable -> false.
Label not_undetectable;
__ j(zero, &not_undetectable, Label::kNear);
__ Set(tos_, Immediate(0));
__ ret(1 * kPointerSize);
__ bind(&not_undetectable);
}
}
if (types_.Contains(SPEC_OBJECT)) {
// spec object -> true.
Label not_js_object;
__ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
__ j(below, &not_js_object, Label::kNear);
// argument contains the correct return value already.
if (!tos_.is(argument)) {
__ Set(tos_, Immediate(1));
}
__ ret(1 * kPointerSize);
__ bind(&not_js_object);
}
if (types_.Contains(STRING)) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
__ mov(tos_, FieldOperand(argument, String::kLengthOffset));
__ ret(1 * kPointerSize); // the string length is OK as the return value
__ bind(&not_string);
}
if (types_.Contains(SYMBOL)) {
// Symbol value -> true.
Label not_symbol;
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(not_equal, &not_symbol, Label::kNear);
__ bind(&not_symbol);
}
if (types_.Contains(HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number, false_result;
__ cmp(map, factory->heap_number_map());
__ j(not_equal, &not_heap_number, Label::kNear);
__ fldz();
__ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
__ FCmp();
__ j(zero, &false_result, Label::kNear);
// argument contains the correct return value already.
if (!tos_.is(argument)) {
__ Set(tos_, Immediate(1));
}
__ ret(1 * kPointerSize);
__ bind(&false_result);
__ Set(tos_, Immediate(0));
__ ret(1 * kPointerSize);
__ bind(&not_heap_number);
}
__ bind(&patch);
GenerateTypeTransition(masm);
}
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@@ -614,44 +553,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
Type type,
Heap::RootListIndex value,
bool result) {
const Register argument = eax;
if (types_.Contains(type)) {
// If we see an expected oddball, return its ToBoolean value tos_.
Label different_value;
__ CompareRoot(argument, value);
__ j(not_equal, &different_value, Label::kNear);
if (!result) {
// If we have to return zero, there is no way around clearing tos_.
__ Set(tos_, Immediate(0));
} else if (!tos_.is(argument)) {
// If we have to return non-zero, we can re-use the argument if it is the
// same register as the result, because we never see Smi-zero here.
__ Set(tos_, Immediate(1));
}
__ ret(1 * kPointerSize);
__ bind(&different_value);
}
}
void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Get return address, operand is now on top of stack.
__ push(Immediate(Smi::FromInt(tos_.code())));
__ push(Immediate(Smi::FromInt(types_.ToByte())));
__ push(ecx); // Push return address.
// Patch the caller to an appropriate specialized stub and return the
// operation result to the caller of the stub.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
3,
1);
}
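The hand-written ia32 ToBooleanStub and its CheckOddball/GenerateTypeTransition helpers are deleted because ToBoolean is now a Hydrogen-generated IC: note the new ToBooleanStub::InitializeInterfaceDescriptor above and the ToBooleanStub::GetUninitialized call sites in full-codegen-ia32.cc below. The truth table the removed assembly encoded, restated over a toy variant type (V8 of course works on tagged heap values, not C++ structs):

    #include <cmath>
    #include <string>

    struct Value {
      enum Kind { kUndefined, kNull, kBoolean, kSmi, kString, kHeapNumber,
                  kSymbol, kSpecObject } kind;
      bool boolean = false;
      int smi = 0;
      std::string string;
      double number = 0;
      bool undetectable = false;  // undetectable objects map to false
    };

    bool ToBoolean(const Value& v) {
      switch (v.kind) {
        case Value::kUndefined:
        case Value::kNull:       return false;
        case Value::kBoolean:    return v.boolean;
        case Value::kSmi:        return v.smi != 0;
        case Value::kString:     return !v.string.empty();
        // false iff +0, -0, or NaN (-0.0 != 0 is false, so -0 is covered).
        case Value::kHeapNumber: return v.number != 0 && !std::isnan(v.number);
        case Value::kSymbol:     return true;
        case Value::kSpecObject: return !v.undetectable;
      }
      return false;
    }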
class FloatingPointHelper : public AllStatic {
public:
enum ArgLocation {
@@ -707,12 +608,6 @@ class FloatingPointHelper : public AllStatic {
// Expects operands in edx, eax.
static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
// Checks that the two floating point numbers loaded into xmm0 and xmm1
// have int32 values.
static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch);
// Checks that |operand| has an int32 value. If |int32_result| is different
// from |scratch|, it will contain that int32 value.
static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
@@ -1611,7 +1506,7 @@ static void BinaryOpStub_GenerateSmiCode(
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label call_runtime;
Label right_arg_changed, call_runtime;
switch (op_) {
case Token::ADD:
@@ -1632,6 +1527,13 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
UNREACHABLE();
}
if (op_ == Token::MOD && has_fixed_right_arg_) {
// It is guaranteed that the value will fit into a Smi, because if it
// didn't, we wouldn't be here, see BinaryOp_Patch.
__ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value())));
__ j(not_equal, &right_arg_changed);
}
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
BinaryOpStub_GenerateSmiCode(
@@ -1643,6 +1545,7 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
// Code falls through if the result is not returned as either a smi or heap
// number.
__ bind(&right_arg_changed);
switch (op_) {
case Token::ADD:
case Token::SUB:
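For Token::MOD with has_fixed_right_arg_, the stub is specialized to one known divisor: if the incoming right operand differs, it branches to right_arg_changed, which falls through to GenerateTypeTransition so the IC can re-patch. Specializing on a constant divisor is presumably what makes this worthwhile (a known power of two can compile down to masking); that rationale is inferred, the diff itself only shows the guard. The pattern in plain C++:

    #include <cstdio>

    // "Specialize on one constant, bail out on change": the guard the
    // Smi MOD path above implements. Values are illustrative.
    int ModSpecialized(int left, int right, int fixed_right_arg_value,
                       bool* needs_repatch) {
      if (right != fixed_right_arg_value) {
        *needs_repatch = true;  // right_arg_changed: re-patch the IC
        return 0;
      }
      *needs_repatch = false;
      return left % fixed_right_arg_value;  // divisor is now a constant
    }

    int main() {
      bool repatch;
      std::printf("%d\n", ModSpecialized(10, 8, 8, &repatch));  // prints 2
      ModSpecialized(10, 7, 8, &repatch);  // repatch is now true
    }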
@@ -1745,8 +1648,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
Label not_floats;
Label not_int32;
Label not_floats, not_int32, right_arg_changed;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
// It could be that only SMIs have been seen at either the left
@@ -1762,8 +1664,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ JumpIfNotSmi(eax, &not_int32);
}
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
FloatingPointHelper::CheckSSE2OperandIsInt32(
masm, &not_int32, xmm0, ebx, ecx, xmm2);
FloatingPointHelper::CheckSSE2OperandIsInt32(
masm, &not_int32, xmm1, edi, ecx, xmm2);
if (op_ == Token::MOD) {
if (has_fixed_right_arg_) {
__ cmp(edi, Immediate(fixed_right_arg_value()));
__ j(not_equal, &right_arg_changed);
}
GenerateRegisterArgsPush(masm);
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
} else {
@@ -1816,6 +1725,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&not_floats);
__ bind(&not_int32);
__ bind(&right_arg_changed);
GenerateTypeTransition(masm);
break;
}
@@ -2907,14 +2817,6 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
}
void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch) {
CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, scratch, xmm2);
CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, scratch, xmm2);
}
void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
Label* non_int32,
XMMRegister operand,
@@ -3468,6 +3370,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
// esp[0] : return address
// esp[4] : number of parameters (tagged)
// esp[8] : receiver displacement
@@ -3599,7 +3503,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ j(zero, &skip_parameter_map);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(FACTORY->non_strict_arguments_elements_map()));
Immediate(isolate->factory()->non_strict_arguments_elements_map()));
__ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
__ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
@@ -3620,7 +3524,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ add(ebx, Operand(esp, 4 * kPointerSize));
__ sub(ebx, eax);
__ mov(ecx, FACTORY->the_hole_value());
__ mov(ecx, isolate->factory()->the_hole_value());
__ mov(edx, edi);
__ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
// eax = loop variable (tagged)
@@ -3655,7 +3559,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// esp[16] = address of receiver argument
// Copy arguments header and remaining slots (if there are any).
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(FACTORY->fixed_array_map()));
Immediate(isolate->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
Label arguments_loop, arguments_test;
@@ -3691,6 +3595,8 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
// esp[0] : return address
// esp[4] : number of parameters
// esp[8] : receiver displacement
@@ -3761,7 +3667,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(FACTORY->fixed_array_map()));
Immediate(isolate->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
// Untag the length for the loop below.
@@ -4778,7 +4684,6 @@ static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// megamorphic.
// ebx : cache cell for call target
// edi : the function to call
ASSERT(!FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done;
@@ -7860,14 +7765,16 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Ecx is the only volatile register we must save.
const int kNumSavedRegisters = 1;
__ push(ecx);
// Calculate and push the original stack pointer.
__ lea(eax, Operand(esp, kPointerSize));
__ lea(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
__ push(eax);
// Calculate and push the function address.
__ mov(eax, Operand(eax, 0));
// Retrieve our return address and use it to calculate the calling
// function's address.
__ mov(eax, Operand(esp, (kNumSavedRegisters + 1) * kPointerSize));
__ sub(eax, Immediate(Assembler::kCallInstructionLength));
__ push(eax);
@@ -7964,8 +7871,12 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
T stub(kind, false);
stub.GetCode(isolate)->set_is_pregenerated(true);
if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
T stub1(kind, true);
stub1.GetCode(isolate)->set_is_pregenerated(true);
}
}
}
@@ -7980,6 +7891,21 @@ void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
}
void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
Isolate* isolate) {
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
stubh1.GetCode(isolate)->set_is_pregenerated(true);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
stubh2.GetCode(isolate)->set_is_pregenerated(true);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
stubh3.GetCode(isolate)->set_is_pregenerated(true);
}
}
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count_ == ANY)
@@ -8065,6 +7991,107 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
}
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
Label not_zero_case, not_one_case;
Label normal_sequence;
__ test(eax, eax);
__ j(not_zero, &not_zero_case);
InternalArrayNoArgumentConstructorStub stub0(kind);
__ TailCallStub(&stub0);
__ bind(&not_zero_case);
__ cmp(eax, 1);
__ j(greater, &not_one_case);
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument
__ mov(ecx, Operand(esp, kPointerSize));
__ test(ecx, ecx);
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
stub1_holey(GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey);
}
__ bind(&normal_sequence);
InternalArraySingleArgumentConstructorStub stub1(kind);
__ TailCallStub(&stub1);
__ bind(&not_one_case);
InternalArrayNArgumentsConstructorStub stubN(kind);
__ TailCallStub(&stubN);
}
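GenerateCase dispatches on argument count: zero- and N-argument calls go straight to the matching stub, while a single-argument call on a packed kind peeks at the argument, since new Array(n) with n > 0 allocates n holes and therefore needs the holey variant. The same logic in C++:

    #include <cstdio>

    enum ElementsKind { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };

    static bool IsFastPackedElementsKind(ElementsKind k) {
      return k == FAST_ELEMENTS;
    }
    static ElementsKind GetHoleyElementsKind(ElementsKind) {
      return FAST_HOLEY_ELEMENTS;
    }

    // C++ restatement of the assembly dispatch in GenerateCase above.
    void Dispatch(int argc, int first_arg, ElementsKind kind) {
      if (argc == 0) {
        std::printf("NoArgument stub, kind %d\n", kind);
      } else if (argc == 1) {
        if (IsFastPackedElementsKind(kind) && first_arg != 0) {
          // new Array(n) with n > 0 starts out as n holes, so the
          // packed kind must widen to its holey counterpart.
          kind = GetHoleyElementsKind(kind);
        }
        std::printf("SingleArgument stub, kind %d\n", kind);
      } else {
        std::printf("NArguments stub, kind %d\n", kind);
      }
    }

    int main() { Dispatch(1, 5, FAST_ELEMENTS); }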
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
// -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, "Unexpected initial map for Array function");
__ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, "Unexpected initial map for Array function");
}
if (FLAG_optimize_constructed_arrays) {
// Figure out the right elements kind
__ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
__ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ and_(ecx, Map::kElementsKindMask);
__ shr(ecx, Map::kElementsKindShift);
if (FLAG_debug_code) {
Label done;
__ cmp(ecx, Immediate(FAST_ELEMENTS));
__ j(equal, &done);
__ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
__ Assert(equal,
"Invalid ElementsKind for InternalArray or InternalPackedArray");
__ bind(&done);
}
Label fast_elements_case;
__ cmp(ecx, Immediate(FAST_ELEMENTS));
__ j(equal, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
} else {
Label generic_constructor;
// Run the native code for the Array function called as constructor.
ArrayNativeCode(masm, true, &generic_constructor);
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
}
}
#undef __
} } // namespace v8::internal
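In the new InternalArrayConstructorStub::Generate above, the elements kind is read out of the map's "bit field 2", which is why the stub masks with Map::kElementsKindMask and shifts by Map::kElementsKindShift before comparing against FAST_ELEMENTS and FAST_HOLEY_ELEMENTS. What that mov/and/shr sequence computes, with assumed constants (the real values live in the Map class and vary by V8 version):

    #include <cstdint>
    #include <cstdio>

    // Assumed shift/mask layout; the real constants are
    // Map::kElementsKindShift and Map::kElementsKindMask.
    constexpr uint32_t kElementsKindShift = 3;
    constexpr uint32_t kElementsKindMask = 0x1fu << kElementsKindShift;

    uint32_t ElementsKindFromBitField2(uint32_t bit_field2) {
      return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
    }

    int main() {
      uint32_t bit_field2 = 2u << kElementsKindShift;  // say FAST_ELEMENTS
      std::printf("%u\n", ElementsKindFromBitField2(bit_field2));
    }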

44
deps/v8/src/ia32/codegen-ia32.cc

@@ -1057,50 +1057,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
}
void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
String::Encoding encoding,
Register string,
Register index,
Register value) {
if (FLAG_debug_code) {
__ test(index, Immediate(kSmiTagMask));
__ Check(zero, "Non-smi index");
__ test(value, Immediate(kSmiTagMask));
__ Check(zero, "Non-smi value");
__ cmp(index, FieldOperand(string, String::kLengthOffset));
__ Check(less, "Index is too large");
__ cmp(index, Immediate(Smi::FromInt(0)));
__ Check(greater_equal, "Index is negative");
__ push(value);
__ mov(value, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
__ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, "Unexpected string type");
__ pop(value);
}
__ SmiUntag(value);
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
if (encoding == String::ONE_BYTE_ENCODING) {
__ SmiUntag(index);
__ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
value);
} else {
// No need to untag a smi for two-byte addressing.
__ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
value);
}
}
static Operand ExpConstant(int index) {
return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
}

2
deps/v8/src/ia32/codegen-ia32.h

@@ -43,7 +43,7 @@ class CompilationInfo;
class CodeGenerator {
public:
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
static void MakeCodePrologue(CompilationInfo* info, const char* kind);
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,

2
deps/v8/src/ia32/deoptimizer-ia32.cc

@@ -118,7 +118,7 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
JSFunction* function) {
Isolate* isolate = function->GetIsolate();
HandleScope scope(isolate);
AssertNoAllocation no_allocation;
DisallowHeapAllocation nha;
ASSERT(function->IsOptimized());
ASSERT(function->FunctionsInFunctionListShareSameCode());

170
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -641,9 +641,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub, condition->test_id());
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, RelocInfo::CODE_TARGET, condition->test_id());
__ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
@@ -1034,9 +1033,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
// Get the object to enumerate over. Both SpiderMonkey and JSC
// ignore null and undefined in contrast to the specification; see
// ECMA-262 section 12.6.4.
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, &exit);
@@ -1199,6 +1197,64 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
Comment cmnt(masm_, "[ ForOfStatement");
SetStatementPosition(stmt);
Iteration loop_statement(this, stmt);
increment_loop_depth();
// var iterator = iterable[@@iterator]()
VisitForAccumulatorValue(stmt->assign_iterator());
// As with for-in, skip the loop if the iterator is null or undefined.
__ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
__ j(equal, loop_statement.break_label());
__ CompareRoot(eax, Heap::kNullValueRootIndex);
__ j(equal, loop_statement.break_label());
// Convert the iterator to a JS object.
Label convert, done_convert;
__ JumpIfSmi(eax, &convert);
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &done_convert);
__ bind(&convert);
__ push(eax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
// Loop entry.
__ bind(loop_statement.continue_label());
// result = iterator.next()
VisitForEffect(stmt->next_result());
// if (result.done) break;
Label result_not_done;
VisitForControl(stmt->result_done(),
loop_statement.break_label(),
&result_not_done,
&result_not_done);
__ bind(&result_not_done);
// each = result.value
VisitForEffect(stmt->assign_each());
// Generate code for the body of the loop.
Visit(stmt->body());
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
__ jmp(loop_statement.continue_label());
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(loop_statement.break_label());
decrement_loop_depth();
}
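The new VisitForOfStatement lowers for-of onto the ES6 iterator protocol, exactly as its comments narrate: fetch iterable[@@iterator](), skip the loop entirely for null or undefined, coerce the iterator to an object, then repeatedly call next() until result.done. The loop shape over a toy iterator:

    #include <cstdio>

    // Toy iterator protocol mirroring the lowering above; V8 operates on
    // JS objects and IC-driven property loads, not C++ structs.
    struct IterResult { bool done; int value; };

    struct Iterator {
      int i = 0, limit = 3;
      IterResult next() { return {i >= limit, i++}; }
    };

    int main() {
      Iterator it;                      // var iterator = iterable[@@iterator]()
      for (;;) {
        IterResult result = it.next();  // result = iterator.next()
        if (result.done) break;         // if (result.done) break;
        int each = result.value;        // each = result.value
        std::printf("%d\n", each);      // loop body
      }
    }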
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
@@ -1932,10 +1988,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 1 * kPointerSize] iter
// [sp + 0 * kPointerSize] g
Label l_catch, l_try, l_resume, l_send, l_call, l_loop;
Label l_catch, l_try, l_resume, l_next, l_call, l_loop;
// Initial send value is undefined.
__ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&l_send);
__ jmp(&l_next);
// catch (e) { receiver = iter; f = iter.throw; arg = e; goto l_call; }
__ bind(&l_catch);
@@ -1964,14 +2020,14 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_resume); // received in eax
__ PopTryHandler();
// receiver = iter; f = iter.send; arg = received;
__ bind(&l_send);
// receiver = iter; f = iter.next; arg = received;
__ bind(&l_next);
__ mov(edx, Operand(esp, 1 * kPointerSize)); // iter
__ push(edx); // iter
__ push(eax); // received
__ mov(ecx, isolate()->factory()->send_string()); // "send"
Handle<Code> send_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(send_ic); // iter.send in eax
__ mov(ecx, isolate()->factory()->next_string()); // "next"
Handle<Code> next_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(next_ic); // iter.next in eax
// result = f.call(receiver, arg);
__ bind(&l_call);
@@ -2003,9 +2059,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(ecx, isolate()->factory()->done_string()); // "done"
Handle<Code> done_ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(done_ic); // result.done in eax
ToBooleanStub stub(eax);
__ push(eax);
__ CallStub(&stub);
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
__ test(eax, eax);
__ j(zero, &l_try);
@@ -2074,7 +2129,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
if (resume_mode == JSGeneratorObject::SEND) {
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
__ cmp(edx, Immediate(0));
__ j(not_zero, &slow_resume);
@@ -2925,7 +2980,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Check for fast case object. Return false for slow case objects.
__ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
__ cmp(ecx, FACTORY->hash_table_map());
__ cmp(ecx, isolate()->factory()->hash_table_map());
__ j(equal, if_false);
// Look for valueOf string in the descriptor array, and indicate false if
@@ -2954,7 +3009,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, FieldOperand(ebx, 0));
__ cmp(edx, FACTORY->value_of_string());
__ cmp(edx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
__ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
@@ -3373,19 +3428,57 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
void FullCodeGenerator::EmitSeqStringSetCharCheck(Register string,
Register index,
Register value,
uint32_t encoding_mask) {
__ test(index, Immediate(kSmiTagMask));
__ Check(zero, "Non-smi index");
__ test(value, Immediate(kSmiTagMask));
__ Check(zero, "Non-smi value");
__ cmp(index, FieldOperand(string, String::kLengthOffset));
__ Check(less, "Index is too large");
__ cmp(index, Immediate(Smi::FromInt(0)));
__ Check(greater_equal, "Index is negative");
__ push(value);
__ mov(value, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
__ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
__ cmp(value, Immediate(encoding_mask));
__ Check(equal, "Unexpected string type");
__ pop(value);
}
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
Register string = eax;
Register index = ebx;
Register value = ecx;
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
__ pop(ecx);
__ pop(ebx);
__ pop(value);
__ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
context()->Plug(eax);
if (FLAG_debug_code) {
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
}
__ SmiUntag(value);
__ SmiUntag(index);
__ mov_b(FieldOperand(string, index, times_1, SeqOneByteString::kHeaderSize),
value);
context()->Plug(string);
}
@@ -3393,15 +3486,26 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
Register string = eax;
Register index = ebx;
Register value = ecx;
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
__ pop(ecx);
__ pop(ebx);
__ pop(value);
__ pop(index);
VisitForAccumulatorValue(args->at(0)); // string
static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
context()->Plug(eax);
if (FLAG_debug_code) {
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
}
__ SmiUntag(value);
// No need to untag a smi for two-byte addressing.
__ mov_w(FieldOperand(string, index, times_1, SeqTwoByteString::kHeaderSize),
value);
context()->Plug(string);
}
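Both setters untag the value, but only the one-byte variant untags the index. With kSmiTag == 0 and kSmiTagSize == 1, a tagged smi n is physically n << 1, which is already the byte offset of element n in a two-byte string, hence the "No need to untag a smi for two-byte addressing" comment. Demonstrated:

    #include <cassert>
    #include <cstdint>

    // With a one-bit, all-zero smi tag, tagging is a left shift by one.
    constexpr int kSmiTagSize = 1;
    int32_t SmiTag(int32_t n) { return n << kSmiTagSize; }
    int32_t SmiUntag(int32_t s) { return s >> kSmiTagSize; }

    int main() {
      int32_t index = 5;
      int32_t tagged = SmiTag(index);
      assert(SmiUntag(tagged) == index);  // one-byte: untag, offset = i
      assert(tagged == index * 2);        // two-byte: tagged value is 2 * i
      return 0;
    }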
@@ -4664,18 +4768,14 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
EqualityKind kind = expr->op() == Token::EQ_STRICT
? kStrictEquality : kNonStrictEquality;
Handle<Object> nil_value = nil == kNullValue
? isolate()->factory()->null_value()
: isolate()->factory()->undefined_value();
if (kind == kStrictEquality) {
if (expr->op() == Token::EQ_STRICT) {
__ cmp(eax, nil_value);
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
kNonStrictEquality,
nil);
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
__ test(eax, eax);
Split(not_zero, if_true, if_false, fall_through);

24
deps/v8/src/ia32/ic-ia32.cc

@@ -92,7 +92,8 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
__ j(not_zero, miss);
__ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
__ CheckMap(r0, FACTORY->hash_table_map(), miss, DONT_DO_SMI_CHECK);
__ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss,
DONT_DO_SMI_CHECK);
}
@@ -270,7 +271,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
if (not_fast_array != NULL) {
// Check that the object is in fast mode and writable.
__ CheckMap(scratch,
FACTORY->fixed_array_map(),
masm->isolate()->factory()->fixed_array_map(),
not_fast_array,
DONT_DO_SMI_CHECK);
} else {
@@ -282,7 +283,7 @@
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
__ cmp(scratch, Immediate(FACTORY->the_hole_value()));
__ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
@@ -1353,6 +1354,23 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
__ pop(ebx);
__ push(edx); // receiver
__ push(ecx); // name
__ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
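The new LoadIC::GenerateRuntimeGetProperty uses the standard ia32 tail-call idiom: pop the stub's own return address, push the runtime arguments, push the return address back on top, then TailCallRuntime, so Runtime::kGetProperty returns directly to the original caller. A toy model of the shuffle:

    #include <cstdio>
    #include <vector>

    // Toy model of the return-address shuffle: afterwards the runtime
    // function sees [receiver, name] under the caller's return address.
    int main() {
      std::vector<const char*> stack = {"return address"};
      const char* ret = stack.back();
      stack.pop_back();                 // __ pop(ebx)
      stack.push_back("receiver");      // __ push(edx)
      stack.push_back("name");          // __ push(ecx)
      stack.push_back(ret);             // __ push(ebx)
      for (const char* s : stack) std::printf("%s\n", s);
    }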
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- ecx : key

Some files were not shown because too many files changed in this diff
