
deps: downgrade v8 to 3.14.5

V8 3.15 and newer have stability and performance issues. Roll back to
a known-good version.
v0.9.11-release
Ben Noordhuis, 12 years ago
commit b15a10e7a0
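
A quick way for an embedder (or for node itself) to confirm which V8 it ends up linking against after this rollback is to print the runtime version string. This is a minimal sketch, not part of the commit, using v8::V8::GetVersion(), the same call the sample shell below uses:

    #include <cstdio>
    #include <v8.h>

    int main() {
      // Expected to report a 3.14.5.x version string once this downgrade lands.
      std::printf("V8 version %s\n", v8::V8::GetVersion());
      return 0;
    }
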
Changed files (changed-line count in parentheses):
  1. deps/v8/.gitignore (5)
  2. deps/v8/AUTHORS (3)
  3. deps/v8/ChangeLog (158)
  4. deps/v8/build/android.gypi (9)
  5. deps/v8/build/common.gypi (52)
  6. deps/v8/include/v8-profiler.h (17)
  7. deps/v8/include/v8.h (626)
  8. deps/v8/samples/shell.cc (27)
  9. deps/v8/src/accessors.cc (104)
  10. deps/v8/src/api.cc (380)
  11. deps/v8/src/api.h (6)
  12. deps/v8/src/arm/assembler-arm-inl.h (31)
  13. deps/v8/src/arm/assembler-arm.cc (71)
  14. deps/v8/src/arm/assembler-arm.h (54)
  15. deps/v8/src/arm/builtins-arm.cc (33)
  16. deps/v8/src/arm/code-stubs-arm.cc (712)
  17. deps/v8/src/arm/code-stubs-arm.h (123)
  18. deps/v8/src/arm/codegen-arm.cc (256)
  19. deps/v8/src/arm/codegen-arm.h (16)
  20. deps/v8/src/arm/constants-arm.h (15)
  21. deps/v8/src/arm/deoptimizer-arm.cc (22)
  22. deps/v8/src/arm/disasm-arm.cc (12)
  23. deps/v8/src/arm/full-codegen-arm.cc (184)
  24. deps/v8/src/arm/ic-arm.cc (38)
  25. deps/v8/src/arm/lithium-arm.cc (235)
  26. deps/v8/src/arm/lithium-arm.h (213)
  27. deps/v8/src/arm/lithium-codegen-arm.cc (563)
  28. deps/v8/src/arm/lithium-codegen-arm.h (6)
  29. deps/v8/src/arm/macro-assembler-arm.cc (86)
  30. deps/v8/src/arm/macro-assembler-arm.h (30)
  31. deps/v8/src/arm/regexp-macro-assembler-arm.cc (4)
  32. deps/v8/src/arm/simulator-arm.cc (112)
  33. deps/v8/src/arm/simulator-arm.h (4)
  34. deps/v8/src/arm/stub-cache-arm.cc (122)
  35. deps/v8/src/array.js (18)
  36. deps/v8/src/assembler.cc (187)
  37. deps/v8/src/assembler.h (75)
  38. deps/v8/src/ast.cc (20)
  39. deps/v8/src/ast.h (38)
  40. deps/v8/src/atomicops.h (4)
  41. deps/v8/src/atomicops_internals_tsan.h (335)
  42. deps/v8/src/bootstrapper.cc (23)
  43. deps/v8/src/bootstrapper.h (2)
  44. deps/v8/src/builtins.cc (592)
  45. deps/v8/src/builtins.h (31)
  46. deps/v8/src/code-stubs.cc (181)
  47. deps/v8/src/code-stubs.h (258)
  48. deps/v8/src/codegen.cc (1)
  49. deps/v8/src/codegen.h (14)
  50. deps/v8/src/collection.js (46)
  51. deps/v8/src/compilation-cache.cc (2)
  52. deps/v8/src/compiler.cc (103)
  53. deps/v8/src/compiler.h (29)
  54. deps/v8/src/contexts.cc (25)
  55. deps/v8/src/contexts.h (24)
  56. deps/v8/src/counters.cc (7)
  57. deps/v8/src/d8.cc (347)
  58. deps/v8/src/d8.h (28)
  59. deps/v8/src/date.js (2)
  60. deps/v8/src/debug-debugger.js (119)
  61. deps/v8/src/debug.cc (10)
  62. deps/v8/src/deoptimizer.cc (110)
  63. deps/v8/src/deoptimizer.h (29)
  64. deps/v8/src/elements-kind.cc (9)
  65. deps/v8/src/elements-kind.h (8)
  66. deps/v8/src/elements.cc (526)
  67. deps/v8/src/elements.h (43)
  68. deps/v8/src/execution.cc (30)
  69. deps/v8/src/execution.h (7)
  70. deps/v8/src/extensions/externalize-string-extension.cc (5)
  71. deps/v8/src/extensions/gc-extension.cc (4)
  72. deps/v8/src/factory.cc (31)
  73. deps/v8/src/factory.h (9)
  74. deps/v8/src/flag-definitions.h (29)
  75. deps/v8/src/frames.cc (4)
  76. deps/v8/src/full-codegen.cc (208)
  77. deps/v8/src/full-codegen.h (19)
  78. deps/v8/src/global-handles.cc (91)
  79. deps/v8/src/global-handles.h (20)
  80. deps/v8/src/handles.cc (29)
  81. deps/v8/src/handles.h (11)
  82. deps/v8/src/heap-inl.h (14)
  83. deps/v8/src/heap-profiler.cc (35)
  84. deps/v8/src/heap-profiler.h (24)
  85. deps/v8/src/heap.cc (433)
  86. deps/v8/src/heap.h (58)
  87. deps/v8/src/hydrogen-instructions.cc (492)
  88. deps/v8/src/hydrogen-instructions.h (666)
  89. deps/v8/src/hydrogen.cc (904)
  90. deps/v8/src/hydrogen.h (72)
  91. deps/v8/src/ia32/assembler-ia32-inl.h (32)
  92. deps/v8/src/ia32/assembler-ia32.cc (107)
  93. deps/v8/src/ia32/assembler-ia32.h (35)
  94. deps/v8/src/ia32/builtins-ia32.cc (36)
  95. deps/v8/src/ia32/code-stubs-ia32.cc (628)
  96. deps/v8/src/ia32/code-stubs-ia32.h (90)
  97. deps/v8/src/ia32/codegen-ia32.cc (203)
  98. deps/v8/src/ia32/codegen-ia32.h (14)
  99. deps/v8/src/ia32/deoptimizer-ia32.cc (20)
  100. deps/v8/src/ia32/disasm-ia32.cc (9)

deps/v8/.gitignore (5)

@ -18,7 +18,6 @@
#*#
*~
.cpplint-cache
.d8_history
d8
d8_g
shell
@ -51,7 +50,3 @@ shell_g
/xcodebuild
TAGS
*.Makefile
GTAGS
GRTAGS
GSYMS
GPATH

deps/v8/AUTHORS (3)

@ -20,7 +20,6 @@ Burcu Dogan <burcujdogan@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
Derek J Conrod <dconrod@codeaurora.org>
Dineel D Sule <dsule@codeaurora.org>
Erich Ocean <erich.ocean@me.com>
Fedor Indutny <fedor@indutny.com>
@ -45,7 +44,6 @@ Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Rafal Krypa <rafal@krypa.net>
Rajeev R Krithivasan <rkrithiv@codeaurora.org>
Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com>
@ -55,7 +53,6 @@ Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
Vlad Burlik <vladbph@gmail.com>
Xi Qian <xi.qian@intel.com>
Yuqiang Xian <yuqiang.xian@intel.com>
Zaheer Ahmad <zahmad@codeaurora.org>
Zhongping Wang <kewpie.w.zp@gmail.com>

deps/v8/ChangeLog (158)

@ -1,161 +1,3 @@
2012-12-10: Version 3.15.11
Define CAN_USE_VFP2/3_INSTRUCTIONS based on arm_neon and arm_fpu GYP
flags.
Performance and stability improvements on all platforms.
2012-12-07: Version 3.15.10
Enabled optimisation of functions inside eval. (issue 2315)
Fixed spec violations in methods of Number.prototype. (issue 2443)
Added GCTracer metrics for a scavenger GC for DOM wrappers.
Performance and stability improvements on all platforms.
2012-12-06: Version 3.15.9
Fixed candidate eviction in code flusher.
(Chromium issue 159140)
Iterate through all arguments for side effects in Math.min/max.
(issue 2444)
Fixed spec violations related to regexp.lastIndex
(issue 2437, issue 2438)
Performance and stability improvements on all platforms.
2012-12-04: Version 3.15.8
Enforced stack allocation of TryCatch blocks.
(issue 2166,chromium:152389)
Fixed external exceptions in external try-catch handlers.
(issue 2166)
Activated incremental code flushing by default.
Performance and stability improvements on all platforms.
2012-11-30: Version 3.15.7
Activated code aging by default.
Included more information in --prof log.
Removed eager sweeping for lazy swept spaces. Try to find in
SlowAllocateRaw a bounded number of times a big enough memory slot.
(issue 2194)
Performance and stability improvements on all platforms.
2012-11-26: Version 3.15.6
Ensure double arrays are filled with holes when extended from
variations of empty arrays. (Chromium issue 162085)
Performance and stability improvements on all platforms.
2012-11-23: Version 3.15.5
Fixed JSON.stringify for objects with interceptor handlers.
(Chromium issue 161028)
Fixed corner case in x64 compare stubs. (issue 2416)
Performance and stability improvements on all platforms.
2012-11-16: Version 3.15.4
Fixed Array.prototype.join evaluation order. (issue 2263)
Perform CPU sampling by CPU sampling thread only iff processing thread
is not running. (issue 2364)
When using an Object as a set in Object.getOwnPropertyNames, null out
the proto. (issue 2410)
Disabled EXTRA_CHECKS in Release build.
Heap explorer: Show representation of strings.
Removed 'type' and 'arguments' properties from Error object.
(issue 2397)
Added atomics implementation for ThreadSanitizer v2.
(Chromium issue 128314)
Fixed LiveEdit crashes when object/array literal is added. (issue 2368)
Performance and stability improvements on all platforms.
2012-11-13: Version 3.15.3
Changed sample shell to send non-JS output (e.g. errors) to stderr
instead of stdout.
Correctly check for stack overflow even when interrupt is pending.
(issue 214)
Collect stack trace on stack overflow. (issue 2394)
Performance and stability improvements on all platforms.
2012-11-12: Version 3.15.2
Function::GetScriptOrigin supplies sourceURL when script name is
not available. (Chromium issue 159413)
Made formatting error message side-effect-free. (issue 2398)
Fixed length check in JSON.stringify. (Chromium issue 160010)
ES6: Added support for Set and Map clear method (issue 2400)
Fixed slack tracking when instance prototype changes.
(Chromium issue 157019)
Fixed disabling of code flusher while marking. (Chromium issue 159140)
Added a test case for object grouping in a scavenger GC (issue 2077)
Support shared library build of Android for v8.
(Chromium issue 158821)
ES6: Added support for size to Set and Map (issue 2395)
Performance and stability improvements on all platforms.
2012-11-06: Version 3.15.1
Put incremental code flushing behind a flag. (Chromium issue 159140)
Performance and stability improvements on all platforms.
2012-10-31: Version 3.15.0
Loosened aligned code target requirement on ARM (issue 2380)
Fixed JSON.parse to treat leading zeros correctly.
(Chromium issue 158185)
Performance and stability improvements on all platforms.
2012-10-22: Version 3.14.5
Killed off the SCons based build.
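
The 3.15.8 entry above mentions that V8 began enforcing stack allocation of TryCatch blocks (issue 2166). Stack allocation is also the supported pattern on 3.14; the following is a minimal sketch under the pre-isolate-argument 3.14 API, with an illustrative script string:

    #include <cstdio>
    #include <v8.h>

    void RunWithCatch() {
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);
      v8::TryCatch try_catch;  // lives on the stack, never heap-allocated
      v8::Handle<v8::Script> script =
          v8::Script::Compile(v8::String::New("throw new Error('boom')"));
      if (!script.IsEmpty()) script->Run();
      if (try_catch.HasCaught()) {
        v8::String::Utf8Value error(try_catch.Exception());
        std::fprintf(stderr, "caught: %s\n", *error);
      }
      context.Dispose();
    }
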

deps/v8/build/android.gypi (9)

@ -122,6 +122,8 @@
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
@ -217,13 +219,6 @@
['_type=="shared_library"', {
'ldflags': [
'-Wl,-shared,-Bsymbolic',
'<(android_lib)/crtbegin_so.o',
],
}],
['_type=="static_library"', {
'ldflags': [
# Don't export symbols from statically linked libraries.
'-Wl,--exclude-libs=ALL',
],
}],
],

deps/v8/build/common.gypi (52)

@ -70,6 +70,9 @@
'v8_enable_disassembler%': 0,
# Enable extra checks in API functions and other strategic places.
'v8_enable_extra_checks%': 1,
'v8_enable_gdbjit%': 0,
'v8_object_print%': 0,
@ -111,6 +114,9 @@
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
@ -128,11 +134,6 @@
'V8_TARGET_ARCH_ARM',
],
'conditions': [
['armv7==1', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
}],
[ 'v8_can_use_unaligned_accesses=="true"', {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=1',
@ -143,16 +144,12 @@
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
# NEON implies VFP3 and VFP3 implies VFP2.
[ 'v8_can_use_vfp2_instructions=="true" or arm_neon==1 or \
arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
[ 'v8_can_use_vfp2_instructions=="true"', {
'defines': [
'CAN_USE_VFP2_INSTRUCTIONS',
],
}],
# NEON implies VFP3.
[ 'v8_can_use_vfp3_instructions=="true" or arm_neon==1 or \
arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
[ 'v8_can_use_vfp3_instructions=="true"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
@ -160,7 +157,7 @@
[ 'v8_use_arm_eabi_hardfloat=="true"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
'CAN_USE_VFP2_INSTRUCTIONS',
'CAN_USE_VFP3_INSTRUCTIONS',
],
'target_conditions': [
['_toolset=="target"', {
@ -202,12 +199,11 @@
}],
['mips_arch_variant=="mips32r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
['mips_arch_variant=="mips32r1"', {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
['mips_arch_variant=="loongson"', {
'cflags': ['-mips3', '-Wa,-mips3'],
}, {
'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
}],
@ -334,9 +330,6 @@
], # conditions
'configurations': {
'Debug': {
'variables': {
'v8_enable_extra_checks%': 1,
},
'defines': [
'DEBUG',
'ENABLE_DISASSEMBLER',
@ -361,9 +354,6 @@
},
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
@ -382,23 +372,21 @@
}],
],
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '0', # -O0
},
}],
],
}, # Debug
'Release': {
'variables': {
'v8_enable_extra_checks%': 0,
},
'conditions': [
['v8_enable_extra_checks==1', {
'defines': ['ENABLE_EXTRA_CHECKS',],
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
'cflags!': [
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
'-O3',
],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [

deps/v8/include/v8-profiler.h (17)

@ -406,20 +406,6 @@ class V8EXPORT HeapProfiler {
*/
static const SnapshotObjectId kUnknownObjectId = 0;
/**
* Callback interface for retrieving user friendly names of global objects.
*/
class ObjectNameResolver {
public:
/**
* Returns name to be used in the heap snapshot for given node. Returned
* string must stay alive until snapshot collection is completed.
*/
virtual const char* GetName(Handle<Object> object) = 0;
protected:
virtual ~ObjectNameResolver() {}
};
/**
* Takes a heap snapshot and returns it. Title may be an empty string.
* See HeapSnapshot::Type for types description.
@ -427,8 +413,7 @@ class V8EXPORT HeapProfiler {
static const HeapSnapshot* TakeSnapshot(
Handle<String> title,
HeapSnapshot::Type type = HeapSnapshot::kFull,
ActivityControl* control = NULL,
ObjectNameResolver* global_object_name_resolver = NULL);
ActivityControl* control = NULL);
/**
* Starts tracking of heap objects population statistics. After calling
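
With the ObjectNameResolver parameter gone, the 3.14 declaration restored above takes at most three arguments, so a typical call can rely on the defaults. A minimal sketch, assuming an already-initialized embedder; the snapshot title is illustrative:

    v8::HandleScope handle_scope;
    const v8::HeapSnapshot* snapshot =
        v8::HeapProfiler::TakeSnapshot(v8::String::New("after-startup"));
    // type defaults to HeapSnapshot::kFull and control to NULL, matching the
    // three-argument signature above.
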

deps/v8/include/v8.h (626)

File diff suppressed because it is too large

deps/v8/samples/shell.cc (27)

@ -72,7 +72,7 @@ int main(int argc, char* argv[]) {
v8::HandleScope handle_scope;
v8::Persistent<v8::Context> context = CreateShellContext();
if (context.IsEmpty()) {
fprintf(stderr, "Error creating context\n");
printf("Error creating context\n");
return 1;
}
context->Enter();
@ -226,8 +226,7 @@ int RunMain(int argc, char* argv[]) {
// alone JavaScript engines.
continue;
} else if (strncmp(str, "--", 2) == 0) {
fprintf(stderr,
"Warning: unknown flag %s.\nTry --help for options\n", str);
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
// Execute argument given to -e option directly.
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
@ -238,7 +237,7 @@ int RunMain(int argc, char* argv[]) {
v8::Handle<v8::String> file_name = v8::String::New(str);
v8::Handle<v8::String> source = ReadFile(str);
if (source.IsEmpty()) {
fprintf(stderr, "Error reading '%s'\n", str);
printf("Error reading '%s'\n", str);
continue;
}
if (!ExecuteString(source, file_name, false, true)) return 1;
@ -250,20 +249,20 @@ int RunMain(int argc, char* argv[]) {
// The read-eval-execute loop of the shell.
void RunShell(v8::Handle<v8::Context> context) {
fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion());
printf("V8 version %s [sample shell]\n", v8::V8::GetVersion());
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
v8::Local<v8::String> name(v8::String::New("(shell)"));
while (true) {
char buffer[kBufferSize];
fprintf(stderr, "> ");
printf("> ");
char* str = fgets(buffer, kBufferSize, stdin);
if (str == NULL) break;
v8::HandleScope handle_scope;
ExecuteString(v8::String::New(str), name, true, true);
}
fprintf(stderr, "\n");
printf("\n");
}
@ -311,31 +310,31 @@ void ReportException(v8::TryCatch* try_catch) {
if (message.IsEmpty()) {
// V8 didn't provide any extra information about this error; just
// print the exception.
fprintf(stderr, "%s\n", exception_string);
printf("%s\n", exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptResourceName());
const char* filename_string = ToCString(filename);
int linenum = message->GetLineNumber();
fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);
printf("%s:%i: %s\n", filename_string, linenum, exception_string);
// Print line of source code.
v8::String::Utf8Value sourceline(message->GetSourceLine());
const char* sourceline_string = ToCString(sourceline);
fprintf(stderr, "%s\n", sourceline_string);
printf("%s\n", sourceline_string);
// Print wavy underline (GetUnderline is deprecated).
int start = message->GetStartColumn();
for (int i = 0; i < start; i++) {
fprintf(stderr, " ");
printf(" ");
}
int end = message->GetEndColumn();
for (int i = start; i < end; i++) {
fprintf(stderr, "^");
printf("^");
}
fprintf(stderr, "\n");
printf("\n");
v8::String::Utf8Value stack_trace(try_catch->StackTrace());
if (stack_trace.length() > 0) {
const char* stack_trace_string = ToCString(stack_trace);
fprintf(stderr, "%s\n", stack_trace_string);
printf("%s\n", stack_trace_string);
}
}
}

deps/v8/src/accessors.cc (104)

@ -112,7 +112,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
HandleScope scope(isolate);
// Protect raw pointers.
Handle<JSArray> array_handle(JSArray::cast(object), isolate);
Handle<JSObject> object_handle(object, isolate);
Handle<Object> value_handle(value, isolate);
bool has_exception;
@ -122,7 +122,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
return array_handle->SetElementsLength(*uint32_v);
return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
@ -465,46 +465,24 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
Object* value_raw,
Object* value,
void*) {
Isolate* isolate = object->GetIsolate();
Heap* heap = isolate->heap();
JSFunction* function_raw = FindInstanceOf<JSFunction>(object);
if (function_raw == NULL) return heap->undefined_value();
if (!function_raw->should_have_prototype()) {
Heap* heap = object->GetHeap();
JSFunction* function = FindInstanceOf<JSFunction>(object);
if (function == NULL) return heap->undefined_value();
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
value_raw,
value,
NONE);
}
HandleScope scope(isolate);
Handle<JSFunction> function(function_raw, isolate);
Handle<Object> value(value_raw, isolate);
Handle<Object> old_value;
bool is_observed =
FLAG_harmony_observation &&
*function == object &&
function->map()->is_observed();
if (is_observed) {
if (function->has_prototype())
old_value = handle(function->prototype(), isolate);
else
old_value = isolate->factory()->NewFunctionPrototype(function);
}
Handle<Object> result;
MaybeObject* maybe_result = function->SetPrototype(*value);
if (!maybe_result->ToHandle(&result, isolate)) return maybe_result;
ASSERT(function->prototype() == *value);
if (is_observed && !old_value->SameValue(*value)) {
JSObject::EnqueueChangeRecord(
function, "updated", isolate->factory()->prototype_symbol(), old_value);
Object* prototype;
{ MaybeObject* maybe_prototype = function->SetPrototype(value);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
return *function;
ASSERT(function->prototype() == value);
return function;
}
@ -671,6 +649,19 @@ const AccessorDescriptor Accessors::FunctionArguments = {
//
static MaybeObject* CheckNonStrictCallerOrThrow(
Isolate* isolate,
JSFunction* caller) {
DisableAssertNoAllocation enable_allocation;
if (!caller->shared()->is_classic_mode()) {
return isolate->Throw(
*isolate->factory()->NewTypeError("strict_caller",
HandleVector<Object>(NULL, 0)));
}
return caller;
}
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
@ -757,14 +748,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
if (caller->shared()->bound()) {
return isolate->heap()->null_value();
}
// Censor if the caller is not a classic mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
if (!caller->shared()->is_classic_mode()) {
return isolate->heap()->null_value();
}
return caller;
return CheckNonStrictCallerOrThrow(isolate, caller);
}
@ -780,7 +764,7 @@ const AccessorDescriptor Accessors::FunctionCaller = {
//
static inline Object* GetPrototypeSkipHiddenPrototypes(Object* receiver) {
MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
Object* current = receiver->GetPrototype();
while (current->IsJSObject() &&
JSObject::cast(current)->map()->is_hidden_prototype()) {
@ -790,36 +774,12 @@ static inline Object* GetPrototypeSkipHiddenPrototypes(Object* receiver) {
}
MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
return GetPrototypeSkipHiddenPrototypes(receiver);
}
MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw,
Object* value_raw,
MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver,
Object* value,
void*) {
const bool kSkipHiddenPrototypes = true;
const bool skip_hidden_prototypes = true;
// To be consistent with other Set functions, return the value.
if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed()))
return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes);
Isolate* isolate = receiver_raw->GetIsolate();
HandleScope scope(isolate);
Handle<JSObject> receiver(receiver_raw);
Handle<Object> value(value_raw);
Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(*receiver));
MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes);
Handle<Object> hresult;
if (!result->ToHandle(&hresult, isolate)) return result;
Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(*receiver));
if (!new_value->SameValue(*old_value)) {
JSObject::EnqueueChangeRecord(receiver, "prototype",
isolate->factory()->Proto_symbol(),
old_value);
}
return *hresult;
return receiver->SetPrototype(value, skip_hidden_prototypes);
}

deps/v8/src/api.cc (380)

@ -634,15 +634,6 @@ void V8::MakeWeak(i::Object** object, void* parameters,
}
void V8::MakeWeak(i::Isolate* isolate, i::Object** object,
void* parameters, WeakReferenceCallback callback) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "MakeWeak");
isolate->global_handles()->MakeWeak(object, parameters,
callback);
}
void V8::ClearWeak(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "ClearWeak");
@ -652,32 +643,11 @@ void V8::ClearWeak(i::Object** obj) {
void V8::MarkIndependent(i::Object** object) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "MarkIndependent");
LOG_API(isolate, "MakeIndependent");
isolate->global_handles()->MarkIndependent(object);
}
void V8::MarkIndependent(i::Isolate* isolate, i::Object** object) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "MarkIndependent");
isolate->global_handles()->MarkIndependent(object);
}
void V8::MarkPartiallyDependent(i::Object** object) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "MarkPartiallyDependent");
isolate->global_handles()->MarkPartiallyDependent(object);
}
void V8::MarkPartiallyDependent(i::Isolate* isolate, i::Object** object) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "MarkPartiallyDependent");
isolate->global_handles()->MarkPartiallyDependent(object);
}
bool V8::IsGlobalIndependent(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "IsGlobalIndependent");
@ -686,14 +656,6 @@ bool V8::IsGlobalIndependent(i::Object** obj) {
}
bool V8::IsGlobalIndependent(i::Isolate* isolate, i::Object** obj) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "IsGlobalIndependent");
if (!isolate->IsInitialized()) return false;
return i::GlobalHandles::IsIndependent(obj);
}
bool V8::IsGlobalNearDeath(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "IsGlobalNearDeath");
@ -710,14 +672,6 @@ bool V8::IsGlobalWeak(i::Object** obj) {
}
bool V8::IsGlobalWeak(i::Isolate* isolate, i::Object** obj) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "IsGlobalWeak");
if (!isolate->IsInitialized()) return false;
return i::GlobalHandles::IsWeak(obj);
}
void V8::DisposeGlobal(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "DisposeGlobal");
@ -725,14 +679,6 @@ void V8::DisposeGlobal(i::Object** obj) {
isolate->global_handles()->Destroy(obj);
}
void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "DisposeGlobal");
if (!isolate->IsInitialized()) return;
isolate->global_handles()->Destroy(obj);
}
// --- H a n d l e s ---
@ -786,12 +732,6 @@ i::Object** HandleScope::CreateHandle(i::Object* value) {
}
i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
ASSERT(isolate == i::Isolate::Current());
return i::HandleScope::CreateHandle(value, isolate);
}
i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
ASSERT(value->IsHeapObject());
return reinterpret_cast<i::Object**>(
@ -833,77 +773,33 @@ void Context::Exit() {
}
static void* DecodeSmiToAligned(i::Object* value, const char* location) {
ApiCheck(value->IsSmi(), location, "Not a Smi");
return reinterpret_cast<void*>(value);
}
static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
i::Smi* smi = reinterpret_cast<i::Smi*>(value);
ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
return smi;
void Context::SetData(v8::Handle<Value> data) {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
ASSERT(env->IsNativeContext());
if (env->IsNativeContext()) {
env->set_data(*raw_data);
}
}
static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
int index,
bool can_grow,
const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
bool ok = !IsDeadCheck(env->GetIsolate(), location) &&
ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
ApiCheck(index >= 0, location, "Negative index");
if (!ok) return i::Handle<i::FixedArray>();
i::Handle<i::FixedArray> data(env->embedder_data());
if (index < data->length()) return data;
if (!can_grow) {
Utils::ReportApiFailure(location, "Index too large");
return i::Handle<i::FixedArray>();
}
int new_size = i::Max(index, data->length() << 1) + 1;
data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size);
env->set_embedder_data(*data);
return data;
}
v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
const char* location = "v8::Context::GetEmbedderData()";
i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
if (data.is_null()) return Local<Value>();
i::Handle<i::Object> result(data->get(index), data->GetIsolate());
v8::Local<v8::Value> Context::GetData() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
return Local<Value>();
}
ASSERT(env->IsNativeContext());
if (!env->IsNativeContext()) {
return Local<Value>();
}
i::Handle<i::Object> result(env->data(), isolate);
return Utils::ToLocal(result);
}
void Context::SetEmbedderData(int index, v8::Handle<Value> value) {
const char* location = "v8::Context::SetEmbedderData()";
i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
if (data.is_null()) return;
i::Handle<i::Object> val = Utils::OpenHandle(*value);
data->set(index, *val);
ASSERT_EQ(*Utils::OpenHandle(*value),
*Utils::OpenHandle(*GetEmbedderData(index)));
}
void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
if (data.is_null()) return NULL;
return DecodeSmiToAligned(data->get(index), location);
}
void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
data->set(index, EncodeAlignedAsSmi(value, location));
ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index));
}
i::Object** v8::HandleScope::RawClose(i::Object** value) {
if (!ApiCheck(!is_closed_,
"v8::HandleScope::Close()",
@ -925,7 +821,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
}
// Allocate a new handle on the previous handle block.
i::Handle<i::Object> handle(result, isolate_);
i::Handle<i::Object> handle(result);
return handle.location();
}
@ -1260,7 +1156,7 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
void FunctionTemplate::ReadOnlyPrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) {
if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) {
return;
}
ENTER_V8(isolate);
@ -1704,8 +1600,6 @@ Local<Value> Script::Run() {
ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
LOG_API(isolate, "Script::Run");
ENTER_V8(isolate);
i::Logger::TimerEventScope timer_scope(
isolate, i::Logger::TimerEventScope::v8_execute);
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
@ -2304,7 +2198,7 @@ bool Value::IsExternal() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
return false;
}
return Utils::OpenHandle(this)->IsExternal();
return Utils::OpenHandle(this)->IsForeign();
}
@ -2378,11 +2272,7 @@ static i::Object* LookupBuiltin(i::Isolate* isolate,
static bool CheckConstructor(i::Isolate* isolate,
i::Handle<i::JSObject> obj,
const char* class_name) {
i::Object* constr = obj->map()->constructor();
if (!constr->IsJSFunction()) return false;
i::JSFunction* func = i::JSFunction::cast(constr);
return func->shared()->native() &&
constr == LookupBuiltin(isolate, class_name);
return obj->map()->constructor() == LookupBuiltin(isolate, class_name);
}
@ -2537,7 +2427,8 @@ Local<Integer> Value::ToInteger() const {
void External::CheckCast(v8::Value* that) {
if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
ApiCheck(Utils::OpenHandle(that)->IsExternal(),
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsForeign(),
"v8::External::Cast()",
"Could not convert to external");
}
@ -2882,7 +2773,6 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetProperty(
isolate,
self,
key_obj,
value_obj,
@ -3437,7 +3327,7 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol), isolate);
i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol));
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@ -3674,8 +3564,6 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
return Local<v8::Value>());
LOG_API(isolate, "Object::CallAsFunction");
ENTER_V8(isolate);
i::Logger::TimerEventScope timer_scope(
isolate, i::Logger::TimerEventScope::v8_execute);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
@ -3707,8 +3595,6 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
return Local<v8::Object>());
LOG_API(isolate, "Object::CallAsConstructor");
ENTER_V8(isolate);
i::Logger::TimerEventScope timer_scope(
isolate, i::Logger::TimerEventScope::v8_execute);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@ -3751,8 +3637,6 @@ Local<v8::Object> Function::NewInstance(int argc,
return Local<v8::Object>());
LOG_API(isolate, "Function::NewInstance");
ENTER_V8(isolate);
i::Logger::TimerEventScope timer_scope(
isolate, i::Logger::TimerEventScope::v8_execute);
HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@ -3771,8 +3655,6 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
LOG_API(isolate, "Function::Call");
ENTER_V8(isolate);
i::Logger::TimerEventScope timer_scope(
isolate, i::Logger::TimerEventScope::v8_execute);
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
@ -3816,9 +3698,8 @@ ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script);
v8::ScriptOrigin origin(
Utils::ToLocal(scriptName),
Utils::ToLocal(i::Handle<i::Object>(script->name())),
v8::Integer::New(script->line_offset()->value()),
v8::Integer::New(script->column_offset()->value()));
return origin;
@ -3881,7 +3762,7 @@ static int RecursivelySerializeToUtf8(i::String* string,
int32_t* last_character) {
int utf8_bytes = 0;
while (true) {
if (string->IsOneByteRepresentation()) {
if (string->IsAsciiRepresentation()) {
i::String::WriteToFlat(string, buffer, start, end);
*last_character = unibrow::Utf16::kNoPreviousCharacter;
return utf8_bytes + end - start;
@ -3981,7 +3862,7 @@ int String::WriteUtf8(char* buffer,
FlattenString(str); // Flatten the string for efficiency.
}
int string_length = str->length();
if (str->IsOneByteRepresentation()) {
if (str->IsAsciiRepresentation()) {
int len;
if (capacity == -1) {
capacity = str->length() + 1;
@ -4115,7 +3996,7 @@ int String::WriteAscii(char* buffer,
FlattenString(str); // Flatten the string for efficiency.
}
if (str->IsOneByteRepresentation()) {
if (str->IsAsciiRepresentation()) {
// WriteToFlat is faster than using the StringInputBuffer.
if (length == -1) length = str->length() + 1;
int len = i::Min(length, str->length() - start);
@ -4230,7 +4111,7 @@ void v8::String::VerifyExternalStringResourceBase(
expectedEncoding = TWO_BYTE_ENCODING;
} else {
expected = NULL;
expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
expectedEncoding = str->IsAsciiRepresentation() ? ASCII_ENCODING
: TWO_BYTE_ENCODING;
}
CHECK_EQ(expected, value);
@ -4310,65 +4191,75 @@ int v8::Object::InternalFieldCount() {
}
static bool InternalFieldOK(i::Handle<i::JSObject> obj,
int index,
const char* location) {
return !IsDeadCheck(obj->GetIsolate(), location) &&
ApiCheck(index < obj->GetInternalFieldCount(),
location,
"Internal field out of bounds");
}
Local<Value> v8::Object::SlowGetInternalField(int index) {
Local<Value> v8::Object::CheckedGetInternalField(int index) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetInternalField()";
if (!InternalFieldOK(obj, index, location)) return Local<Value>();
i::Handle<i::Object> value(obj->GetInternalField(index), obj->GetIsolate());
return Utils::ToLocal(value);
if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) {
return Local<Value>();
}
if (!ApiCheck(index < obj->GetInternalFieldCount(),
"v8::Object::GetInternalField()",
"Reading internal field out of bounds")) {
return Local<Value>();
}
i::Handle<i::Object> value(obj->GetInternalField(index));
Local<Value> result = Utils::ToLocal(value);
#ifdef DEBUG
Local<Value> unchecked = UncheckedGetInternalField(index);
ASSERT(unchecked.IsEmpty() || (unchecked == result));
#endif
return result;
}
void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
i::Isolate* isolate = obj->GetIsolate();
if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) {
return;
}
if (!ApiCheck(index < obj->GetInternalFieldCount(),
"v8::Object::SetInternalField()",
"Writing internal field out of bounds")) {
return;
}
ENTER_V8(isolate);
i::Handle<i::Object> val = Utils::OpenHandle(*value);
obj->SetInternalField(index, *val);
ASSERT_EQ(value, GetInternalField(index));
}
void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
if (!InternalFieldOK(obj, index, location)) return NULL;
return DecodeSmiToAligned(obj->GetInternalField(index), location);
}
void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::SetAlignedPointerInInternalField()";
if (!InternalFieldOK(obj, index, location)) return;
obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
ASSERT_EQ(value, GetAlignedPointerFromInternalField(index));
static bool CanBeEncodedAsSmi(void* ptr) {
const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
return ((address & i::kEncodablePointerMask) == 0);
}
static void* ExternalValue(i::Object* obj) {
// Obscure semantics for undefined, but somehow checked in our unit tests...
if (obj->IsUndefined()) return NULL;
i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0);
return i::Foreign::cast(foreign)->foreign_address();
static i::Smi* EncodeAsSmi(void* ptr) {
ASSERT(CanBeEncodedAsSmi(ptr));
const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
ASSERT(i::Internals::HasSmiTag(result));
ASSERT_EQ(result, i::Smi::FromInt(result->value()));
ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
return result;
}
void* Object::GetPointerFromInternalField(int index) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetPointerFromInternalField()";
if (!InternalFieldOK(obj, index, location)) return NULL;
return ExternalValue(obj->GetInternalField(index));
void v8::Object::SetPointerInInternalField(int index, void* value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
if (CanBeEncodedAsSmi(value)) {
Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
} else {
HandleScope scope;
i::Handle<i::Foreign> foreign =
isolate->factory()->NewForeign(
reinterpret_cast<i::Address>(value), i::TENURED);
if (!foreign.is_null()) {
Utils::OpenHandle(this)->SetInternalField(index, *foreign);
}
}
ASSERT_EQ(value, GetPointerFromInternalField(index));
}
@ -4423,7 +4314,6 @@ bool v8::V8::Dispose() {
HeapStatistics::HeapStatistics(): total_heap_size_(0),
total_heap_size_executable_(0),
total_physical_size_(0),
used_heap_size_(0),
heap_size_limit_(0) { }
@ -4433,7 +4323,6 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
// Isolate is unitialized thus heap is not configured yet.
heap_statistics->set_total_heap_size(0);
heap_statistics->set_total_heap_size_executable(0);
heap_statistics->set_total_physical_size(0);
heap_statistics->set_used_heap_size(0);
heap_statistics->set_heap_size_limit(0);
return;
@ -4443,7 +4332,6 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->set_total_heap_size(heap->CommittedMemory());
heap_statistics->set_total_heap_size_executable(
heap->CommittedMemoryExecutable());
heap_statistics->set_total_physical_size(heap->CommittedPhysicalMemory());
heap_statistics->set_used_heap_size(heap->SizeOfObjects());
heap_statistics->set_heap_size_limit(heap->MaxReserved());
}
@ -4680,14 +4568,13 @@ v8::Local<v8::Context> Context::GetCalling() {
v8::Local<v8::Object> Context::Global() {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Context::Global()")) {
if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
return Local<v8::Object>();
}
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
i::Handle<i::Object> global(context->global_proxy(), isolate);
i::Handle<i::Object> global(context->global_proxy());
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
@ -4808,20 +4695,74 @@ bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
}
Local<External> v8::External::New(void* value) {
STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
static Local<External> ExternalNewImpl(void* data) {
return Utils::ToLocal(FACTORY->NewForeign(static_cast<i::Address>(data)));
}
static void* ExternalValueImpl(i::Handle<i::Object> obj) {
return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
}
Local<Value> v8::External::Wrap(void* data) {
i::Isolate* isolate = i::Isolate::Current();
STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
LOG_API(isolate, "External::Wrap");
ENTER_V8(isolate);
v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
: v8::Local<v8::Value>(ExternalNewImpl(data));
ASSERT_EQ(data, Unwrap(result));
return result;
}
void* v8::Object::SlowGetPointerFromInternalField(int index) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Object* value = obj->GetInternalField(index);
if (value->IsSmi()) {
return i::Internals::GetExternalPointerFromSmi(value);
} else if (value->IsForeign()) {
return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
} else {
return NULL;
}
}
void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
void* result;
if (obj->IsSmi()) {
result = i::Internals::GetExternalPointerFromSmi(*obj);
} else if (obj->IsForeign()) {
result = ExternalValueImpl(obj);
} else {
result = NULL;
}
ASSERT_EQ(result, QuickUnwrap(wrapper));
return result;
}
Local<External> v8::External::New(void* data) {
STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::External::New()");
LOG_API(isolate, "External::New");
ENTER_V8(isolate);
i::Handle<i::JSObject> external = isolate->factory()->NewExternal(value);
return Utils::ExternalToLocal(external);
return ExternalNewImpl(data);
}
void* External::Value() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL;
return ExternalValue(*Utils::OpenHandle(this));
if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return ExternalValueImpl(obj);
}
@ -5391,6 +5332,13 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
SetAddHistogramSampleFunction(callback);
}
void V8::EnableSlidingStateWindow() {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return;
isolate->logger()->EnableSlidingStateWindow();
}
void V8::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
@ -5400,7 +5348,6 @@ void V8::SetFailedAccessCheckCallbackFunction(
isolate->SetFailedAccessCheckCallback(callback);
}
void V8::AddObjectGroup(Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info) {
@ -5412,19 +5359,6 @@ void V8::AddObjectGroup(Persistent<Value>* objects,
}
void V8::AddObjectGroup(Isolate* exportedIsolate,
Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exportedIsolate);
ASSERT(isolate == i::Isolate::Current());
if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
isolate->global_handles()->AddObjectGroup(
reinterpret_cast<i::Object***>(objects), length, info);
}
void V8::AddImplicitReferences(Persistent<Object> parent,
Persistent<Value>* children,
size_t length) {
@ -6435,8 +6369,7 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
HeapSnapshot::Type type,
ActivityControl* control,
ObjectNameResolver* resolver) {
ActivityControl* control) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
@ -6449,7 +6382,7 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
}
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::TakeSnapshot(
*Utils::OpenHandle(*title), internal_type, control, resolver));
*Utils::OpenHandle(*title), internal_type, control));
}
@ -6560,7 +6493,6 @@ void Testing::PrepareStressRun(int run) {
void Testing::DeoptimizeAll() {
i::HandleScope scope;
internal::Deoptimizer::DeoptimizeAll();
}
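
The api.cc hunks above also restore the single-slot context data accessors (Context::SetData/GetData) in place of the indexed embedder-data API that 3.15 introduced. A minimal sketch of the restored usage, assuming an already-initialized embedder; the string value is illustrative:

    v8::HandleScope handle_scope;
    v8::Persistent<v8::Context> context = v8::Context::New();
    context->SetData(v8::String::New("embedder-tag"));  // one data slot per context in 3.14
    v8::Handle<v8::Value> tag = context->GetData();     // reads the same slot back
    context.Dispose();
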

deps/v8/src/api.h (6)

@ -201,6 +201,8 @@ class Utils {
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<External> ToLocal(
v8::internal::Handle<v8::internal::Foreign> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@ -223,8 +225,6 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
static inline Local<External> ExternalToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
@ -268,6 +268,7 @@ MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, Foreign, External)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@ -279,7 +280,6 @@ MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
#undef MAKE_TO_LOCAL

deps/v8/src/arm/assembler-arm-inl.h (31)

@ -86,7 +86,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
Assembler::set_target_address_at(pc_, target);
Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(target) & ~3));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@ -165,24 +166,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
static const int kNoCodeAgeSequenceLength = 3;
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
Memory::Address_at(pc_ + Assembler::kInstrSize *
(kNoCodeAgeSequenceLength - 1)));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Memory::Address_at(pc_ + Assembler::kInstrSize *
(kNoCodeAgeSequenceLength - 1)) =
stub->instruction_start();
}
Address RelocInfo::call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
@ -256,8 +239,6 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@ -284,8 +265,6 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@ -494,12 +473,14 @@ void Assembler::set_target_pointer_at(Address pc, Address target) {
Address Assembler::target_address_at(Address pc) {
return target_pointer_at(pc);
return reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(target_pointer_at(pc)) & ~3);
}
void Assembler::set_target_address_at(Address pc, Address target) {
set_target_pointer_at(pc, target);
set_target_pointer_at(pc, reinterpret_cast<Address>(
reinterpret_cast<intptr_t>(target) & ~3));
}

deps/v8/src/arm/assembler-arm.cc (71)

@ -318,11 +318,46 @@ const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
if (isolate()->assembler_spare_buffer() != NULL) {
buffer = isolate()->assembler_spare_buffer();
isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
buffer_ = NewArray<byte>(buffer_size);
} else {
buffer_ = static_cast<byte*>(buffer);
}
buffer_size_ = buffer_size;
own_buffer_ = true;
} else {
// Use externally provided buffer instead.
ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
own_buffer_ = false;
}
// Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
num_pending_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
@ -335,6 +370,14 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
Assembler::~Assembler() {
ASSERT(const_pool_blocked_nesting_ == 0);
if (own_buffer_) {
if (isolate()->assembler_spare_buffer() == NULL &&
buffer_size_ == kMinimalBufferSize) {
isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
}
}
@ -2349,20 +2392,6 @@ void Assembler::vmul(const DwVfpRegister dst,
}
void Assembler::vmla(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-892.
// cond(31-28) | 11100(27-23) | D=?(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N=?(7) | op(6)=0 | M=?(5) | 0(4) |
// Vm(3-0)
unsigned x = (cond | 0x1C*B23 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
emit(x);
}
void Assembler::vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@ -2701,9 +2730,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
b(&after_pool);
}
// Put down constant pool marker "Undefined instruction".
emit(kConstantPoolMarker |
EncodeConstantPoolLength(num_pending_reloc_info_));
// Put down constant pool marker "Undefined instruction" as specified by
// A5.6 (ARMv7) Instruction set encoding.
emit(kConstantPoolMarker | num_pending_reloc_info_);
// Emit constant pool entries.
for (int i = 0; i < num_pending_reloc_info_; i++) {

deps/v8/src/arm/assembler-arm.h (54)

@ -647,7 +647,15 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
virtual ~Assembler();
~Assembler();
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM. This is needed by the full compiler so
// that it can recompile code with debug support and fix the PC.
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@ -1126,10 +1134,6 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vmla(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@ -1181,6 +1185,8 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
bool predictable_code_size() const { return predictable_code_size_; }
static bool use_immediate_embedded_pointer_loads(
const Assembler* assembler) {
#ifdef USE_BLX
@ -1276,6 +1282,8 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
int pc_offset() const { return pc_ - buffer_; }
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
@ -1321,8 +1329,6 @@ class Assembler : public AssemblerBase {
// and the accessed constant.
static const int kMaxDistToPool = 4*KB;
static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
STATIC_ASSERT((kConstantPoolLengthMaxMask & kMaxNumPendingRelocInfo) ==
kMaxNumPendingRelocInfo);
// Postpone the generation of the constant pool for the specified number of
// instructions.
@ -1337,6 +1343,8 @@ class Assembler : public AssemblerBase {
// the relocation info.
TypeFeedbackId recorded_ast_id_;
bool emit_debug_code() const { return emit_debug_code_; }
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos
@ -1378,6 +1386,13 @@ class Assembler : public AssemblerBase {
}
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
byte* buffer_;
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
int next_buffer_check_; // pc offset of next buffer check
// Code generation
@ -1386,6 +1401,7 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
byte* pc_; // the program counter; moves forward
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
@ -1479,6 +1495,10 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
bool predictable_code_size_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
@ -1492,6 +1512,26 @@ class EnsureSpace BASE_EMBEDDED {
};
class PredictableCodeSizeScope {
public:
explicit PredictableCodeSizeScope(Assembler* assembler)
: asm_(assembler) {
old_value_ = assembler->predictable_code_size();
assembler->set_predictable_code_size(true);
}
~PredictableCodeSizeScope() {
if (!old_value_) {
asm_->set_predictable_code_size(false);
}
}
private:
Assembler* asm_;
bool old_value_;
};
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_H_

deps/v8/src/arm/builtins-arm.cc (33)

@ -1226,39 +1226,6 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// The following registers must be saved and restored when calling through to
// the runtime:
// r0 - contains return address (beginning of patch sequence)
// r1 - function object
FrameScope scope(masm, StackFrame::MANUAL);
__ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ PrepareCallCFunction(1, 0, r1);
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 1);
__ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
__ mov(pc, r0);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
} \
void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{

712
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

123
deps/v8/src/arm/code-stubs-arm.h

@ -142,6 +142,108 @@ class UnaryOpStub: public CodeStub {
};
class BinaryOpStub: public CodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) {
use_vfp2_ = CpuFeatures::IsSupported(VFP2);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
BinaryOpStub(
int key,
BinaryOpIC::TypeInfo operands_type,
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_vfp2_(VFP2Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
Token::Value op_;
OverwriteMode mode_;
bool use_vfp2_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class VFP2Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| VFP2Bits::encode(use_vfp2_)
| OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_);
}
void Generate(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateSmiSmiOperation(MacroAssembler* masm);
void GenerateFPOperation(MacroAssembler* masm,
bool smi_operands,
Label* not_numbers,
Label* gc_required);
void GenerateSmiCode(MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm,
Register result,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* gc_required);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
};
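The "Minor key encoding in 16 bits RRRTTTVOOOOOOOMM" comment above maps one-to-one onto the BitField declarations in the class. A standalone sketch of that packing (using a simplified BitField stand-in and example values, not V8's real types or this diff's code):

#include <cstdint>
#include <cstdio>

// Simplified stand-in for V8's BitField<Type, shift, size>.
template <typename T, int kShift, int kSize>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> kShift) & ((1u << kSize) - 1));
  }
};

using ModeBits = BitField<int, 0, 2>;              // MM  : overwrite mode
using OpBits = BitField<int, 2, 7>;                // O*7 : token for the operator
using VFP2Bits = BitField<int, 9, 1>;              // V   : VFP2 supported?
using OperandTypeInfoBits = BitField<int, 10, 3>;  // TTT : operand type info
using ResultTypeInfoBits = BitField<int, 13, 3>;   // RRR : result type info

int main() {
  // Example values only; the real stub packs Token::Value / OverwriteMode enums.
  uint32_t key = OpBits::encode(13) | ModeBits::encode(1) |
                 VFP2Bits::encode(1) | OperandTypeInfoBits::encode(2) |
                 ResultTypeInfoBits::encode(3);
  std::printf("minor key = 0x%04x, op = %d, vfp2 = %d\n",
              key, OpBits::decode(key), VFP2Bits::decode(key));
  return 0;
}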
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@ -622,6 +724,20 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2);
// Loads objects from r0 and r1 (right and left in binary operations) into
// floating point registers. Depending on the destination the values end up
// either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
// floating point registers VFP3 must be supported. If core registers are
// requested when VFP3 is supported d6 and d7 will still be scratched. If
// either r0 or r1 is not a number (not smi and not heap number object) the
// not_number label is jumped to with r0 and r1 intact.
static void LoadOperands(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* not_number);
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
@ -720,12 +836,7 @@ class FloatingPointHelper : public AllStatic {
Register heap_number_result,
Register scratch);
// Loads the objects from |object| into floating point registers.
// Depending on |destination| the value ends up either in |dst| or
// in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
// must be supported. If kCoreRegisters are requested and VFP3 is
// supported, |dst| will be scratched. If |object| is neither smi nor
// heap number, |not_number| is jumped to with |object| still intact.
private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,

256
deps/v8/src/arm/codegen-arm.cc

@ -31,11 +31,11 @@
#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-arm.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@ -49,74 +49,6 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
#define __ masm.
#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
return Simulator::current(Isolate::Current())->CallFP(
fast_exp_arm_machine_code, x, 0);
}
#endif
UnaryMathFunction CreateExpFunction() {
if (!CpuFeatures::IsSupported(VFP2)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
{
CpuFeatures::Scope use_vfp(VFP2);
DoubleRegister input = d0;
DoubleRegister result = d1;
DoubleRegister double_scratch1 = d2;
DoubleRegister double_scratch2 = d3;
Register temp1 = r4;
Register temp2 = r5;
Register temp3 = r6;
if (masm.use_eabi_hardfloat()) {
// Input value is in d0 anyway, nothing to do.
} else {
__ vmov(input, r0, r1);
}
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(
&masm, input, result, double_scratch1, double_scratch2,
temp1, temp2, temp3);
__ Pop(temp3, temp2, temp1);
if (masm.use_eabi_hardfloat()) {
__ vmov(d0, result);
} else {
__ vmov(r0, r1, result);
}
__ Ret();
}
CodeDesc desc;
masm.GetCode(&desc);
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
fast_exp_arm_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
#undef __
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
@ -141,8 +73,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
@ -262,7 +192,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
r3,
r9,
kLRHasNotBeenSaved,
kLRHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@ -486,7 +416,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, &external_string);
// Prepare sequential strings
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ add(string,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@ -520,188 +450,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
String::Encoding encoding,
Register string,
Register index,
Register value) {
if (FLAG_debug_code) {
__ tst(index, Operand(kSmiTagMask));
__ Check(eq, "Non-smi index");
__ tst(value, Operand(kSmiTagMask));
__ Check(eq, "Non-smi value");
__ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
__ cmp(index, ip);
__ Check(lt, "Index is too large");
__ cmp(index, Operand(Smi::FromInt(0)));
__ Check(ge, "Index is negative");
__ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
__ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(eq, "Unexpected string type");
}
__ add(ip,
string,
Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ SmiUntag(value, value);
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
if (encoding == String::ONE_BYTE_ENCODING) {
// Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
__ strb(value, MemOperand(ip, index, LSR, 1));
} else {
// No need to untag a smi for two-byte addressing.
__ strh(value, MemOperand(ip, index));
}
}
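The strb/strh addressing above leans on the smi representation; as a quick standalone illustration of that untagging trick (a sketch, not part of the diff):

#include <cstdint>
#include <cstdio>

// On 32-bit V8 a small integer ("smi") is tagged by a left shift of one bit,
// with tag bit 0 == 0; untagging shifts right by one. That is why the one-byte
// store above can fold the untag into an LSR #1 addressing mode instead of
// untagging the index into a register first.
int32_t SmiTag(int32_t value) { return value << 1; }
int32_t SmiUntag(int32_t smi) { return smi >> 1; }

int main() {
  int32_t index_smi = SmiTag(42);
  std::printf("tagged = 0x%x, untagged = %d\n", index_smi, SmiUntag(index_smi));
  return 0;
}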
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3) {
ASSERT(!input.is(result));
ASSERT(!input.is(double_scratch1));
ASSERT(!input.is(double_scratch2));
ASSERT(!result.is(double_scratch1));
ASSERT(!result.is(double_scratch2));
ASSERT(!double_scratch1.is(double_scratch2));
ASSERT(!temp1.is(temp2));
ASSERT(!temp1.is(temp3));
ASSERT(!temp2.is(temp3));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
Label done;
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
__ vldr(double_scratch1, ExpConstant(0, temp3));
__ vmov(result, kDoubleRegZero);
__ VFPCompareAndSetFlags(double_scratch1, input);
__ b(ge, &done);
__ vldr(double_scratch2, ExpConstant(1, temp3));
__ VFPCompareAndSetFlags(input, double_scratch2);
__ vldr(result, ExpConstant(2, temp3));
__ b(ge, &done);
__ vldr(double_scratch1, ExpConstant(3, temp3));
__ vldr(result, ExpConstant(4, temp3));
__ vmul(double_scratch1, double_scratch1, input);
__ vadd(double_scratch1, double_scratch1, result);
__ vmov(temp2, temp1, double_scratch1);
__ vsub(double_scratch1, double_scratch1, result);
__ vldr(result, ExpConstant(6, temp3));
__ vldr(double_scratch2, ExpConstant(5, temp3));
__ vmul(double_scratch1, double_scratch1, double_scratch2);
__ vsub(double_scratch1, double_scratch1, input);
__ vsub(result, result, double_scratch1);
__ vmul(input, double_scratch1, double_scratch1);
__ vmul(result, result, input);
__ mov(temp1, Operand(temp2, LSR, 11));
__ vldr(double_scratch2, ExpConstant(7, temp3));
__ vmul(result, result, double_scratch2);
__ vsub(result, result, double_scratch1);
__ vldr(double_scratch2, ExpConstant(8, temp3));
__ vadd(result, result, double_scratch2);
__ movw(ip, 0x7ff);
__ and_(temp2, temp2, Operand(ip));
__ add(temp1, temp1, Operand(0x3ff));
__ mov(temp1, Operand(temp1, LSL, 20));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
__ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
__ add(temp3, temp3, Operand(kPointerSize));
__ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
__ orr(temp1, temp1, temp2);
__ vmov(input, ip, temp1);
__ vmul(result, result, input);
__ bind(&done);
}
#undef __
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
static byte* GetNoCodeAgeSequence(uint32_t* length) {
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found in FUNCTIONS
static bool initialized = false;
static uint32_t sequence[kNoCodeAgeSequenceLength];
byte* byte_sequence = reinterpret_cast<byte*>(sequence);
*length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
if (!initialized) {
CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
PredictableCodeSizeScope scope(patcher.masm(), *length);
patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
initialized = true;
}
return byte_sequence;
}
bool Code::IsYoungSequence(byte* sequence) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
bool result = !memcmp(sequence, young_sequence, young_length);
ASSERT(result ||
Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
return result;
}
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
*age = kNoAge;
*parity = NO_MARKING_PARITY;
} else {
Address target_address = Memory::Address_at(
sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
}
void Code::PatchPlatformCodeAge(byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
if (age == kNoAge) {
memcpy(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
patcher.masm()->ldr(pc, MemOperand(pc, -4));
patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
}
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

16
deps/v8/src/arm/codegen-arm.h

@ -88,22 +88,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
class MathExpGenerator : public AllStatic {
public:
static void EmitMathExp(MacroAssembler* masm,
DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
DoubleRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_

15
deps/v8/src/arm/constants-arm.h

@ -84,18 +84,9 @@ namespace v8 {
namespace internal {
// Constant pool marker.
// Use UDF, the permanently undefined instruction.
const int kConstantPoolMarkerMask = 0xfff000f0;
const int kConstantPoolMarker = 0xe7f000f0;
const int kConstantPoolLengthMaxMask = 0xffff;
inline int EncodeConstantPoolLength(int length) {
ASSERT((length & kConstantPoolLengthMaxMask) == length);
return ((length & 0xfff0) << 4) | (length & 0xf);
}
inline int DecodeConstantPoolLength(int instr) {
ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
const int kConstantPoolMarkerMask = 0xffe00000;
const int kConstantPoolMarker = 0x0c000000;
const int kConstantPoolLengthMask = 0x001fffff;
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
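For contrast, the 3.15-side encoding being removed above splits the pool length across two bit groups of the UDF marker instruction. A standalone worked example of those helpers (a sketch under the definitions shown above, not part of the diff):

#include <cassert>
#include <cstdint>
#include <cstdio>

const uint32_t kMarkerMask = 0xfff000f0;   // kConstantPoolMarkerMask (3.15)
const uint32_t kMarker = 0xe7f000f0;       // UDF-based kConstantPoolMarker (3.15)

uint32_t EncodeConstantPoolLength(uint32_t length) {
  assert(length <= 0xffff);
  return ((length & 0xfff0) << 4) | (length & 0xf);   // bits 8..19 and 0..3
}

uint32_t DecodeConstantPoolLength(uint32_t instr) {
  assert((instr & kMarkerMask) == kMarker);
  return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}

int main() {
  uint32_t instr = kMarker | EncodeConstantPoolLength(0x1234);
  std::printf("marker instr = 0x%08x, length = 0x%x\n",
              instr, DecodeConstantPoolLength(instr));   // length = 0x1234
  return 0;
}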

22
deps/v8/src/arm/deoptimizer-arm.cc

@ -114,6 +114,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
static const int32_t kBranchBeforeStackCheck = 0x2a000001;
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
@ -122,21 +123,24 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
// The back edge bookkeeping code matches the pattern:
//
// <decrement profiling counter>
// 2a 00 00 01 bpl ok
// The call of the stack guard check has the following form:
// e1 5d 00 0c cmp sp, <limit>
// 2a 00 00 01 bcs ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
if (FLAG_count_based_interrupts) {
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
} else {
ASSERT_EQ(kBranchBeforeStackCheck,
Memory::int32_at(pc_after - 3 * kInstrSize));
}
// We patch the code to the following form:
//
// <decrement profiling counter>
// e1 5d 00 0c cmp sp, <limit>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
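The two magic constants asserted on here are plain conditional-branch encodings; decoding them by hand (a standalone sketch, assuming the usual ARM B<cond> layout cond[31:28] 101 0 imm24, not part of the diff):

#include <cstdint>
#include <cstdio>

void DescribeBranch(uint32_t instr) {
  static const char* kCond[16] = {"eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
                                  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"};
  uint32_t cond = instr >> 28;
  int32_t imm24 = static_cast<int32_t>(instr << 8) >> 8;   // sign-extended offset
  // The branch target is relative to PC, which reads as the branch address + 8.
  std::printf("b%s -> branch site + %d bytes\n", kCond[cond], 8 + imm24 * 4);
}

int main() {
  DescribeBranch(0x2a000001);   // kBranchBeforeStackCheck: bcs over the 2-instr call
  DescribeBranch(0x5a000004);   // kBranchBeforeInterrupt:  bpl
  return 0;
}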
@ -173,9 +177,15 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
if (FLAG_count_based_interrupts) {
patcher.masm()->b(+16, pl);
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
} else {
patcher.masm()->b(+4, cs);
ASSERT_EQ(kBranchBeforeStackCheck,
Memory::int32_at(pc_after - 3 * kInstrSize));
}
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.

12
deps/v8/src/arm/disasm-arm.cc

@ -1098,7 +1098,6 @@ int Decoder::DecodeType7(Instruction* instr) {
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
// Dd = vmla(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// vmrs
@ -1161,12 +1160,6 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vmla.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
@ -1395,7 +1388,7 @@ bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
if (IsConstantPoolAt(instr_ptr)) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
return DecodeConstantPoolLength(instruction_bits);
return instruction_bits & kConstantPoolLengthMask;
} else {
return -1;
}
@ -1417,7 +1410,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
instruction_bits &
kConstantPoolLengthMask);
return Instruction::kInstrSize;
}
switch (instr->TypeValue()) {

184
deps/v8/src/arm/full-codegen-arm.cc

@ -130,7 +130,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@ -164,19 +164,14 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
info->set_prologue_offset(masm_->pc_offset());
{
PredictableCodeSizeScope predictible_code_size_scope(
masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
__ Push(lr, fp, cp, r1);
if (locals_count > 0) {
// Load undefined value here, so the value is ready for the loop
// below.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
// Adjust fp to point to caller's fp.
__ add(fp, sp, Operand(2 * kPointerSize));
{ Comment cmnt(masm_, "[ Allocate locals");
for (int i = 0; i < locals_count; i++) {
@ -292,7 +287,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@ -347,13 +342,14 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Back edge bookkeeping");
Comment cmnt(masm_, "[ Stack check");
// Block literal pools whilst emitting stack check code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
if (FLAG_count_based_interrupts) {
int weight = 1;
if (FLAG_weighted_back_edges) {
ASSERT(back_edge_target->is_bound());
@ -365,13 +361,23 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
__ b(pl, &ok);
InterruptStub stub;
__ CallStub(&stub);
} else {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
}
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordBackEdge(stmt->OsrEntryId());
RecordStackCheck(stmt->OsrEntryId());
if (FLAG_count_based_interrupts) {
EmitProfilingCounterReset();
}
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@ -433,8 +439,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
PredictableCodeSizeScope predictable(masm_);
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
@ -909,33 +914,34 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
ASSERT(variable->location() == Variable::CONTEXT);
ASSERT(variable->interface()->IsFrozen());
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
Handle<JSModule> instance = declaration->module()->interface()->Instance();
ASSERT(!instance.is_null());
switch (variable->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
__ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
__ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
globals_->Add(variable->name(), zone());
globals_->Add(instance, zone());
Visit(declaration->module());
break;
}
// Assign it.
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ mov(r1, Operand(instance));
__ str(r1, ContextOperand(cp, variable->index()));
// We know that we have written a module, which is not a smi.
__ RecordWriteContextSlot(cp,
Context::SlotOffset(variable->index()),
r1,
r3,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
// Traverse into body.
Visit(declaration->module());
break;
}
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}
@ -978,14 +984,6 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
__ CallRuntime(Runtime::kDeclareModules, 1);
// Return value is ignored.
}
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@ -1240,7 +1238,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
EmitBackEdgeBookkeeping(stmt, &loop);
EmitStackCheck(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
@ -1393,9 +1391,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == LET ||
local->mode() == CONST ||
local->mode() == CONST_HARMONY) {
if (local->mode() == CONST ||
local->mode() == CONST_HARMONY ||
local->mode() == LET) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (local->mode() == CONST) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
@ -2376,7 +2374,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
if (proxy != NULL && proxy->var()->is_possibly_eval()) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@ -3131,39 +3129,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
__ pop(r2);
__ pop(r1);
VisitForAccumulatorValue(args->at(0)); // string
static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
context()->Plug(r0);
}
void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT_EQ(3, args->length());
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
__ pop(r2);
__ pop(r1);
VisitForAccumulatorValue(args->at(0)); // string
static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
context()->Plug(r0);
}
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@ -3656,7 +3621,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
__ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch1), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
@ -3685,7 +3650,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi
__ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch1));
__ smull(scratch2, ip, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@ -3723,10 +3688,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ add(result_pos,
result,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
__ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ cmp(scratch1, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@ -3742,9 +3707,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
@ -3754,7 +3717,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
__ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
__ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&one_char_separator_loop_entry);
@ -3774,9 +3737,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
@ -3797,16 +3758,14 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ add(string,
separator,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
__ add(string,
string,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
@ -4111,8 +4070,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
}
__ mov(r1, r0);
__ mov(r0, Operand(Smi::FromInt(count_value)));
__ mov(r1, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
@ -4337,7 +4295,29 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cond = CompareIC::ComputeCondition(op);
Condition cond = eq;
switch (op) {
case Token::EQ_STRICT:
case Token::EQ:
cond = eq;
break;
case Token::LT:
cond = lt;
break;
case Token::GT:
cond = gt;
break;
case Token::LTE:
cond = le;
break;
case Token::GTE:
cond = ge;
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);

38
deps/v8/src/arm/ic-arm.cc

@ -1379,6 +1379,7 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
receiver,
elements, // Overwritten.
r3, // Scratch regs...
r4,
@ -1698,15 +1699,36 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address cmp_instruction_address =
Assembler::return_address_from_call_start(address);
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
State previous_state = GetState();
State state = TargetState(previous_state, false, x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
if (state == KNOWN_OBJECTS) {
stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
}
rewritten = stub.GetCode();
}
set_target(*rewritten);
// If the instruction following the call is not a cmp rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(cmp_instruction_address);
return Assembler::IsCmpImmediate(instr);
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[CompareIC (%s->%s)#%s]\n",
GetStateName(previous_state),
GetStateName(state),
Token::Name(op_));
}
#endif
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
}
}

235
deps/v8/src/arm/lithium-arm.cc

@ -177,7 +177,6 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
case Token::ROR: return "ror-t";
case Token::SHL: return "shl-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@ -297,11 +296,6 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
void LMathExp::PrintDataTo(StringStream* stream) {
value()->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@ -378,27 +372,20 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
void LLoadKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
stream->Add(" + %d]", additional_index());
} else {
stream->Add("]");
}
stream->Add("] <- ");
value()->PrintTo(stream);
}
void LStoreKeyed::PrintDataTo(StringStream* stream) {
void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
stream->Add(" + %d] <-", additional_index());
} else {
stream->Add("] <- ");
}
value()->PrintTo(stream);
}
@ -1046,15 +1033,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
} else if (op == kMathExp) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseTempRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
return DefineAsRegister(result);
} else if (op == kMathPowHalf) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LOperand* temp = FixedTemp(d3);
@ -1130,11 +1108,6 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
LInstruction* LChunkBuilder::DoRor(HRor* instr) {
return DoShift(Token::ROR, instr);
}
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@ -1333,21 +1306,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) {
HAdd* add = HAdd::cast(instr->uses().value());
if (instr == add->left()) {
// This mul is the lhs of an add. The add and mul will be folded
// into a multiply-add.
return NULL;
}
if (instr == add->right() && !add->left()->IsMul()) {
// This mul is the rhs of an add, where the lhs is not another mul.
// The add and mul will be folded into a multiply-add.
return NULL;
}
}
return DoArithmeticD(Token::MUL, instr);
} else {
return DoArithmeticT(Token::MUL, instr);
}
@ -1358,12 +1318,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
if (instr->left()->IsConstant()) {
// If lhs is constant, do reverse subtraction instead.
return DoRSub(instr);
}
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@ -1380,32 +1334,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
// Note: The lhs of the subtraction becomes the rhs of the
// reverse-subtraction.
LOperand* left = UseRegisterAtStart(instr->right());
LOperand* right = UseOrConstantAtStart(instr->left());
LRSubI* rsb = new(zone()) LRSubI(left, right);
LInstruction* result = DefineAsRegister(rsb);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
return result;
}
LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
LOperand* multiplier_op = UseRegisterAtStart(mul->left());
LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
LOperand* addend_op = UseRegisterAtStart(addend);
return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
multiplicand_op));
}
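The mul/add folding above exists because LMultiplyAddD lowers to a single vmla, which updates the addend register in place; in scalar terms (a sketch, not part of the diff):

#include <cstdio>

// vmla Dd, Dn, Dm computes Dd = Dd + Dn * Dm, i.e. the addend is updated in
// place -- which is why DoMultiplyAdd above defines the result "same as first"
// (the addend operand).
double MultiplyAdd(double addend, double multiplier, double multiplicand) {
  return addend + multiplier * multiplicand;
}

int main() {
  std::printf("%g\n", MultiplyAdd(1.0, 2.0, 3.0));   // 7
  return 0;
}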
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@ -1419,14 +1347,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
if (instr->left()->IsMul())
return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
if (instr->right()->IsMul()) {
ASSERT(!instr->left()->IsMul());
return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
}
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
@ -1492,7 +1412,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
Representation r = instr->representation();
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@ -1646,16 +1566,6 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegister(instr->index());
LOperand* value = UseRegister(instr->value());
LSeqStringSetChar* result =
new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@ -1779,10 +1689,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LUnallocated* temp1 = TempRegister();
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
return AssignEnvironment(Define(result, temp1));
LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
return AssignEnvironment(result);
}
@ -1950,23 +1860,35 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
ElementsKind elements_kind = instr->elements_kind();
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyed* result = NULL;
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
}
if (!instr->is_external()) {
LOperand* obj = NULL;
if (instr->representation().IsDouble()) {
obj = UseTempRegister(instr->elements());
} else {
ASSERT(instr->representation().IsTagged());
obj = UseRegisterAtStart(instr->elements());
}
result = new(zone()) LLoadKeyed(obj, key);
} else {
LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
HLoadKeyedFastDoubleElement* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* elements = UseTempRegister(instr->elements());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastDoubleElement* result =
new(zone()) LLoadKeyedFastDoubleElement(elements, key);
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
@ -1974,16 +1896,17 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
DefineAsRegister(result);
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
LLoadKeyedSpecializedArrayElement* result =
new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
bool can_deoptimize = instr->RequiresHoleCheck() ||
(elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
return can_deoptimize ? AssignEnvironment(result) : result;
return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
AssignEnvironment(load_instr) : load_instr;
}
@ -1997,32 +1920,43 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
HStoreKeyedFastElement* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
LOperand* key = NULL;
LOperand* val = NULL;
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
val = UseTempRegister(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(instr->value()->representation().IsTagged());
object = UseTempRegister(instr->elements());
val = needs_write_barrier ? UseTempRegister(instr->value())
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* obj = UseTempRegister(instr->object());
LOperand* val = needs_write_barrier
? UseTempRegister(instr->value())
: UseRegisterAtStart(instr->value());
key = needs_write_barrier ? UseTempRegister(instr->key())
LOperand* key = needs_write_barrier
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
}
return new(zone()) LStoreKeyedFastElement(obj, key, val);
}
return new(zone()) LStoreKeyed(object, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
HStoreKeyedFastDoubleElement* instr) {
ASSERT(instr->value()->representation().IsDouble());
ASSERT(instr->elements()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* val = UseTempRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
@ -2030,15 +1964,22 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->elements()->representation().IsExternal());
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
LOperand* val = val_is_temp_register
? UseTempRegister(instr->value())
: UseRegister(instr->value());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LOperand* external_pointer = UseRegister(instr->elements());
return new(zone()) LStoreKeyed(external_pointer, key, val);
LOperand* key = UseRegisterOrConstant(instr->key());
return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
key,
val);
}
@ -2261,7 +2202,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
for (int i = instr->values()->length() - 1; i >= 0; --i) {
for (int i = 0; i < instr->values()->length(); ++i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);

213
deps/v8/src/arm/lithium-arm.h

@ -125,18 +125,18 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedFastDoubleElement) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathExp) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
@ -150,7 +150,6 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@ -158,8 +157,10 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
V(StoreKeyed) \
V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@ -168,7 +169,6 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
@ -625,24 +625,6 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
// Instruction for computing multiplier * multiplicand + addend.
class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
public:
LMultiplyAddD(LOperand* addend, LOperand* multiplier,
LOperand* multiplicand) {
inputs_[0] = addend;
inputs_[1] = multiplier;
inputs_[2] = multiplicand;
}
LOperand* addend() { return inputs_[0]; }
LOperand* multiplier() { return inputs_[1]; }
LOperand* multiplicand() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
};
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@ -658,7 +640,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
return hydrogen()->representation().IsDouble();
return hydrogen()->GetInputRepresentation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@ -683,30 +665,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
class LMathExp: public LTemplateInstruction<1, 1, 3> {
public:
LMathExp(LOperand* value,
LOperand* double_temp,
LOperand* temp1,
LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
temps_[2] = double_temp;
ExternalReference::InitializeMathExpData();
}
LOperand* value() { return inputs_[0]; }
LOperand* temp1() { return temps_[0]; }
LOperand* temp2() { return temps_[1]; }
LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
virtual void PrintDataTo(StringStream* stream);
};
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@ -1031,21 +989,6 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
class LRSubI: public LTemplateInstruction<1, 2, 0> {
public:
LRSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
@ -1199,30 +1142,6 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
public:
LSeqStringSetChar(String::Encoding encoding,
LOperand* string,
LOperand* index,
LOperand* value) : encoding_(encoding) {
inputs_[0] = string;
inputs_[1] = index;
inputs_[2] = value;
}
String::Encoding encoding() { return encoding_; }
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
private:
String::Encoding encoding_;
};
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@ -1438,26 +1357,58 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyed(LOperand* elements, LOperand* key) {
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
bool is_external() const {
return hydrogen()->is_external();
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
"load-keyed-fast-double-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
"load-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@ -1971,28 +1922,51 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
};
class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
bool is_external() const { return hydrogen()->is_external(); }
LOperand* elements() { return inputs_[0]; }
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedFastDoubleElement(LOperand* elements,
LOperand* key,
LOperand* value) {
inputs_[0] = elements;
inputs_[1] = key;
inputs_[2] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
"store-keyed-fast-double-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
virtual void PrintDataTo(StringStream* stream);
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@ -2016,6 +1990,28 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key,
LOperand* value) {
inputs_[0] = external_pointer;
inputs_[1] = key;
inputs_[2] = value;
}
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
"store-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
@ -2138,7 +2134,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@ -2482,9 +2478,6 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
LInstruction* DoRSub(HSub* instr);
static bool HasMagicNumberForDivisor(int32_t divisor);
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);

563
deps/v8/src/arm/lithium-codegen-arm.cc

@ -146,20 +146,8 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
info()->set_prologue_offset(masm_->pc_offset());
{
PredictableCodeSizeScope predictible_code_size_scope(
masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
// The following three instructions must remain together and unmodified
// for code aging to work properly.
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Load undefined value here, so the value is ready for the loop
// below.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
__ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
@ -234,30 +222,7 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
if (FLAG_code_comments) {
HValue* hydrogen = instr->hydrogen_value();
if (hydrogen != NULL) {
if (hydrogen->IsChange()) {
HValue* changed_value = HChange::cast(hydrogen)->value();
int use_id = 0;
const char* use_mnemo = "dead";
if (hydrogen->UseCount() >= 1) {
HValue* use_value = hydrogen->uses().value();
use_id = use_value->id();
use_mnemo = use_value->Mnemonic();
}
Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
current_instruction_, instr->Mnemonic(),
changed_value->id(), changed_value->Mnemonic(),
use_id, use_mnemo);
} else {
Comment(";;; @%d: %s. <#%d>", current_instruction_,
instr->Mnemonic(), hydrogen->id());
}
} else {
Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
}
}
instr->CompileToNative(this);
}
}
@ -1324,18 +1289,6 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
DwVfpRegister addend = ToDoubleRegister(instr->addend());
DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
// This is computed in-place.
ASSERT(addend.is(ToDoubleRegister(instr->result())));
__ vmla(addend, multiplier, multiplicand);
}
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
const Register result = ToRegister(instr->result());
const Register left = ToRegister(instr->left());
@ -1536,9 +1489,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
// Mask the right_op operand.
__ and_(scratch, ToRegister(right_op), Operand(0x1F));
switch (instr->op()) {
case Token::ROR:
__ mov(result, Operand(left, ROR, scratch));
break;
case Token::SAR:
__ mov(result, Operand(left, ASR, scratch));
break;
@ -1562,13 +1512,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
case Token::ROR:
if (shift_count != 0) {
__ mov(result, Operand(left, ROR, shift_count));
} else {
__ Move(result, left);
}
break;
case Token::SAR:
if (shift_count != 0) {
__ mov(result, Operand(left, ASR, shift_count));
@ -1623,27 +1566,6 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
void LCodeGen::DoRSubI(LRSubI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
if (right->IsStackSlot() || right->IsArgument()) {
Register right_reg = EmitLoadRegister(right, ip);
__ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
ASSERT(right->IsRegister() || right->IsConstantOperand());
__ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
DeoptimizeIf(vs, instr->environment());
}
}
void LCodeGen::DoConstantI(LConstantI* instr) {
ASSERT(instr->result()->IsRegister());
__ mov(ToRegister(instr->result()), Operand(instr->value()));
@ -1764,15 +1686,6 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
SeqStringSetCharGenerator::Generate(masm(),
instr->encoding(),
ToRegister(instr->string()),
ToRegister(instr->index()),
ToRegister(instr->value()));
}
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
@ -2560,7 +2473,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(masm_);
Handle<JSGlobalPropertyCell> cell =
factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
@ -2624,7 +2537,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
static const int kAdditionalDelta = 5;
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(masm_);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
@ -3005,87 +2918,50 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort("array index constant value too big.");
}
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register elements = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Register store_base = scratch;
int offset = 0;
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
instr->additional_index());
store_base = elements;
} else {
key = ToRegister(instr->key());
Register key = EmitLoadRegister(instr->key(), scratch0());
// Even though the HLoadKeyedFastElement instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ add(scratch, elements,
Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, result.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
__ ldr(result, FieldMemOperand(store_base, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ tst(result, Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment());
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size,
instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ ldrb(result, mem_operand);
break;
case EXTERNAL_SHORT_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ ldrh(result, mem_operand);
break;
case EXTERNAL_INT_ELEMENTS:
__ ldr(result, mem_operand);
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
DeoptimizeIf(cs, instr->environment());
}
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
DeoptimizeIf(eq, instr->environment());
}
}
}
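The tagged-key branch above relies on smis being stored pre-shifted by the tag size, so scaling a tagged key by the pointer size needs one shift less than scaling an untagged one. A standalone arithmetic sketch of that equivalence (not V8 code; constants chosen for 32-bit ARM):
#include <cassert>
#include <cstdint>
int main() {
  const int kPointerSizeLog2 = 2;  // 4-byte pointers
  const int kSmiTagSize = 1;       // a smi is the integer shifted left by one
  int32_t index = 7;
  int32_t smi_key = index << kSmiTagSize;  // tagged representation of 7
  // Untagged key: scale by the full pointer-size shift.
  int32_t untagged_offset = index << kPointerSizeLog2;
  // Tagged key: the tag already supplies one shift, so shift one less.
  int32_t tagged_offset = smi_key << (kPointerSizeLog2 - kSmiTagSize);
  assert(untagged_offset == tagged_offset);  // both yield the same byte offset
  return 0;
}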
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFastDoubleElement(
LLoadKeyedFastDoubleElement* instr) {
Register elements = ToRegister(instr->elements());
bool key_is_constant = instr->key()->IsConstantOperand();
Register key = no_reg;
@ -3117,65 +2993,13 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
(instr->additional_index() << element_size_shift)));
}
__ vldr(result, elements, 0);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
}
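The hole check above loads and compares only the upper 32 bits of the element: the hole is encoded as one specific NaN bit pattern whose upper word differs from a canonical NaN's, so reading half the double is enough. A sketch of that encoding (the constants mirror the kHoleNan values used elsewhere in this diff, but treat them as illustrative):
#include <cassert>
#include <cstdint>
#include <cstring>
int main() {
  const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
  const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
  const uint64_t kHoleNanInt64 =
      (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
  // Reinterpret the bit pattern as a double, the way the backing store holds it.
  double hole;
  std::memcpy(&hole, &kHoleNanInt64, sizeof hole);
  // The generated code reads only the upper word and compares it; an ordinary
  // (canonical) NaN has a different upper word, so only the hole deoptimizes.
  uint64_t bits;
  std::memcpy(&bits, &hole, sizeof bits);
  assert(static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32);
  return 0;
}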
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register elements = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Register store_base = scratch;
int offset = 0;
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
instr->additional_index());
store_base = elements;
} else {
Register key = EmitLoadRegister(instr->key(), scratch0());
// Even though the HLoadKeyed instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ add(scratch, elements,
Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
__ ldr(result, FieldMemOperand(store_base, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ tst(result, Operand(kSmiTagMask));
DeoptimizeIf(ne, instr->environment());
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
DeoptimizeIf(eq, instr->environment());
}
}
}
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
if (instr->is_external()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
} else {
DoLoadKeyedFixedArray(instr);
}
__ vldr(result, elements, 0);
}
@ -3215,6 +3039,87 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort("array index constant value too big.");
}
} else {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
? (element_size_shift - kSmiTagSize) : element_size_shift;
int additional_offset = instr->additional_index() << element_size_shift;
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, result.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size,
instr->additional_index(), additional_offset);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ ldrb(result, mem_operand);
break;
case EXTERNAL_SHORT_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ ldrh(result, mem_operand);
break;
case EXTERNAL_INT_ELEMENTS:
__ ldr(result, mem_operand);
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
DeoptimizeIf(cs, instr->environment());
}
break;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
}
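For the specialized (external) arrays above, element addressing is pure shift arithmetic: the element size is a power of two, so both the key and the constant additional_index become byte offsets via the same ElementsKindToShiftSize-style shift. A trivial sketch of that offset computation (illustrative values only):
#include <cassert>
int byte_offset(int key, int additional_index, int element_size_shift) {
  return (key << element_size_shift) + (additional_index << element_size_shift);
}
int main() {
  assert(byte_offset(5, 1, 2) == 24);  // 4-byte elements: (5 + 1) * 4
  assert(byte_offset(5, 0, 3) == 40);  // 8-byte elements: 5 * 8
  return 0;
}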
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
@ -3820,20 +3725,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
void LCodeGen::DoMathExp(LMathExp* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
DoubleRegister double_scratch2 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
MathExpGenerator::EmitMathExp(
masm(), input, result, double_scratch1, double_scratch2,
temp1, temp2, scratch0());
}
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@ -4109,8 +4000,102 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
Register external_pointer = ToRegister(instr->elements());
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register value = ToRegister(instr->value());
Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Register scratch = scratch0();
Register store_base = scratch;
int offset = 0;
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
instr->additional_index());
store_base = elements;
} else {
// Even though the HLoadKeyedFastElement instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ add(scratch, elements,
Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
__ str(value, FieldMemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ add(key, store_base, Operand(offset - kHeapObjectTag));
__ RecordWrite(elements,
key,
value,
kLRHasBeenSaved,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
}
void LCodeGen::DoStoreKeyedFastDoubleElement(
LStoreKeyedFastDoubleElement* instr) {
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
Register scratch = scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
// Calculate the effective address of the slot in the array to store the
// double value.
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort("array index constant value too big.");
}
} else {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
? (element_size_shift - kSmiTagSize) : element_size_shift;
Operand operand = key_is_constant
? Operand((constant_key << element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
if (!key_is_constant) {
__ add(scratch, scratch,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
if (instr->NeedsCanonicalization()) {
// Check for NaN. All NaNs must be canonicalized.
__ VFPCompareAndSetFlags(value, value);
// Only load canonical NaN if the comparison above set the overflow.
__ Vmov(value,
FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
no_reg, vs);
}
__ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
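The canonicalization step above uses a VFP self-compare: a NaN is the only value that compares unordered with itself, and any NaN being stored is replaced with one canonical pattern so it can never be mistaken for the hole. A portable sketch of that rule (hypothetical helper, not the stub itself):
#include <cassert>
#include <cmath>
#include <limits>
// Replace every NaN with a single canonical NaN before storing it into a
// double-only backing store.
double CanonicalizeForStore(double value, double canonical_nan) {
  return (value != value) ? canonical_nan : value;  // unordered self-compare
}
int main() {
  const double canonical = std::numeric_limits<double>::quiet_NaN();
  assert(CanonicalizeForStore(1.5, canonical) == 1.5);
  assert(std::isnan(CanonicalizeForStore(std::nan(""), canonical)));
  return 0;
}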
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@ -4179,110 +4164,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
}
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
Register scratch = scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
// Calculate the effective address of the slot in the array to store the
// double value.
if (key_is_constant) {
constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort("array index constant value too big.");
}
} else {
key = ToRegister(instr->key());
}
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
? (element_size_shift - kSmiTagSize) : element_size_shift;
Operand operand = key_is_constant
? Operand((constant_key << element_size_shift) +
FixedDoubleArray::kHeaderSize - kHeapObjectTag)
: Operand(key, LSL, shift_size);
__ add(scratch, elements, operand);
if (!key_is_constant) {
__ add(scratch, scratch,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
}
if (instr->NeedsCanonicalization()) {
// Check for NaN. All NaNs must be canonicalized.
__ VFPCompareAndSetFlags(value, value);
// Only load canonical NaN if the comparison above set the overflow.
__ Vmov(value,
FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
no_reg, vs);
}
__ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
Register value = ToRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
: no_reg;
Register scratch = scratch0();
Register store_base = scratch;
int offset = 0;
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
instr->additional_index());
store_base = elements;
} else {
// Even though the HLoadKeyed instruction forces the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ add(scratch, elements,
Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
__ str(value, FieldMemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
__ add(key, store_base, Operand(offset - kHeapObjectTag));
__ RecordWrite(elements,
key,
value,
kLRHasBeenSaved,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
}
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
// Dispatch by case: external array, fast double array, or fixed array.
if (instr->is_external()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
} else {
DoStoreKeyedFixedArray(instr);
}
}
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r2));
ASSERT(ToRegister(instr->key()).is(r1));
@ -4740,6 +4621,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
SwVfpRegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@ -4761,7 +4643,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ EmitECMATruncate(input_reg,
double_scratch2,
double_scratch,
single_scratch,
scratch1,
scratch2,
scratch3);
@ -4843,19 +4725,20 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
DwVfpRegister double_scratch = double_scratch0();
Label done;
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
SwVfpRegister single_scratch = double_scratch0().low();
__ EmitECMATruncate(result_reg,
double_input,
double_scratch,
single_scratch,
scratch1,
scratch2,
scratch3);
} else {
DwVfpRegister double_scratch = double_scratch0();
__ EmitVFPTruncate(kRoundToMinusInf,
result_reg,
double_input,
@ -5029,7 +4912,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(instr->temp()->Equals(instr->result()));
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
@ -5054,6 +4936,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
DoCheckMapCommon(temp1, temp2,
Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
DeoptimizeIf(ne, instr->environment());
}
@ -5664,7 +5547,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
StackCheckStub stub;
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(masm_);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);

6
deps/v8/src/arm/lithium-codegen-arm.h

@ -377,12 +377,6 @@ class LCodeGen BASE_EMBEDDED {
};
void EnsureSpaceForLazyDeopt();
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr);
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
Zone* zone_;
LPlatformChunk* const chunk_;

86
deps/v8/src/arm/macro-assembler-arm.cc

@ -422,17 +422,6 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
!Heap::RootCanBeWrittenAfterInitialization(index) &&
!predictable_code_size()) {
Handle<Object> root(isolate()->heap()->roots_array_start()[index]);
if (!isolate()->heap()->InNewSpace(*root)) {
// The CPU supports fast immediate values, and this root will never
// change. We will load it as a relocatable immediate value.
mov(destination, Operand(root), LeaveCC, cond);
return;
}
}
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
@ -1787,10 +1776,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
add(scratch1, length,
Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
@ -1956,13 +1945,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail,
int elements_offset) {
Label* fail) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@ -1989,10 +1978,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
add(scratch1, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
str(mantissa_reg, FieldMemOperand(
scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
sizeof(kHoleNanLower32);
str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
str(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@ -2013,8 +2000,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&smi_value);
add(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
elements_offset));
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch1 is now effective address of the double element
@ -2223,28 +2209,12 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
add(r6, r6, Operand(1));
str(r6, MemOperand(r7, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(this, StackFrame::MANUAL);
PushSafepointRegisters();
PrepareCallCFunction(0, r0);
CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
PopSafepointRegisters();
}
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
DirectCEntryStub stub;
stub.GenerateCall(this, function);
if (FLAG_log_timer_events) {
FrameScope frame(this, StackFrame::MANUAL);
PushSafepointRegisters();
PrepareCallCFunction(0, r0);
CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
PopSafepointRegisters();
}
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
@ -2490,20 +2460,6 @@ void MacroAssembler::ConvertToInt32(Register source,
}
void MacroAssembler::TryFastDoubleToInt32(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Label* done) {
ASSERT(!double_input.is(double_scratch));
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_f64_s32(double_scratch, double_scratch.low());
VFPCompareAndSetFlags(double_input, double_scratch);
b(eq, done);
}
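The removed TryFastDoubleToInt32 helper above implements the classic round-trip test: convert to int32, convert back, and compare; equality means the double was exactly a 32-bit integer. A C-level sketch of the same idea (the generated code additionally consults the FPSCR for -0 and out-of-range inputs, which this sketch ignores):
#include <cassert>
#include <cstdint>
bool FitsInt32Exactly(double input, int32_t* out) {
  int32_t as_int = static_cast<int32_t>(input);  // vcvt_s32_f64
  if (static_cast<double>(as_int) == input) {    // vcvt_f64_s32 plus compare
    *out = as_int;
    return true;
  }
  return false;
}
int main() {
  int32_t value = 0;
  assert(FitsInt32Exactly(42.0, &value) && value == 42);
  assert(!FitsInt32Exactly(42.5, &value));
  return 0;
}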
void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Register result,
DwVfpRegister double_input,
@ -2519,7 +2475,11 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Label done;
// Test for values that can be exactly represented as a signed 32-bit integer.
TryFastDoubleToInt32(result, double_input, double_scratch, &done);
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_f64_s32(double_scratch, double_scratch.low());
VFPCompareAndSetFlags(double_input, double_scratch);
b(eq, &done);
// Convert to integer, respecting rounding mode.
int32_t check_inexact_conversion =
@ -2636,7 +2596,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
void MacroAssembler::EmitECMATruncate(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
SwVfpRegister single_scratch,
Register scratch,
Register input_high,
Register input_low) {
@ -2647,18 +2607,16 @@ void MacroAssembler::EmitECMATruncate(Register result,
ASSERT(!scratch.is(result) &&
!scratch.is(input_high) &&
!scratch.is(input_low));
ASSERT(!double_input.is(double_scratch));
ASSERT(!single_scratch.is(double_input.low()) &&
!single_scratch.is(double_input.high()));
Label done;
// Test for values that can be exactly represented as a signed 32-bit integer.
TryFastDoubleToInt32(result, double_input, double_scratch, &done);
// Clear cumulative exception flags.
ClearFPSCRBits(kVFPExceptionMask, scratch);
// Try a conversion to a signed integer.
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_s32_f64(single_scratch, double_input);
vmov(result, single_scratch);
// Retrieve the FPSCR.
vmrs(scratch);
// Check for overflow and NaNs.
@ -3370,10 +3328,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register scratch2,
Label* failure) {
int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
kStringRepresentationMask;
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
and_(scratch1, first, Operand(kFlatAsciiStringMask));
and_(scratch2, second, Operand(kFlatAsciiStringMask));
cmp(scratch1, Operand(kFlatAsciiStringTag));
@ -3387,10 +3343,8 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure) {
int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
kStringRepresentationMask;
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
and_(scratch, type, Operand(kFlatAsciiStringMask));
cmp(scratch, Operand(kFlatAsciiStringTag));
b(ne, failure);
@ -3730,7 +3684,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
tst(instance_type, Operand(kStringEncodingMask));

30
deps/v8/src/arm/macro-assembler-arm.h

@ -322,7 +322,6 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
void Push(Smi* smi) { Push(Handle<Smi>(smi)); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@ -832,14 +831,14 @@ class MacroAssembler: public Assembler {
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* fail,
int elements_offset = 0);
Label* fail);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
@ -894,15 +893,12 @@ class MacroAssembler: public Assembler {
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
// Returns a condition that will be enabled if the object was a string
// and the passed-in condition passed. If the passed-in condition failed
// then flags remain unchanged.
// Returns a condition that will be enabled if the object was a string.
Condition IsObjectStringType(Register obj,
Register type,
Condition cond = al) {
ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
tst(type, Operand(kIsNotStringMask), cond);
Register type) {
ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
tst(type, Operand(kIsNotStringMask));
ASSERT_EQ(0, kStringTag);
return eq;
}
@ -959,14 +955,6 @@ class MacroAssembler: public Assembler {
DwVfpRegister double_scratch,
Label *not_int32);
// Try to convert a double to a signed 32-bit integer. If the double value
// can be exactly represented as an integer, the code jumps to 'done' and
// 'result' contains the integer value. Otherwise, the code falls through.
void TryFastDoubleToInt32(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Label* done);
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// Clears the z flag (ne condition) if an overflow occurs.
@ -997,7 +985,7 @@ class MacroAssembler: public Assembler {
// Exits with 'result' holding the answer and all other registers clobbered.
void EmitECMATruncate(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
SwVfpRegister single_scratch,
Register scratch,
Register scratch2,
Register scratch3);
@ -1214,7 +1202,7 @@ class MacroAssembler: public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Jump if the register contains a smi.
// Jump the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);

4
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -1150,7 +1150,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
bool is_ascii = subject->IsOneByteRepresentationUnderneath();
bool is_ascii = subject->IsAsciiRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@ -1181,7 +1181,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
}
// String might have changed.
if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
// If we changed between an ASCII and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).

112
deps/v8/src/arm/simulator-arm.cc

@ -1387,14 +1387,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
result = right | left;
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
UNIMPLEMENTED();
break;
}
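For reference, the deleted lines implemented the standard 32-bit rotate-right (the restored 3.14 simulator falls back to UNIMPLEMENTED() for this form of ROR). A standalone sketch of that rotation:
#include <cassert>
#include <cstdint>
uint32_t RotateRight32(uint32_t value, uint32_t amount) {
  amount &= 31;
  if (amount == 0) return value;  // avoid a shift by 32, which is undefined
  return (value >> amount) | (value << (32 - amount));
}
int main() {
  assert(RotateRight32(0x80000001u, 1) == 0xC0000000u);
  assert(RotateRight32(0x12345678u, 0) == 0x12345678u);
  return 0;
}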
@ -1466,14 +1459,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
if (shift_amount == 0) {
*carry_out = c_flag_;
} else {
uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
result = right | left;
*carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
}
UNIMPLEMENTED();
break;
}
@ -2778,20 +2764,6 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
// vmla
if (instr->SzValue() != 0x1) {
UNREACHABLE(); // Not used by V8.
}
double dd_value = get_double_from_d_register(vd);
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
// Note: we do the mul and add in separate steps to avoid getting a result
// with too high precision.
set_d_register_from_double(vd, dn_value * dm_value);
set_d_register_from_double(vd, get_double_from_d_register(vd) + dd_value);
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() != 0x1) {
@ -3301,7 +3273,33 @@ void Simulator::Execute() {
}
void Simulator::CallInternal(byte* entry) {
int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
// Set up arguments
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
set_register(r0, va_arg(parameters, int32_t));
set_register(r1, va_arg(parameters, int32_t));
set_register(r2, va_arg(parameters, int32_t));
set_register(r3, va_arg(parameters, int32_t));
// Remaining arguments passed on stack.
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
if (OS::ActivationFrameAlignment() != 0) {
entry_stack &= -OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
for (int i = 4; i < argument_count; i++) {
stack_argument[i - 4] = va_arg(parameters, int32_t);
}
va_end(parameters);
set_register(sp, entry_stack);
// Prepare to execute the code at entry
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@ -3355,37 +3353,6 @@ void Simulator::CallInternal(byte* entry) {
set_register(r9, r9_val);
set_register(r10, r10_val);
set_register(r11, r11_val);
}
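Simulator::Call above mirrors the ARM calling convention: the first four arguments go in r0 through r3, the rest are stored below sp, and sp is first rounded down to the host's activation-frame alignment. A small sketch of that stack computation (the alignment value here is arbitrary):
#include <cassert>
#include <cstdint>
int32_t EntryStack(int32_t original_sp, int argument_count, int alignment) {
  // Arguments beyond the first four spill to the stack.
  int32_t entry = original_sp -
                  (argument_count - 4) * static_cast<int32_t>(sizeof(int32_t));
  if (alignment != 0) entry &= -alignment;  // round down to a multiple
  return entry;
}
int main() {
  // Six arguments: two 4-byte spill slots, then align down to 8.
  assert(EntryStack(0x1000, 6, 8) == 0xFF8);
  assert(EntryStack(0x1000, 7, 8) == 0xFF0);
  return 0;
}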
int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
// Set up arguments
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
set_register(r0, va_arg(parameters, int32_t));
set_register(r1, va_arg(parameters, int32_t));
set_register(r2, va_arg(parameters, int32_t));
set_register(r3, va_arg(parameters, int32_t));
// Remaining arguments passed on stack.
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
if (OS::ActivationFrameAlignment() != 0) {
entry_stack &= -OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
for (int i = 4; i < argument_count; i++) {
stack_argument[i - 4] = va_arg(parameters, int32_t);
}
va_end(parameters);
set_register(sp, entry_stack);
CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
@ -3396,27 +3363,6 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
double Simulator::CallFP(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
memcpy(buffer, &d0, sizeof(d0));
set_dw_register(0, buffer);
memcpy(buffer, &d1, sizeof(d1));
set_dw_register(2, buffer);
}
CallInternal(entry);
if (use_eabi_hardfloat()) {
return get_double_from_d_register(0);
} else {
return get_double_from_register_pair(0);
}
}
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);

4
deps/v8/src/arm/simulator-arm.h

@ -205,8 +205,6 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
// Alternative: call a 2-argument double function.
double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@ -358,8 +356,6 @@ class Simulator {
template<class InputType, int register_size>
void SetVFPRegister(int reg_index, const InputType& value);
void CallInternal(byte* entry);
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q

122
deps/v8/src/arm/stub-cache-arm.cc

@ -327,24 +327,19 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
PropertyIndex index) {
if (index.is_header_index()) {
int offset = index.header_index() * kPointerSize;
__ ldr(dst, FieldMemOperand(src, offset));
} else {
int index) {
// Adjust for the number of properties stored in the holder.
int slot = index.field_index() - holder->map()->inobject_properties();
if (slot < 0) {
index -= holder->map()->inobject_properties();
if (index < 0) {
// Get the property straight out of the holder.
int offset = holder->map()->instance_size() + (slot * kPointerSize);
int offset = holder->map()->instance_size() + (index * kPointerSize);
__ ldr(dst, FieldMemOperand(src, offset));
} else {
// Calculate the offset into the properties array.
int offset = slot * kPointerSize + FixedArray::kHeaderSize;
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
__ ldr(dst, FieldMemOperand(dst, offset));
}
}
}
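GenerateFastPropertyLoad above distinguishes in-object fields from out-of-line ones purely by index arithmetic: after subtracting the number of in-object properties, a negative index addresses a slot inside the object (counting back from its instance size) and a non-negative one addresses the properties FixedArray. A worked sketch of that arithmetic (all numbers hypothetical):
#include <cassert>
struct MapSketch {
  int instance_size;        // byte size of the object itself
  int inobject_properties;  // fields stored inside the object
};
int FieldByteOffset(const MapSketch& map, int field_index, bool* in_object) {
  const int kPointerSize = 4;
  const int kFixedArrayHeaderSize = 8;  // assumed header of the properties array
  int index = field_index - map.inobject_properties;
  if (index < 0) {
    *in_object = true;
    return map.instance_size + index * kPointerSize;
  }
  *in_object = false;
  return index * kPointerSize + kFixedArrayHeaderSize;
}
int main() {
  MapSketch map = {16, 2};
  bool in_object = false;
  assert(FieldByteOffset(map, 0, &in_object) == 8 && in_object);    // 16 - 2*4
  assert(FieldByteOffset(map, 3, &in_object) == 12 && !in_object);  // 1*4 + 8
  return 0;
}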
@ -1201,7 +1196,7 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
PropertyIndex index,
int index,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@ -1550,7 +1545,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
PropertyIndex index,
int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
@ -1623,7 +1618,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
Label attempt_to_grow_elements, with_write_barrier, check_double;
Label attempt_to_grow_elements;
Register elements = r6;
Register end_elements = r5;
@ -1634,9 +1629,10 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
r0,
Heap::kFixedArrayMapRootIndex,
&check_double,
&call_builtin,
DONT_DO_SMI_CHECK);
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
@ -1651,6 +1647,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
Label with_write_barrier;
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
@ -1670,40 +1667,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Drop(argc + 1);
__ Ret();
__ bind(&check_double);
// Check that the elements are in fast mode and writable.
__ CheckMap(elements,
r0,
Heap::kFixedDoubleArrayMapRootIndex,
&call_builtin,
DONT_DO_SMI_CHECK);
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ add(r0, r0, Operand(Smi::FromInt(argc)));
// Get the elements' length.
__ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ cmp(r0, r4);
__ b(gt, &call_builtin);
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(
r4, r0, elements, r3, r5, r2, r9,
&call_builtin, argc * kDoubleSize);
// Save new length.
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
// Check for a smi.
__ Drop(argc + 1);
__ Ret();
__ bind(&with_write_barrier);
__ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
@ -1715,11 +1678,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(r3, r7, &call_builtin);
__ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r7, ip);
__ b(eq, &call_builtin);
// edx: receiver
// r3: map
Label try_holey_map;
@ -2954,7 +2912,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
PropertyIndex index,
int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
@ -3143,7 +3101,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
PropertyIndex index) {
int index) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@ -3509,13 +3467,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// r1: constructor function
// r2: initial map
// r7: undefined
ASSERT(function->has_initial_map());
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
#ifdef DEBUG
int instance_size = function->initial_map()->instance_size();
__ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
__ Check(eq, "Instance size of initial map changed.");
#endif
__ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
@ -3573,6 +3525,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@ -3856,20 +3809,20 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
Register dst_mantissa = r1;
Register dst_exponent = r3;
Register dst1 = r1;
Register dst2 = r3;
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm,
value,
dest,
d0,
dst_mantissa,
dst_exponent,
dst1,
dst2,
r9,
s0);
__ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ Ret();
}
} else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
@ -4138,7 +4091,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
FloatingPointHelper::ConvertIntToDouble(
masm, r5, destination,
d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent.
d0, r6, r7, // These are: double_dst, dst1, dst2.
r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP2);
@ -4197,7 +4150,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// not include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ EmitECMATruncate(r5, d0, d1, r6, r7, r9);
__ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
@ -4690,12 +4643,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
// -- r3 : scratch (elements backing store)
// -- r3 : scratch
// -- r4 : scratch
// -- r5 : scratch
// -- r6 : scratch
// -- r7 : scratch
// -- r9 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
@ -4708,7 +4658,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = r5;
Register scratch3 = r6;
Register scratch4 = r7;
Register scratch5 = r9;
Register length_reg = r7;
// This stub is meant to be tail-jumped to, the receiver must already
@ -4739,6 +4688,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
receiver_reg,
// All registers after this are overwritten.
elements_reg,
scratch1,
@ -4787,7 +4737,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
TAG_OBJECT);
// Initialize the new FixedDoubleArray.
// Initialize the new FixedDoubleArray. Leave elements uninitialized for
// efficiency; they are guaranteed to be initialized before use.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ mov(scratch1,
@ -4795,25 +4746,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ str(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
__ mov(scratch1, elements_reg);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
// All registers after this are overwritten.
scratch1,
scratch2,
scratch3,
scratch4,
scratch5,
&transition_elements_kind);
__ mov(scratch1, Operand(kHoleNanLower32));
__ mov(scratch2, Operand(kHoleNanUpper32));
for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
int offset = FixedDoubleArray::OffsetOfElementAt(i);
__ str(scratch1, FieldMemOperand(elements_reg, offset));
__ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
}
// Install the new backing store in the JSArray.
__ str(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@ -4826,7 +4758,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ Ret();
__ jmp(&finish_store);
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.

18
deps/v8/src/array.js

@ -413,7 +413,6 @@ function ArrayJoin(separator) {
["Array.prototype.join"]);
}
var length = TO_UINT32(this.length);
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@ -423,7 +422,7 @@ function ArrayJoin(separator) {
var result = %_FastAsciiArrayJoin(this, separator);
if (!IS_UNDEFINED(result)) return result;
return Join(this, length, separator, ConvertToString);
return Join(this, TO_UINT32(this.length), separator, ConvertToString);
}
@ -442,8 +441,8 @@ function ArrayPop() {
}
n--;
var value = this[n];
delete this[n];
this.length = n;
delete this[n];
return value;
}
@ -582,7 +581,7 @@ function ArrayShift() {
var first = this[0];
if (IS_ARRAY(this) && !%IsObserved(this)) {
if (IS_ARRAY(this)) {
SmartMove(this, 0, 1, len, 0);
} else {
SimpleMove(this, 0, 1, len, 0);
@ -603,7 +602,7 @@ function ArrayUnshift(arg1) { // length == 1
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
if (IS_ARRAY(this) && !%IsObserved(this)) {
if (IS_ARRAY(this)) {
SmartMove(this, 0, 0, len, num_arguments);
} else {
SimpleMove(this, 0, 0, len, num_arguments);
@ -650,7 +649,6 @@ function ArraySlice(start, end) {
if (end_i < start_i) return result;
if (IS_ARRAY(this) &&
!%IsObserved(this) &&
(end_i > 1000) &&
(%EstimateNumberOfElements(this) < end_i)) {
SmartSlice(this, start_i, end_i - start_i, len, result);
@ -707,9 +705,7 @@ function ArraySplice(start, delete_count) {
var use_simple_splice = true;
if (IS_ARRAY(this) &&
!%IsObserved(this) &&
num_additional_args !== del_count) {
if (IS_ARRAY(this) && num_additional_args !== del_count) {
// If we are only deleting/moving a few things near the end of the
// array then the simple version is going to be faster, because it
// doesn't touch most of the array.
@ -1553,11 +1549,9 @@ function SetUpArray() {
// exposed to user code.
// Adding only the functions that are actually used.
SetUpLockedPrototype(InternalArray, $Array(), $Array(
"indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush),
"splice", getFunction("splice", ArraySplice)
"push", getFunction("push", ArrayPush)
));
}

187
deps/v8/src/assembler.cc

@ -103,78 +103,15 @@ static DoubleConstant double_constants;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
static bool math_exp_data_initialized = false;
static Mutex* math_exp_data_mutex = NULL;
static double* math_exp_constants_array = NULL;
static double* math_exp_log_table_array = NULL;
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
AssemblerBase::AssemblerBase(Isolate* isolate)
: isolate_(isolate),
jit_cookie_(0),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false) {
jit_cookie_(0) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = V8::RandomPrivate(isolate);
}
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
if (isolate->assembler_spare_buffer() != NULL) {
buffer = isolate->assembler_spare_buffer();
isolate->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
own_buffer_ = true;
} else {
// Use externally provided buffer instead.
ASSERT(buffer_size > 0);
own_buffer_ = false;
}
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
pc_ = buffer_;
}
AssemblerBase::~AssemblerBase() {
if (own_buffer_) {
if (isolate() != NULL &&
isolate()->assembler_spare_buffer() == NULL &&
buffer_size_ == kMinimalBufferSize) {
isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
}
}
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
int expected_size)
: assembler_(assembler),
expected_size_(expected_size),
start_offset_(assembler->pc_offset()),
old_value_(assembler->predictable_code_size()) {
assembler_->set_predictable_code_size(true);
}
PredictableCodeSizeScope::~PredictableCodeSizeScope() {
// TODO(svenpanne) Remove the 'if' when everything works.
if (expected_size_ >= 0) {
CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
}
assembler_->set_predictable_code_size(old_value_);
}
@ -376,7 +313,6 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
<= kMaxStandardNonCompactModes);
@ -634,15 +570,6 @@ void RelocIterator::next() {
}
}
}
if (code_age_sequence_ != NULL) {
byte* old_code_age_sequence = code_age_sequence_;
code_age_sequence_ = NULL;
if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
rinfo_.data_ = 0;
rinfo_.pc_ = old_code_age_sequence;
return;
}
}
done_ = true;
}
@ -658,12 +585,6 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
byte* sequence = code->FindCodeAgeSequence();
if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
code_age_sequence_ = sequence;
} else {
code_age_sequence_ = NULL;
}
if (mode_mask_ == 0) pos_ = end_;
next();
}
@ -679,7 +600,6 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@ -732,8 +652,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
UNREACHABLE();
#endif
return "debug break slot";
case RelocInfo::CODE_AGE_SEQUENCE:
return "code_age_sequence";
case RelocInfo::NUMBER_OF_MODES:
UNREACHABLE();
return "number_of_modes";
@ -821,9 +739,6 @@ void RelocInfo::Verify() {
case NUMBER_OF_MODES:
UNREACHABLE();
break;
case CODE_AGE_SEQUENCE:
ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
break;
}
}
#endif // VERIFY_HEAP
@ -841,70 +756,6 @@ void ExternalReference::SetUp() {
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
math_exp_data_mutex = OS::CreateMutex();
}
void ExternalReference::InitializeMathExpData() {
// Early return?
if (math_exp_data_initialized) return;
math_exp_data_mutex->Lock();
if (!math_exp_data_initialized) {
// If this is changed, generated code must be adapted too.
const int kTableSizeBits = 11;
const int kTableSize = 1 << kTableSizeBits;
const double kTableSizeDouble = static_cast<double>(kTableSize);
math_exp_constants_array = new double[9];
// Input values smaller than this always return 0.
math_exp_constants_array[0] = -708.39641853226408;
// Input values larger than this always return +Infinity.
math_exp_constants_array[1] = 709.78271289338397;
math_exp_constants_array[2] = V8_INFINITY;
// The rest is black magic. Do not attempt to understand it. It is
// loosely based on the "expd" function published at:
// http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
const double constant3 = (1 << kTableSizeBits) / log(2.0);
math_exp_constants_array[3] = constant3;
math_exp_constants_array[4] =
static_cast<double>(static_cast<int64_t>(3) << 51);
math_exp_constants_array[5] = 1 / constant3;
math_exp_constants_array[6] = 3.0000000027955394;
math_exp_constants_array[7] = 0.16666666685227835;
math_exp_constants_array[8] = 1;
math_exp_log_table_array = new double[kTableSize];
for (int i = 0; i < kTableSize; i++) {
double value = pow(2, i / kTableSizeDouble);
uint64_t bits = BitCast<uint64_t, double>(value);
bits &= (static_cast<uint64_t>(1) << 52) - 1;
double mantissa = BitCast<double, uint64_t>(bits);
// <just testing>
uint64_t doublebits;
memcpy(&doublebits, &value, sizeof doublebits);
doublebits &= (static_cast<uint64_t>(1) << 52) - 1;
double mantissa2;
memcpy(&mantissa2, &doublebits, sizeof mantissa2);
CHECK_EQ(mantissa, mantissa2);
// </just testing>
math_exp_log_table_array[i] = mantissa;
}
math_exp_data_initialized = true;
}
math_exp_data_mutex->Unlock();
}
void ExternalReference::TearDownMathExpData() {
delete[] math_exp_constants_array;
delete[] math_exp_log_table_array;
delete math_exp_data_mutex;
}
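The table built above holds 2^(i/2048) values so that generated code can evaluate exp(x) as 2^(n/2048) * exp(r), where n is x scaled by 2048/ln(2) and rounded, the table supplies the fractional power of two, and a short polynomial handles the tiny remainder r. A rough standalone sketch of the same decomposition (storing whole doubles rather than raw mantissa bits, and without the clamping against constants[0] and [1]):

#include <cmath>
#include <cstdint>

static const int kTableBits = 11;
static const int kTableSize = 1 << kTableBits;   // 2048, as above
static double table[kTableSize];

static void InitTable() {
  for (int i = 0; i < kTableSize; i++) {
    table[i] = std::pow(2.0, i / static_cast<double>(kTableSize));
  }
}

// exp(x) = 2^(n >> 11) * 2^((n & 2047) / 2048) * exp(r), with
// |r| <= ln(2) / 4096, so a short series for exp(r) is accurate enough here.
static double FastExp(double x) {
  const double kScale = kTableSize / std::log(2.0);   // same role as constants[3]
  double n = std::nearbyint(x * kScale);
  double r = x - n / kScale;                          // remainder
  int64_t ni = static_cast<int64_t>(n);
  double pow2 = std::ldexp(table[ni & (kTableSize - 1)],
                           static_cast<int>(ni >> kTableBits));
  return pow2 * (1.0 + r + 0.5 * r * r);              // ~1e-12 relative error
}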
@ -1023,13 +874,6 @@ ExternalReference ExternalReference::get_date_field_function(
}
ExternalReference ExternalReference::get_make_code_young_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
}
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@ -1056,20 +900,6 @@ ExternalReference ExternalReference::compute_output_frames_function(
}
ExternalReference ExternalReference::log_enter_external_function(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
}
ExternalReference ExternalReference::log_leave_external_function(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
}
ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
@ -1356,19 +1186,6 @@ ExternalReference ExternalReference::math_log_double_function(
}
ExternalReference ExternalReference::math_exp_constants(int constant_index) {
ASSERT(math_exp_data_initialized);
return ExternalReference(
reinterpret_cast<void*>(math_exp_constants_array + constant_index));
}
ExternalReference ExternalReference::math_exp_log_table() {
ASSERT(math_exp_data_initialized);
return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
}
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);

75
deps/v8/src/assembler.h

@ -56,56 +56,18 @@ struct StatsCounter;
class AssemblerBase: public Malloced {
public:
AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
virtual ~AssemblerBase();
explicit AssemblerBase(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
int jit_cookie() const { return jit_cookie_; }
bool emit_debug_code() const { return emit_debug_code_; }
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
bool predictable_code_size() const { return predictable_code_size_; }
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
int jit_cookie() { return jit_cookie_; }
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
static void QuietNaN(HeapObject* nan) { }
int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
static const int kMinimalBufferSize = 4*KB;
protected:
// The buffer into which code and relocation info are generated. It could
// either be owned by the assembler or be provided externally.
byte* buffer_;
int buffer_size_;
bool own_buffer_;
// The program counter, which points into the buffer above and moves forward.
byte* pc_;
private:
Isolate* isolate_;
int jit_cookie_;
bool emit_debug_code_;
bool predictable_code_size_;
};
// Avoids using instructions that vary in size in unpredictable ways between the
// snapshot and the running VM.
class PredictableCodeSizeScope {
public:
PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
~PredictableCodeSizeScope();
private:
AssemblerBase* assembler_;
int expected_size_;
int start_offset_;
bool old_value_;
};
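PredictableCodeSizeScope is a plain RAII check: it switches the assembler into predictable-code-size mode on entry and, on exit, verifies that exactly the promised number of bytes were emitted, which is what keeps snapshot code and runtime code in lockstep. A toy standalone analogue of the pattern (ToyAssembler and SizeCheckScope are illustrative stand-ins, not the V8 classes):

#include <cassert>
#include <cstdint>
#include <vector>

struct ToyAssembler {
  std::vector<uint8_t> buffer;
  bool predictable = false;
  int pc_offset() const { return static_cast<int>(buffer.size()); }
  void emit(uint8_t byte) { buffer.push_back(byte); }
};

class SizeCheckScope {
 public:
  SizeCheckScope(ToyAssembler* assembler, int expected_size)
      : assembler_(assembler),
        expected_size_(expected_size),
        start_offset_(assembler->pc_offset()),
        old_value_(assembler->predictable) {
    assembler_->predictable = true;
  }
  ~SizeCheckScope() {
    // Fails loudly if the emitted sequence is not the promised length.
    assert(assembler_->pc_offset() - start_offset_ == expected_size_);
    assembler_->predictable = old_value_;
  }

 private:
  ToyAssembler* assembler_;
  int expected_size_;
  int start_offset_;
  bool old_value_;
};

// Usage: promise three bytes, emit three bytes.
void EmitFixedSequence(ToyAssembler* masm) {
  SizeCheckScope scope(masm, 3);
  masm->emit(0x90);
  masm->emit(0x90);
  masm->emit(0xC3);
}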
@ -249,12 +211,6 @@ class RelocInfo BASE_EMBEDDED {
// Pseudo-types
NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
NONE, // never recorded
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = CONST_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
@ -269,15 +225,6 @@ class RelocInfo BASE_EMBEDDED {
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
}
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE &&
mode <= LAST_REAL_RELOC_MODE;
}
static inline bool IsPseudoRelocMode(Mode mode) {
ASSERT(!IsRealRelocMode(mode));
return mode >= FIRST_PSEUDO_RELOC_MODE &&
mode <= LAST_PSEUDO_RELOC_MODE;
}
static inline bool IsConstructCall(Mode mode) {
return mode == CONSTRUCT_CALL;
}
@ -315,9 +262,6 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
}
static inline bool IsCodeAgeSequence(Mode mode) {
return mode == CODE_AGE_SEQUENCE;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@ -350,8 +294,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
INLINE(Code* code_age_stub());
INLINE(void set_code_age_stub(Code* stub));
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
@ -544,7 +487,6 @@ class RelocIterator: public Malloced {
byte* pos_;
byte* end_;
byte* code_age_sequence_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@ -604,8 +546,6 @@ class ExternalReference BASE_EMBEDDED {
};
static void SetUp();
static void InitializeMathExpData();
static void TearDownMathExpData();
typedef void* ExternalReferenceRedirector(void* original, Type type);
@ -655,16 +595,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_date_field_function(Isolate* isolate);
static ExternalReference date_cache_stamp(Isolate* isolate);
static ExternalReference get_make_code_young_function(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
// Log support.
static ExternalReference log_enter_external_function(Isolate* isolate);
static ExternalReference log_leave_external_function(Isolate* isolate);
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
@ -731,9 +665,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
static ExternalReference math_exp_constants(int constant_index);
static ExternalReference math_exp_log_table();
static ExternalReference page_flags(Page* page);
Address address() const {return reinterpret_cast<Address>(address_);}

20
deps/v8/src/ast.cc

@ -103,7 +103,6 @@ VariableProxy::VariableProxy(Isolate* isolate,
void VariableProxy::BindTo(Variable* var) {
ASSERT(var_ == NULL); // must be bound only once
ASSERT(var != NULL); // must bind
ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
// Ideally CONST-ness should match. However, this is very hard to achieve
// because we don't know the exact semantics of conflicting (const and
@ -477,7 +476,6 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->SwitchType(this);
if (info.IsUninitialized()) info = TypeInfo::Unknown();
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (info.IsSymbol()) {
@ -606,6 +604,18 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->CompareType(this);
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (info.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY;
} else {
ASSERT(compare_type_ == NONE);
}
}
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
? oracle->GetObjectLiteralStoreMap(this)
@ -1060,14 +1070,16 @@ REGULAR_NODE(CallNew)
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
// We currently do not optimize any modules.
// We currently do not optimize any modules. Note in particular, that module
// instance objects associated with ModuleLiterals are allocated during
// scope resolution, and references to them are embedded into the code.
// That code may hence neither be cached nor re-compiled.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)

38
deps/v8/src/ast.h

@ -75,7 +75,6 @@ namespace internal {
#define STATEMENT_NODE_LIST(V) \
V(Block) \
V(ModuleStatement) \
V(ExpressionStatement) \
V(EmptyStatement) \
V(IfStatement) \
@ -523,7 +522,7 @@ class ModuleDeclaration: public Declaration {
ModuleDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
: Declaration(proxy, MODULE, scope),
: Declaration(proxy, LET, scope),
module_(module) {
}
@ -646,25 +645,6 @@ class ModuleUrl: public Module {
};
class ModuleStatement: public Statement {
public:
DECLARE_NODE_TYPE(ModuleStatement)
VariableProxy* proxy() const { return proxy_; }
Block* body() const { return body_; }
protected:
ModuleStatement(VariableProxy* proxy, Block* body)
: proxy_(proxy),
body_(body) {
}
private:
VariableProxy* proxy_;
Block* body_;
};
class IterationStatement: public BreakableStatement {
public:
// Type testing & conversion.
@ -1437,7 +1417,7 @@ class VariableProxy: public Expression {
void MarkAsTrivial() { is_trivial_ = true; }
void MarkAsLValue() { is_lvalue_ = true; }
// Bind this proxy to the variable var. Interfaces must match.
// Bind this proxy to the variable var.
void BindTo(Variable* var);
protected:
@ -1797,6 +1777,9 @@ class CompareOperation: public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@ -1813,7 +1796,8 @@ class CompareOperation: public Expression {
op_(op),
left_(left),
right_(right),
pos_(pos) {
pos_(pos),
compare_type_(NONE) {
ASSERT(Token::IsCompareOp(op));
}
@ -1822,6 +1806,9 @@ class CompareOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
CompareTypeFeedback compare_type_;
};
@ -2660,11 +2647,6 @@ class AstNodeFactory BASE_EMBEDDED {
STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
VISIT_AND_RETURN(ModuleStatement, stmt)
}
ExpressionStatement* NewExpressionStatement(Expression* expression) {
ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
VISIT_AND_RETURN(ExpressionStatement, stmt)

4
deps/v8/src/atomicops.h

@ -151,9 +151,7 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
} } // namespace v8::internal
// Include our platform specific implementation.
#if defined(THREAD_SANITIZER)
#include "atomicops_internals_tsan.h"
#elif defined(_MSC_VER) && \
#if defined(_MSC_VER) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \

335
deps/v8/src/atomicops_internals_tsan.h

@ -1,335 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_
// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
// after acquire compare-and-swap.
bool has_sse2; // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct
AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace v8 {
namespace internal {
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
#ifdef __cplusplus
extern "C" {
#endif
typedef char __tsan_atomic8;
typedef short __tsan_atomic16; // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64; // NOLINT
typedef enum {
__tsan_memory_order_relaxed = (1 << 0) + 100500,
__tsan_memory_order_consume = (1 << 1) + 100500,
__tsan_memory_order_acquire = (1 << 2) + 100500,
__tsan_memory_order_release = (1 << 3) + 100500,
__tsan_memory_order_acq_rel = (1 << 4) + 100500,
__tsan_memory_order_seq_cst = (1 << 5) + 100500,
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
__tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
__tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
__tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
__tsan_memory_order mo);
void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
__tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
__tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
__tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
__tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
__tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
__tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
__tsan_atomic64 v, __tsan_memory_order mo);
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
__tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
__tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
__tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
__tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
__tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
__tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
__tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
__tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed);
return cmp;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_acquire);
}
inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
return __tsan_atomic32_exchange(ptr, new_value,
__tsan_memory_order_release);
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return increment + __tsan_atomic32_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire);
return cmp;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 cmp = old_value;
__tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release);
return cmp;
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_relaxed);
return cmp;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}
inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_relaxed);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return increment + __tsan_atomic64_fetch_add(ptr, increment,
__tsan_memory_order_acq_rel);
}
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
__tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_acquire);
return cmp;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 cmp = old_value;
__tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
__tsan_memory_order_release);
return cmp;
}
inline void MemoryBarrier() {
__tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}
} // namespace internal
} // namespace v8
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_

23
deps/v8/src/bootstrapper.cc

@ -1084,11 +1084,11 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
LookupResult lookup(isolate);
result->LocalLookup(heap->callee_symbol(), &lookup);
ASSERT(lookup.IsField());
ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex);
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsField());
ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@ -1186,7 +1186,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
LookupResult lookup(isolate);
result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsField());
ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@ -1240,9 +1240,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Initialize the out of memory slot.
native_context()->set_out_of_memory(heap->false_value());
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
native_context()->set_embedder_data(*embedder_data);
// Initialize the data slot.
native_context()->set_data(heap->undefined_value());
{
// Initialize the random seed slot.
@ -1341,7 +1340,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
// If we can't find the function in the cache, we compile a new
// function and insert it into the cache.
if (cache == NULL || !cache->Lookup(name, &function_info)) {
ASSERT(source->IsOneByteRepresentation());
ASSERT(source->IsAsciiRepresentation());
Handle<String> script_name = factory->NewStringFromUtf8(name);
function_info = Compiler::Compile(
source,
@ -1416,11 +1415,6 @@ void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
if (FLAG_harmony_observation) {
INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
observers_deliver_changes);
}
}
#undef INSTALL_NATIVE
@ -1834,11 +1828,6 @@ bool Genesis::InstallExperimentalNatives() {
"native collection.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_observation &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native object-observe.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
}
InstallExperimentalNativeFunctions();

2
deps/v8/src/bootstrapper.h

@ -54,7 +54,7 @@ class SourceCodeCache BASE_EMBEDDED {
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i+=2) {
SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
if (str->IsEqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
SharedFunctionInfo::cast(cache_->get(i + 1)));

592
deps/v8/src/builtins.cc

@ -268,7 +268,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
}
FixedArrayBase* elms;
if (!maybe_elms->To(&elms)) return maybe_elms;
if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
// Fill in the content
switch (array->GetElementsKind()) {
@ -325,18 +325,6 @@ BUILTIN(ArrayCodeGeneric) {
}
static void MoveDoubleElements(FixedDoubleArray* dst,
int dst_index,
FixedDoubleArray* src,
int src_index,
int len) {
if (len == 0) return;
memmove(dst->data_start() + dst_index,
src->data_start() + src_index,
len * kDoubleSize);
}
static void MoveElements(Heap* heap,
AssertNoAllocation* no_gc,
FixedArray* dst,
@ -363,39 +351,24 @@ static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
}
static void FillWithHoles(FixedDoubleArray* dst, int from, int to) {
for (int i = from; i < to; i++) {
dst->set_the_hole(i);
}
}
static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
FixedArrayBase* elms,
static FixedArray* LeftTrimFixedArray(Heap* heap,
FixedArray* elms,
int to_trim) {
Map* map = elms->map();
int entry_size;
if (elms->IsFixedArray()) {
entry_size = kPointerSize;
} else {
entry_size = kDoubleSize;
}
ASSERT(elms->map() != HEAP->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with the chunk
// start, and thus the trick is just not applicable.
ASSERT(!HEAP->lo_space()->Contains(elms));
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
STATIC_ASSERT(FixedArray::kMapOffset == 0);
STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
Object** former_start = HeapObject::RawField(elms, 0);
const int len = elms->length();
if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
elms->IsFixedArray() &&
if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
!heap->new_space()->Contains(elms)) {
// If we are doing a big trim in old space then we zap the space that was
// formerly part of the array so that the GC (aided by the card-based
@ -409,15 +382,14 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
int new_start_index = to_trim * (entry_size / kPointerSize);
former_start[new_start_index] = map;
former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
former_start[to_trim] = heap->fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
int size_delta = to_trim * entry_size;
int size_delta = to_trim * kPointerSize;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
@ -425,8 +397,8 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
elms->address() + size_delta));
return FixedArrayBase::cast(HeapObject::FromAddress(
elms->address() + to_trim * entry_size));
return FixedArray::cast(HeapObject::FromAddress(
elms->address() + to_trim * kPointerSize));
}
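LeftTrimFixedArray drops the first to_trim elements without copying any of the survivors: it writes a filler over the vacated prefix, then re-installs the map and the shrunken length to_trim words further in, so the array object now simply starts later inside its old allocation. A plain sketch of that header shuffle over a fake two-word header (not GC-aware, just the pointer arithmetic):

#include <cassert>
#include <cstdint>
#include <cstring>

// Fake layout: words[0] = map, words[1] = length, words[2..] = elements.
static intptr_t* LeftTrimInPlace(intptr_t* words, intptr_t to_trim) {
  intptr_t map = words[0];
  intptr_t length = words[1];
  assert(to_trim > 0 && to_trim <= length);
  // The real code writes a filler object over the vacated prefix so the heap
  // stays walkable; zeroing stands in for that here.
  std::memset(words, 0, static_cast<size_t>(to_trim) * sizeof(intptr_t));
  words[to_trim] = map;                    // new header, to_trim words in
  words[to_trim + 1] = length - to_trim;
  return words + to_trim;                  // element i is old element i + to_trim
}

// Usage: intptr_t backing[] = {/* map */ 0x1234, 5, 10, 11, 12, 13, 14};
// LeftTrimInPlace(backing, 2) yields a 3-element array holding 12, 13, 14.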
@ -455,14 +427,19 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
if (array->HasFastDoubleElements()) {
ASSERT(elms == heap->empty_fixed_array());
MaybeObject* maybe_transition =
array->TransitionElementsKind(FAST_ELEMENTS);
if (maybe_transition->IsFailure()) return maybe_transition;
return elms;
}
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastObjectElements() ||
!maybe_writable_result->To(&elms)) {
maybe_writable_result->IsFailure()) {
return maybe_writable_result;
}
} else if (map == heap->fixed_double_array_map()) {
if (args == NULL) return elms;
} else {
return NULL;
}
@ -472,28 +449,13 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
int args_length = args->length();
if (first_added_arg >= args_length) return array->elements();
ElementsKind origin_kind = array->map()->elements_kind();
ASSERT(!IsFastObjectElementsKind(origin_kind));
ElementsKind target_kind = origin_kind;
int arg_count = args->length() - first_added_arg;
Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
for (int i = 0; i < arg_count; i++) {
Object* arg = arguments[i];
if (arg->IsHeapObject()) {
if (arg->IsHeapNumber()) {
target_kind = FAST_DOUBLE_ELEMENTS;
} else {
target_kind = FAST_ELEMENTS;
break;
}
}
}
if (target_kind != origin_kind) {
MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
if (maybe_failure->IsFailure()) return maybe_failure;
MaybeObject* maybe_array = array->EnsureCanContainElements(
args,
first_added_arg,
args_length - first_added_arg,
DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_array->IsFailure()) return maybe_array;
return array->elements();
}
return elms;
}
@ -537,24 +499,16 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
BUILTIN(ArrayPush) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms_obj =
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
if (maybe_elms_obj == NULL) {
return CallJsBuiltin(isolate, "ArrayPush", args);
}
if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
if (FLAG_harmony_observation &&
JSObject::cast(receiver)->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayPush", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
JSArray* array = JSArray::cast(receiver);
ElementsKind kind = array->GetElementsKind();
if (IsFastSmiOrObjectElementsKind(kind)) {
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@ -570,16 +524,15 @@ BUILTIN(ArrayPush) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
FixedArray* new_elms;
MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->To(&new_elms)) return maybe_obj;
Object* obj;
{ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure = accessor->CopyElements(
NULL, 0, new_elms, kind, 0,
ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
}
@ -598,139 +551,75 @@ BUILTIN(ArrayPush) {
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
} else {
int len = Smi::cast(array->length())->value();
int elms_len = elms_obj->length();
int to_add = args.length() - 1;
if (to_add == 0) {
return Smi::FromInt(len);
}
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT(to_add <= (Smi::kMaxValue - len));
int new_length = len + to_add;
FixedDoubleArray* new_elms;
if (new_length > elms_len) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
MaybeObject* maybe_obj =
heap->AllocateUninitializedFixedDoubleArray(capacity);
if (!maybe_obj->To(&new_elms)) return maybe_obj;
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure = accessor->CopyElements(
NULL, 0, new_elms, kind, 0,
ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
} else {
// to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
// empty_fixed_array.
new_elms = FixedDoubleArray::cast(elms_obj);
}
// Add the provided values.
AssertNoAllocation no_gc;
int index;
for (index = 0; index < to_add; index++) {
Object* arg = args[index + 1];
new_elms->set(index + len, arg->Number());
}
if (new_elms != array->elements()) {
array->set_elements(new_elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
return Smi::FromInt(new_length);
}
}
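Every grow path in these builtins sizes the new backing store as new_length + (new_length >> 1) + 16, i.e. roughly 1.5x growth plus a fixed slack so small arrays do not reallocate on every push. Worked through for a few lengths (a standalone check, not V8 code):

#include <cstdio>
#include <initializer_list>

static int NewCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;  // ~1.5x + 16 slack
}

int main() {
  // Prints: 1 -> 17, 10 -> 31, 100 -> 166, 1000 -> 1516.
  for (int n : {1, 10, 100, 1000}) {
    std::printf("%d -> %d\n", n, NewCapacity(n));
  }
  return 0;
}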
BUILTIN(ArrayPop) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms =
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms->To(&elms_obj)) return maybe_elms;
JSArray* array = JSArray::cast(receiver);
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayPop", args);
if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
ElementsAccessor* accessor = array->GetElementsAccessor();
int new_length = len - 1;
MaybeObject* maybe_result;
if (accessor->HasElement(array, array, new_length, elms_obj)) {
maybe_result = accessor->Get(array, array, new_length, elms_obj);
} else {
maybe_result = array->GetPrototype()->GetElement(len - 1);
// Get top element
MaybeObject* top = elms->get(len - 1);
// Set the length.
array->set_length(Smi::FromInt(len - 1));
if (!top->IsTheHole()) {
// Delete the top element.
elms->set_the_hole(len - 1);
return top;
}
if (maybe_result->IsFailure()) return maybe_result;
MaybeObject* maybe_failure =
accessor->SetLength(array, Smi::FromInt(new_length));
if (maybe_failure->IsFailure()) return maybe_failure;
return maybe_result;
top = array->GetPrototype()->GetElement(len - 1);
return top;
}
BUILTIN(ArrayShift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms_obj =
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayShift", args);
if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
// Get first element
ElementsAccessor* accessor = array->GetElementsAccessor();
Object* first;
MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
if (!maybe_first->To(&first)) return maybe_first;
Object* first = elms->get(0);
if (first->IsTheHole()) {
first = heap->undefined_value();
}
if (!heap->lo_space()->Contains(elms_obj)) {
array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
if (!heap->lo_space()->Contains(elms)) {
array->set_elements(LeftTrimFixedArray(heap, elms, 1));
} else {
// Shift the elements.
if (elms_obj->IsFixedArray()) {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
elms->set(len - 1, heap->the_hole_value());
} else {
FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
MoveDoubleElements(elms, 0, elms, 1, len - 1);
elms->set_the_hole(len - 1);
}
}
// Set the length.
@ -743,25 +632,19 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms_obj =
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastSmiOrObjectElements()) {
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@ -778,18 +661,14 @@ BUILTIN(ArrayUnshift) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
FixedArray* new_elms;
MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_elms->To(&new_elms)) return maybe_elms;
Object* obj;
{ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure = accessor->CopyElements(
NULL, 0, new_elms, kind, to_add,
ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
@ -813,20 +692,16 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
FixedArrayBase* elms;
FixedArray* elms;
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
if (array->HasFastElements()) {
elms = array->elements();
} else {
if (!array->HasFastSmiOrObjectElements() ||
!IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
elms = FixedArray::cast(array->elements());
len = Smi::cast(array->length())->value();
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
@ -835,19 +710,15 @@ BUILTIN(ArraySlice) {
isolate->context()->native_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject() &&
JSObject::cast(receiver)->map() == arguments_map;
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastSmiOrObjectElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
JSObject* object = JSObject::cast(receiver);
if (object->HasFastElements()) {
elms = object->elements();
} else {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
elms = FixedArray::cast(JSObject::cast(receiver)->elements());
Object* len_obj = JSObject::cast(receiver)
->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
if (!len_obj->IsSmi()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -855,27 +726,12 @@ BUILTIN(ArraySlice) {
if (len > elms->length()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
JSObject* object = JSObject::cast(receiver);
ElementsKind kind = object->GetElementsKind();
if (IsHoleyElementsKind(kind)) {
bool packed = true;
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
for (int i = 0; i < len; i++) {
if (!accessor->HasElement(object, object, i, elms)) {
packed = false;
break;
}
}
if (packed) {
kind = GetPackedElementsKind(kind);
} else if (!receiver->IsJSArray()) {
if (elms->get(i) == heap->the_hole_value()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
}
ASSERT(len >= 0);
int n_arguments = args.length() - 1;
@ -888,12 +744,6 @@ BUILTIN(ArraySlice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
} else if (arg1->IsHeapNumber()) {
double start = HeapNumber::cast(arg1)->value();
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -901,12 +751,6 @@ BUILTIN(ArraySlice) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relative_end = Smi::cast(arg2)->value();
} else if (arg2->IsHeapNumber()) {
double end = HeapNumber::cast(arg2)->value();
if (end < kMinInt || end > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
relative_end = static_cast<int>(end);
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@ -921,24 +765,21 @@ BUILTIN(ArraySlice) {
int final = (relative_end < 0) ? Max(len + relative_end, 0)
: Min(relative_end, len);
ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
// Calculate the length of result array.
int result_len = Max(final - k, 0);
JSArray* result_array;
MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
result_len,
result_len);
AssertNoAllocation no_gc;
if (result_len == 0) return maybe_array;
JSArray* result_array;
if (!maybe_array->To(&result_array)) return maybe_array;
ElementsAccessor* accessor = object->GetElementsAccessor();
MaybeObject* maybe_failure =
accessor->CopyElements(NULL, k, result_array->elements(),
kind, 0, result_len, elms);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
CopyObjectToObjectElements(elms, elements_kind, k,
FixedArray::cast(result_array->elements()),
elements_kind, 0, result_len);
return result_array;
}
@ -947,22 +788,19 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
FixedArrayBase* elms_obj;
MaybeObject* maybe_elms =
Object* elms_obj;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
if (maybe_elms == NULL) {
if (maybe_elms_obj == NULL)
return CallJsBuiltin(isolate, "ArraySplice", args);
if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
if (!maybe_elms->To(&elms_obj)) return maybe_elms;
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
if (FLAG_harmony_observation && array->map()->is_observed()) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
@ -973,12 +811,6 @@ BUILTIN(ArraySplice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
} else if (arg1->IsHeapNumber()) {
double start = HeapNumber::cast(arg1)->value();
if (start < kMinInt || start > kMaxInt) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
@ -1008,74 +840,43 @@ BUILTIN(ArraySplice) {
actual_delete_count = Min(Max(value, 0), len - actual_start);
}
ElementsKind elements_kind = array->GetElementsKind();
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
int new_length = len - actual_delete_count + item_count;
// For double mode we do not support changing the length.
if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
if (new_length == 0) {
MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
elms_obj, elements_kind, actual_delete_count);
if (maybe_array->IsFailure()) return maybe_array;
array->set_elements(heap->empty_fixed_array());
array->set_length(Smi::FromInt(0));
return maybe_array;
}
JSArray* result_array = NULL;
ElementsKind elements_kind =
JSObject::cast(receiver)->GetElementsKind();
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
actual_delete_count,
actual_delete_count);
if (!maybe_array->To(&result_array)) return maybe_array;
if (actual_delete_count > 0) {
AssertNoAllocation no_gc;
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure =
accessor->CopyElements(NULL, actual_start, result_array->elements(),
elements_kind, 0, actual_delete_count, elms_obj);
// Cannot fail since the origin and target array are of the same elements
// kind.
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
{
// Fill newly created array.
CopyObjectToObjectElements(elms, elements_kind, actual_start,
FixedArray::cast(result_array->elements()),
elements_kind, 0, actual_delete_count);
}
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
int new_length = len - actual_delete_count + item_count;
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
const bool trim_array = !heap->lo_space()->Contains(elms) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
if (elms_obj->IsFixedDoubleArray()) {
FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
MoveDoubleElements(elms, delta, elms, 0, actual_start);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
{
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
}
elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
elms = LeftTrimFixedArray(heap, elms, delta);
elms_changed = true;
} else {
if (elms_obj->IsFixedDoubleArray()) {
FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
MoveDoubleElements(elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
FillWithHoles(elms, new_length, len);
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc,
elms, actual_start + item_count,
@ -1083,9 +884,7 @@ BUILTIN(ArraySplice) {
(len - actual_delete_count - actual_start));
FillWithHoles(heap, elms, new_length, len);
}
}
} else if (item_count > actual_delete_count) {
FixedArray* elms = FixedArray::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
@ -1094,29 +893,28 @@ BUILTIN(ArraySplice) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
FixedArray* new_elms;
MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->To(&new_elms)) return maybe_obj;
AssertNoAllocation no_gc;
Object* obj;
{ MaybeObject* maybe_obj =
heap->AllocateUninitializedFixedArray(capacity);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
FixedArray* new_elms = FixedArray::cast(obj);
ElementsKind kind = array->GetElementsKind();
ElementsAccessor* accessor = array->GetElementsAccessor();
if (actual_start > 0) {
{
// Copy the part before actual_start as is.
MaybeObject* maybe_failure = accessor->CopyElements(
NULL, 0, new_elms, kind, 0, actual_start, elms);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
}
MaybeObject* maybe_failure = accessor->CopyElements(
NULL, actual_start + actual_delete_count, new_elms, kind,
actual_start + item_count,
ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
ASSERT(!maybe_failure->IsFailure());
USE(maybe_failure);
elms_obj = new_elms;
ElementsKind kind = array->GetElementsKind();
CopyObjectToObjectElements(elms, kind, 0,
new_elms, kind, 0, actual_start);
const int to_copy = len - actual_delete_count - actual_start;
CopyObjectToObjectElements(elms, kind,
actual_start + actual_delete_count,
new_elms, kind,
actual_start + item_count, to_copy);
}
FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
elms_changed = true;
} else {
AssertNoAllocation no_gc;
@ -1127,28 +925,16 @@ BUILTIN(ArraySplice) {
}
}
if (IsFastDoubleElementsKind(elements_kind)) {
FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
for (int k = actual_start; k < actual_start + item_count; k++) {
Object* arg = args[3 + k - actual_start];
if (arg->IsSmi()) {
elms->set(k, Smi::cast(arg)->value());
} else {
elms->set(k, HeapNumber::cast(arg)->value());
}
}
} else {
FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
for (int k = actual_start; k < actual_start + item_count; k++) {
elms->set(k, args[3 + k - actual_start], mode);
}
}
if (elms_changed) {
array->set_elements(elms_obj);
array->set_elements(elms);
}
// Set the length.
array->set_length(Smi::FromInt(new_length));
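When more items are inserted than deleted and the existing backing store is too small, the grow branch above allocates a new store with roughly 1.5x headroom plus a constant (new_length + (new_length >> 1) + 16); when items are removed, the shrink branch left-trims only if the store is outside large-object space and the part before the splice point is the cheaper side to move. A minimal stand-alone sketch of those two calculations, with illustrative names and a test harness that are not V8 code:

#include <cassert>

// Mirrors `new_length + (new_length >> 1) + 16` from the grow branch.
static int NewCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;
}

// Mirrors the trim heuristic: left-trim only when the prefix that has to
// move is shorter than the suffix that would otherwise be shifted left.
static bool ShouldLeftTrim(int actual_start, int item_count,
                           int len, int actual_delete_count) {
  return (actual_start + item_count) <
         (len - actual_delete_count - actual_start);
}

int main() {
  assert(NewCapacity(10) == 31);    // 10 + 5 + 16
  assert(NewCapacity(100) == 166);  // 100 + 50 + 16
  // splice(2, 5) on a 20-element array: the 2-element prefix is cheaper to
  // move than the 13-element suffix, so the left-trim path is taken.
  assert(ShouldLeftTrim(2, 0, 20, 5));
  return 0;
}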
@ -1170,15 +956,14 @@ BUILTIN(ArrayConcat) {
int n_arguments = args.length();
int result_len = 0;
ElementsKind elements_kind = GetInitialFastElementsKind();
bool has_double = false;
bool is_holey = false;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() ||
!JSArray::cast(arg)->HasFastElements() ||
!JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
int len = Smi::cast(JSArray::cast(arg)->length())->value();
// We shouldn't overflow when adding another len.
@ -1188,51 +973,47 @@ BUILTIN(ArrayConcat) {
result_len += len;
ASSERT(result_len >= 0);
if (result_len > FixedDoubleArray::kMaxLength) {
if (result_len > FixedArray::kMaxLength) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
has_double = has_double || IsFastDoubleElementsKind(arg_kind);
is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
elements_kind = arg_kind;
if (!JSArray::cast(arg)->HasFastSmiElements()) {
if (IsFastSmiElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
elements_kind = FAST_HOLEY_ELEMENTS;
} else {
elements_kind = FAST_ELEMENTS;
}
}
}
if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
if (JSArray::cast(arg)->HasFastHoleyElements()) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
}
// If a double array is concatenated into a fast elements array, the fast
// elements array needs to be initialized to contain proper holes, since
// boxing doubles may cause incremental marking.
ArrayStorageAllocationMode mode =
has_double && IsFastObjectElementsKind(elements_kind)
? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
JSArray* result_array;
// Allocate result.
JSArray* result_array;
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
result_len,
result_len,
mode);
result_len);
if (!maybe_array->To(&result_array)) return maybe_array;
if (result_len == 0) return result_array;
int j = 0;
FixedArrayBase* storage = result_array->elements();
// Copy data.
int start_pos = 0;
FixedArray* result_elms(FixedArray::cast(result_array->elements()));
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
if (len > 0) {
ElementsAccessor* accessor = array->GetElementsAccessor();
MaybeObject* maybe_failure =
accessor->CopyElements(array, 0, storage, elements_kind, j, len);
if (maybe_failure->IsFailure()) return maybe_failure;
j += len;
FixedArray* elms = FixedArray::cast(array->elements());
CopyObjectToObjectElements(elms, elements_kind, 0,
result_elms, elements_kind,
start_pos, len);
start_pos += len;
}
}
ASSERT(j == result_len);
ASSERT(start_pos == result_len);
return result_array;
}
@ -1252,28 +1033,12 @@ BUILTIN(StrictModePoisonPill) {
//
// Searches the hidden prototype chain of the given object for the first
// object that is an instance of the given type. If no such object can
// be found then Heap::null_value() is returned.
static inline Object* FindHidden(Heap* heap,
Object* object,
FunctionTemplateInfo* type) {
if (object->IsInstanceOf(type)) return object;
Object* proto = object->GetPrototype();
if (proto->IsJSObject() &&
JSObject::cast(proto)->map()->is_hidden_prototype()) {
return FindHidden(heap, proto, type);
}
return heap->null_value();
}
// Returns the holder JSObject if the function can legally be called
// with this receiver. Returns Heap::null_value() if the call is
// illegal. Any arguments that don't fit the expected type are
// overwritten with undefined. Note that holder and the arguments are
// implicitly rewritten with the first object in the hidden prototype
// chain that actually has the expected type.
// overwritten with undefined. Arguments that do fit the expected
// type are overwritten with the object in the prototype chain that
// actually has that type.
static inline Object* TypeCheck(Heap* heap,
int argc,
Object** argv,
@ -1286,10 +1051,15 @@ static inline Object* TypeCheck(Heap* heap,
SignatureInfo* sig = SignatureInfo::cast(sig_obj);
// If necessary, check the receiver
Object* recv_type = sig->receiver();
Object* holder = recv;
if (!recv_type->IsUndefined()) {
holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
if (holder == heap->null_value()) return heap->null_value();
for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
break;
}
}
if (holder == heap->null_value()) return holder;
}
Object* args_obj = sig->args();
// If there is no argument signature, we're done.
@ -1302,9 +1072,13 @@ static inline Object* TypeCheck(Heap* heap,
if (argtype->IsUndefined()) continue;
Object** arg = &argv[-1 - i];
Object* current = *arg;
current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
if (current == heap->null_value()) current = heap->undefined_value();
for (; current != heap->null_value(); current = current->GetPrototype()) {
if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
*arg = current;
break;
}
}
if (current == heap->null_value()) *arg = heap->undefined_value();
}
return holder;
}
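Both variants of the receiver and argument checks above — the FindHidden helper and the inline loops — walk an object's prototype chain until they find an object that is an instance of the expected FunctionTemplateInfo, and fall back to the null/undefined sentinels otherwise. A minimal sketch of that walk, assuming simplified stand-in types (Object, Type and FindHolder below are placeholders, not the V8 classes):

#include <cstdlib>

struct Type {};                     // stand-in for FunctionTemplateInfo

struct Object {
  Object* prototype = nullptr;      // stand-in for GetPrototype()
  const Type* type = nullptr;
  bool IsInstanceOf(const Type* t) const { return type == t; }
};

// Walk the prototype chain; return the first object of the expected type,
// or nullptr (the analogue of heap->null_value()) if none is found.
static Object* FindHolder(Object* start, const Type* expected) {
  for (Object* cur = start; cur != nullptr; cur = cur->prototype) {
    if (cur->IsInstanceOf(expected)) return cur;
  }
  return nullptr;
}

int main() {
  Type t;
  Object proto;
  proto.type = &t;
  Object recv;
  recv.prototype = &proto;          // the receiver itself is not of type t
  return FindHolder(&recv, &t) == &proto ? EXIT_SUCCESS : EXIT_FAILURE;
}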

31
deps/v8/src/builtins.h

@ -38,25 +38,6 @@ enum BuiltinExtraArguments {
};
#define CODE_AGE_LIST_WITH_ARG(V, A) \
V(Quadragenarian, A) \
V(Quinquagenarian, A) \
V(Sexagenarian, A) \
V(Septuagenarian, A) \
V(Octogenarian, A)
#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
#define CODE_AGE_LIST(V) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
#define DECLARE_CODE_AGE_BUILTIN(C, V) \
V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
UNINITIALIZED, Code::kNoExtraICState) \
V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
UNINITIALIZED, Code::kNoExtraICState)
// Define list of builtins implemented in C++.
#define BUILTIN_LIST_C(V) \
V(Illegal, NO_EXTRA_ARGUMENTS) \
@ -214,8 +195,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
Code::kNoExtraICState)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@ -398,14 +379,6 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm); \
static void Generate_Make##C##CodeYoungAgainOddMarking( \
MacroAssembler* masm);
CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
static void InitBuiltinFunctionTable();
bool initialized_;

181
deps/v8/src/code-stubs.cc

@ -37,11 +37,11 @@
namespace v8 {
namespace internal {
bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
int index = stubs->FindEntry(GetKey());
bool CodeStub::FindCodeInCache(Code** code_out) {
Heap* heap = Isolate::Current()->heap();
int index = heap->code_stubs()->FindEntry(GetKey());
if (index != UnseededNumberDictionary::kNotFound) {
*code_out = Code::cast(stubs->ValueAt(index));
*code_out = Code::cast(heap->code_stubs()->ValueAt(index));
return true;
}
return false;
@ -93,8 +93,8 @@ Handle<Code> CodeStub::GetCode() {
Heap* heap = isolate->heap();
Code* code;
if (UseSpecialCache()
? FindCodeInSpecialCache(&code, isolate)
: FindCodeInCache(&code, isolate)) {
? FindCodeInSpecialCache(&code)
: FindCodeInCache(&code)) {
ASSERT(IsPregenerated() == code->is_pregenerated());
return Handle<Code>(code);
}
@ -169,122 +169,6 @@ void CodeStub::PrintName(StringStream* stream) {
}
void BinaryOpStub::Generate(MacroAssembler* masm) {
// Explicitly allow generation of nested stubs. It is safe here because
// generation code does not use any raw pointers.
AllowStubCallsScope allow_stub_calls(masm, true);
BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
// The OddballStub handles a number and an oddball, not two oddballs.
operands_type = BinaryOpIC::GENERIC;
}
switch (operands_type) {
case BinaryOpIC::UNINITIALIZED:
GenerateTypeTransition(masm);
break;
case BinaryOpIC::SMI:
GenerateSmiStub(masm);
break;
case BinaryOpIC::INT32:
GenerateInt32Stub(masm);
break;
case BinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
break;
case BinaryOpIC::ODDBALL:
GenerateOddballStub(masm);
break;
case BinaryOpIC::STRING:
GenerateStringStub(masm);
break;
case BinaryOpIC::GENERIC:
GenerateGeneric(masm);
break;
default:
UNREACHABLE();
}
}
#define __ ACCESS_MASM(masm)
void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break;
case Token::MUL:
__ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
break;
case Token::DIV:
__ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
__ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
__ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
break;
case Token::BIT_AND:
__ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
break;
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
break;
case Token::SAR:
__ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
break;
case Token::SHR:
__ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
case Token::SHL:
__ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
}
#undef __
void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
case NO_OVERWRITE: overwrite_name = "Alloc"; break;
case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
stream->Add("BinaryOpStub_%s_%s_%s+%s",
op_name,
overwrite_name,
BinaryOpIC::GetName(left_type_),
BinaryOpIC::GetName(right_type_));
}
void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
GenerateBothStringStub(masm);
return;
}
// Try to add arguments as strings; otherwise, transition to the generic
// BinaryOpIC type.
GenerateAddStrings(masm);
GenerateTypeTransition(masm);
}
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
@ -297,7 +181,8 @@ void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
}
bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
Isolate* isolate = known_map_->GetIsolate();
Factory* factory = isolate->factory();
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
@ -311,12 +196,7 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
flags));
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
#ifdef DEBUG
Token::Value cached_op;
ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
&cached_op);
ASSERT(op_ == cached_op);
#endif
ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
return true;
}
return false;
@ -324,33 +204,7 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) |
LeftStateField::encode(left_) |
RightStateField::encode(right_) |
HandlerStateField::encode(state_);
}
void ICCompareStub::DecodeMinorKey(int minor_key,
CompareIC::State* left_state,
CompareIC::State* right_state,
CompareIC::State* handler_state,
Token::Value* op) {
if (left_state) {
*left_state =
static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
}
if (right_state) {
*right_state =
static_cast<CompareIC::State>(RightStateField::decode(minor_key));
}
if (handler_state) {
*handler_state =
static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
}
if (op) {
*op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
}
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
@ -359,28 +213,27 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::UNINITIALIZED:
GenerateMiss(masm);
break;
case CompareIC::SMI:
case CompareIC::SMIS:
GenerateSmis(masm);
break;
case CompareIC::HEAP_NUMBER:
case CompareIC::HEAP_NUMBERS:
GenerateHeapNumbers(masm);
break;
case CompareIC::STRING:
case CompareIC::STRINGS:
GenerateStrings(masm);
break;
case CompareIC::SYMBOL:
case CompareIC::SYMBOLS:
GenerateSymbols(masm);
break;
case CompareIC::OBJECT:
case CompareIC::OBJECTS:
GenerateObjects(masm);
break;
case CompareIC::KNOWN_OBJECTS:
ASSERT(*known_map_ != NULL);
GenerateKnownObjects(masm);
break;
case CompareIC::GENERIC:
GenerateGeneric(masm);
break;
default:
UNREACHABLE();
}
}

258
deps/v8/src/code-stubs.h

@ -141,7 +141,7 @@ class CodeStub BASE_EMBEDDED {
bool CompilingCallsToThisStubIsGCSafe() {
bool is_pregenerated = IsPregenerated();
Code* code = NULL;
CHECK(!is_pregenerated || FindCodeInCache(&code, Isolate::Current()));
CHECK(!is_pregenerated || FindCodeInCache(&code));
return is_pregenerated;
}
@ -160,7 +160,7 @@ class CodeStub BASE_EMBEDDED {
virtual bool SometimesSetsUpAFrame() { return true; }
// Look up the code in the (possibly custom) cache.
bool FindCodeInCache(Code** code_out, Isolate* isolate);
bool FindCodeInCache(Code** code_out);
protected:
static bool CanUseFPRegisters();
@ -202,9 +202,7 @@ class CodeStub BASE_EMBEDDED {
virtual void AddToSpecialCache(Handle<Code> new_object) { }
// Find code in a specialized cache; work is delegated to the specific stub.
virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
return false;
}
virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
// If a stub uses a special cache, override this.
virtual bool UseSpecialCache() { return false; }
@ -484,132 +482,10 @@ class MathPowStub: public CodeStub {
};
class BinaryOpStub: public CodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
platform_specific_bit_(false),
left_type_(BinaryOpIC::UNINITIALIZED),
right_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) {
Initialize();
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
BinaryOpStub(
int key,
BinaryOpIC::TypeInfo left_type,
BinaryOpIC::TypeInfo right_type,
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
platform_specific_bit_(PlatformSpecificBits::decode(key)),
left_type_(left_type),
right_type_(right_type),
result_type_(result_type) { }
static void decode_types_from_minor_key(int minor_key,
BinaryOpIC::TypeInfo* left_type,
BinaryOpIC::TypeInfo* right_type,
BinaryOpIC::TypeInfo* result_type) {
*left_type =
static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
*right_type =
static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
*result_type =
static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
}
static Token::Value decode_op_from_minor_key(int minor_key) {
return static_cast<Token::Value>(OpBits::decode(minor_key));
}
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
private:
Token::Value op_;
OverwriteMode mode_;
bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo left_type_;
BinaryOpIC::TypeInfo right_type_;
BinaryOpIC::TypeInfo result_type_;
virtual void PrintName(StringStream* stream);
// Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class PlatformSpecificBits: public BitField<bool, 9, 1> {};
class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| PlatformSpecificBits::encode(platform_specific_bit_)
| LeftTypeBits::encode(left_type_)
| RightTypeBits::encode(right_type_)
| ResultTypeBits::encode(result_type_);
}
// Platform-independent implementation.
void Generate(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
// Platform-independent signature, platform-specific implementation.
void Initialize();
void GenerateAddStrings(MacroAssembler* masm);
void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
// Entirely platform-specific methods are defined as static helper
// functions in the <arch>/code-stubs-<arch>.cc files.
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(Max(left_type_, right_type_));
}
virtual void FinishCode(Handle<Code> code) {
code->set_stub_info(MinorKey());
}
friend class CodeGenerator;
};
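MinorKey() above packs the stub's parameters into the 19-bit layout described by the TTTRRRLLLSOOOOOOOMM comment, with one BitField class per field doing the shift-and-mask work. A minimal stand-alone sketch of that packing scheme; the BitField template here is a simplified stand-in for v8's internal one, and the values in main() are arbitrary:

#include <cassert>
#include <cstdint>

// Simplified stand-in for v8's BitField<T, shift, size> template.
template <typename T, int shift, int size>
struct BitField {
  static uint32_t mask() { return ((1u << size) - 1u) << shift; }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & mask()) >> shift);
  }
};

// Field layout from the comment above: TTTRRRLLLSOOOOOOOMM (19 bits).
typedef BitField<int, 0, 2>  ModeBits;        // MM
typedef BitField<int, 2, 7>  OpBits;          // OOOOOOO
typedef BitField<int, 9, 1>  PlatformBits;    // S
typedef BitField<int, 10, 3> LeftTypeBits;    // LLL
typedef BitField<int, 13, 3> RightTypeBits;   // RRR
typedef BitField<int, 16, 3> ResultTypeBits;  // TTT

int main() {
  uint32_t key = ModeBits::encode(1) | OpBits::encode(42) |
                 PlatformBits::encode(1) | LeftTypeBits::encode(3) |
                 RightTypeBits::encode(5) | ResultTypeBits::encode(2);
  assert(OpBits::decode(key) == 42);
  assert(RightTypeBits::decode(key) == 5);
  assert(ResultTypeBits::decode(key) == 2);
  return 0;
}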
class ICCompareStub: public CodeStub {
public:
ICCompareStub(Token::Value op,
CompareIC::State left,
CompareIC::State right,
CompareIC::State handler)
: op_(op),
left_(left),
right_(right),
state_(handler) {
ICCompareStub(Token::Value op, CompareIC::State state)
: op_(op), state_(state) {
ASSERT(Token::IsCompareOp(op));
}
@ -617,24 +493,13 @@ class ICCompareStub: public CodeStub {
void set_known_map(Handle<Map> map) { known_map_ = map; }
static void DecodeMinorKey(int minor_key,
CompareIC::State* left_state,
CompareIC::State* right_state,
CompareIC::State* handler_state,
Token::Value* op);
static CompareIC::State CompareState(int minor_key) {
return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
}
private:
class OpField: public BitField<int, 0, 3> { };
class LeftStateField: public BitField<int, 3, 3> { };
class RightStateField: public BitField<int, 6, 3> { };
class HandlerStateField: public BitField<int, 9, 3> { };
class StateField: public BitField<int, 3, 5> { };
virtual void FinishCode(Handle<Code> code) {
code->set_stub_info(MinorKey());
code->set_compare_state(state_);
code->set_compare_operation(op_ - Token::EQ);
}
virtual CodeStub::Major MajorKey() { return CompareIC; }
@ -649,23 +514,117 @@ class ICCompareStub: public CodeStub {
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
void GenerateKnownObjects(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
virtual void AddToSpecialCache(Handle<Code> new_object);
virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate);
virtual bool FindCodeInSpecialCache(Code** code_out);
virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
Token::Value op_;
CompareIC::State left_;
CompareIC::State right_;
CompareIC::State state_;
Handle<Map> known_map_;
};
// Flags that control the compare stub code generation.
enum CompareFlags {
NO_COMPARE_FLAGS = 0,
NO_SMI_COMPARE_IN_STUB = 1 << 0,
NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
CANT_BOTH_BE_NAN = 1 << 2
};
enum NaNInformation {
kBothCouldBeNaN,
kCantBothBeNaN
};
class CompareStub: public CodeStub {
public:
CompareStub(Condition cc,
bool strict,
CompareFlags flags,
Register lhs,
Register rhs) :
cc_(cc),
strict_(strict),
never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
lhs_(lhs),
rhs_(rhs) { }
CompareStub(Condition cc,
bool strict,
CompareFlags flags) :
cc_(cc),
strict_(strict),
never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
lhs_(no_reg),
rhs_(no_reg) { }
void Generate(MacroAssembler* masm);
private:
Condition cc_;
bool strict_;
// Only used for 'equal' comparisons. Tells the stub that we already know
// that at least one side of the comparison is not NaN. This allows the
// stub to use object identity in the positive case. We ignore it when
// generating the minor key for other comparisons to avoid creating more
// stubs.
bool never_nan_nan_;
// Do generate the number comparison code in the stub. Stubs without number
// comparison code are used when the number comparison has been inlined, and
// the stub will be called if one of the operands is not a number.
bool include_number_compare_;
// Generate the comparison code for two smi operands in the stub.
bool include_smi_compare_;
// Register holding the left hand side of the comparison if the stub gives
// a choice, no_reg otherwise.
Register lhs_;
// Register holding the right hand side of the comparison if the stub gives
// a choice, no_reg otherwise.
Register rhs_;
// Encoding of the minor key in 16 bits.
class StrictField: public BitField<bool, 0, 1> {};
class NeverNanNanField: public BitField<bool, 1, 1> {};
class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
class RegisterField: public BitField<bool, 4, 1> {};
class ConditionField: public BitField<int, 5, 11> {};
Major MajorKey() { return Compare; }
int MinorKey();
virtual int GetCodeKind() { return Code::COMPARE_IC; }
virtual void FinishCode(Handle<Code> code) {
code->set_compare_state(CompareIC::GENERIC);
}
// Branch to the label if the given object isn't a symbol.
void BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch);
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
virtual void PrintName(StringStream* stream);
};
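The two CompareStub constructors above decode the CompareFlags bit set into the three booleans (never_nan_nan_, include_number_compare_, include_smi_compare_) with simple mask tests. A small illustrative translation of those initializer expressions, wrapped in an assert-based harness that is not part of V8:

#include <cassert>

// Flag bits mirrored from the CompareFlags enum above.
enum CompareFlags {
  NO_COMPARE_FLAGS          = 0,
  NO_SMI_COMPARE_IN_STUB    = 1 << 0,
  NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
  CANT_BOTH_BE_NAN          = 1 << 2
};

int main() {
  int flags = NO_SMI_COMPARE_IN_STUB | CANT_BOTH_BE_NAN;
  bool never_nan_nan          = (flags & CANT_BOTH_BE_NAN) != 0;
  bool include_number_compare = (flags & NO_NUMBER_COMPARE_IN_STUB) == 0;
  bool include_smi_compare    = (flags & NO_SMI_COMPARE_IN_STUB) == 0;
  // Omitting the smi comparison and promising no NaN operands leaves the
  // number comparison enabled and the smi comparison disabled.
  assert(never_nan_nan && include_number_compare && !include_smi_compare);
  return 0;
}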
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size,
@ -1094,9 +1053,6 @@ class ToBooleanStub: public CodeStub {
bool IsEmpty() const { return set_.IsEmpty(); }
bool Contains(Type type) const { return set_.Contains(type); }
bool ContainsAnyOf(Types types) const {
return set_.ContainsAnyOf(types.set_);
}
void Add(Type type) { set_.Add(type); }
byte ToByte() const { return set_.ToIntegral(); }
void Print(StringStream* stream) const;
@ -1215,8 +1171,6 @@ class ProfileEntryHookStub : public CodeStub {
// non-NULL hook.
static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
static bool HasEntryHook() { return entry_hook_ != NULL; }
private:
static void EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer);

1
deps/v8/src/codegen.cc

@ -107,7 +107,6 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
if (!code.is_null()) {
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
code->set_prologue_offset(info->prologue_offset());
}
return code;
}

14
deps/v8/src/codegen.h

@ -90,7 +90,6 @@ namespace internal {
typedef double (*UnaryMathFunction)(double x);
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
UnaryMathFunction CreateExpFunction();
UnaryMathFunction CreateSqrtFunction();
@ -104,19 +103,6 @@ class ElementsTransitionGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};
class SeqStringSetCharGenerator : public AllStatic {
public:
static void Generate(MacroAssembler* masm,
String::Encoding encoding,
Register string,
Register index,
Register value);
private:
DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
};
} } // namespace v8::internal
#endif // V8_CODEGEN_H_

46
deps/v8/src/collection.js

@ -88,25 +88,6 @@ function SetDelete(key) {
}
function SetGetSize() {
if (!IS_SET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.size', this]);
}
return %SetGetSize(this);
}
function SetClear() {
if (!IS_SET(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Set.prototype.clear', this]);
}
// Replace the internal table with a new empty table.
%SetInitialize(this);
}
function MapConstructor() {
if (%_IsConstructCall()) {
%MapInitialize(this);
@ -164,25 +145,6 @@ function MapDelete(key) {
}
function MapGetSize() {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.size', this]);
}
return %MapGetSize(this);
}
function MapClear() {
if (!IS_MAP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['Map.prototype.clear', this]);
}
// Replace the internal table with a new empty table.
%MapInitialize(this);
}
function WeakMapConstructor() {
if (%_IsConstructCall()) {
%WeakMapInitialize(this);
@ -253,22 +215,18 @@ function WeakMapDelete(key) {
%SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
// Set up the non-enumerable functions on the Set prototype object.
InstallGetter($Set.prototype, "size", SetGetSize);
InstallFunctions($Set.prototype, DONT_ENUM, $Array(
"add", SetAdd,
"has", SetHas,
"delete", SetDelete,
"clear", SetClear
"delete", SetDelete
));
// Set up the non-enumerable functions on the Map prototype object.
InstallGetter($Map.prototype, "size", MapGetSize);
InstallFunctions($Map.prototype, DONT_ENUM, $Array(
"get", MapGet,
"set", MapSet,
"has", MapHas,
"delete", MapDelete,
"clear", MapClear
"delete", MapDelete
));
// Set up the WeakMap constructor function.

2
deps/v8/src/compilation-cache.cc

@ -98,7 +98,7 @@ void CompilationSubCache::Age() {
void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
Object* undefined = isolate()->heap()->undefined_value();
Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
for (int i = 0; i < generations_; i++) {
if (tables_[i] != undefined) {
reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);

103
deps/v8/src/compiler.cc

@ -52,53 +52,57 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
: isolate_(script->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE)),
function_(NULL),
scope_(NULL),
global_scope_(NULL),
script_(script),
osr_ast_id_(BailoutId::None()) {
Initialize(zone);
extension_(NULL),
pre_parse_data_(NULL),
osr_ast_id_(BailoutId::None()),
zone_(zone),
deferred_handles_(NULL) {
Initialize(BASE);
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
: isolate_(shared_info->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
global_scope_(NULL),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()) {
Initialize(zone);
extension_(NULL),
pre_parse_data_(NULL),
osr_ast_id_(BailoutId::None()),
zone_(zone),
deferred_handles_(NULL) {
Initialize(BASE);
}
CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
: flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
: isolate_(closure->GetIsolate()),
flags_(LanguageModeField::encode(CLASSIC_MODE) |
IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
global_scope_(NULL),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
extension_(NULL),
pre_parse_data_(NULL),
context_(closure->context()),
osr_ast_id_(BailoutId::None()) {
Initialize(zone);
}
void CompilationInfo::Initialize(Zone* zone) {
isolate_ = script_->GetIsolate();
function_ = NULL;
scope_ = NULL;
global_scope_ = NULL;
extension_ = NULL;
pre_parse_data_ = NULL;
zone_ = zone;
deferred_handles_ = NULL;
prologue_offset_ = kPrologueOffsetNotSet;
mode_ = V8::UseCrankshaft() ? BASE : NONOPT;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
if (!shared_info_.is_null()) {
ASSERT(language_mode() == CLASSIC_MODE);
SetLanguageMode(shared_info_->language_mode());
}
set_bailout_reason("unknown");
osr_ast_id_(BailoutId::None()),
zone_(zone),
deferred_handles_(NULL) {
Initialize(BASE);
}
@ -190,11 +194,6 @@ void OptimizingCompiler::RecordOptimizationStats() {
code_size,
compilation_time);
}
if (FLAG_hydrogen_stats) {
HStatistics::Instance()->IncrementSubtotals(time_taken_to_create_graph_,
time_taken_to_optimize_,
time_taken_to_codegen_);
}
}
@ -285,6 +284,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// doesn't have deoptimization support. Alternatively, we may decide to
// run the full code generator to get a baseline for the compile-time
// performance of the hydrogen-based compiler.
Timer t(this, &time_taken_to_create_graph_);
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
HPhase phase(HPhase::kFullCodeGen);
@ -324,8 +324,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
oracle_ = new(info()->zone()) TypeFeedbackOracle(
code, native_context, info()->isolate(), info()->zone());
graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
Timer t(this, &time_taken_to_create_graph_);
HPhase phase(HPhase::kTotal);
graph_ = graph_builder_->CreateGraph();
if (info()->isolate()->has_pending_exception()) {
@ -372,7 +371,6 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
{ // Scope for timer.
Timer timer(this, &time_taken_to_codegen_);
ASSERT(chunk_ != NULL);
ASSERT(graph_ != NULL);
@ -382,7 +380,6 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
return AbortOptimization();
}
info()->SetCode(optimized_code);
}
RecordOptimizationStats();
return SetLastStatus(SUCCEEDED);
}
@ -393,8 +390,6 @@ static bool GenerateCode(CompilationInfo* info) {
!info->IsCompilingForDebugging() &&
info->IsOptimizing();
if (is_optimizing) {
Logger::TimerEventScope timer(
info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
return MakeCrankshaftCode(info);
} else {
if (info->IsOptimizing()) {
@ -402,8 +397,6 @@ static bool GenerateCode(CompilationInfo* info) {
// BASE or NONOPT.
info->DisableOptimization();
}
Logger::TimerEventScope timer(
info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
return FullCodeGenerator::MakeCode(info);
}
}
@ -439,9 +432,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
// TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
FixedArray* array = isolate->native_context()->embedder_data();
script->set_context_data(array->get(0));
script->set_context_data((*isolate->native_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
@ -697,7 +688,7 @@ static bool InstallFullCode(CompilationInfo* info) {
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->zone());
shared->set_scope_info(*scope_info);
shared->ReplaceCode(*code);
shared->set_code(*code);
if (!function.is_null()) {
function->ReplaceCode(*code);
ASSERT(!function->IsOptimized());
@ -850,11 +841,6 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
ASSERT(closure->IsMarkedForParallelRecompilation());
Isolate* isolate = closure->GetIsolate();
// Here we prepare compile data for the parallel recompilation thread, but
// this still happens synchronously and interrupts execution.
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Compilation queue, will retry opting on next run.\n");
@ -863,7 +849,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
VMState state(isolate, PARALLEL_COMPILER);
VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
@ -874,10 +860,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
{
CompilationHandleScope handle_scope(*info);
if (!FLAG_manual_parallel_recompilation &&
InstallCodeFromOptimizedCodeMap(*info)) {
return;
}
if (InstallCodeFromOptimizedCodeMap(*info)) return;
if (ParserApi::Parse(*info, kNoParsingFlags)) {
LanguageMode language_mode = info->function()->language_mode();
@ -911,10 +894,6 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
Isolate* isolate = info->isolate();
VMState state(isolate, PARALLEL_COMPILER);
Logger::TimerEventScope timer(
isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code; else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();

29
deps/v8/src/compiler.h

@ -35,8 +35,6 @@
namespace v8 {
namespace internal {
static const int kPrologueOffsetNotSet = -1;
class ScriptDataImpl;
// CompilationInfo encapsulates some information known at compile time. It
@ -188,16 +186,6 @@ class CompilationInfo {
const char* bailout_reason() const { return bailout_reason_; }
void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
int prologue_offset() const {
ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
return prologue_offset_;
}
void set_prologue_offset(int prologue_offset) {
ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
prologue_offset_ = prologue_offset;
}
private:
Isolate* isolate_;
@ -212,7 +200,18 @@ class CompilationInfo {
NONOPT
};
void Initialize(Zone* zone);
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
ASSERT(!script_.is_null());
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
if (!shared_info_.is_null()) {
ASSERT(language_mode() == CLASSIC_MODE);
SetLanguageMode(shared_info_->language_mode());
}
set_bailout_reason("unknown");
}
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
@ -286,8 +285,6 @@ class CompilationInfo {
const char* bailout_reason_;
int prologue_offset_;
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@ -296,8 +293,6 @@ class CompilationInfo {
// Zone on construction and deallocates it on exit.
class CompilationInfoWithZone: public CompilationInfo {
public:
INLINE(void* operator new(size_t size)) { return Malloced::New(size); }
explicit CompilationInfoWithZone(Handle<Script> script)
: CompilationInfo(script, &zone_),
zone_(script->GetIsolate()),

25
deps/v8/src/contexts.cc

@ -55,15 +55,6 @@ JSBuiltinsObject* Context::builtins() {
}
Context* Context::global_context() {
Context* current = this;
while (!current->IsGlobalContext()) {
current = current->previous();
}
return current;
}
Context* Context::native_context() {
// Fast case: the global object for this context has been set. In
// that case, the global object has a direct pointer to the global
@ -192,10 +183,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
case MODULE:
*attributes = READ_ONLY;
*binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
case DYNAMIC:
case DYNAMIC_GLOBAL:
case DYNAMIC_LOCAL:
@ -264,6 +251,8 @@ void Context::AddOptimizedFunction(JSFunction* function) {
}
}
CHECK(function->next_function_link()->IsUndefined());
// Check that the context belongs to the weak native contexts list.
bool found = false;
Object* context = GetHeap()->native_contexts_list();
@ -276,16 +265,6 @@ void Context::AddOptimizedFunction(JSFunction* function) {
}
CHECK(found);
#endif
// If the function link field is already used then the function was
// enqueued as a code flushing candidate and we remove it now.
if (!function->next_function_link()->IsUndefined()) {
CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
flusher->EvictCandidate(function);
}
ASSERT(function->next_function_link()->IsUndefined());
function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
set(OPTIMIZED_FUNCTIONS_LIST, function);
}

24
deps/v8/src/contexts.h

@ -152,7 +152,7 @@ enum BindingFlags {
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
@ -161,9 +161,7 @@ enum BindingFlags {
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@ -283,16 +281,14 @@ class Context: public FixedArray {
OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
EMBEDDER_DATA_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
PROXY_ENUMERATE_INDEX,
OBSERVERS_NOTIFY_CHANGE_INDEX,
OBSERVERS_DELIVER_CHANGES_INDEX,
PROXY_ENUMERATE,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
@ -345,19 +341,12 @@ class Context: public FixedArray {
// The builtins object.
JSBuiltinsObject* builtins();
// Get the innermost global context by traversing the context chain.
Context* global_context();
// Compute the native context by traversing the context chain.
Context* native_context();
// Predicates for context types. IsNativeContext is also defined on Object
// Predicates for context types. IsNativeContext is defined on Object
// because we frequently have to know if arbitrary objects are native
// contexts.
bool IsNativeContext() {
Map* map = this->map();
return map == map->GetHeap()->native_context_map();
}
bool IsFunctionContext() {
Map* map = this->map();
return map == map->GetHeap()->function_context_map();
@ -457,9 +446,6 @@ class Context: public FixedArray {
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
static bool IsBootstrappingOrGlobalObject(Object* object);
#endif
STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
} } // namespace v8::internal

7
deps/v8/src/counters.cc

@ -77,7 +77,7 @@ void* Histogram::CreateHistogram() const {
// Start the timer.
void HistogramTimer::Start() {
if (histogram_.Enabled() || FLAG_log_internal_timer_events) {
if (histogram_.Enabled()) {
stop_time_ = 0;
start_time_ = OS::Ticks();
}
@ -87,14 +87,11 @@ void HistogramTimer::Start() {
void HistogramTimer::Stop() {
if (histogram_.Enabled()) {
stop_time_ = OS::Ticks();
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
histogram_.AddSample(milliseconds);
}
if (FLAG_log_internal_timer_events) {
LOG(Isolate::Current(),
TimerEvent(histogram_.name_, start_time_, OS::Ticks()));
}
}
} } // namespace v8::internal

347
deps/v8/src/d8.cc

@ -67,62 +67,6 @@
namespace v8 {
static Handle<Value> Throw(const char* message) {
return ThrowException(String::New(message));
}
// TODO(rossberg): should replace these by proper uses of HasInstance,
// once we figure out a good way to make the templates global.
const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
#define FOR_EACH_SYMBOL(V) \
V(ArrayBuffer, "ArrayBuffer") \
V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \
V(ArrayMarkerPropName, kArrayMarkerPropName) \
V(buffer, "buffer") \
V(byteLength, "byteLength") \
V(byteOffset, "byteOffset") \
V(BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT") \
V(length, "length")
class Symbols {
public:
explicit Symbols(Isolate* isolate) : isolate_(isolate) {
HandleScope scope;
#define INIT_SYMBOL(name, value) \
name##_ = Persistent<String>::New(String::NewSymbol(value));
FOR_EACH_SYMBOL(INIT_SYMBOL)
#undef INIT_SYMBOL
isolate->SetData(this);
}
~Symbols() {
#define DISPOSE_SYMBOL(name, value) name##_.Dispose();
FOR_EACH_SYMBOL(DISPOSE_SYMBOL)
#undef DISPOSE_SYMBOL
isolate_->SetData(NULL); // Not really needed, just to be sure...
}
#define DEFINE_SYMBOL_GETTER(name, value) \
static Persistent<String> name(Isolate* isolate) { \
return reinterpret_cast<Symbols*>(isolate->GetData())->name##_; \
}
FOR_EACH_SYMBOL(DEFINE_SYMBOL_GETTER)
#undef DEFINE_SYMBOL_GETTER
private:
Isolate* isolate_;
#define DEFINE_MEMBER(name, value) Persistent<String> name##_;
FOR_EACH_SYMBOL(DEFINE_MEMBER)
#undef DEFINE_MEMBER
};
LineEditor *LineEditor::first_ = NULL;
@ -148,17 +92,17 @@ LineEditor* LineEditor::Get() {
class DumbLineEditor: public LineEditor {
public:
explicit DumbLineEditor(Isolate* isolate)
: LineEditor(LineEditor::DUMB, "dumb"), isolate_(isolate) { }
DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
virtual Handle<String> Prompt(const char* prompt);
private:
Isolate* isolate_;
};
static DumbLineEditor dumb_line_editor;
Handle<String> DumbLineEditor::Prompt(const char* prompt) {
printf("%s", prompt);
return Shell::ReadFromStdin(isolate_);
return Shell::ReadFromStdin();
}
@ -171,6 +115,7 @@ i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
LineEditor* Shell::console = NULL;
Persistent<Context> Shell::evaluation_context_;
ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";
@ -287,17 +232,17 @@ Handle<Value> Shell::DisableProfiler(const Arguments& args) {
Handle<Value> Shell::Read(const Arguments& args) {
String::Utf8Value file(args[0]);
if (*file == NULL) {
return Throw("Error loading file");
return ThrowException(String::New("Error loading file"));
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
Handle<String> source = ReadFile(*file);
if (source.IsEmpty()) {
return Throw("Error loading file");
return ThrowException(String::New("Error loading file"));
}
return source;
}
Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
Handle<String> Shell::ReadFromStdin() {
static const int kBufferSize = 256;
char buffer[kBufferSize];
Handle<String> accumulator = String::New("");
@ -308,7 +253,7 @@ Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
// If fgets gets an error, just give up.
char* input = NULL;
{ // Release lock for blocking input.
Unlocker unlock(isolate);
Unlocker unlock(Isolate::GetCurrent());
input = fgets(buffer, kBufferSize, stdin);
}
if (input == NULL) return Handle<String>();
@ -332,14 +277,14 @@ Handle<Value> Shell::Load(const Arguments& args) {
HandleScope handle_scope;
String::Utf8Value file(args[i]);
if (*file == NULL) {
return Throw("Error loading file");
return ThrowException(String::New("Error loading file"));
}
Handle<String> source = ReadFile(args.GetIsolate(), *file);
Handle<String> source = ReadFile(*file);
if (source.IsEmpty()) {
return Throw("Error loading file");
return ThrowException(String::New("Error loading file"));
}
if (!ExecuteString(source, String::New(*file), false, true)) {
return Throw("Error executing file");
return ThrowException(String::New("Error executing file"));
}
}
return Undefined();
@ -369,7 +314,7 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
if (try_catch->HasCaught()) return 0;
if (raw_value < 0) {
Throw("Array length must not be negative.");
ThrowException(String::New("Array length must not be negative."));
return 0;
}
@ -378,27 +323,33 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
#endif // V8_SHARED
if (raw_value > static_cast<int32_t>(kMaxLength)) {
Throw("Array length exceeds maximum length.");
ThrowException(
String::New("Array length exceeds maximum length."));
}
return raw_value;
}
Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
Handle<Object> buffer,
// TODO(rossberg): should replace these by proper uses of HasInstance,
// once we figure out a good way to make the templates global.
const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
int32_t length) {
static const int32_t kMaxSize = 0x7fffffff;
// Make sure the total size fits into a (signed) int.
if (length < 0 || length > kMaxSize) {
return Throw("ArrayBuffer exceeds maximum size (2G)");
return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)"));
}
uint8_t* data = new uint8_t[length];
if (data == NULL) {
return Throw("Memory allocation failed");
return ThrowException(String::New("Memory allocation failed"));
}
memset(data, 0, length);
buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
@ -406,7 +357,7 @@ Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
buffer->SetIndexedPropertiesToExternalArrayData(
data, v8::kExternalByteArray, length);
buffer->Set(Symbols::byteLength(isolate), Int32::New(length), ReadOnly);
buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly);
return buffer;
}
@ -422,18 +373,18 @@ Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
}
if (args.Length() == 0) {
return Throw("ArrayBuffer constructor must have one argument");
return ThrowException(
String::New("ArrayBuffer constructor must have one argument"));
}
TryCatch try_catch;
int32_t length = convertToUint(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
return CreateExternalArrayBuffer(args.GetIsolate(), args.This(), length);
return CreateExternalArrayBuffer(args.This(), length);
}
Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
Handle<Object> array,
Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,
@ -449,13 +400,12 @@ Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
array->SetIndexedPropertiesToExternalArrayData(
static_cast<uint8_t*>(data) + byteOffset, type, length);
array->SetHiddenValue(Symbols::ArrayMarkerPropName(isolate),
Int32::New(type));
array->Set(Symbols::byteLength(isolate), Int32::New(byteLength), ReadOnly);
array->Set(Symbols::byteOffset(isolate), Int32::New(byteOffset), ReadOnly);
array->Set(Symbols::length(isolate), Int32::New(length), ReadOnly);
array->Set(Symbols::BYTES_PER_ELEMENT(isolate), Int32::New(element_size));
array->Set(Symbols::buffer(isolate), buffer, ReadOnly);
array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type));
array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
array->Set(String::New("length"), Int32::New(length), ReadOnly);
array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
array->Set(String::New("buffer"), buffer, ReadOnly);
return array;
}
@ -464,7 +414,6 @@ Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
Handle<Value> Shell::CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size) {
Isolate* isolate = args.GetIsolate();
if (!args.IsConstructCall()) {
Handle<Value>* rec_args = new Handle<Value>[args.Length()];
for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
@ -490,15 +439,16 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
int32_t byteOffset;
bool init_from_array = false;
if (args.Length() == 0) {
return Throw("Array constructor must have at least one argument");
return ThrowException(
String::New("Array constructor must have at least one argument"));
}
if (args[0]->IsObject() &&
!args[0]->ToObject()->GetHiddenValue(
Symbols::ArrayBufferMarkerPropName(isolate)).IsEmpty()) {
String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
// Construct from ArrayBuffer.
buffer = args[0]->ToObject();
int32_t bufferLength =
convertToUint(buffer->Get(Symbols::byteLength(isolate)), &try_catch);
convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() < 2 || args[1]->IsUndefined()) {
@ -507,10 +457,11 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
byteOffset = convertToUint(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (byteOffset > bufferLength) {
return Throw("byteOffset out of bounds");
return ThrowException(String::New("byteOffset out of bounds"));
}
if (byteOffset % element_size != 0) {
return Throw("byteOffset must be multiple of element size");
return ThrowException(
String::New("byteOffset must be multiple of element size"));
}
}
@ -518,22 +469,23 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
byteLength = bufferLength - byteOffset;
length = byteLength / element_size;
if (byteLength % element_size != 0) {
return Throw("buffer size must be multiple of element size");
return ThrowException(
String::New("buffer size must be multiple of element size"));
}
} else {
length = convertToUint(args[2], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
byteLength = length * element_size;
if (byteOffset + byteLength > bufferLength) {
return Throw("length out of bounds");
return ThrowException(String::New("length out of bounds"));
}
}
} else {
if (args[0]->IsObject() &&
args[0]->ToObject()->Has(Symbols::length(isolate))) {
args[0]->ToObject()->Has(String::New("length"))) {
// Construct from array.
length = convertToUint(
args[0]->ToObject()->Get(Symbols::length(isolate)), &try_catch);
args[0]->ToObject()->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
init_from_array = true;
} else {
@ -545,7 +497,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
byteOffset = 0;
Handle<Object> global = Context::GetCurrent()->Global();
Handle<Value> array_buffer = global->Get(Symbols::ArrayBuffer(isolate));
Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer"));
ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
Handle<Value> buffer_args[] = { Uint32::New(byteLength) };
Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
@ -554,9 +506,8 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
buffer = result->ToObject();
}
Handle<Object> array =
CreateExternalArray(isolate, args.This(), buffer, type, length,
byteLength, byteOffset, element_size);
Handle<Object> array = CreateExternalArray(
args.This(), buffer, type, length, byteLength, byteOffset, element_size);
if (init_from_array) {
Handle<Object> init = args[0]->ToObject();
@ -571,23 +522,25 @@ Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
return Throw("'slice' invoked on non-object receiver");
return ThrowException(
String::New("'slice' invoked on non-object receiver"));
}
Isolate* isolate = args.GetIsolate();
Local<Object> self = args.This();
Local<Value> marker =
self->GetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate));
self->GetHiddenValue(String::New(kArrayBufferMarkerPropName));
if (marker.IsEmpty()) {
return Throw("'slice' invoked on wrong receiver type");
return ThrowException(
String::New("'slice' invoked on wrong receiver type"));
}
int32_t length =
convertToUint(self->Get(Symbols::byteLength(isolate)), &try_catch);
convertToUint(self->Get(String::New("byteLength")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
return Throw("'slice' must have at least one argument");
return ThrowException(
String::New("'slice' must have at least one argument"));
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
@ -626,31 +579,32 @@ Handle<Value> Shell::ArraySubArray(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
return Throw("'subarray' invoked on non-object receiver");
return ThrowException(
String::New("'subarray' invoked on non-object receiver"));
}
Isolate* isolate = args.GetIsolate();
Local<Object> self = args.This();
Local<Value> marker =
self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
if (marker.IsEmpty()) {
return Throw("'subarray' invoked on wrong receiver type");
return ThrowException(
String::New("'subarray' invoked on wrong receiver type"));
}
Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t length =
convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
convertToUint(self->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
convertToUint(self->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
return Throw("'subarray' must have at least one argument");
return ThrowException(
String::New("'subarray' must have at least one argument"));
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
@ -685,33 +639,35 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
return Throw("'set' invoked on non-object receiver");
return ThrowException(
String::New("'set' invoked on non-object receiver"));
}
Isolate* isolate = args.GetIsolate();
Local<Object> self = args.This();
Local<Value> marker =
self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
if (marker.IsEmpty()) {
return Throw("'set' invoked on wrong receiver type");
return ThrowException(
String::New("'set' invoked on wrong receiver type"));
}
int32_t length =
convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
convertToUint(self->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
return Throw("'set' must have at least one argument");
return ThrowException(
String::New("'set' must have at least one argument"));
}
if (!args[0]->IsObject() ||
!args[0]->ToObject()->Has(Symbols::length(isolate))) {
return Throw("'set' invoked with non-array argument");
!args[0]->ToObject()->Has(String::New("length"))) {
return ThrowException(
String::New("'set' invoked with non-array argument"));
}
Handle<Object> source = args[0]->ToObject();
int32_t source_length =
convertToUint(source->Get(Symbols::length(isolate)), &try_catch);
convertToUint(source->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t offset;
@ -722,32 +678,31 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (offset + source_length > length) {
return Throw("offset or source length out of bounds");
return ThrowException(String::New("offset or source length out of bounds"));
}
int32_t source_element_size;
if (source->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)).IsEmpty()) {
if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) {
source_element_size = 0;
} else {
source_element_size =
convertToUint(source->Get(Symbols::BYTES_PER_ELEMENT(isolate)),
&try_catch);
convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (element_size == source_element_size &&
self->GetConstructor()->StrictEquals(source->GetConstructor())) {
// Use memmove on the array buffers.
Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
source->Get(Symbols::buffer(isolate))->ToObject();
source->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
convertToUint(self->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
convertToUint(source->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
@ -763,10 +718,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
}
} else {
// Need to copy element-wise to make the right conversions.
Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
source->Get(Symbols::buffer(isolate))->ToObject();
source->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (buffer->StrictEquals(source_buffer)) {
@ -774,10 +729,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
// This gets a bit tricky in the case of different element sizes
// (which, of course, is extremely unlikely to ever occur in practice).
int32_t byteOffset =
convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
convertToUint(self->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
convertToUint(source->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
// Copy as much as we can from left to right.
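The fast path in ArraySet above hands the whole copy to memmove because source and destination can be views over the same backing store; as a standalone illustration (the names here are made up, only the memmove semantics matter), the overlap-safe byte copy looks like:

#include <cstddef>
#include <cstdint>
#include <cstring>

static void CopyViewBytes(uint8_t* backing_store,
                          size_t dst_byte_offset,
                          size_t src_byte_offset,
                          size_t byte_count) {
  // memmove is specified to copy as if through a temporary buffer, so
  // overlapping source and destination ranges within the same backing store
  // are handled correctly; memcpy would be undefined behaviour here.
  std::memmove(backing_store + dst_byte_offset,
               backing_store + src_byte_offset,
               byte_count);
}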
@ -823,9 +778,8 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
HandleScope scope;
Isolate* isolate = Isolate::GetCurrent();
int32_t length =
object->ToObject()->Get(Symbols::byteLength(isolate))->Uint32Value();
object->ToObject()->Get(String::New("byteLength"))->Uint32Value();
V8::AdjustAmountOfExternalAllocatedMemory(-length);
delete[] static_cast<uint8_t*>(data);
object.Dispose();
@ -1191,7 +1145,7 @@ Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
}
Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
@ -1211,7 +1165,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
// Bind the handlers for external arrays.
PropertyAttribute attr =
static_cast<PropertyAttribute>(ReadOnly | DontDelete);
global_template->Set(Symbols::ArrayBuffer(isolate),
global_template->Set(String::New("ArrayBuffer"),
CreateArrayBufferTemplate(ArrayBuffer), attr);
global_template->Set(String::New("Int8Array"),
CreateArrayTemplate(Int8Array), attr);
@ -1248,7 +1202,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
}
void Shell::Initialize(Isolate* isolate) {
void Shell::Initialize() {
#ifdef COMPRESS_STARTUP_DATA_BZ2
BZip2Decompressor startup_data_decompressor;
int bz2_result = startup_data_decompressor.Decompress();
@ -1269,15 +1223,12 @@ void Shell::Initialize(Isolate* isolate) {
V8::SetAddHistogramSampleFunction(AddHistogramSample);
}
#endif // V8_SHARED
}
void Shell::InitializeDebugger(Isolate* isolate) {
if (options.test_shell) return;
#ifndef V8_SHARED
Locker lock;
HandleScope scope;
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
utility_context_ = Context::New(NULL, global_template);
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -1291,13 +1242,13 @@ void Shell::InitializeDebugger(Isolate* isolate) {
}
Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
Persistent<Context> Shell::CreateEvaluationContext() {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
i::ScopedLock lock(context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
Persistent<Context> context = Context::New(NULL, global_template);
ASSERT(!context.IsEmpty());
Context::Scope scope(context);
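For readers coming back to the pre-3.15 embedding style this hunk reverts to, a minimal self-contained sketch of the same template-then-context flow may help; it is not d8 itself, the shell's native bindings are elided, and the calls are the era's public v8.h API:

#include <v8.h>
#include <cstdio>

int main() {
  v8::HandleScope handle_scope;
  // Build the global template, then create a context from it -- the same two
  // steps CreateGlobalTemplate()/CreateEvaluationContext() perform above.
  v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
  v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
  v8::Context::Scope context_scope(context);
  v8::Handle<v8::Script> script =
      v8::Script::Compile(v8::String::New("6 * 7"));
  v8::String::Utf8Value result(script->Run());
  std::printf("%s\n", *result);  // prints 42
  context.Dispose();
  return 0;
}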
@ -1343,6 +1294,7 @@ int CompareKeys(const void* a, const void* b) {
void Shell::OnExit() {
if (console != NULL) console->Close();
if (i::FLAG_dump_counters) {
int number_of_counters = 0;
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@ -1402,9 +1354,9 @@ static FILE* FOpen(const char* path, const char* mode) {
}
static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
static char* ReadChars(const char* name, int* size_out) {
// Release the V8 lock while reading files.
v8::Unlocker unlocker(isolate);
v8::Unlocker unlocker(Isolate::GetCurrent());
FILE* file = FOpen(name, "rb");
if (file == NULL) return NULL;
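ReadChars() drops the VM lock around blocking file I/O. A small sketch of the Locker/Unlocker pairing it relies on (the fread here stands in for any blocking call):

#include <v8.h>
#include <cstddef>
#include <cstdio>

static void ReadWhileUnlocked(FILE* file, char* buffer, size_t size) {
  v8::Locker locker;  // hold the V8 lock for this thread
  {
    // Release it around the blocking read so other threads can run script;
    // the lock is re-acquired when 'unlocker' goes out of scope.
    v8::Unlocker unlocker(v8::Isolate::GetCurrent());
    size_t read = fread(buffer, 1, size, file);
    (void) read;
  }
  // Back under the lock: safe to create handles and call into V8 again.
}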
@ -1429,17 +1381,15 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
return Throw("Error loading file");
return ThrowException(String::New("Error loading file"));
}
uint8_t* data = reinterpret_cast<uint8_t*>(
ReadChars(args.GetIsolate(), *filename, &length));
uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
if (data == NULL) {
return Throw("Error reading file");
return ThrowException(String::New("Error reading file"));
}
Isolate* isolate = args.GetIsolate();
Handle<Object> buffer = Object::New();
buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
persistent_buffer.MarkIndependent();
@ -1447,7 +1397,7 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
buffer->SetIndexedPropertiesToExternalArrayData(
data, kExternalUnsignedByteArray, length);
buffer->Set(Symbols::byteLength(isolate),
buffer->Set(String::New("byteLength"),
Int32::New(static_cast<int32_t>(length)), ReadOnly);
return buffer;
}
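Taken together with the ExternalArrayWeakCallback hunk further up, ReadBuffer() implements a simple off-heap buffer lifecycle: expose raw bytes as external array data, account for them with the GC, and free them from a weak-handle callback. A condensed sketch of that pairing (the hidden marker property and error handling are omitted):

#include <v8.h>
#include <cstdint>

static void BufferWeakCallback(v8::Persistent<v8::Value> object, void* data) {
  // Undo the external-memory accounting and free the raw backing store.
  int32_t length =
      object->ToObject()->Get(v8::String::New("byteLength"))->Uint32Value();
  v8::V8::AdjustAmountOfExternalAllocatedMemory(-length);
  delete[] static_cast<uint8_t*>(data);
  object.Dispose();
}

static v8::Handle<v8::Object> WrapBytes(uint8_t* data, int32_t length) {
  v8::Handle<v8::Object> buffer = v8::Object::New();
  buffer->SetIndexedPropertiesToExternalArrayData(
      data, v8::kExternalUnsignedByteArray, length);
  buffer->Set(v8::String::New("byteLength"), v8::Int32::New(length),
              v8::ReadOnly);
  // Tell the GC about the off-heap allocation and arrange for cleanup once
  // the wrapper object becomes unreachable.
  v8::V8::AdjustAmountOfExternalAllocatedMemory(length);
  v8::Persistent<v8::Object> handle = v8::Persistent<v8::Object>::New(buffer);
  handle.MakeWeak(data, BufferWeakCallback);
  handle.MarkIndependent();
  return buffer;
}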
@ -1477,9 +1427,9 @@ static char* ReadWord(char* data) {
// Reads a file into a v8 string.
Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
Handle<String> Shell::ReadFile(const char* name) {
int size = 0;
char* chars = ReadChars(isolate, name, &size);
char* chars = ReadChars(name, &size);
if (chars == NULL) return Handle<String>();
Handle<String> result = String::New(chars);
delete[] chars;
@ -1487,13 +1437,12 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
}
void Shell::RunShell(Isolate* isolate) {
void Shell::RunShell() {
Locker locker;
Context::Scope context_scope(evaluation_context_);
HandleScope outer_scope;
Handle<String> name = String::New("(d8)");
DumbLineEditor dumb_line_editor(isolate);
LineEditor* console = LineEditor::Get();
console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
console->Open();
while (true) {
@ -1503,7 +1452,6 @@ void Shell::RunShell(Isolate* isolate) {
ExecuteString(input, name, true, true);
}
printf("\n");
console->Close();
}
@ -1511,9 +1459,9 @@ void Shell::RunShell(Isolate* isolate) {
class ShellThread : public i::Thread {
public:
// Takes ownership of the underlying char array of |files|.
ShellThread(Isolate* isolate, char* files)
ShellThread(int no, char* files)
: Thread("d8:ShellThread"),
isolate_(isolate), files_(files) { }
no_(no), files_(files) { }
~ShellThread() {
delete[] files_;
@ -1521,7 +1469,7 @@ class ShellThread : public i::Thread {
virtual void Run();
private:
Isolate* isolate_;
int no_;
char* files_;
};
@ -1541,8 +1489,7 @@ void ShellThread::Run() {
// Prepare the context for this thread.
Locker locker;
HandleScope outer_scope;
Persistent<Context> thread_context =
Shell::CreateEvaluationContext(isolate_);
Persistent<Context> thread_context = Shell::CreateEvaluationContext();
Context::Scope context_scope(thread_context);
while ((ptr != NULL) && (*ptr != '\0')) {
@ -1555,7 +1502,7 @@ void ShellThread::Run() {
continue;
}
Handle<String> str = Shell::ReadFile(isolate_, filename);
Handle<String> str = Shell::ReadFile(filename);
if (str.IsEmpty()) {
printf("File '%s' not found\n", filename);
Shell::Exit(1);
@ -1583,7 +1530,7 @@ SourceGroup::~SourceGroup() {
}
void SourceGroup::Execute(Isolate* isolate) {
void SourceGroup::Execute() {
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
@ -1601,7 +1548,7 @@ void SourceGroup::Execute(Isolate* isolate) {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope;
Handle<String> file_name = String::New(arg);
Handle<String> source = ReadFile(isolate, arg);
Handle<String> source = ReadFile(arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
Shell::Exit(1);
@ -1614,9 +1561,9 @@ void SourceGroup::Execute(Isolate* isolate) {
}
Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
Handle<String> SourceGroup::ReadFile(const char* name) {
int size;
char* chars = ReadChars(isolate, name, &size);
char* chars = ReadChars(name, &size);
if (chars == NULL) return Handle<String>();
Handle<String> result = String::New(chars, size);
delete[] chars;
@ -1642,11 +1589,10 @@ void SourceGroup::ExecuteInThread() {
Isolate::Scope iscope(isolate);
Locker lock(isolate);
HandleScope scope;
Symbols symbols(isolate);
Persistent<Context> context = Shell::CreateEvaluationContext(isolate);
Persistent<Context> context = Shell::CreateEvaluationContext();
{
Context::Scope cscope(context);
Execute(isolate);
Execute();
}
context.Dispose();
if (Shell::options.send_idle_notification) {
@ -1814,21 +1760,21 @@ bool Shell::SetOptions(int argc, char* argv[]) {
}
int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
int Shell::RunMain(int argc, char* argv[]) {
#ifndef V8_SHARED
i::List<i::Thread*> threads(1);
if (options.parallel_files != NULL) {
for (int i = 0; i < options.num_parallel_files; i++) {
char* files = NULL;
{ Locker lock(isolate);
{ Locker lock(Isolate::GetCurrent());
int size = 0;
files = ReadChars(isolate, options.parallel_files[i], &size);
files = ReadChars(options.parallel_files[i], &size);
}
if (files == NULL) {
printf("File list '%s' not found\n", options.parallel_files[i]);
Exit(1);
}
ShellThread* thread = new ShellThread(isolate, files);
ShellThread* thread = new ShellThread(threads.length(), files);
thread->Start();
threads.Add(thread);
}
@ -1840,7 +1786,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
{ // NOLINT
Locker lock;
HandleScope scope;
Persistent<Context> context = CreateEvaluationContext(isolate);
Persistent<Context> context = CreateEvaluationContext();
if (options.last_run) {
// Keep using the same context in the interactive shell.
evaluation_context_ = context;
@ -1854,7 +1800,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
}
{
Context::Scope cscope(context);
options.isolate_sources[0].Execute(isolate);
options.isolate_sources[0].Execute();
}
if (!options.last_run) {
context.Dispose();
@ -1896,23 +1842,19 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
int result = 0;
Isolate* isolate = Isolate::GetCurrent();
{
Initialize(isolate);
Symbols symbols(isolate);
InitializeDebugger(isolate);
Initialize();
int result = 0;
if (options.stress_opt || options.stress_deopt) {
Testing::SetStressRunType(options.stress_opt
? Testing::kStressTypeOpt
Testing::SetStressRunType(
options.stress_opt ? Testing::kStressTypeOpt
: Testing::kStressTypeDeopt);
int stress_runs = Testing::GetStressRuns();
for (int i = 0; i < stress_runs && result == 0; i++) {
printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
Testing::PrepareStressRun(i);
options.last_run = (i == stress_runs - 1);
result = RunMain(isolate, argc, argv);
result = RunMain(argc, argv);
}
printf("======== Full Deoptimization =======\n");
Testing::DeoptimizeAll();
@ -1922,11 +1864,11 @@ int Shell::Main(int argc, char* argv[]) {
for (int i = 0; i < stress_runs && result == 0; i++) {
printf("============ Run %d/%d ============\n", i + 1, stress_runs);
options.last_run = (i == stress_runs - 1);
result = RunMain(isolate, argc, argv);
result = RunMain(argc, argv);
}
#endif
} else {
result = RunMain(isolate, argc, argv);
result = RunMain(argc, argv);
}
@ -1942,16 +1884,17 @@ int Shell::Main(int argc, char* argv[]) {
// Run interactive shell if explicitly requested or if no script has been
// executed, but never on --test
if (( options.interactive_shell || !options.script_executed )
if (( options.interactive_shell
|| !options.script_executed )
&& !options.test_shell ) {
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
if (!i::FLAG_debugger) {
InstallUtilityScript();
}
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
RunShell(isolate);
}
RunShell();
}
V8::Dispose();
#ifndef V8_SHARED

28
deps/v8/src/d8.h

@ -158,7 +158,7 @@ class SourceGroup {
void End(int offset) { end_offset_ = offset; }
void Execute(Isolate* isolate);
void Execute();
#ifndef V8_SHARED
void StartExecuteInThread();
@ -187,7 +187,7 @@ class SourceGroup {
#endif // V8_SHARED
void ExitShell(int exit_code);
Handle<String> ReadFile(Isolate* isolate, const char* name);
Handle<String> ReadFile(const char* name);
const char** argv_;
int begin_offset_;
@ -272,9 +272,9 @@ class Shell : public i::AllStatic {
bool report_exceptions);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(TryCatch* try_catch);
static Handle<String> ReadFile(Isolate* isolate, const char* name);
static Persistent<Context> CreateEvaluationContext(Isolate* isolate);
static int RunMain(Isolate* isolate, int argc, char* argv[]);
static Handle<String> ReadFile(const char* name);
static Persistent<Context> CreateEvaluationContext();
static int RunMain(int argc, char* argv[]);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
@ -310,9 +310,9 @@ class Shell : public i::AllStatic {
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args);
static Handle<String> ReadFromStdin(Isolate* isolate);
static Handle<String> ReadFromStdin();
static Handle<Value> ReadLine(const Arguments& args) {
return ReadFromStdin(args.GetIsolate());
return ReadFromStdin();
}
static Handle<Value> Load(const Arguments& args);
static Handle<Value> ArrayBuffer(const Arguments& args);
@ -365,6 +365,7 @@ class Shell : public i::AllStatic {
static void AddOSMethods(Handle<ObjectTemplate> os_template);
static LineEditor* console;
static const char* kPrompt;
static ShellOptions options;
@ -383,18 +384,15 @@ class Shell : public i::AllStatic {
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript();
#endif // V8_SHARED
static void Initialize(Isolate* isolate);
static void InitializeDebugger(Isolate* isolate);
static void RunShell(Isolate* isolate);
static void Initialize();
static void RunShell();
static bool SetOptions(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
static Handle<ObjectTemplate> CreateGlobalTemplate();
static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
Handle<Object> buffer,
static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer,
int32_t size);
static Handle<Object> CreateExternalArray(Isolate* isolate,
Handle<Object> array,
static Handle<Object> CreateExternalArray(Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,

2
deps/v8/src/date.js

@ -107,7 +107,7 @@ function MakeDay(year, month, date) {
}
// Now we rely on year and month being SMIs.
return %DateMakeDay(year | 0, month | 0) + date - 1;
return %DateMakeDay(year, month) + date - 1;
}

119
deps/v8/src/debug-debugger.js

@ -1306,12 +1306,9 @@ ProtocolMessage.prototype.setOption = function(name, value) {
};
ProtocolMessage.prototype.failed = function(message, opt_details) {
ProtocolMessage.prototype.failed = function(message) {
this.success = false;
this.message = message;
if (IS_OBJECT(opt_details)) {
this.error_details = opt_details;
}
};
@ -1358,9 +1355,6 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
if (this.message) {
json.message = this.message;
}
if (this.error_details) {
json.error_details = this.error_details;
}
json.running = this.running;
return JSON.stringify(json);
};
@ -1433,8 +1427,6 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.scopesRequest_(request, response);
} else if (request.command == 'scope') {
this.scopeRequest_(request, response);
} else if (request.command == 'setVariableValue') {
this.setVariableValueRequest_(request, response);
} else if (request.command == 'evaluate') {
this.evaluateRequest_(request, response);
} else if (lol_is_enabled && request.command == 'getobj') {
@ -1961,12 +1953,11 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
};
DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
function(scope_description) {
DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
// Get the frame for which the scope or scopes are requested.
// With no frameNumber argument use the currently selected frame.
if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
frame_index = scope_description.frameNumber;
if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
frame_index = request.arguments.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
throw new Error('Invalid frame number');
}
@ -1980,13 +1971,13 @@ DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
// Gets scope host object from request. It is either a function
// ('functionHandle' argument must be specified) or a stack frame
// ('frameNumber' may be specified and the current frame is taken by default).
DebugCommandProcessor.prototype.resolveScopeHolder_ =
function(scope_description) {
if (scope_description && "functionHandle" in scope_description) {
if (!IS_NUMBER(scope_description.functionHandle)) {
DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
function(request) {
if (request.arguments && "functionHandle" in request.arguments) {
if (!IS_NUMBER(request.arguments.functionHandle)) {
throw new Error('Function handle must be a number');
}
var function_mirror = LookupMirror(scope_description.functionHandle);
var function_mirror = LookupMirror(request.arguments.functionHandle);
if (!function_mirror) {
throw new Error('Failed to find function object by handle');
}
@ -2001,14 +1992,14 @@ DebugCommandProcessor.prototype.resolveScopeHolder_ =
}
// Get the frame for which the scopes are requested.
var frame = this.resolveFrameFromScopeDescription_(scope_description);
var frame = this.frameForScopeRequest_(request);
return frame;
}
}
DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
var scope_holder = this.resolveScopeHolder_(request.arguments);
var scope_holder = this.scopeHolderForScopeRequest_(request);
// Fill all scopes for this frame or function.
var total_scopes = scope_holder.scopeCount();
@ -2027,7 +2018,7 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// Get the frame or function for which the scope is requested.
var scope_holder = this.resolveScopeHolder_(request.arguments);
var scope_holder = this.scopeHolderForScopeRequest_(request);
// With no scope argument just return top scope.
var scope_index = 0;
@ -2042,77 +2033,6 @@ DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
};
// Reads value from protocol description. Description may be in form of type
// (for singletons), raw value (primitive types supported in JSON),
// string value description plus type (for primitive values) or handle id.
// Returns raw value or throws exception.
DebugCommandProcessor.resolveValue_ = function(value_description) {
if ("handle" in value_description) {
var value_mirror = LookupMirror(value_description.handle);
if (!value_mirror) {
throw new Error("Failed to resolve value by handle, ' #" +
mapping.handle + "# not found");
}
return value_mirror.value();
} else if ("stringDescription" in value_description) {
if (value_description.type == BOOLEAN_TYPE) {
return Boolean(value_description.stringDescription);
} else if (value_description.type == NUMBER_TYPE) {
return Number(value_description.stringDescription);
} if (value_description.type == STRING_TYPE) {
return String(value_description.stringDescription);
} else {
throw new Error("Unknown type");
}
} else if ("value" in value_description) {
return value_description.value;
} else if (value_description.type == UNDEFINED_TYPE) {
return void 0;
} else if (value_description.type == NULL_TYPE) {
return null;
} else {
throw new Error("Failed to parse value description");
}
};
DebugCommandProcessor.prototype.setVariableValueRequest_ =
function(request, response) {
if (!request.arguments) {
response.failed('Missing arguments');
return;
}
if (IS_UNDEFINED(request.arguments.name)) {
response.failed('Missing variable name');
}
var variable_name = request.arguments.name;
var scope_description = request.arguments.scope;
// Get the frame or function for which the scope is requested.
var scope_holder = this.resolveScopeHolder_(scope_description);
if (IS_UNDEFINED(scope_description.number)) {
response.failed('Missing scope number');
}
var scope_index = %ToNumber(scope_description.number);
var scope = scope_holder.scope(scope_index);
var new_value =
DebugCommandProcessor.resolveValue_(request.arguments.newValue);
scope.setVariableValue(variable_name, new_value);
var new_value_mirror = MakeMirror(new_value);
response.body = {
newValue: new_value_mirror
};
};
DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
@ -2467,17 +2387,8 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
var new_source = request.arguments.new_source;
var result_description;
try {
result_description = Debug.LiveEdit.SetScriptSource(the_script,
var result_description = Debug.LiveEdit.SetScriptSource(the_script,
new_source, preview_only, change_log);
} catch (e) {
if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
response.failed(e.message, e.details);
return;
}
throw e;
}
response.body = {change_log: change_log, result: result_description};
if (!preview_only && !this.running_ && result_description.stack_modified) {
@ -2752,7 +2663,3 @@ function ValueToProtocolValue_(value, mirror_serializer) {
}
return json;
}
Debug.TestApi = {
CommandProcessorResolveValue: DebugCommandProcessor.resolveValue_
};

10
deps/v8/src/debug.cc

@ -261,12 +261,8 @@ void BreakLocationIterator::Reset() {
// Create relocation iterators for the two code objects.
if (reloc_iterator_ != NULL) delete reloc_iterator_;
if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
reloc_iterator_ = new RelocIterator(
debug_info_->code(),
~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
reloc_iterator_original_ = new RelocIterator(
debug_info_->original_code(),
~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
reloc_iterator_ = new RelocIterator(debug_info_->code());
reloc_iterator_original_ = new RelocIterator(debug_info_->original_code());
// Position at the first break point.
break_point_ = -1;
@ -786,11 +782,9 @@ bool Debug::CompileDebuggerScript(int index) {
"error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
if (!exception.is_null()) {
isolate->set_pending_exception(*exception);
MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
isolate->clear_pending_exception();
}
return false;
}

110
deps/v8/src/deoptimizer.cc

@ -41,11 +41,8 @@ namespace v8 {
namespace internal {
DeoptimizerData::DeoptimizerData() {
eager_deoptimization_entry_code_entries_ = -1;
lazy_deoptimization_entry_code_entries_ = -1;
size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
eager_deoptimization_entry_code_ = NULL;
lazy_deoptimization_entry_code_ = NULL;
current_ = NULL;
deoptimizing_code_list_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -55,18 +52,16 @@ DeoptimizerData::DeoptimizerData() {
DeoptimizerData::~DeoptimizerData() {
delete eager_deoptimization_entry_code_;
if (eager_deoptimization_entry_code_ != NULL) {
Isolate::Current()->memory_allocator()->Free(
eager_deoptimization_entry_code_);
eager_deoptimization_entry_code_ = NULL;
delete lazy_deoptimization_entry_code_;
}
if (lazy_deoptimization_entry_code_ != NULL) {
Isolate::Current()->memory_allocator()->Free(
lazy_deoptimization_entry_code_);
lazy_deoptimization_entry_code_ = NULL;
DeoptimizingCodeListNode* current = deoptimizing_code_list_;
while (current != NULL) {
DeoptimizingCodeListNode* prev = current;
current = current->next();
delete prev;
}
deoptimizing_code_list_ = NULL;
}
@ -101,20 +96,6 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
}
// No larger than 2K on all platforms
static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
size_t Deoptimizer::GetMaxDeoptTableSize() {
int entries_size =
Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
int commit_page_size = static_cast<int>(OS::CommitPageSize());
int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
commit_page_size) + 1;
return static_cast<size_t>(commit_page_size * page_count);
}
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
Deoptimizer* result = isolate->deoptimizer_data()->current_;
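The GetMaxDeoptTableSize() helper removed above rounds the deoptimization table up to whole commit pages. A worked example of that arithmetic; the entry size and page size below are assumptions for illustration, since both are platform-dependent:

#include <cstddef>
#include <cstdio>

// Assumed values purely for the worked example; the real table_entry_size_
// and OS::CommitPageSize() vary by architecture and operating system.
static const int kMaxNumberOfEntries = 16384;
static const int kAssumedEntrySize = 12;          // bytes per table entry
static const int kEpilogueCodeSize = 2 * 1024;    // "no larger than 2K"
static const int kAssumedCommitPageSize = 4096;

static size_t MaxDeoptTableSize() {
  int entries_size = kMaxNumberOfEntries * kAssumedEntrySize;   // 196608
  int page_count = ((kEpilogueCodeSize + entries_size - 1) /
                    kAssumedCommitPageSize) + 1;                 // 48 + 1
  return static_cast<size_t>(kAssumedCommitPageSize) * page_count;
}

int main() {
  std::printf("%zu\n", MaxDeoptTableSize());  // 49 pages -> 200704 bytes
  return 0;
}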
@ -473,45 +454,44 @@ void Deoptimizer::DeleteFrameDescriptions() {
}
Address Deoptimizer::GetDeoptimizationEntry(int id,
BailoutType type,
GetEntryMode mode) {
Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
ASSERT(id >= 0);
if (id >= kMaxNumberOfEntries) return NULL;
VirtualMemory* base = NULL;
if (mode == ENSURE_ENTRY_CODE) {
EnsureCodeForDeoptimizationEntry(type, id);
} else {
ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
}
if (id >= kNumberOfEntries) return NULL;
MemoryChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
if (data->eager_deoptimization_entry_code_ == NULL) {
data->eager_deoptimization_entry_code_ = CreateCode(type);
}
base = data->eager_deoptimization_entry_code_;
} else {
if (data->lazy_deoptimization_entry_code_ == NULL) {
data->lazy_deoptimization_entry_code_ = CreateCode(type);
}
base = data->lazy_deoptimization_entry_code_;
}
return
static_cast<Address>(base->address()) + (id * table_entry_size_);
static_cast<Address>(base->area_start()) + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
VirtualMemory* base = NULL;
MemoryChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
base = data->eager_deoptimization_entry_code_;
} else {
base = data->lazy_deoptimization_entry_code_;
}
Address base_casted = reinterpret_cast<Address>(base->address());
if (base == NULL ||
addr < base->address() ||
addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
addr < base->area_start() ||
addr >= base->area_start() +
(kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
static_cast<int>(addr - base_casted) % table_entry_size_);
return static_cast<int>(addr - base_casted) / table_entry_size_;
static_cast<int>(addr - base->area_start()) % table_entry_size_);
return static_cast<int>(addr - base->area_start()) / table_entry_size_;
}
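Both directions of the lookup above are plain index arithmetic over one contiguous table (entry address = base + id * entry size, and back). A toy version with an assumed entry size:

#include <cassert>
#include <cstdint>

static const int kAssumedEntrySize = 12;  // stand-in for table_entry_size_

static uintptr_t EntryAddress(uintptr_t table_start, int id) {
  return table_start + static_cast<uintptr_t>(id) * kAssumedEntrySize;
}

static int EntryId(uintptr_t table_start, uintptr_t addr) {
  // Mirrors the ASSERT above: a valid entry address lands exactly on an
  // entry boundary inside the table.
  assert((addr - table_start) % kAssumedEntrySize == 0);
  return static_cast<int>((addr - table_start) / kAssumedEntrySize);
}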
@ -535,7 +515,7 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
shared->SourceCodePrint(&stream, -1);
PrintF("[source:\n%s\n]", *stream.ToCString());
FATAL("unable to find pc offset during deoptimization");
UNREACHABLE();
return -1;
}
@ -1397,45 +1377,31 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
}
void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
int max_entry_id) {
MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
// We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
ASSERT(!Serializer::enabled());
ASSERT(type == EAGER || type == LAZY);
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
int entry_count = (type == EAGER)
? data->eager_deoptimization_entry_code_entries_
: data->lazy_deoptimization_entry_code_entries_;
if (max_entry_id < entry_count) return;
entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
while (max_entry_id >= entry_count) entry_count *= 2;
ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, entry_count, type);
GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
VirtualMemory* memory = type == EAGER
? data->eager_deoptimization_entry_code_
: data->lazy_deoptimization_entry_code_;
size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
ASSERT(static_cast<int>(table_size) >= desc.instr_size);
memory->Commit(memory->address(), table_size, true);
memcpy(memory->address(), desc.buffer, desc.instr_size);
CPU::FlushICache(memory->address(), desc.instr_size);
if (type == EAGER) {
data->eager_deoptimization_entry_code_entries_ = entry_count;
} else {
data->lazy_deoptimization_entry_code_entries_ = entry_count;
MemoryChunk* chunk =
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
EXECUTABLE,
NULL);
ASSERT(chunk->area_size() >= desc.instr_size);
if (chunk == NULL) {
V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
}
memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
CPU::FlushICache(chunk->area_start(), desc.instr_size);
return chunk;
}
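CreateCode() copies the generated deoptimization entries into a freshly allocated executable MemoryChunk and flushes the instruction cache. Outside of V8's own allocator the same idea can be sketched with plain POSIX calls; this is a generic illustration, not what V8 does internally:

#include <sys/mman.h>
#include <cstddef>
#include <cstring>

// Place generated machine code into its own executable mapping: write first,
// then flip the protection to read+execute. Returns NULL on failure.
static void* CopyToExecutableMemory(const void* code, size_t size) {
  void* mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return NULL;
  std::memcpy(mem, code, size);
  // On architectures like ARM an instruction-cache flush (what
  // CPU::FlushICache does above) would also be required at this point.
  if (mprotect(mem, size, PROT_READ | PROT_EXEC) != 0) return NULL;
  return mem;
}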

29
deps/v8/src/deoptimizer.h

@ -100,10 +100,8 @@ class DeoptimizerData {
#endif
private:
int eager_deoptimization_entry_code_entries_;
int lazy_deoptimization_entry_code_entries_;
VirtualMemory* eager_deoptimization_entry_code_;
VirtualMemory* lazy_deoptimization_entry_code_;
MemoryChunk* eager_deoptimization_entry_code_;
MemoryChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -228,17 +226,7 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
enum GetEntryMode {
CALCULATE_ENTRY_ADDRESS,
ENSURE_ENTRY_CODE
};
static Address GetDeoptimizationEntry(
int id,
BailoutType type,
GetEntryMode mode = ENSURE_ENTRY_CODE);
static Address GetDeoptimizationEntry(int id, BailoutType type);
static int GetDeoptimizationId(Address addr, BailoutType type);
static int GetOutputInfo(DeoptimizationOutputData* data,
BailoutId node_id,
@ -295,11 +283,8 @@ class Deoptimizer : public Malloced {
int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
static size_t GetMaxDeoptTableSize();
private:
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
static const int kNumberOfEntries = 16384;
Deoptimizer(Isolate* isolate,
JSFunction* function,
@ -342,8 +327,7 @@ class Deoptimizer : public Malloced {
void AddArgumentsObjectValue(intptr_t value);
void AddDoubleValue(intptr_t slot_address, double value);
static void EnsureCodeForDeoptimizationEntry(BailoutType type,
int max_entry_id);
static MemoryChunk* CreateCode(BailoutType type);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@ -537,6 +521,9 @@ class FrameDescription {
intptr_t context_;
StackFrame::Type type_;
Smi* state_;
#ifdef DEBUG
Code::Kind kind_;
#endif
// Continuation is the PC where the execution continues after
// deoptimizing.

9
deps/v8/src/elements-kind.cc

@ -35,14 +35,9 @@ namespace v8 {
namespace internal {
const char* ElementsKindToString(ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
return accessor->name();
}
void PrintElementsKind(FILE* out, ElementsKind kind) {
PrintF(out, "%s", ElementsKindToString(kind));
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
PrintF(out, "%s", accessor->name());
}

8
deps/v8/src/elements-kind.h

@ -77,7 +77,6 @@ const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
const char* ElementsKindToString(ElementsKind kind);
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
@ -110,13 +109,6 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
}
inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
return IsFastDoubleElementsKind(kind) ||
kind == EXTERNAL_DOUBLE_ELEMENTS ||
kind == EXTERNAL_FLOAT_ELEMENTS;
}
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||

526
deps/v8/src/elements.cc

@ -146,36 +146,33 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) {
}
static void CopyObjectToObjectElements(FixedArrayBase* from_base,
void CopyObjectToObjectElements(FixedArray* from,
ElementsKind from_kind,
uint32_t from_start,
FixedArrayBase* to_base,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
AssertNoAllocation no_allocation;
ASSERT(to->map() != HEAP->fixed_cow_array_map());
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = Min(from_base->length() - from_start,
to_base->length() - to_start);
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
int start = to_start + copy_size;
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from_base->GetHeap();
MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
for (int i = to_start + copy_size; i < to->length(); ++i) {
ASSERT(to->get(i)->IsTheHole());
}
}
#endif
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
FixedArray* from = FixedArray::cast(from_base);
FixedArray* to = FixedArray::cast(to_base);
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
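The helper above, like the other Copy*Elements variants that follow, treats a negative raw_copy_size as a sentinel: kCopyToEnd means copy to the end, and kCopyToEndAndInitializeToHole additionally hole-fills the destination's tail. A standalone sketch of that convention, with plain ints standing in for heap objects and kHole as a made-up marker:

#include <algorithm>
#include <vector>

static const int kCopyToEnd = -1;
static const int kCopyToEndAndInitializeToHole = -2;
static const int kHole = -999;  // stand-in for the heap's the-hole value

static void CopyWithSentinel(const std::vector<int>& from, size_t from_start,
                             std::vector<int>* to, size_t to_start,
                             int raw_copy_size) {
  int copy_size = raw_copy_size;
  if (raw_copy_size < 0) {
    copy_size = static_cast<int>(std::min(from.size() - from_start,
                                          to->size() - to_start));
    if (raw_copy_size == kCopyToEndAndInitializeToHole) {
      // Everything in the destination past the copied range becomes a hole.
      for (size_t i = to_start + copy_size; i < to->size(); ++i)
        (*to)[i] = kHole;
    }
  }
  std::copy(from.begin() + from_start,
            from.begin() + from_start + copy_size,
            to->begin() + to_start);
}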
@ -196,34 +193,31 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
}
static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
uint32_t from_start,
FixedArrayBase* to_base,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
AssertNoAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
#ifdef DEBUG
// Fast object arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
int start = to_start + copy_size;
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from->GetHeap();
MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
for (int i = to_start + copy_size; i < to->length(); ++i) {
ASSERT(to->get(i)->IsTheHole());
}
}
#endif
}
ASSERT(to_base != from_base);
ASSERT(to != from);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
FixedArray* to = FixedArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@ -250,9 +244,9 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
FixedArrayBase* from_base,
FixedDoubleArray* from,
uint32_t from_start,
FixedArrayBase* to_base,
FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
@ -261,26 +255,21 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = Min(from_base->length() - from_start,
to_base->length() - to_start);
copy_size = Min(from->length() - from_start,
to->length() - to_start);
#ifdef DEBUG
// FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
// marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
// Also initialize the area that will be copied over since HeapNumber
// allocation below can cause an incremental marking step, requiring all
// existing heap objects to be propertly initialized.
int start = to_start;
int length = to_base->length() - start;
if (length > 0) {
Heap* heap = from_base->GetHeap();
MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
heap->the_hole_value(), length);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return from_base;
FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
FixedArray* to = FixedArray::cast(to_base);
for (int i = to_start + copy_size; i < to->length(); ++i) {
ASSERT(to->get(i)->IsTheHole());
}
}
#endif
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
@ -309,28 +298,26 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
}
static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
uint32_t from_start,
FixedArrayBase* to_base,
FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = Min(from_base->length() - from_start,
to_base->length() - to_start);
copy_size = Min(from->length() - from_start,
to->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
FixedDoubleArray::cast(to_base)->set_the_hole(i);
for (int i = to_start + copy_size; i < to->length(); ++i) {
to->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
@ -342,27 +329,25 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
}
static void CopySmiToDoubleElements(FixedArrayBase* from_base,
static void CopySmiToDoubleElements(FixedArray* from,
uint32_t from_start,
FixedArrayBase* to_base,
FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from_base->length() - from_start;
copy_size = from->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
FixedDoubleArray::cast(to_base)->set_the_hole(i);
for (int i = to_start + copy_size; i < to->length(); ++i) {
to->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
FixedArray* from = FixedArray::cast(from_base);
FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
@ -376,9 +361,9 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
}
static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
static void CopyPackedSmiToDoubleElements(FixedArray* from,
uint32_t from_start,
FixedArrayBase* to_base,
FixedDoubleArray* to,
uint32_t to_start,
int packed_size,
int raw_copy_size) {
@ -387,55 +372,52 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = packed_size - from_start;
copy_size = from->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
to_end = to_base->length();
for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
FixedDoubleArray::cast(to_base)->set_the_hole(i);
}
to_end = to->length();
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
ASSERT(static_cast<int>(to_end) <= to_base->length());
ASSERT(static_cast<int>(to_end) <= to->length());
ASSERT(packed_size >= 0 && packed_size <= copy_size);
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
FixedArray* from = FixedArray::cast(from_base);
FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
ASSERT(!smi->IsTheHole());
to->set(to_start, Smi::cast(smi)->value());
}
while (to_start < to_end) {
to->set_the_hole(to_start++);
}
}
static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
static void CopyObjectToDoubleElements(FixedArray* from,
uint32_t from_start,
FixedArrayBase* to_base,
FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from_base->length() - from_start;
copy_size = from->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
FixedDoubleArray::cast(to_base)->set_the_hole(i);
for (int i = to_start + copy_size; i < to->length(); ++i) {
to->set_the_hole(i);
}
}
}
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
(copy_size + static_cast<int>(from_start)) <= from_base->length());
ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
(copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
FixedArray* from = FixedArray::cast(from_base);
FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
@ -449,25 +431,23 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
}
static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
uint32_t from_start,
FixedArrayBase* to_base,
FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (copy_size < 0) {
ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
for (int i = to_start + copy_size; i < to_base->length(); ++i) {
FixedDoubleArray::cast(to_base)->set_the_hole(i);
for (int i = to_start + copy_size; i < to->length(); ++i) {
to->set_the_hole(i);
}
}
}
if (copy_size == 0) return;
FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@ -523,8 +503,8 @@ class ElementsAccessorBase : public ElementsAccessor {
Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
Heap* heap = holder->GetHeap();
if (map == heap->one_pointer_filler_map() ||
map == heap->two_pointer_filler_map() ||
if (map == heap->raw_unchecked_one_pointer_filler_map() ||
map == heap->raw_unchecked_two_pointer_filler_map() ||
map == heap->free_space_map()) {
return;
}
@ -547,9 +527,10 @@ class ElementsAccessorBase : public ElementsAccessor {
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
return ElementsAccessorSubclass::GetAttributesImpl(
receiver, holder, key, backing_store) != ABSENT;
BackingStore* backing_store) {
MaybeObject* element =
ElementsAccessorSubclass::GetImpl(receiver, holder, key, backing_store);
return !element->IsTheHole();
}
virtual bool HasElement(Object* receiver,
@ -560,7 +541,7 @@ class ElementsAccessorBase : public ElementsAccessor {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, backing_store);
receiver, holder, key, BackingStore::cast(backing_store));
}
MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
@ -571,95 +552,28 @@ class ElementsAccessorBase : public ElementsAccessor {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::GetImpl(
receiver, holder, key, backing_store);
receiver, holder, key, BackingStore::cast(backing_store));
}
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* backing_store) {
BackingStore* backing_store) {
return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
? BackingStore::cast(backing_store)->get(key)
? backing_store->get(key)
: backing_store->GetHeap()->the_hole_value();
}
MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
if (backing_store == NULL) {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::GetAttributesImpl(
receiver, holder, key, backing_store);
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* backing_store) {
if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
return ABSENT;
}
return BackingStore::cast(backing_store)->is_the_hole(key) ? ABSENT : NONE;
}
MUST_USE_RESULT virtual PropertyType GetType(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
if (backing_store == NULL) {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::GetTypeImpl(
receiver, holder, key, backing_store);
}
MUST_USE_RESULT static PropertyType GetTypeImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* backing_store) {
if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
return NONEXISTENT;
}
return BackingStore::cast(backing_store)->is_the_hole(key)
? NONEXISTENT : FIELD;
}
MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
if (backing_store == NULL) {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::GetAccessorPairImpl(
receiver, holder, key, backing_store);
}
MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* backing_store) {
return NULL;
}
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
Object* length) {
return ElementsAccessorSubclass::SetLengthImpl(
array, length, array->elements());
array, length, BackingStore::cast(array->elements()));
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
FixedArrayBase* backing_store);
BackingStore* backing_store);
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
JSArray* array,
@ -717,6 +631,9 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
}
if (from->length() == 0) {
return from;
}
return ElementsAccessorSubclass::CopyElementsImpl(
from, from_start, to, to_kind, to_start, packed_size, copy_size);
}
@ -737,22 +654,25 @@ class ElementsAccessorBase : public ElementsAccessor {
if (from == NULL) {
from = holder->elements();
}
BackingStore* backing_store = BackingStore::cast(from);
uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store);
// Optimize if 'other' is empty.
// We cannot optimize if 'this' is empty, as other may have holes.
uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(from);
if (len1 == 0) return to;
// Compute how many elements are not in other.
uint32_t extra = 0;
for (uint32_t y = 0; y < len1; y++) {
uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
uint32_t key =
ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
if (ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, from)) {
receiver, holder, key, backing_store)) {
MaybeObject* maybe_value =
ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
ElementsAccessorSubclass::GetImpl(receiver, holder,
key, backing_store);
Object* value;
if (!maybe_value->To(&value)) return maybe_value;
if (!maybe_value->ToObject(&value)) return maybe_value;
ASSERT(!value->IsTheHole());
if (!HasKey(to, value)) {
extra++;
@ -764,8 +684,9 @@ class ElementsAccessorBase : public ElementsAccessor {
// Allocate the result
FixedArray* result;
MaybeObject* maybe_obj = from->GetHeap()->AllocateFixedArray(len0 + extra);
if (!maybe_obj->To(&result)) return maybe_obj;
MaybeObject* maybe_obj =
backing_store->GetHeap()->AllocateFixedArray(len0 + extra);
if (!maybe_obj->To<FixedArray>(&result)) return maybe_obj;
// Fill in the content
{
@ -781,13 +702,14 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t index = 0;
for (uint32_t y = 0; y < len1; y++) {
uint32_t key =
ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
if (ElementsAccessorSubclass::HasElementImpl(
receiver, holder, key, from)) {
receiver, holder, key, backing_store)) {
MaybeObject* maybe_value =
ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
ElementsAccessorSubclass::GetImpl(receiver, holder,
key, backing_store);
Object* value;
if (!maybe_value->To(&value)) return maybe_value;
if (!maybe_value->ToObject(&value)) return maybe_value;
if (!value->IsTheHole() && !HasKey(to, value)) {
result->set(len0 + index, value);
index++;
@ -799,22 +721,24 @@ class ElementsAccessorBase : public ElementsAccessor {
}
protected:
static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
static uint32_t GetCapacityImpl(BackingStore* backing_store) {
return backing_store->length();
}
virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
return ElementsAccessorSubclass::GetCapacityImpl(
BackingStore::cast(backing_store));
}
static uint32_t GetKeyForIndexImpl(FixedArrayBase* backing_store,
static uint32_t GetKeyForIndexImpl(BackingStore* backing_store,
uint32_t index) {
return index;
}
virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
uint32_t index) {
return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
return ElementsAccessorSubclass::GetKeyForIndexImpl(
BackingStore::cast(backing_store), index);
}
private:
@ -840,17 +764,17 @@ class FastElementsAccessor
// Adjusts the length of the fast backing store or returns the new length or
// undefined in case conversion to a slow backing store should be performed.
static MaybeObject* SetLengthWithoutNormalize(FixedArrayBase* backing_store,
static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store,
JSArray* array,
Object* length_object,
uint32_t length) {
uint32_t old_capacity = backing_store->length();
Object* old_length = array->length();
bool same_or_smaller_size = old_length->IsSmi() &&
static_cast<uint32_t>(Smi::cast(old_length)->value()) >= length;
bool same_size = old_length->IsSmi() &&
static_cast<uint32_t>(Smi::cast(old_length)->value()) == length;
ElementsKind kind = array->GetElementsKind();
if (!same_or_smaller_size && IsFastElementsKind(kind) &&
if (!same_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
@ -878,7 +802,7 @@ class FastElementsAccessor
// Otherwise, fill the unused tail with holes.
int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
BackingStore::cast(backing_store)->set_the_hole(i);
backing_store->set_the_hole(i);
}
}
return length_object;
@ -905,25 +829,14 @@ class FastElementsAccessor
ASSERT(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
Heap* heap = obj->GetHeap();
Object* elements = obj->elements();
if (elements == heap->empty_fixed_array()) {
return heap->true_value();
}
typename KindTraits::BackingStore* backing_store =
KindTraits::BackingStore::cast(elements);
bool is_non_strict_arguments_elements_map =
backing_store->map() == heap->non_strict_arguments_elements_map();
if (is_non_strict_arguments_elements_map) {
backing_store = KindTraits::BackingStore::cast(
KindTraits::BackingStore::cast(obj->elements());
Heap* heap = obj->GetHeap();
if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
backing_store =
KindTraits::BackingStore::cast(
FixedArray::cast(backing_store)->get(1));
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
? Smi::cast(JSArray::cast(obj)->length())->value()
: backing_store->length());
if (key < length) {
if (!is_non_strict_arguments_elements_map) {
} else {
ElementsKind kind = KindTraits::Kind;
if (IsFastPackedElementsKind(kind)) {
MaybeObject* transitioned =
@ -937,6 +850,11 @@ class FastElementsAccessor
backing_store = KindTraits::BackingStore::cast(writable);
}
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
? Smi::cast(JSArray::cast(obj)->length())->value()
: backing_store->length());
if (key < length) {
backing_store->set_the_hole(key);
// If an old space backing store is larger than a certain size and
// has too few used values, normalize it.
@ -972,11 +890,11 @@ class FastElementsAccessor
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
typename KindTraits::BackingStore* backing_store) {
if (key >= static_cast<uint32_t>(backing_store->length())) {
return false;
}
return !BackingStore::cast(backing_store)->is_the_hole(key);
return !backing_store->is_the_hole(key);
}
static void ValidateContents(JSObject* holder, int length) {
@ -1024,18 +942,25 @@ class FastSmiOrObjectElementsAccessor
int copy_size) {
if (IsFastSmiOrObjectElementsKind(to_kind)) {
CopyObjectToObjectElements(
from, KindTraits::Kind, from_start, to, to_kind, to_start, copy_size);
FixedArray::cast(from), KindTraits::Kind, from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
} else if (IsFastDoubleElementsKind(to_kind)) {
if (IsFastSmiElementsKind(KindTraits::Kind)) {
if (IsFastPackedElementsKind(KindTraits::Kind) &&
packed_size != kPackedSizeNotKnown) {
CopyPackedSmiToDoubleElements(
from, from_start, to, to_start, packed_size, copy_size);
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start,
packed_size, copy_size);
} else {
CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
CopySmiToDoubleElements(
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
}
} else {
CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
CopyObjectToDoubleElements(
FixedArray::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
}
} else {
UNREACHABLE();
@ -1139,10 +1064,13 @@ class FastDoubleElementsAccessor
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
return CopyDoubleToObjectElements(
from, from_start, to, to_kind, to_start, copy_size);
FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
to_kind, to_start, copy_size);
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
FixedDoubleArray::cast(to),
to_start, copy_size);
return from;
default:
UNREACHABLE();
@ -1201,37 +1129,17 @@ class ExternalElementsAccessor
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* backing_store) {
BackingStore* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
? BackingStore::cast(backing_store)->get(key)
? backing_store->get(key)
: backing_store->GetHeap()->undefined_value();
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
? NONE : ABSENT;
}
MUST_USE_RESULT static PropertyType GetTypeImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
? FIELD : NONEXISTENT;
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
FixedArrayBase* backing_store) {
BackingStore* backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
return obj;
@ -1247,7 +1155,7 @@ class ExternalElementsAccessor
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
BackingStore* backing_store) {
uint32_t capacity =
ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
return key < capacity;
@ -1356,11 +1264,10 @@ class DictionaryElementsAccessor
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
FixedArrayBase* store,
SeededNumberDictionary* dict,
JSArray* array,
Object* length_object,
uint32_t length) {
SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Heap* heap = array->GetHeap();
int capacity = dict->Capacity();
uint32_t new_length = length;
@ -1470,12 +1377,14 @@ class DictionaryElementsAccessor
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
CopyDictionaryToObjectElements(
from, from_start, to, to_kind, to_start, copy_size);
SeededNumberDictionary::cast(from), from_start,
FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDictionaryToDoubleElements(
from, from_start, to, to_start, copy_size);
SeededNumberDictionary::cast(from), from_start,
FixedDoubleArray::cast(to), to_start, copy_size);
return from;
default:
UNREACHABLE();
@ -1498,8 +1407,7 @@ class DictionaryElementsAccessor
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* store) {
SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
SeededNumberDictionary* backing_store) {
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = backing_store->ValueAt(entry);
@ -1516,59 +1424,16 @@ class DictionaryElementsAccessor
return obj->GetHeap()->the_hole_value();
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* backing_store) {
SeededNumberDictionary* dictionary =
SeededNumberDictionary::cast(backing_store);
int entry = dictionary->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
return dictionary->DetailsAt(entry).attributes();
}
return ABSENT;
}
MUST_USE_RESULT static PropertyType GetTypeImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* store) {
SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
return backing_store->DetailsAt(entry).type();
}
return NONEXISTENT;
}
MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* store) {
SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound &&
backing_store->DetailsAt(entry).type() == CALLBACKS &&
backing_store->ValueAt(entry)->IsAccessorPair()) {
return AccessorPair::cast(backing_store->ValueAt(entry));
}
return NULL;
}
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store) {
return SeededNumberDictionary::cast(backing_store)->FindEntry(key) !=
SeededNumberDictionary* backing_store) {
return backing_store->FindEntry(key) !=
SeededNumberDictionary::kNotFound;
}
static uint32_t GetKeyForIndexImpl(FixedArrayBase* store,
static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict,
uint32_t index) {
SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Object* key = dict->KeyAt(index);
return Smi::cast(key)->value();
}
@ -1591,8 +1456,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* parameters) {
FixedArray* parameter_map = FixedArray::cast(parameters);
FixedArray* parameter_map) {
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
Context* context = Context::cast(parameter_map->get(0));
@ -1619,61 +1483,10 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* backing_store) {
FixedArray* parameter_map = FixedArray::cast(backing_store);
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
return NONE;
} else {
// If not aliased, check the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
return ElementsAccessor::ForArray(arguments)->GetAttributes(
receiver, obj, key, arguments);
}
}
MUST_USE_RESULT static PropertyType GetTypeImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* parameters) {
FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
return FIELD;
} else {
// If not aliased, check the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
return ElementsAccessor::ForArray(arguments)->GetType(
receiver, obj, key, arguments);
}
}
MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
FixedArrayBase* parameters) {
FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
return NULL;
} else {
// If not aliased, check the arguments.
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
return ElementsAccessor::ForArray(arguments)->GetAccessorPair(
receiver, obj, key, arguments);
}
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
FixedArrayBase* parameter_map) {
FixedArray* parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
// correctly implement [[DefineOwnProperty]] on arrays.
UNIMPLEMENTED();
@ -1712,20 +1525,19 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
int packed_size,
int copy_size) {
FixedArray* parameter_map = FixedArray::cast(from);
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
return accessor->CopyElements(NULL, from_start, to, to_kind,
to_start, copy_size, arguments);
}
static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
FixedArray* parameter_map = FixedArray::cast(backing_store);
static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
return Max(static_cast<uint32_t>(parameter_map->length() - 2),
ForArray(arguments)->GetCapacity(arguments));
}
static uint32_t GetKeyForIndexImpl(FixedArrayBase* dict,
static uint32_t GetKeyForIndexImpl(FixedArray* dict,
uint32_t index) {
return index;
}
@ -1733,14 +1545,12 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* parameters) {
FixedArray* parameter_map = FixedArray::cast(parameters);
FixedArray* parameter_map) {
Object* probe = GetParameterMapArg(holder, parameter_map, key);
if (!probe->IsTheHole()) {
return true;
} else {
FixedArrayBase* arguments =
FixedArrayBase::cast(FixedArray::cast(parameter_map)->get(1));
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
return !accessor->Get(receiver, holder, key, arguments)->IsTheHole();
}
@ -1753,7 +1563,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
uint32_t length = holder->IsJSArray()
? Smi::cast(JSArray::cast(holder)->length())->value()
: parameter_map->length();
return key < (length - 2)
return key < (length - 2 )
? parameter_map->get(key + 2)
: parameter_map->GetHeap()->the_hole_value();
}
@ -1820,7 +1630,7 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
ElementsKindTraits>::
SetLengthImpl(JSObject* obj,
Object* length,
FixedArrayBase* backing_store) {
typename ElementsKindTraits::BackingStore* backing_store) {
JSArray* array = JSArray::cast(obj);
// Fast case: The new length fits into a Smi.
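Editor's note: the elements.cc hunks above restore the 3.14 shape of ElementsAccessorBase, where the static *Impl helpers take the accessor's own typed BackingStore* and the virtual wrappers perform the single cast from FixedArrayBase*. Below is a minimal, self-contained sketch of that curiously-recurring-template pattern; the class and member names (FixedArrayBaseLike, AccessorBase, Storage) are invented for illustration and are not the real V8 types.

// Illustrative sketch, not V8 code.
#include <cstdint>
#include <iostream>
#include <vector>

// Untyped base class that callers pass around, as with FixedArrayBase.
struct FixedArrayBaseLike {
  virtual ~FixedArrayBaseLike() {}
};

// Typed storage owned by one particular accessor.
struct Storage : FixedArrayBaseLike {
  std::vector<int> values;
};

// Generic interface: callers only ever hold the untyped pointer.
struct ElementsInterface {
  virtual ~ElementsInterface() {}
  virtual uint32_t GetCapacity(FixedArrayBaseLike* backing_store) = 0;
};

// CRTP base: the virtual wrapper performs the one cast, the static Impl
// works on the subclass's own typed backing store.
template <typename Subclass, typename BackingStore>
struct AccessorBase : ElementsInterface {
  static uint32_t GetCapacityImpl(BackingStore* backing_store) {
    return static_cast<uint32_t>(backing_store->values.size());
  }
  virtual uint32_t GetCapacity(FixedArrayBaseLike* backing_store) {
    return Subclass::GetCapacityImpl(static_cast<BackingStore*>(backing_store));
  }
};

struct FastAccessor : AccessorBase<FastAccessor, Storage> {};

int main() {
  Storage store;
  store.values = {1, 2, 3};
  FastAccessor accessor;
  FixedArrayBaseLike* untyped = &store;
  std::cout << accessor.GetCapacity(untyped) << "\n";  // prints 3
  return 0;
}

The payoff of this shape is that each accessor's internals never re-cast: only the virtual entry points touch the untyped pointer.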

43
deps/v8/src/elements.h

@ -71,39 +71,6 @@ class ElementsAccessor {
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
// Returns an element's attributes, or ABSENT if there is no such
// element. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
// Returns an element's type, or NONEXISTENT if there is no such
// element. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual PropertyType GetType(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
// Returns an element's accessors, or NULL if the element does not exist or
// is plain. This method doesn't iterate up the prototype chain. The caller
// can optionally pass in the backing store to use for the check, which must
// be compatible with the ElementsKind of the ElementsAccessor. If
// backing_store is NULL, the holder->elements() is used as the backing store.
MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
Object* receiver,
JSObject* holder,
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
@ -197,6 +164,16 @@ class ElementsAccessor {
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
void CopyObjectToObjectElements(FixedArray* from_obj,
ElementsKind from_kind,
uint32_t from_start,
FixedArray* to_obj,
ElementsKind to_kind,
uint32_t to_start,
int copy_size);
} } // namespace v8::internal
#endif // V8_ELEMENTS_H_
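Editor's note: the interface methods deleted above all follow the convention spelled out in their comments: the caller may pass an explicit backing store, and NULL means "use holder->elements()". A hedged, standalone sketch of that defaulting pattern follows; Holder, Store and HasElement are made-up stand-ins, not the actual ElementsAccessor API.

// Illustrative sketch, not V8 code.
#include <cstdint>
#include <iostream>
#include <vector>

struct Store {
  std::vector<int> values;
};

struct Holder {
  Store elements;  // the object's own backing store
};

// If no backing store is supplied, fall back to the holder's own elements,
// mirroring the "backing_store == NULL" convention described above.
bool HasElement(const Holder& holder, uint32_t key,
                const Store* backing_store = nullptr) {
  const Store& store = backing_store ? *backing_store : holder.elements;
  return key < store.values.size();
}

int main() {
  Holder holder;
  holder.elements.values = {10, 20};
  Store detached;
  detached.values = {1, 2, 3, 4};
  std::cout << HasElement(holder, 3) << "\n";             // 0: holder has 2 elements
  std::cout << HasElement(holder, 3, &detached) << "\n";  // 1: explicit store has 4
  return 0;
}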

30
deps/v8/src/execution.cc

@ -211,9 +211,6 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
Isolate* isolate = Isolate::Current();
ASSERT(isolate->has_pending_exception());
ASSERT(isolate->external_caught_exception());
if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
}
if (isolate->pending_exception() ==
isolate->heap()->termination_exception()) {
result = isolate->factory()->termination_exception();
@ -430,6 +427,25 @@ void StackGuard::TerminateExecution() {
}
bool StackGuard::IsRuntimeProfilerTick() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
}
void StackGuard::RequestRuntimeProfilerTick() {
// Ignore calls if we're not optimizing or if we can't get the lock.
if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
if (thread_local_.postpone_interrupts_nesting_ == 0) {
thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
isolate_->heap()->SetStackLimits();
}
ExecutionAccess::Unlock(isolate_);
}
}
void StackGuard::RequestCodeReadyEvent() {
ASSERT(FLAG_parallel_recompilation);
if (ExecutionAccess::TryLock(isolate_)) {
@ -921,14 +937,18 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
}
stack_guard->Continue(CODE_READY);
}
if (!stack_guard->IsTerminateExecution() &&
!FLAG_manual_parallel_recompilation) {
if (!stack_guard->IsTerminateExecution()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
isolate->counters()->stack_interrupts()->Increment();
// If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
if (FLAG_count_based_interrupts ||
stack_guard->IsRuntimeProfilerTick()) {
isolate->counters()->runtime_profiler_ticks()->Increment();
stack_guard->Continue(RUNTIME_PROFILER_TICK);
isolate->runtime_profiler()->OptimizeNow();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
DebugBreakHelper();

7
deps/v8/src/execution.h

@ -41,8 +41,9 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
GC_REQUEST = 1 << 5,
CODE_READY = 1 << 6
RUNTIME_PROFILER_TICK = 1 << 5,
GC_REQUEST = 1 << 6,
CODE_READY = 1 << 7
};
@ -193,6 +194,8 @@ class StackGuard {
void Interrupt();
bool IsTerminateExecution();
void TerminateExecution();
bool IsRuntimeProfilerTick();
void RequestRuntimeProfilerTick();
bool IsCodeReadyEvent();
void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
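Editor's note: RUNTIME_PROFILER_TICK returns here as one bit in the InterruptFlag mask, and RequestRuntimeProfilerTick sets that bit and lowers the stack limit (kInterruptLimit, as seen in the execution.cc hunk) so running code hits an interrupt check soon. The following is an illustrative standalone sketch of the request/check/clear bookkeeping; StackGuardLike is an invented name, and the real code additionally takes a lock and updates per-isolate limits.

// Illustrative sketch, not V8 code.
#include <cstdint>
#include <iostream>

enum InterruptFlag {
  INTERRUPT = 1 << 0,
  RUNTIME_PROFILER_TICK = 1 << 5,
  GC_REQUEST = 1 << 6,
};

class StackGuardLike {
 public:
  static const uintptr_t kInterruptLimit = static_cast<uintptr_t>(-1);

  void RequestRuntimeProfilerTick() {
    interrupt_flags_ |= RUNTIME_PROFILER_TICK;
    jslimit_ = kInterruptLimit;  // force the next stack check to fail
  }
  bool IsRuntimeProfilerTick() const {
    return (interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
  }
  void Continue(InterruptFlag flag) {  // clear a handled interrupt
    interrupt_flags_ &= ~static_cast<uint32_t>(flag);
  }

 private:
  uint32_t interrupt_flags_ = 0;
  uintptr_t jslimit_ = 0;
};

int main() {
  StackGuardLike guard;
  guard.RequestRuntimeProfilerTick();
  if (guard.IsRuntimeProfilerTick()) {
    std::cout << "profiler tick pending\n";
    guard.Continue(RUNTIME_PROFILER_TICK);
  }
  std::cout << guard.IsRuntimeProfilerTick() << "\n";  // 0
  return 0;
}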

5
deps/v8/src/extensions/externalize-string-extension.cc

@ -93,7 +93,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
return v8::ThrowException(v8::String::New(
"externalizeString() can't externalize twice."));
}
if (string->IsOneByteRepresentation() && !force_two_byte) {
if (string->IsAsciiRepresentation() && !force_two_byte) {
char* data = new char[string->length()];
String::WriteToFlat(*string, data, 0, string->length());
SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
@ -127,8 +127,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
return v8::ThrowException(v8::String::New(
"isAsciiString() requires a single string argument."));
}
return
Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation() ?
return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
v8::True() : v8::False();
}

4
deps/v8/src/extensions/gc-extension.cc

@ -40,11 +40,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
if (args[0]->BooleanValue()) {
HEAP->CollectGarbage(NEW_SPACE, "gc extension");
} else {
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
}
return v8::Undefined();
}

31
deps/v8/src/factory.cc

@ -178,7 +178,7 @@ Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
}
Handle<String> Factory::LookupAsciiSymbol(Handle<SeqOneByteString> string,
Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string,
int from,
int length) {
CALL_HEAP_FUNCTION(isolate(),
@ -200,7 +200,7 @@ Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateStringFromOneByte(string, pretenure),
isolate()->heap()->AllocateStringFromAscii(string, pretenure),
String);
}
@ -222,12 +222,12 @@ Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
}
Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
Handle<SeqAsciiString> Factory::NewRawAsciiString(int length,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocateRawOneByteString(length, pretenure),
SeqOneByteString);
isolate()->heap()->AllocateRawAsciiString(length, pretenure),
SeqAsciiString);
}
@ -525,12 +525,6 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
}
Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
int new_length) {
CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray);
}
Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
@ -876,13 +870,6 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
}
Handle<JSObject> Factory::NewExternal(void* value) {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateExternal(value),
JSObject);
}
Handle<Code> Factory::NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_ref,
@ -950,9 +937,6 @@ Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
Handle<JSArray> Factory::NewJSArray(int capacity,
ElementsKind elements_kind,
PretenureFlag pretenure) {
if (capacity != 0) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSArrayAndStorage(
elements_kind,
@ -971,7 +955,6 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
isolate(),
isolate()->heap()->AllocateJSArrayWithElements(*elements,
elements_kind,
elements->length(),
pretenure),
JSArray);
}
@ -1370,7 +1353,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
// Check to see whether there is a matching element in the cache.
Handle<MapCache> cache =
Handle<MapCache>(MapCache::cast(context->map_cache()));
Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
Handle<Object> result = Handle<Object>(cache->Lookup(*keys));
if (result->IsMap()) return Handle<Map>::cast(result);
// Create a new map and add it to the cache.
Handle<Map> map =
@ -1422,7 +1405,7 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
bool* pending_exception) {
// Configure the instance by adding the properties specified by the
// instance template.
Handle<Object> instance_template(desc->instance_template(), isolate());
Handle<Object> instance_template = Handle<Object>(desc->instance_template());
if (!instance_template->IsUndefined()) {
Execution::ConfigureInstance(instance,
instance_template,
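Editor's note: every Factory method above is a thin CALL_HEAP_FUNCTION wrapper. The raw heap call returns a MaybeObject that either carries the result or signals a retry-after-GC failure, and the wrapper loops until it can hand back a real object. A simplified, self-contained sketch of that contract follows; GcHeap, Maybe and AllocateWithRetry are invented names, not the V8 macros.

// Illustrative sketch, not V8 code.
#include <cstdio>

// A failed allocation carries a "retry after GC" signal instead of a pointer.
struct Maybe {
  void* value;
  bool retry_after_gc;
  bool To(void** out) const {
    if (retry_after_gc) return false;
    *out = value;
    return true;
  }
};

struct GcHeap {
  int free_slots = 0;
  static char storage[16];

  Maybe Allocate() {
    if (free_slots == 0) return Maybe{nullptr, true};  // caller must GC and retry
    --free_slots;
    return Maybe{storage, false};
  }
  void CollectGarbage() { free_slots = 4; }  // pretend a GC freed space
};
char GcHeap::storage[16];

// What the factory wrappers expand to, in spirit: retry the allocation once
// after a GC before giving up, then return the usable result.
void* AllocateWithRetry(GcHeap& heap) {
  for (int attempt = 0; attempt < 2; ++attempt) {
    void* result = nullptr;
    if (heap.Allocate().To(&result)) return result;
    heap.CollectGarbage();
  }
  return nullptr;  // out of memory
}

int main() {
  GcHeap heap;  // starts full: first allocation fails, GC runs, retry succeeds
  std::printf("allocated: %s\n", AllocateWithRetry(heap) ? "yes" : "no");
  return 0;
}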

9
deps/v8/src/factory.h

@ -82,7 +82,7 @@ class Factory {
Handle<String> LookupSymbol(Vector<const char> str);
Handle<String> LookupSymbol(Handle<String> str);
Handle<String> LookupAsciiSymbol(Vector<const char> str);
Handle<String> LookupAsciiSymbol(Handle<SeqOneByteString>,
Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>,
int from,
int length);
Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
@ -130,7 +130,7 @@ class Factory {
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
Handle<SeqOneByteString> NewRawOneByteString(
Handle<SeqAsciiString> NewRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
Handle<SeqTwoByteString> NewRawTwoByteString(
@ -239,9 +239,6 @@ class Factory {
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
int new_length);
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
@ -328,8 +325,6 @@ class Factory {
Handle<ScopeInfo> NewScopeInfo(int length);
Handle<JSObject> NewExternal(void* value);
Handle<Code> NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference,

29
deps/v8/src/flag-definitions.h

@ -144,16 +144,12 @@ DEFINE_bool(harmony_modules, false,
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
DEFINE_bool(harmony_observation, false,
"enable harmony object observation (implies harmony collections")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony_modules, harmony_scoping)
DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
@ -181,7 +177,6 @@ DEFINE_int(max_inlined_nodes, 196,
DEFINE_int(max_inlined_nodes_cumulative, 196,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
true,
"crankshaft harvests type feedback from stub cache")
@ -226,7 +221,7 @@ DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
DEFINE_bool(opt_safe_uint32_operations, true,
"allow uint32 values on optimize frames if they are used only in "
"allow uint32 values on optimize frames if they are used only in"
"safe operations")
DEFINE_bool(parallel_recompilation, false,
@ -234,9 +229,6 @@ DEFINE_bool(parallel_recompilation, false,
DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
DEFINE_int(parallel_recompilation_queue_length, 2,
"the length of the parallel compilation queue")
DEFINE_bool(manual_parallel_recompilation, false,
"disable automatic optimization")
DEFINE_implication(manual_parallel_recompilation, parallel_recompilation)
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
@ -247,6 +239,8 @@ DEFINE_bool(self_optimization, false,
DEFINE_bool(direct_self_opt, false,
"call recompile stub directly when self-optimizing")
DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
DEFINE_bool(count_based_interrupts, false,
"trigger profiler ticks based on counting instead of timing")
DEFINE_bool(interrupt_at_exit, false,
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
@ -262,6 +256,7 @@ DEFINE_implication(experimental_profiler, watch_ic_patching)
DEFINE_implication(experimental_profiler, self_optimization)
// Not implying direct_self_opt here because it seems to be a bad idea.
DEFINE_implication(experimental_profiler, retry_self_opt)
DEFINE_implication(experimental_profiler, count_based_interrupts)
DEFINE_implication(experimental_profiler, interrupt_at_exit)
DEFINE_implication(experimental_profiler, weighted_back_edges)
@ -394,12 +389,7 @@ DEFINE_bool(trace_external_memory, false,
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
DEFINE_bool(flush_code_incrementally, true,
"flush code that we expect not to use again (incrementally)")
DEFINE_bool(age_code, true,
"track un-executed functions to age code and flush only "
"old code")
"flush code that we expect not to use again before full gc")
DEFINE_bool(incremental_marking, true, "use incremental marking")
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
@ -439,9 +429,6 @@ DEFINE_bool(incremental_code_compaction, true,
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
DEFINE_bool(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
DEFINE_int(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
@ -655,14 +642,12 @@ DEFINE_bool(prof_lazy, false,
DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
DEFINE_bool(log_timer_events, false,
"Time events including external callbacks.")
DEFINE_implication(log_timer_events, log_internal_timer_events)
//
// Disassembler only flags
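Editor's note: the flag changes above lean on DEFINE_implication, which makes one boolean flag switch others on, for example --harmony enabling the individual harmony_* features, or --experimental_profiler enabling count_based_interrupts. A rough standalone sketch of applying such implications after parsing, using an invented subset of flag variables rather than V8's macro machinery:

// Illustrative sketch, not V8 code.
#include <iostream>

// Parsed command-line flags (invented subset).
bool FLAG_harmony = false;
bool FLAG_harmony_modules = false;
bool FLAG_harmony_scoping = false;
bool FLAG_experimental_profiler = false;
bool FLAG_count_based_interrupts = false;

// Equivalent of DEFINE_implication(whenever, then): if the first flag is set,
// force the second one on as well. Run after all flags have been parsed.
void ApplyImplications() {
  if (FLAG_harmony) {
    FLAG_harmony_modules = true;
    FLAG_harmony_scoping = true;
  }
  if (FLAG_harmony_modules) FLAG_harmony_scoping = true;
  if (FLAG_experimental_profiler) FLAG_count_based_interrupts = true;
}

int main() {
  FLAG_harmony = true;                // user passed --harmony only
  FLAG_experimental_profiler = true;  // and --experimental_profiler
  ApplyImplications();
  std::cout << FLAG_harmony_scoping << " "
            << FLAG_count_based_interrupts << "\n";  // 1 1
  return 0;
}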

4
deps/v8/src/frames.cc

@ -484,7 +484,7 @@ Address StackFrame::UnpaddedFP() const {
Code* EntryFrame::unchecked_code() const {
return HEAP->js_entry_code();
return HEAP->raw_unchecked_js_entry_code();
}
@ -507,7 +507,7 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
Code* EntryConstructFrame::unchecked_code() const {
return HEAP->js_construct_entry_code();
return HEAP->raw_unchecked_js_construct_entry_code();
}

208
deps/v8/src/full-codegen.cc

@ -86,10 +86,6 @@ void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
}
void BreakableStatementChecker::VisitModuleStatement(ModuleStatement* stmt) {
}
void BreakableStatementChecker::VisitBlock(Block* stmt) {
}
@ -470,8 +466,9 @@ void FullCodeGenerator::RecordTypeFeedbackCell(
}
void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a
// state.
ASSERT(masm_->pc_offset() > 0);
BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
stack_checks_.Add(entry, zone());
@ -585,137 +582,16 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
}
void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
ASSERT(scope_->is_global_scope());
for (int i = 0; i < declarations->length(); i++) {
ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration();
if (declaration != NULL) {
ModuleLiteral* module = declaration->module()->AsModuleLiteral();
if (module != NULL) {
Comment cmnt(masm_, "[ Link nested modules");
Scope* scope = module->body()->scope();
Interface* interface = scope->interface();
ASSERT(interface->IsModule() && interface->IsFrozen());
interface->Allocate(scope->module_var()->index());
// Set up module context.
ASSERT(scope->interface()->Index() >= 0);
__ Push(Smi::FromInt(scope->interface()->Index()));
__ Push(scope->GetScopeInfo());
__ CallRuntime(Runtime::kPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
AllocateModules(scope->declarations());
// Pop module context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
// Update local stack frame context field.
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
}
}
}
// Modules have their own local scope, represented by their own context.
// Module instance objects have an accessor for every export that forwards
// access to the respective slot from the module's context. (Exports that are
// modules themselves, however, are simple data properties.)
//
// All modules have a _hosting_ scope/context, which (currently) is the
// (innermost) enclosing global scope. To deal with recursion, nested modules
// are hosted by the same scope as global ones.
//
// For every (global or nested) module literal, the hosting context has an
// internal slot that points directly to the respective module context. This
// enables quick access to (statically resolved) module members by 2-dimensional
// access through the hosting context. For example,
//
// module A {
// let x;
// module B { let y; }
// }
// module C { let z; }
//
// allocates contexts as follows:
//
// [header| .A | .B | .C | A | C ] (global)
// | | |
// | | +-- [header| z ] (module)
// | |
// | +------- [header| y ] (module)
// |
// +------------ [header| x | B ] (module)
//
// Here, .A, .B, .C are the internal slots pointing to the hosted module
// contexts, whereas A, B, C hold the actual instance objects (note that every
// module context also points to the respective instance object through its
// extension slot in the header).
//
// To deal with arbitrary recursion and aliases between modules,
// they are created and initialized in several stages. Each stage applies to
// all modules in the hosting global scope, including nested ones.
//
// 1. Allocate: for each module _literal_, allocate the module contexts and
// respective instance object and wire them up. This happens in the
// PushModuleContext runtime function, as generated by AllocateModules
// (invoked by VisitDeclarations in the hosting scope).
//
// 2. Bind: for each module _declaration_ (i.e. literals as well as aliases),
// assign the respective instance object to respective local variables. This
// happens in VisitModuleDeclaration, and uses the instance objects created
// in the previous stage.
// For each module _literal_, this phase also constructs a module descriptor
// for the next stage. This happens in VisitModuleLiteral.
//
// 3. Populate: invoke the DeclareModules runtime function to populate each
// _instance_ object with accessors for it exports. This is generated by
// DeclareModules (invoked by VisitDeclarations in the hosting scope again),
// and uses the descriptors generated in the previous stage.
//
// 4. Initialize: execute the module bodies (and other code) in sequence. This
// happens by the separate statements generated for module bodies. To reenter
// the module scopes properly, the parser inserted ModuleStatements.
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
Handle<FixedArray> saved_modules = modules_;
int saved_module_index = module_index_;
ZoneList<Handle<Object> >* saved_globals = globals_;
ZoneList<Handle<Object> > inner_globals(10, zone());
globals_ = &inner_globals;
if (scope_->num_modules() != 0) {
// This is a scope hosting modules. Allocate a descriptor array to pass
// to the runtime for initialization.
Comment cmnt(masm_, "[ Allocate modules");
ASSERT(scope_->is_global_scope());
modules_ =
isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED);
module_index_ = 0;
// Generate code for allocating all modules, including nested ones.
// The allocated contexts are stored in internal variables in this scope.
AllocateModules(declarations);
}
AstVisitor::VisitDeclarations(declarations);
if (scope_->num_modules() != 0) {
// Initialize modules from descriptor array.
ASSERT(module_index_ == modules_->length());
DeclareModules(modules_);
modules_ = saved_modules;
module_index_ = saved_module_index;
}
if (!globals_->is_empty()) {
// Invoke the platform-dependent code generator to do the actual
// declaration of the global functions and variables.
// declaration the global functions and variables.
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
for (int i = 0; i < globals_->length(); ++i)
@ -728,23 +604,19 @@ void FullCodeGenerator::VisitDeclarations(
void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
// Allocate a module context statically.
Block* block = module->body();
Scope* saved_scope = scope();
scope_ = block->scope();
Interface* interface = scope_->interface();
Interface* interface = module->interface();
Handle<JSModule> instance = interface->Instance();
Comment cmnt(masm_, "[ ModuleLiteral");
SetStatementPosition(block);
ASSERT(!modules_.is_null());
ASSERT(module_index_ < modules_->length());
int index = module_index_++;
// Set up module context.
ASSERT(interface->Index() >= 0);
__ Push(Smi::FromInt(interface->Index()));
__ Push(Smi::FromInt(0));
__ CallRuntime(Runtime::kPushModuleContext, 2);
__ Push(instance);
__ CallRuntime(Runtime::kPushModuleContext, 1);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
@ -752,11 +624,6 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
VisitDeclarations(scope_->declarations());
}
// Populate the module description.
Handle<ModuleInfo> description =
ModuleInfo::Create(isolate(), interface, scope_);
modules_->set(index, *description);
scope_ = saved_scope;
// Pop module context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@ -777,20 +644,8 @@ void FullCodeGenerator::VisitModulePath(ModulePath* module) {
}
void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
// TODO(rossberg): dummy allocation for now.
Scope* scope = module->body()->scope();
Interface* interface = scope_->interface();
ASSERT(interface->IsModule() && interface->IsFrozen());
ASSERT(!modules_.is_null());
ASSERT(module_index_ < modules_->length());
interface->Allocate(scope->module_var()->index());
int index = module_index_++;
Handle<ModuleInfo> description =
ModuleInfo::Create(isolate(), interface, scope_);
modules_->set(index, *description);
void FullCodeGenerator::VisitModuleUrl(ModuleUrl* decl) {
// TODO(rossberg)
}
@ -1049,10 +904,19 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
// Push a block context when entering a block with block scoped variables.
if (stmt->scope() != NULL) {
scope_ = stmt->scope();
ASSERT(!scope_->is_module_scope());
if (scope_->is_module_scope()) {
// If this block is a module body, then we have already allocated and
// initialized the declarations earlier. Just push the context.
ASSERT(!scope_->interface()->Instance().is_null());
__ Push(scope_->interface()->Instance());
__ CallRuntime(Runtime::kPushModuleContext, 1);
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
} else {
{ Comment cmnt(masm_, "[ Extend block context");
Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
int heap_slots =
scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
__ Push(scope_info);
PushFunctionArgumentForContextAllocation();
if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
@ -1070,7 +934,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
VisitDeclarations(scope_->declarations());
}
}
}
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
VisitStatements(stmt->statements());
scope_ = saved_scope;
@ -1087,26 +951,6 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
}
void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
Comment cmnt(masm_, "[ Module context");
__ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
__ Push(Smi::FromInt(0));
__ CallRuntime(Runtime::kPushModuleContext, 2);
StoreToFrameField(
StandardFrameConstants::kContextOffset, context_register());
Scope* saved_scope = scope_;
scope_ = stmt->body()->scope();
VisitStatements(stmt->body()->statements());
scope_ = saved_scope;
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
// Update local stack frame context field.
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Comment cmnt(masm_, "[ ExpressionStatement");
SetStatementPosition(stmt);
@ -1267,7 +1111,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
__ bind(&stack_check);
EmitBackEdgeBookkeeping(stmt, &body);
EmitStackCheck(stmt, &body);
__ jmp(&body);
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@ -1296,7 +1140,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
SetStatementPosition(stmt);
// Check stack before looping.
EmitBackEdgeBookkeeping(stmt, &body);
EmitStackCheck(stmt, &body);
__ bind(&test);
VisitForControl(stmt->cond(),
@ -1342,7 +1186,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
SetStatementPosition(stmt);
// Check stack before looping.
EmitBackEdgeBookkeeping(stmt, &body);
EmitStackCheck(stmt, &body);
__ bind(&test);
if (stmt->cond() != NULL) {

19
deps/v8/src/full-codegen.h

@ -396,15 +396,9 @@ class FullCodeGenerator: public AstVisitor {
void VisitInDuplicateContext(Expression* expr);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareModules(Handle<FixedArray> descriptions);
void DeclareGlobals(Handle<FixedArray> pairs);
int DeclareGlobalsFlags();
// Generate code to allocate all (including nested) modules and contexts.
// Because of recursive linking and the presence of module alias declarations,
// this has to be a separate pass _before_ populating or executing any module.
void AllocateModules(ZoneList<Declaration*>* declarations);
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
@ -448,13 +442,14 @@ class FullCodeGenerator: public AstVisitor {
// neither a with nor a catch context.
void EmitDebugCheckDeclarationContext(Variable* variable);
// Platform-specific code for checking the stack limit at the back edge of
// a loop.
// This is meant to be called at loop back edges, |back_edge_target| is
// the jump target of the back edge and is used to approximate the amount
// of code inside the loop.
void EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target);
// Record the OSR AST id corresponding to a back edge in the code.
void RecordBackEdge(BailoutId osr_ast_id);
void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
// Record the OSR AST id corresponding to a stack check in the code.
void RecordStackCheck(BailoutId osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
@ -809,12 +804,8 @@ class FullCodeGenerator: public AstVisitor {
NestedStatement* nesting_stack_;
int loop_depth_;
ZoneList<Handle<Object> >* globals_;
Handle<FixedArray> modules_;
int module_index_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
// TODO(svenpanne) Rename this to something like back_edges_ and rename
// related functions accordingly.
ZoneList<BailoutEntry> stack_checks_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
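Editor's note: RecordStackCheck, the 3.14 name for what 3.15 called RecordBackEdge, simply remembers for every loop back edge which AST id the interrupt check belongs to and at which pc offset it was emitted, so on-stack replacement can later map a pc back to a loop. A minimal sketch of that bookkeeping; BackEdgeTable and Entry are invented types, and this is the table logic only, not the generated code.

// Illustrative sketch, not V8 code.
#include <cstdint>
#include <iostream>
#include <vector>

struct Entry {
  int ast_id;          // which loop the check belongs to
  uint32_t pc_offset;  // where in the generated code the check sits
};

class BackEdgeTable {
 public:
  // Called once per loop back edge while emitting code.
  void RecordStackCheck(int ast_id, uint32_t pc_offset) {
    entries_.push_back(Entry{ast_id, pc_offset});
  }
  // Used later (e.g. for on-stack replacement) to find the loop for a pc.
  int LookupAstId(uint32_t pc_offset) const {
    for (const Entry& e : entries_) {
      if (e.pc_offset == pc_offset) return e.ast_id;
    }
    return -1;
  }

 private:
  std::vector<Entry> entries_;
};

int main() {
  BackEdgeTable table;
  table.RecordStackCheck(/*ast_id=*/7, /*pc_offset=*/0x40);
  table.RecordStackCheck(/*ast_id=*/9, /*pc_offset=*/0x9c);
  std::cout << table.LookupAstId(0x9c) << "\n";  // 9
  return 0;
}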

91
deps/v8/src/global-handles.cc

@ -69,7 +69,6 @@ class GlobalHandles::Node {
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
index_ = 0;
independent_ = false;
partially_dependent_ = false;
in_new_space_list_ = false;
parameter_or_next_free_.next_free = NULL;
callback_ = NULL;
@ -90,7 +89,6 @@ class GlobalHandles::Node {
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
independent_ = false;
partially_dependent_ = false;
state_ = NORMAL;
parameter_or_next_free_.parameter = NULL;
callback_ = NULL;
@ -156,15 +154,6 @@ class GlobalHandles::Node {
}
bool is_independent() const { return independent_; }
void MarkPartiallyDependent(GlobalHandles* global_handles) {
ASSERT(state_ != FREE);
if (global_handles->isolate()->heap()->InNewSpace(object_)) {
partially_dependent_ = true;
}
}
bool is_partially_dependent() const { return partially_dependent_; }
void clear_partially_dependent() { partially_dependent_ = false; }
// In-new-space-list flag accessors.
void set_in_new_space_list(bool v) { in_new_space_list_ = v; }
bool is_in_new_space_list() const { return in_new_space_list_; }
@ -271,7 +260,6 @@ class GlobalHandles::Node {
State state_ : 4;
bool independent_ : 1;
bool partially_dependent_ : 1;
bool in_new_space_list_ : 1;
// Handle specific callback.
@ -460,11 +448,6 @@ void GlobalHandles::MarkIndependent(Object** location) {
}
void GlobalHandles::MarkPartiallyDependent(Object** location) {
Node::FromLocation(location)->MarkPartiallyDependent(this);
}
bool GlobalHandles::IsIndependent(Object** location) {
return Node::FromLocation(location)->is_independent();
}
@ -518,8 +501,7 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
if (node->IsStrongRetainer() ||
(node->IsWeakRetainer() && !node->is_independent() &&
!node->is_partially_dependent())) {
(node->IsWeakRetainer() && !node->is_independent())) {
v->VisitPointer(node->location());
}
}
@ -531,8 +513,8 @@ void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
if ((node->is_independent() || node->is_partially_dependent()) &&
node->IsWeak() && f(isolate_->heap(), node->location())) {
if (node->is_independent() && node->IsWeak() &&
f(isolate_->heap(), node->location())) {
node->MarkPending();
}
}
@ -543,61 +525,15 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
if ((node->is_independent() || node->is_partially_dependent()) &&
node->IsWeakRetainer()) {
if (node->is_independent() && node->IsWeakRetainer()) {
v->VisitPointer(node->location());
}
}
}
bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
WeakSlotCallbackWithHeap can_skip) {
int last = 0;
bool any_group_was_visited = false;
for (int i = 0; i < object_groups_.length(); i++) {
ObjectGroup* entry = object_groups_.at(i);
ASSERT(entry != NULL);
Object*** objects = entry->objects_;
bool group_should_be_visited = false;
for (size_t j = 0; j < entry->length_; j++) {
Object* object = *objects[j];
if (object->IsHeapObject()) {
if (!can_skip(isolate_->heap(), &object)) {
group_should_be_visited = true;
break;
}
}
}
if (!group_should_be_visited) {
object_groups_[last++] = entry;
continue;
}
// An object in the group requires visiting, so iterate over all
// objects in the group.
for (size_t j = 0; j < entry->length_; ++j) {
Object* object = *objects[j];
if (object->IsHeapObject()) {
v->VisitPointer(&object);
any_group_was_visited = true;
}
}
// Once the entire group has been iterated over, set the object
// group to NULL so it won't be processed again.
entry->Dispose();
object_groups_.at(i) = NULL;
}
object_groups_.Rewind(last);
return any_group_was_visited;
}
bool GlobalHandles::PostGarbageCollectionProcessing(
GarbageCollector collector, GCTracer* tracer) {
GarbageCollector collector) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
@ -611,10 +547,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
// Skip dependent handles. Their weak callbacks might expect to be
// called between two global garbage collection callbacks which
// are not called for minor collections.
if (!node->is_independent() && !node->is_partially_dependent()) {
continue;
}
node->clear_partially_dependent();
if (!node->is_independent()) continue;
if (node->PostGarbageCollectionProcessing(isolate_, this)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of
@ -630,7 +563,6 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
@ -647,17 +579,10 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
if (node->IsRetainer()) {
if (isolate_->heap()->InNewSpace(node->object())) {
if (node->IsRetainer() && isolate_->heap()->InNewSpace(node->object())) {
new_space_nodes_[last++] = node;
tracer->increment_nodes_copied_in_new_space();
} else {
node->set_in_new_space_list(false);
tracer->increment_nodes_promoted();
}
} else {
node->set_in_new_space_list(false);
tracer->increment_nodes_died_in_new_space();
}
}
new_space_nodes_.Rewind(last);
@ -685,7 +610,7 @@ void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsRetainer() && it.node()->has_wrapper_class_id()) {
if (it.node()->has_wrapper_class_id() && it.node()->IsRetainer()) {
v->VisitEmbedderReference(it.node()->location(),
it.node()->wrapper_class_id());
}
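Editor's note: the removed IterateObjectGroups walked each registered object group, asked whether any member still needed visiting, and if so visited every member and retired the whole group, compacting the survivors back into the list. A standalone sketch of that keep-or-visit-and-drop compaction; Group, ShouldVisit and Visit are invented stand-ins for the V8 handles and callbacks.

// Illustrative sketch, not V8 code.
#include <cstddef>
#include <iostream>
#include <vector>

struct Group {
  std::vector<int> objects;  // stand-ins for the handles in one group
};

bool ShouldVisit(int object) { return object % 2 == 0; }  // "cannot be skipped"
void Visit(int object) { std::cout << "visit " << object << "\n"; }

// Visit every member of any group that contains at least one object the
// caller cannot skip, then drop that group; keep the untouched groups.
bool IterateObjectGroups(std::vector<Group>& groups) {
  size_t last = 0;
  bool any_group_was_visited = false;
  for (size_t i = 0; i < groups.size(); ++i) {
    bool group_should_be_visited = false;
    for (int object : groups[i].objects) {
      if (ShouldVisit(object)) { group_should_be_visited = true; break; }
    }
    if (!group_should_be_visited) {
      groups[last++] = groups[i];  // keep for a later pass
      continue;
    }
    for (int object : groups[i].objects) Visit(object);
    any_group_was_visited = true;  // group is consumed, not copied forward
  }
  groups.resize(last);
  return any_group_was_visited;
}

int main() {
  std::vector<Group> groups = {{{1, 3}}, {{5, 6}}};
  IterateObjectGroups(groups);         // visits 5 and 6
  std::cout << groups.size() << "\n";  // 1 group left
  return 0;
}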

20
deps/v8/src/global-handles.h

@ -155,9 +155,6 @@ class GlobalHandles {
// Clear the weakness of a global handle.
void MarkIndependent(Object** location);
// Mark the reference to this object externaly unreachable.
void MarkPartiallyDependent(Object** location);
static bool IsIndependent(Object** location);
// Tells whether global handle is near death.
@ -168,8 +165,7 @@ class GlobalHandles {
// Process pending weak handles.
// Returns true if next major GC is likely to collect more garbage.
bool PostGarbageCollectionProcessing(GarbageCollector collector,
GCTracer* tracer);
bool PostGarbageCollectionProcessing(GarbageCollector collector);
// Iterates over all strong handles.
void IterateStrongRoots(ObjectVisitor* v);
@ -199,22 +195,16 @@ class GlobalHandles {
// Iterates over strong and dependent handles. See the node above.
void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
// Finds weak independent or partially independent handles satisfying
// the callback predicate and marks them as pending. See the note above.
// Finds weak independent handles satisfying the callback predicate
// and marks them as pending. See the note above.
void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
// Iterates over weak independent or partially independent handles.
// See the note above.
// Iterates over weak independent handles. See the note above.
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
// Iterate over objects in object groups that have at least one object
// which requires visiting. The callback has to return true if objects
// can be skipped and false otherwise.
bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip);
// Add an object group.
// Should be only used in GC callback function before a collection.
// All groups are destroyed after a garbage collection.
// All groups are destroyed after a mark-compact collection.
void AddObjectGroup(Object*** handles,
size_t length,
v8::RetainedObjectInfo* info);

29
deps/v8/src/handles.cc

@ -229,12 +229,12 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
}
Handle<Object> SetProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
isolate,
Runtime::SetObjectProperty(
@ -593,25 +593,6 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
}
Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
Handle<String> name_or_source_url_key =
isolate->factory()->LookupAsciiSymbol("nameOrSourceURL");
Handle<JSValue> script_wrapper = GetScriptWrapper(script);
Handle<Object> property = GetProperty(script_wrapper,
name_or_source_url_key);
ASSERT(property->IsJSFunction());
Handle<JSFunction> method = Handle<JSFunction>::cast(property);
bool caught_exception;
Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
NULL, &caught_exception);
if (caught_exception) {
result = isolate->factory()->undefined_value();
}
return result;
}
static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
int len = array->length();
for (int i = 0; i < len; i++) {
@ -915,7 +896,7 @@ int Utf8LengthHelper(String* input,
int total = 0;
bool dummy;
while (true) {
if (input->IsOneByteRepresentation()) {
if (input->IsAsciiRepresentation()) {
*starts_with_surrogate = false;
return total + to - from;
}
@ -948,14 +929,14 @@ int Utf8LengthHelper(String* input,
} else {
if (first_length > from) {
// Left hand side is shorter.
if (first->IsOneByteRepresentation()) {
if (first->IsAsciiRepresentation()) {
total += first_length - from;
*starts_with_surrogate = false;
starts_with_surrogate = &dummy;
input = second;
from = 0;
to -= first_length;
} else if (second->IsOneByteRepresentation()) {
} else if (second->IsAsciiRepresentation()) {
followed_by_surrogate = false;
total += to - first_length;
input = first;

11
deps/v8/src/handles.h

@ -95,13 +95,6 @@ class Handle {
};
// Convenience wrapper.
template<class T>
inline Handle<T> handle(T* t, Isolate* isolate) {
return Handle<T>(t, isolate);
}
class DeferredHandles;
class HandleScopeImplementer;
@ -216,8 +209,7 @@ Handle<String> FlattenGetString(Handle<String> str);
int Utf8Length(Handle<String> str);
Handle<Object> SetProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
@ -268,7 +260,6 @@ int GetScriptLineNumber(Handle<Script> script, int code_position);
// The safe version does not make heap allocations but may work much slower.
int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
int GetScriptColumnNumber(Handle<Script> script, int code_position);
Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script);
// Computes the enumerable keys from interceptors. Used for debug mirrors and
// by GetKeysInFixedArrayFor below.

14
deps/v8/src/heap-inl.h

@ -91,7 +91,7 @@ MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
if (non_ascii_start >= length) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
return AllocateStringFromOneByte(str, pretenure);
return AllocateStringFromAscii(str, pretenure);
}
// Non-ASCII and we need to decode.
return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);
@ -109,12 +109,12 @@ MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
uint32_t hash_field) {
if (str.length() > SeqOneByteString::kMaxLength) {
if (str.length() > SeqAsciiString::kMaxLength) {
return Failure::OutOfMemoryException();
}
// Compute map and object size.
Map* map = ascii_symbol_map();
int size = SeqOneByteString::SizeFor(str.length());
int size = SeqAsciiString::SizeFor(str.length());
// Allocate string.
Object* result;
@ -134,7 +134,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
memcpy(answer->address() + SeqOneByteString::kHeaderSize,
memcpy(answer->address() + SeqAsciiString::kHeaderSize,
str.start(), str.length());
return answer;
@ -460,7 +460,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes) {
ASSERT(HasBeenSetUp());
intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes > 0) {
if (change_in_bytes >= 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
@ -607,7 +607,7 @@ void ExternalStringTable::Verify() {
Object* obj = Object::cast(new_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
ASSERT(obj != HEAP->the_hole_value());
ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
@ -617,7 +617,7 @@ void ExternalStringTable::Verify() {
Object* obj = Object::cast(old_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
ASSERT(obj != HEAP->the_hole_value());
ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
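
The AdjustAmountOfExternalAllocatedMemory hunk above only switches the delta test between > 0 and >= 0; the interesting part is the overflow guard around it. A minimal standalone sketch of that guard, using a plain global counter rather than the Heap member (the clamp-to-zero in the shrinking branch is a simplification made for this sketch, not necessarily V8's behavior):

#include <cstdint>
#include <cstdio>

static intptr_t external_memory = 0;

intptr_t AdjustExternalMemory(intptr_t change_in_bytes) {
  intptr_t amount = external_memory + change_in_bytes;
  if (change_in_bytes >= 0) {
    // Growing: only commit the new total if the sum is still larger than the
    // old one, so an update whose sum wrapped around is simply skipped.
    if (amount > external_memory) external_memory = amount;
  } else {
    // Shrinking: never let the running total go negative (sketch choice).
    external_memory = (amount >= 0) ? amount : 0;
  }
  return external_memory;
}

int main() {
  std::printf("%ld\n", (long)AdjustExternalMemory(1024));   // 1024
  std::printf("%ld\n", (long)AdjustExternalMemory(-2048));  // clamped to 0
}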

35
deps/v8/src/heap-profiler.cc

@ -65,29 +65,23 @@ void HeapProfiler::TearDown() {
}
HeapSnapshot* HeapProfiler::TakeSnapshot(
const char* name,
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
v8::ActivityControl* control) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
control,
resolver);
control);
}
HeapSnapshot* HeapProfiler::TakeSnapshot(
String* name,
HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
v8::ActivityControl* control) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
control,
resolver);
control);
}
@ -128,18 +122,16 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
}
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
const char* name,
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
v8::ActivityControl* control) {
HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
HeapSnapshot* result =
snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
HeapSnapshotGenerator generator(result, control, resolver);
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;
}
@ -155,13 +147,10 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
}
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
String* name,
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control,
resolver);
v8::ActivityControl* control) {
return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
}
void HeapProfiler::StartHeapObjectsTrackingImpl() {

24
deps/v8/src/heap-profiler.h

@ -51,16 +51,12 @@ class HeapProfiler {
static size_t GetMemorySizeUsedByProfiler();
static HeapSnapshot* TakeSnapshot(
const char* name,
static HeapSnapshot* TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
static HeapSnapshot* TakeSnapshot(
String* name,
v8::ActivityControl* control);
static HeapSnapshot* TakeSnapshot(String* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
v8::ActivityControl* control);
static void StartHeapObjectsTracking();
static void StopHeapObjectsTracking();
@ -85,16 +81,12 @@ class HeapProfiler {
private:
HeapProfiler();
~HeapProfiler();
HeapSnapshot* TakeSnapshotImpl(
const char* name,
HeapSnapshot* TakeSnapshotImpl(const char* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
HeapSnapshot* TakeSnapshotImpl(
String* name,
v8::ActivityControl* control);
HeapSnapshot* TakeSnapshotImpl(String* name,
int type,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
v8::ActivityControl* control);
void ResetSnapshots();
void StartHeapObjectsTrackingImpl();
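
The heap-profiler change above is the same edit applied three times: the two public TakeSnapshot overloads and the TakeSnapshotImpl they forward to all lose the resolver parameter. A toy sketch of that forwarding shape — Profiler, Snapshot, and the use of std::string in place of V8's String* are illustrative only, not the real profiler API:

#include <iostream>
#include <string>

struct ActivityControl {};               // stand-in for v8::ActivityControl
struct Snapshot { std::string name; int type; };

class Profiler {
 public:
  Snapshot TakeSnapshot(const char* name, int type, ActivityControl* control) {
    return TakeSnapshotImpl(name, type, control);
  }
  Snapshot TakeSnapshot(const std::string& name, int type,
                        ActivityControl* control) {
    // Convert the name and forward to the single implementation, so the
    // parameter list only has to change in one place when the API changes.
    return TakeSnapshotImpl(name.c_str(), type, control);
  }
 private:
  Snapshot TakeSnapshotImpl(const char* name, int type, ActivityControl*) {
    return Snapshot{name, type};
  }
};

int main() {
  Profiler p;
  ActivityControl control;
  std::cout << p.TakeSnapshot(std::string("heap"), 1, &control).name << "\n";
}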

433
deps/v8/src/heap.cc

@ -117,6 +117,7 @@ Heap::Heap()
allocation_allowed_(true),
allocation_timeout_(0),
disallow_allocation_failure_(false),
debug_utils_(NULL),
#endif // DEBUG
new_space_high_promotion_mode_active_(false),
old_gen_promotion_limit_(kMinimumPromotionLimit),
@ -136,7 +137,6 @@ Heap::Heap()
tracer_(NULL),
young_survivors_after_last_gc_(0),
high_survival_rate_period_length_(0),
low_survival_rate_period_length_(0),
survival_rate_(0),
previous_survival_rate_trend_(Heap::STABLE),
survival_rate_trend_(Heap::STABLE),
@ -212,20 +212,6 @@ intptr_t Heap::CommittedMemory() {
lo_space_->Size();
}
size_t Heap::CommittedPhysicalMemory() {
if (!HasBeenSetUp()) return 0;
return new_space_.CommittedPhysicalMemory() +
old_pointer_space_->CommittedPhysicalMemory() +
old_data_space_->CommittedPhysicalMemory() +
code_space_->CommittedPhysicalMemory() +
map_space_->CommittedPhysicalMemory() +
cell_space_->CommittedPhysicalMemory() +
lo_space_->CommittedPhysicalMemory();
}
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
@ -420,10 +406,6 @@ void Heap::GarbageCollectionPrologue() {
gc_count_++;
unflattened_strings_length_ = 0;
if (FLAG_flush_code && FLAG_flush_code_incrementally) {
mark_compact_collector()->EnableCodeFlushing(true);
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@ -615,7 +597,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
if (collector == MARK_COMPACTOR &&
!mark_compact_collector()->abort_incremental_marking() &&
!mark_compact_collector()->abort_incremental_marking_ &&
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
@ -643,25 +625,25 @@ bool Heap::CollectGarbage(AllocationSpace space,
// Tell the tracer which collector we've selected.
tracer.set_collector(collector);
{
HistogramTimerScope histogram_timer_scope(
(collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
: isolate_->counters()->gc_compactor());
HistogramTimer* rate = (collector == SCAVENGER)
? isolate_->counters()->gc_scavenger()
: isolate_->counters()->gc_compactor();
rate->Start();
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, &tracer);
}
rate->Stop();
ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
// This can do debug callbacks and restart incremental marking.
GarbageCollectionEpilogue();
}
// Start incremental marking for the next cycle. The heap snapshot
// generator needs incremental marking to stay off after it aborted.
if (!mark_compact_collector()->abort_incremental_marking() &&
incremental_marking()->IsStopped() &&
incremental_marking()->WorthActivating() &&
NextGCIsLikelyToBeFull()) {
if (incremental_marking()->IsStopped()) {
if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
incremental_marking()->Start();
}
}
return next_gc_likely_to_collect_more;
}
@ -957,16 +939,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
isolate_->counters()->objs_since_last_young()->Set(0);
// Callbacks that fire after this point might trigger nested GCs and
// restart incremental marking, so the assertion can't be moved down.
ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
gc_post_processing_depth_++;
{ DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
isolate_->global_handles()->PostGarbageCollectionProcessing(
collector, tracer);
isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
}
gc_post_processing_depth_--;
@ -1328,23 +1305,10 @@ void Heap::Scavenge() {
}
}
// Copy objects reachable from the code flushing candidates list.
MarkCompactCollector* collector = mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
}
// Scavenge object reachable from the native contexts list directly.
scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
while (isolate()->global_handles()->IterateObjectGroups(
&scavenge_visitor, &IsUnscavengedHeapObject)) {
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
isolate()->global_handles()->RemoveObjectGroups();
isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
&IsUnscavengedHeapObject);
isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
@ -1584,40 +1548,13 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
AssertNoAllocation no_allocation;
// Both the external string table and the symbol table may contain
// external strings, but neither lists them exhaustively, nor is the
// intersection set empty. Therefore we iterate over the external string
// table first, ignoring symbols, and then over the symbol table.
class ExternalStringTableVisitorAdapter : public ObjectVisitor {
class VisitorAdapter : public ObjectVisitor {
public:
explicit ExternalStringTableVisitorAdapter(
v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
// Visit non-symbol external strings,
// since symbols are listed in the symbol table.
if (!(*p)->IsSymbol()) {
ASSERT((*p)->IsExternalString());
visitor_->VisitExternalString(Utils::ToLocal(
Handle<String>(String::cast(*p))));
}
}
}
private:
v8::ExternalResourceVisitor* visitor_;
} external_string_table_visitor(visitor);
external_string_table_.Iterate(&external_string_table_visitor);
class SymbolTableVisitorAdapter : public ObjectVisitor {
public:
explicit SymbolTableVisitorAdapter(
v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
: visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if ((*p)->IsExternalString()) {
ASSERT((*p)->IsSymbol());
visitor_->VisitExternalString(Utils::ToLocal(
Handle<String>(String::cast(*p))));
}
@ -1625,9 +1562,8 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
}
private:
v8::ExternalResourceVisitor* visitor_;
} symbol_table_visitor(visitor);
symbol_table()->IterateElements(&symbol_table_visitor);
} visitor_adapter(visitor);
external_string_table_.Iterate(&visitor_adapter);
}
@ -1724,7 +1660,7 @@ template<MarksHandling marks_handling,
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
table_.Register(kVisitByteArray, &EvacuateByteArray);
@ -1968,11 +1904,11 @@ class ScavengingVisitor : public StaticVisitorBase {
}
static inline void EvacuateSeqOneByteString(Map* map,
static inline void EvacuateSeqAsciiString(Map* map,
HeapObject** slot,
HeapObject* object) {
int object_size = SeqOneByteString::cast(object)->
SeqOneByteStringSize(map->instance_type());
int object_size = SeqAsciiString::cast(object)->
SeqAsciiStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
map, slot, object, object_size);
}
@ -2539,14 +2475,6 @@ bool Heap::CreateInitialMaps() {
}
set_message_object_map(Map::cast(obj));
Map* external_map;
{ MaybeObject* maybe_obj =
AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
if (!maybe_obj->To(&external_map)) return false;
}
external_map->set_is_extensible(false);
set_external_map(external_map);
ASSERT(!InNewSpace(empty_fixed_array()));
return true;
}
@ -2765,7 +2693,7 @@ bool Heap::CreateInitialObjects() {
set_termination_exception(obj);
// Allocate the empty string.
{ MaybeObject* maybe_obj = AllocateRawOneByteString(0, TENURED);
{ MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_string(String::cast(obj));
@ -2864,15 +2792,6 @@ bool Heap::CreateInitialObjects() {
}
set_natives_source_cache(FixedArray::cast(obj));
// Allocate object to hold object observation state.
{ MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
{ MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
if (!maybe_obj->ToObject(&obj)) return false;
}
set_observation_state(JSObject::cast(obj));
// Handling of script id generation is in FACTORY->NewScript.
set_last_script_id(undefined_value());
@ -2892,34 +2811,6 @@ bool Heap::CreateInitialObjects() {
}
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
RootListIndex writable_roots[] = {
kStoreBufferTopRootIndex,
kStackLimitRootIndex,
kInstanceofCacheFunctionRootIndex,
kInstanceofCacheMapRootIndex,
kInstanceofCacheAnswerRootIndex,
kCodeStubsRootIndex,
kNonMonomorphicCacheRootIndex,
kPolymorphicCodeCacheRootIndex,
kLastScriptIdRootIndex,
kEmptyScriptRootIndex,
kRealStackLimitRootIndex,
kArgumentsAdaptorDeoptPCOffsetRootIndex,
kConstructStubDeoptPCOffsetRootIndex,
kGetterStubDeoptPCOffsetRootIndex,
kSetterStubDeoptPCOffsetRootIndex,
kSymbolTableRootIndex,
};
for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
if (root_index == writable_roots[i])
return true;
}
return false;
}
Object* RegExpResultsCache::Lookup(Heap* heap,
String* key_string,
Object* key_pattern,
@ -3137,7 +3028,7 @@ MaybeObject* Heap::NumberToString(Object* number,
}
Object* js_string;
MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
if (maybe_js_string->ToObject(&js_string)) {
SetNumberStringCache(number, String::cast(js_string));
}
@ -3311,10 +3202,10 @@ MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
} else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
Object* result;
{ MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
{ MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
char* dest = SeqOneByteString::cast(result)->GetChars();
char* dest = SeqAsciiString::cast(result)->GetChars();
dest[0] = c1;
dest[1] = c2;
return result;
@ -3353,8 +3244,8 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
return MakeOrFindTwoCharacterString(this, c1, c2);
}
bool first_is_ascii = first->IsOneByteRepresentation();
bool second_is_ascii = second->IsOneByteRepresentation();
bool first_is_ascii = first->IsAsciiRepresentation();
bool second_is_ascii = second->IsAsciiRepresentation();
bool is_ascii = first_is_ascii && second_is_ascii;
// Make sure that an out of memory exception is thrown if the length
@ -3384,35 +3275,35 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
ASSERT(second->IsFlat());
if (is_ascii) {
Object* result;
{ MaybeObject* maybe_result = AllocateRawOneByteString(length);
{ MaybeObject* maybe_result = AllocateRawAsciiString(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
char* dest = SeqOneByteString::cast(result)->GetChars();
char* dest = SeqAsciiString::cast(result)->GetChars();
// Copy first part.
const char* src;
if (first->IsExternalString()) {
src = ExternalAsciiString::cast(first)->GetChars();
} else {
src = SeqOneByteString::cast(first)->GetChars();
src = SeqAsciiString::cast(first)->GetChars();
}
for (int i = 0; i < first_length; i++) *dest++ = src[i];
// Copy second part.
if (second->IsExternalString()) {
src = ExternalAsciiString::cast(second)->GetChars();
} else {
src = SeqOneByteString::cast(second)->GetChars();
src = SeqAsciiString::cast(second)->GetChars();
}
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
if (is_ascii_data_in_two_byte_string) {
Object* result;
{ MaybeObject* maybe_result = AllocateRawOneByteString(length);
{ MaybeObject* maybe_result = AllocateRawAsciiString(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
char* dest = SeqOneByteString::cast(result)->GetChars();
char* dest = SeqAsciiString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length);
String::WriteToFlat(second, dest + first_length, 0, second_length);
isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
@ -3479,17 +3370,17 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// WriteToFlat takes care of the case when an indirect string has a
// different encoding from its underlying string. These encodings may
// differ because of externalization.
bool is_ascii = buffer->IsOneByteRepresentation();
bool is_ascii = buffer->IsAsciiRepresentation();
{ MaybeObject* maybe_result = is_ascii
? AllocateRawOneByteString(length, pretenure)
? AllocateRawAsciiString(length, pretenure)
: AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
String* string_result = String::cast(result);
// Copy the characters into the new object.
if (is_ascii) {
ASSERT(string_result->IsOneByteRepresentation());
char* dest = SeqOneByteString::cast(string_result)->GetChars();
ASSERT(string_result->IsAsciiRepresentation());
char* dest = SeqAsciiString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
} else {
ASSERT(string_result->IsTwoByteRepresentation());
@ -3513,7 +3404,7 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// indirect ASCII string is pointing to a two-byte string, the two-byte char
// codes of the underlying string must still fit into ASCII (because
// externalization must not change char codes).
{ Map* map = buffer->IsOneByteRepresentation()
{ Map* map = buffer->IsAsciiRepresentation()
? sliced_ascii_string_map()
: sliced_string_map();
MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@ -3748,11 +3639,10 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_check_type(RECEIVER_MAP_CHECK);
}
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
code->set_prologue_offset(kPrologueOffsetNotSet);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@ -4143,7 +4033,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
ASSERT(JSObject::cast(obj)->HasFastElements());
ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
return obj;
}
@ -4193,6 +4083,9 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
ASSERT(capacity >= length);
if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
@ -4205,7 +4098,7 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
FixedArrayBase* elms;
MaybeObject* maybe_elms = NULL;
if (IsFastDoubleElementsKind(elements_kind)) {
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
} else {
@ -4232,14 +4125,13 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
MaybeObject* Heap::AllocateJSArrayWithElements(
FixedArrayBase* elements,
ElementsKind elements_kind,
int length,
PretenureFlag pretenure) {
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
array->set_elements(elements);
array->set_length(Smi::FromInt(length));
array->set_length(Smi::FromInt(elements->length()));
array->ValidateElements();
return array;
}
@ -4517,7 +4409,7 @@ MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
}
MaybeObject* Heap::AllocateStringFromOneByte(Vector<const char> string,
MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
PretenureFlag pretenure) {
int length = string.length();
if (length == 1) {
@ -4525,12 +4417,12 @@ MaybeObject* Heap::AllocateStringFromOneByte(Vector<const char> string,
}
Object* result;
{ MaybeObject* maybe_result =
AllocateRawOneByteString(string.length(), pretenure);
AllocateRawAsciiString(string.length(), pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
CopyChars(SeqOneByteString::cast(result)->GetChars(), string.start(), length);
CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length);
return result;
}
@ -4583,9 +4475,9 @@ MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
const uc16* start = string.start();
if (String::IsAscii(start, length)) {
MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length);
} else { // It's not an ASCII string.
MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -4640,11 +4532,11 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Map* map;
if (is_ascii) {
if (chars > SeqOneByteString::kMaxLength) {
if (chars > SeqAsciiString::kMaxLength) {
return Failure::OutOfMemoryException();
}
map = ascii_symbol_map();
size = SeqOneByteString::SizeFor(chars);
size = SeqAsciiString::SizeFor(chars);
} else {
if (chars > SeqTwoByteString::kMaxLength) {
return Failure::OutOfMemoryException();
@ -4684,14 +4576,13 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
}
MaybeObject* Heap::AllocateRawOneByteString(int length,
PretenureFlag pretenure) {
if (length < 0 || length > SeqOneByteString::kMaxLength) {
MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
if (length < 0 || length > SeqAsciiString::kMaxLength) {
return Failure::OutOfMemoryException();
}
int size = SeqOneByteString::SizeFor(length);
ASSERT(size <= SeqOneByteString::kMaxSize);
int size = SeqAsciiString::SizeFor(length);
ASSERT(size <= SeqAsciiString::kMaxSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
AllocationSpace retry_space = OLD_DATA_SPACE;
@ -4723,7 +4614,7 @@ MaybeObject* Heap::AllocateRawOneByteString(int length,
if (FLAG_verify_heap) {
// Initialize string's content to ensure ASCII-ness (character range 0-127)
// as required when verifying the heap.
char* dest = SeqOneByteString::cast(result)->GetChars();
char* dest = SeqAsciiString::cast(result)->GetChars();
memset(dest, 0x0F, length * kCharSize);
}
#endif
@ -5099,7 +4990,7 @@ MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_no_write_barrier(module_context_map());
// Instance link will be set later.
// Context links will be set later.
context->set_extension(Smi::FromInt(0));
return context;
}
@ -5186,20 +5077,6 @@ MaybeObject* Heap::AllocateScopeInfo(int length) {
}
MaybeObject* Heap::AllocateExternal(void* value) {
Foreign* foreign;
{ MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
if (!maybe_result->To(&foreign)) return maybe_result;
}
JSObject* external;
{ MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
if (!maybe_result->To(&external)) return maybe_result;
}
external->SetInternalField(0, foreign);
return external;
}
MaybeObject* Heap::AllocateStruct(InstanceType type) {
Map* map;
switch (type) {
@ -5289,6 +5166,10 @@ bool Heap::IdleNotification(int hint) {
AdvanceIdleIncrementalMarking(step_size);
contexts_disposed_ = 0;
}
// Make sure that we have no pending context disposals.
// Take into account that we might have decided to delay full collection
// because incremental marking is in progress.
ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
// After context disposal there is likely a lot of garbage remaining, reset
// the idle notification counters in order to trigger more incremental GCs
// on subsequent idle notifications.
@ -5580,7 +5461,7 @@ MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
}
MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqOneByteString> string,
MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
int from,
int length) {
Object* symbol = NULL;
@ -5639,7 +5520,6 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
return symbol_table()->LookupSymbolIfExists(string, symbol);
}
void Heap::ZapFromSpace() {
NewSpacePageIterator it(new_space_.FromSpaceStart(),
new_space_.FromSpaceEnd());
@ -6081,6 +5961,172 @@ intptr_t Heap::PromotedExternalMemorySize() {
- amount_of_external_allocated_memory_at_last_global_gc_;
}
#ifdef DEBUG
// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;
class HeapDebugUtils {
public:
explicit HeapDebugUtils(Heap* heap)
: search_for_any_global_(false),
search_target_(NULL),
found_target_(false),
object_stack_(20),
heap_(heap) {
}
class MarkObjectVisitor : public ObjectVisitor {
public:
explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
void VisitPointers(Object** start, Object** end) {
// Mark all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject())
utils_->MarkObjectRecursively(p);
}
}
HeapDebugUtils* utils_;
};
void MarkObjectRecursively(Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
Object* map = obj->map();
if (!map->IsHeapObject()) return; // visited before
if (found_target_) return; // stop if target found
object_stack_.Add(obj);
if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
(!search_for_any_global_ && (obj == search_target_))) {
found_target_ = true;
return;
}
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
Address map_addr = map_p->address();
obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
MarkObjectRecursively(&map);
MarkObjectVisitor mark_visitor(this);
obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
&mark_visitor);
if (!found_target_) // don't pop if found the target
object_stack_.RemoveLast();
}
class UnmarkObjectVisitor : public ObjectVisitor {
public:
explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
void VisitPointers(Object** start, Object** end) {
// Unmark all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject())
utils_->UnmarkObjectRecursively(p);
}
}
HeapDebugUtils* utils_;
};
void UnmarkObjectRecursively(Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
Object* map = obj->map();
if (map->IsHeapObject()) return; // unmarked already
Address map_addr = reinterpret_cast<Address>(map);
map_addr -= kMarkTag;
ASSERT_TAG_ALIGNED(map_addr);
HeapObject* map_p = HeapObject::FromAddress(map_addr);
obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
UnmarkObjectVisitor unmark_visitor(this);
obj->IterateBody(Map::cast(map_p)->instance_type(),
obj->SizeFromMap(Map::cast(map_p)),
&unmark_visitor);
}
void MarkRootObjectRecursively(Object** root) {
if (search_for_any_global_) {
ASSERT(search_target_ == NULL);
} else {
ASSERT(search_target_->IsHeapObject());
}
found_target_ = false;
object_stack_.Clear();
MarkObjectRecursively(root);
UnmarkObjectRecursively(root);
if (found_target_) {
PrintF("=====================================\n");
PrintF("==== Path to object ====\n");
PrintF("=====================================\n\n");
ASSERT(!object_stack_.is_empty());
for (int i = 0; i < object_stack_.length(); i++) {
if (i > 0) PrintF("\n |\n |\n V\n\n");
Object* obj = object_stack_[i];
obj->Print();
}
PrintF("=====================================\n");
}
}
// Helper class for visiting HeapObjects recursively.
class MarkRootVisitor: public ObjectVisitor {
public:
explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject())
utils_->MarkRootObjectRecursively(p);
}
}
HeapDebugUtils* utils_;
};
bool search_for_any_global_;
Object* search_target_;
bool found_target_;
List<Object*> object_stack_;
Heap* heap_;
friend class Heap;
};
#endif
V8_DECLARE_ONCE(initialize_gc_once);
@ -6093,6 +6139,7 @@ static void InitializeGCOnce() {
bool Heap::SetUp(bool create_heap_objects) {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
debug_utils_ = new HeapDebugUtils(this);
#endif
// Initialize heap spaces and initial maps and objects. Whenever something
@ -6287,6 +6334,11 @@ void Heap::TearDown() {
isolate_->memory_allocator()->TearDown();
delete relocation_mutex_;
#ifdef DEBUG
delete debug_utils_;
debug_utils_ = NULL;
#endif
}
@ -6865,9 +6917,6 @@ GCTracer::GCTracer(Heap* heap,
allocated_since_last_gc_(0),
spent_in_mutator_(0),
promoted_objects_size_(0),
nodes_died_in_new_space_(0),
nodes_copied_in_new_space_(0),
nodes_promoted_(0),
heap_(heap),
gc_reason_(gc_reason),
collector_reason_(collector_reason) {
@ -7008,9 +7057,6 @@ GCTracer::~GCTracer() {
PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
PrintF("nodes_promoted=%d ", nodes_promoted_);
if (collector_ == SCAVENGER) {
PrintF("stepscount=%d ", steps_count_since_last_gc_);
@ -7018,7 +7064,6 @@ GCTracer::~GCTracer() {
} else {
PrintF("stepscount=%d ", steps_count_);
PrintF("stepstook=%d ", static_cast<int>(steps_took_));
PrintF("longeststep=%.f ", longest_step_);
}
PrintF("\n");
@ -7139,7 +7184,7 @@ void TranscendentalCache::Clear() {
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
if (new_space_strings_[i] == heap_->the_hole_value()) {
if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
continue;
}
if (heap_->InNewSpace(new_space_strings_[i])) {
@ -7151,7 +7196,7 @@ void ExternalStringTable::CleanUp() {
new_space_strings_.Rewind(last);
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
if (old_space_strings_[i] == heap_->the_hole_value()) {
if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
continue;
}
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
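
One of the larger blocks re-added to heap.cc above is HeapDebugUtils, which searches for a path to an object by recursively marking everything it visits; the mark is made by adding a small tag to the object's map pointer (possible because aligned pointers have free low bits) and is subtracted again on the way back. A self-contained sketch of that tagging trick, using ordinary structs instead of heap objects:

#include <cstdint>
#include <cstdio>
#include <vector>

static const uintptr_t kMarkTag = 2;  // mirrors "use 2 for marking visited"

struct Type { const char* name; };
struct Node {
  Type* type;                          // plays the role of the map pointer
  std::vector<Node*> children;
};

bool IsMarked(const Node* n) {
  return (reinterpret_cast<uintptr_t>(n->type) & kMarkTag) != 0;
}

void MarkRecursively(Node* n) {
  if (IsMarked(n)) return;             // visited before: stop the recursion
  n->type = reinterpret_cast<Type*>(
      reinterpret_cast<uintptr_t>(n->type) + kMarkTag);
  for (Node* child : n->children) MarkRecursively(child);
}

void UnmarkRecursively(Node* n) {
  if (!IsMarked(n)) return;            // already restored
  n->type = reinterpret_cast<Type*>(
      reinterpret_cast<uintptr_t>(n->type) - kMarkTag);
  for (Node* child : n->children) UnmarkRecursively(child);
}

int main() {
  Type t = {"object"};
  Node a{&t, {}}, b{&t, {}};
  a.children.push_back(&b);
  b.children.push_back(&a);            // a cycle, like a real object graph
  MarkRecursively(&a);                 // terminates because of the mark check
  UnmarkRecursively(&a);
  std::printf("%s\n", a.type->name);   // pointer restored: prints "object"
}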

58
deps/v8/src/heap.h

@ -154,9 +154,7 @@ namespace internal {
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
V(JSObject, observation_state, ObservationState) \
V(Map, external_map, ExternalMap)
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@ -286,6 +284,14 @@ class StoreBufferRebuilder {
// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
#ifdef DEBUG
class HeapDebugUtils;
#endif
// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
@ -481,9 +487,6 @@ class Heap {
// Returns the amount of executable memory currently committed for the heap.
intptr_t CommittedMemoryExecutable();
// Returns the amount of physical memory currently committed for the heap.
size_t CommittedPhysicalMemory();
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
@ -576,7 +579,6 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
FixedArrayBase* array_base,
ElementsKind elements_kind,
int length,
PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.
@ -659,9 +661,6 @@ class Heap {
// Allocates a serialized scope info.
MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
// Allocates an External object for v8's external API.
MUST_USE_RESULT MaybeObject* AllocateExternal(void* value);
// Allocates an empty PolymorphicCodeCache.
MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
@ -698,7 +697,7 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateStringFromOneByte(
MUST_USE_RESULT MaybeObject* AllocateStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
@ -742,7 +741,7 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateRawOneByteString(
MUST_USE_RESULT MaybeObject* AllocateRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
@ -1037,8 +1036,9 @@ class Heap {
return LookupSymbol(CStrVector(str));
}
MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(
Handle<SeqOneByteString> string, int from, int length);
MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
int from,
int length);
bool LookupSymbolIfExists(String* str, String** symbol);
bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
@ -1448,10 +1448,6 @@ class Heap {
STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
// Generated code can embed direct references to non-writable roots if
// they are in new space.
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
MUST_USE_RESULT MaybeObject* Uint32ToString(
@ -1785,6 +1781,8 @@ class Heap {
// Do we expect to be able to handle allocation failure at this
// time?
bool disallow_allocation_failure_;
HeapDebugUtils* debug_utils_;
#endif // DEBUG
// Indicates that the new space should be kept small due to high promotion
@ -1901,6 +1899,7 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
@ -1927,7 +1926,7 @@ class Heap {
void CreateFixedStubs();
MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
MaybeObject* CreateOddball(const char* to_string,
Object* to_number,
byte kind);
@ -2549,18 +2548,6 @@ class GCTracer BASE_EMBEDDED {
promoted_objects_size_ += object_size;
}
void increment_nodes_died_in_new_space() {
nodes_died_in_new_space_++;
}
void increment_nodes_copied_in_new_space() {
nodes_copied_in_new_space_++;
}
void increment_nodes_promoted() {
nodes_promoted_++;
}
private:
// Returns a string matching the collector.
const char* CollectorString();
@ -2605,15 +2592,6 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
// Number of died nodes in the new space.
int nodes_died_in_new_space_;
// Number of copied nodes to the new space.
int nodes_copied_in_new_space_;
// Number of promoted nodes to the old space.
int nodes_promoted_;
// Incremental marking steps counters.
int steps_count_;
double steps_took_;
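
The heap.h section above also carries the comment describing PromotionQueue: each object promoted during a scavenge is queued together with its size, so the scanner never has to follow a map pointer just to learn how big the object is. A minimal sketch of that address/size pairing — a std::vector stands in for the unused new-space pages the real queue writes into, and entries come back LIFO here for brevity:

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

struct PromotionQueue {
  // Toy version: (address, size) entries kept side by side.
  std::vector<std::pair<void*, size_t> > entries;

  void insert(void* object, size_t size) {
    entries.push_back(std::make_pair(object, size));
  }

  bool remove(void** object, size_t* size) {
    if (entries.empty()) return false;
    *object = entries.back().first;
    *size = entries.back().second;
    entries.pop_back();
    return true;
  }
};

int main() {
  int promoted_object = 0;
  PromotionQueue queue;
  queue.insert(&promoted_object, sizeof(promoted_object));

  void* object;
  size_t size;
  while (queue.remove(&object, &size)) {
    // The scanner gets the size for free instead of reading a map pointer.
    std::printf("scan %p, %zu bytes\n", object, size);
  }
}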

492
deps/v8/src/hydrogen-instructions.cc

@ -85,81 +85,6 @@ void HValue::AssumeRepresentation(Representation r) {
}
void HValue::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
}
Representation HValue::RepresentationFromUses() {
if (HasNoUses()) return Representation::None();
// Array of use counts for each representation.
int use_count[Representation::kNumRepresentations] = { 0 };
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
Representation rep = use->observed_input_representation(it.index());
if (rep.IsNone()) continue;
if (FLAG_trace_representation) {
PrintF("#%d %s is used by #%d %s as %s%s\n",
id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
(use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
}
use_count[rep.kind()] += use->LoopWeight();
}
if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
int tagged_count = use_count[Representation::kTagged];
int double_count = use_count[Representation::kDouble];
int int32_count = use_count[Representation::kInteger32];
if (tagged_count > 0) return Representation::Tagged();
if (double_count > 0) return Representation::Double();
if (int32_count > 0) return Representation::Integer32();
return Representation::None();
}
void HValue::UpdateRepresentation(Representation new_rep,
HInferRepresentation* h_infer,
const char* reason) {
Representation r = representation();
if (new_rep.is_more_general_than(r)) {
// When an HConstant is marked "not convertible to integer", then
// never try to represent it as an integer.
if (new_rep.IsInteger32() && !IsConvertibleToInteger()) {
new_rep = Representation::Tagged();
if (FLAG_trace_representation) {
PrintF("Changing #%d %s representation %s -> %s because it's NCTI"
" (%s want i)\n",
id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
}
} else {
if (FLAG_trace_representation) {
PrintF("Changing #%d %s representation %s -> %s based on %s\n",
id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
}
}
ChangeRepresentation(new_rep);
AddDependantsToWorklist(h_infer);
}
}
void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
h_infer->AddToWorklist(it.value());
}
for (int i = 0; i < OperandCount(); ++i) {
h_infer->AddToWorklist(OperandAt(i));
}
}
static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
if (result > kMaxInt) {
*overflow = true;
@ -376,7 +301,6 @@ HUseListNode* HUseListNode::tail() {
bool HValue::CheckUsesForFlag(Flag f) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false;
}
return true;
@ -801,13 +725,6 @@ void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
}
void HWrapReceiver::PrintDataTo(StringStream* stream) {
receiver()->PrintNameTo(stream);
stream->Add(" ");
function()->PrintNameTo(stream);
}
void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintNameTo(stream);
stream->Add("[");
@ -847,24 +764,6 @@ void HReturn::PrintDataTo(StringStream* stream) {
}
Representation HBranch::observed_input_representation(int index) {
static const ToBooleanStub::Types tagged_types(
ToBooleanStub::UNDEFINED |
ToBooleanStub::NULL_TYPE |
ToBooleanStub::SPEC_OBJECT |
ToBooleanStub::STRING);
if (expected_input_types_.ContainsAnyOf(tagged_types)) {
return Representation::Tagged();
} else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
return Representation::Double();
} else if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
return Representation::Integer32();
} else {
return Representation::None();
}
}
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
@ -960,6 +859,16 @@ void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
}
HValue* HConstant::Canonicalize() {
return HasNoUses() ? NULL : this;
}
HValue* HTypeof::Canonicalize() {
return HasNoUses() ? NULL : this;
}
HValue* HBitwise::Canonicalize() {
if (!representation().IsInteger32()) return this;
// If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
@ -1440,11 +1349,15 @@ void HPhi::InitRealUses(int phi_id) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
Representation rep = value->observed_input_representation(it.index());
Representation rep = value->ObservedInputRepresentation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight();
if (FLAG_trace_representation) {
PrintF("#%d Phi is used by real #%d %s as %s\n",
id(), value->id(), value->Mnemonic(), rep.Mnemonic());
PrintF("%d %s is used by %d %s as %s\n",
this->id(),
this->Mnemonic(),
value->id(),
value->Mnemonic(),
rep.Mnemonic());
}
}
}
@ -1453,8 +1366,11 @@ void HPhi::InitRealUses(int phi_id) {
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n",
id(), other->id(),
PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
this->id(),
this->Mnemonic(),
other->id(),
other->Mnemonic(),
other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]);
@ -1473,20 +1389,9 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
void HSimulate::MergeInto(HSimulate* other) {
for (int i = 0; i < values_.length(); ++i) {
HValue* value = values_[i];
if (HasAssignedIndexAt(i)) {
other->AddAssignedValue(GetAssignedIndexAt(i), value);
} else {
if (other->pop_count_ > 0) {
other->pop_count_--;
} else {
other->AddPushedValue(value);
}
}
}
other->pop_count_ += pop_count();
void HPhi::ResetInteger32Uses() {
non_phi_uses_[Representation::kInteger32] = 0;
indirect_uses_[Representation::kInteger32] = 0;
}
@ -1495,7 +1400,7 @@ void HSimulate::PrintDataTo(StringStream* stream) {
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
for (int i = values_.length() - 1; i >= 0; --i) {
for (int i = 0; i < values_.length(); ++i) {
if (i > 0) stream->Add(",");
if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
@ -1534,6 +1439,7 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
has_int32_value_(false),
has_double_value_(false) {
set_representation(r);
SetFlag(kUseGVN);
if (handle_->IsNumber()) {
double n = handle_->Number();
@ -1542,16 +1448,6 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
double_value_ = n;
has_double_value_ = true;
}
if (r.IsNone()) {
if (has_int32_value_) {
r = Representation::Integer32();
} else if (has_double_value_) {
r = Representation::Double();
} else {
r = Representation::Tagged();
}
}
set_representation(r);
}
@ -1650,60 +1546,6 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
}
void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
// When the operation has information about its own output type, don't look
// at uses.
if (!observed_output_representation_.IsNone()) return;
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
}
Representation HBinaryOperation::RepresentationFromInputs() {
// Determine the worst case of observed input representations and
// the currently assumed output representation.
Representation rep = representation();
if (observed_output_representation_.is_more_general_than(rep)) {
rep = observed_output_representation_;
}
for (int i = 1; i <= 2; ++i) {
Representation input_rep = observed_input_representation(i);
if (input_rep.is_more_general_than(rep)) rep = input_rep;
}
// If any of the actual input representations is more general than what we
// have so far but not Tagged, use that representation instead.
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
if (left_rep.is_more_general_than(rep) &&
left()->CheckFlag(kFlexibleRepresentation)) {
rep = left_rep;
}
if (right_rep.is_more_general_than(rep) &&
right()->CheckFlag(kFlexibleRepresentation)) {
rep = right_rep;
}
return rep;
}
void HBinaryOperation::AssumeRepresentation(Representation r) {
set_observed_input_representation(r, r);
HValue::AssumeRepresentation(r);
}
void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
Representation new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
// Do not care about uses.
}
Range* HBitwise::InferRange(Zone* zone) {
if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
@ -1780,7 +1622,7 @@ Range* HShl::InferRange(Zone* zone) {
}
Range* HLoadKeyed::InferRange(Zone* zone) {
Range* HLoadKeyedSpecializedArrayElement::InferRange(Zone* zone) {
switch (elements_kind()) {
case EXTERNAL_PIXEL_ELEMENTS:
return new(zone) Range(0, 255);
@ -1835,19 +1677,9 @@ void HGoto::PrintDataTo(StringStream* stream) {
}
void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
Representation rep = Representation::None();
Representation left_rep = left()->representation();
Representation right_rep = right()->representation();
bool observed_integers =
observed_input_representation(0).IsInteger32() &&
observed_input_representation(1).IsInteger32();
bool inputs_are_not_doubles =
!left_rep.IsDouble() && !right_rep.IsDouble();
if (observed_integers && inputs_are_not_doubles) {
rep = Representation::Integer32();
} else {
rep = Representation::Double();
void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
input_representation_ = r;
if (r.IsDouble()) {
// According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have a different semantic, first
@ -1864,8 +1696,9 @@ void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
if (!Token::IsOrderedRelationalCompareOp(token_)) {
SetFlag(kDeoptimizeOnUndefined);
}
} else {
ASSERT(r.IsInteger32());
}
ChangeRepresentation(rep);
}
@ -2016,25 +1849,11 @@ void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
}
void HLoadKeyed::PrintDataTo(StringStream* stream) {
if (!is_external()) {
elements()->PrintNameTo(stream);
} else {
ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
elements()->PrintNameTo(stream);
stream->Add(".");
stream->Add(ElementsKindToString(elements_kind()));
}
void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
if (IsDehoisted()) {
stream->Add(" + %d] ", index_offset());
} else {
stream->Add("] ");
}
dependency()->PrintNameTo(stream);
if (RequiresHoleCheck()) {
stream->Add(" check_hole");
@ -2042,26 +1861,29 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) {
}
bool HLoadKeyed::RequiresHoleCheck() const {
bool HLoadKeyedFastElement::RequiresHoleCheck() const {
if (IsFastPackedElementsKind(elements_kind())) {
return false;
}
if (IsFastDoubleElementsKind(elements_kind())) {
return true;
}
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) {
return true;
}
if (!use->IsChange()) return true;
}
return false;
}
void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("] ");
dependency()->PrintNameTo(stream);
}
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@ -2074,22 +1896,21 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
// Recognize generic keyed loads that use a property name generated
// by a for-in statement as a key and rewrite them into a fast property load
// by index.
if (key()->IsLoadKeyed()) {
HLoadKeyed* key_load = HLoadKeyed::cast(key());
if (key_load->elements()->IsForInCacheArray()) {
if (key()->IsLoadKeyedFastElement()) {
HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key());
if (key_load->object()->IsForInCacheArray()) {
HForInCacheArray* names_cache =
HForInCacheArray::cast(key_load->elements());
HForInCacheArray::cast(key_load->object());
if (names_cache->enumerable() == object()) {
HForInCacheArray* index_cache =
names_cache->index_cache();
HCheckMapValue* map_check =
new(block()->zone()) HCheckMapValue(object(), names_cache->map());
HInstruction* index = new(block()->zone()) HLoadKeyed(
HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
key_load->key(),
key_load->key(),
key_load->elements_kind());
key_load->key());
map_check->InsertBefore(this);
index->InsertBefore(this);
HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
@ -2104,6 +1925,56 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
}
void HLoadKeyedSpecializedArrayElement::PrintDataTo(
StringStream* stream) {
external_pointer()->PrintNameTo(stream);
stream->Add(".");
switch (elements_kind()) {
case EXTERNAL_BYTE_ELEMENTS:
stream->Add("byte");
break;
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
stream->Add("u_byte");
break;
case EXTERNAL_SHORT_ELEMENTS:
stream->Add("short");
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
stream->Add("u_short");
break;
case EXTERNAL_INT_ELEMENTS:
stream->Add("int");
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
stream->Add("u_int");
break;
case EXTERNAL_FLOAT_ELEMENTS:
stream->Add("float");
break;
case EXTERNAL_DOUBLE_ELEMENTS:
stream->Add("double");
break;
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("] ");
dependency()->PrintNameTo(stream);
}
void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
@ -2130,25 +2001,20 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) {
}
void HStoreKeyed::PrintDataTo(StringStream* stream) {
if (!is_external()) {
elements()->PrintNameTo(stream);
} else {
elements()->PrintNameTo(stream);
stream->Add(".");
stream->Add(ElementsKindToString(elements_kind()));
ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
}
void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
if (IsDehoisted()) {
stream->Add(" + %d] = ", index_offset());
} else {
stream->Add("] = ");
}
value()->PrintNameTo(stream);
}
void HStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("] = ");
value()->PrintNameTo(stream);
}
@ -2162,6 +2028,56 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
void HStoreKeyedSpecializedArrayElement::PrintDataTo(
StringStream* stream) {
external_pointer()->PrintNameTo(stream);
stream->Add(".");
switch (elements_kind()) {
case EXTERNAL_BYTE_ELEMENTS:
stream->Add("byte");
break;
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
stream->Add("u_byte");
break;
case EXTERNAL_SHORT_ELEMENTS:
stream->Add("short");
break;
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
stream->Add("u_short");
break;
case EXTERNAL_INT_ELEMENTS:
stream->Add("int");
break;
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
stream->Add("u_int");
break;
case EXTERNAL_FLOAT_ELEMENTS:
stream->Add("float");
break;
case EXTERNAL_DOUBLE_ELEMENTS:
stream->Add("double");
break;
case EXTERNAL_PIXEL_ELEMENTS:
stream->Add("pixel");
break;
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("] = ");
value()->PrintNameTo(stream);
}
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
ElementsKind from_kind = original_map()->elements_kind();
@ -2452,10 +2368,10 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
bool HStoreKeyed::NeedsCanonicalization() {
// If value is an integer or comes from the result of a keyed load
// then it will be a non-hole value: no need for canonicalization.
if (value()->IsLoadKeyed() ||
bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() {
// If value was loaded from an unboxed double backing store or
// converted from an integer then we don't have to canonicalize it.
if (value()->IsLoadKeyedFastDoubleElement() ||
(value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
return false;
}
@ -2638,41 +2554,7 @@ void HBitwise::PrintDataTo(StringStream* stream) {
}
void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
ASSERT(CheckFlag(kFlexibleRepresentation));
// If there are non-Phi uses, and all of them have observed the same
// representation, than that's what this Phi is going to use.
Representation new_rep = RepresentationObservedByAllNonPhiUses();
if (!new_rep.IsNone()) {
UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
return;
}
new_rep = RepresentationFromInputs();
UpdateRepresentation(new_rep, h_infer, "inputs");
new_rep = RepresentationFromUses();
UpdateRepresentation(new_rep, h_infer, "uses");
new_rep = RepresentationFromUseRequirements();
UpdateRepresentation(new_rep, h_infer, "use requirements");
}
Representation HPhi::RepresentationObservedByAllNonPhiUses() {
int non_phi_use_count = 0;
for (int i = Representation::kInteger32;
i < Representation::kNumRepresentations; ++i) {
non_phi_use_count += non_phi_uses_[i];
}
if (non_phi_use_count <= 1) return Representation::None();
for (int i = 0; i < Representation::kNumRepresentations; ++i) {
if (non_phi_uses_[i] == non_phi_use_count) {
return Representation::FromKind(static_cast<Representation::Kind>(i));
}
}
return Representation::None();
}
Representation HPhi::RepresentationFromInputs() {
Representation HPhi::InferredRepresentation() {
bool double_occurred = false;
bool int32_occurred = false;
for (int i = 0; i < OperandCount(); ++i) {
@ -2681,7 +2563,6 @@ Representation HPhi::RepresentationFromInputs() {
HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
if (hint_value != NULL) {
Representation hint = hint_value->representation();
if (hint.IsTagged()) return hint;
if (hint.IsDouble()) double_occurred = true;
if (hint.IsInteger32()) int32_occurred = true;
}
@ -2700,12 +2581,10 @@ Representation HPhi::RepresentationFromInputs() {
return Representation::Tagged();
}
} else {
if (value->IsPhi() && !IsConvertibleToInteger()) {
return Representation::Tagged();
}
}
}
}
if (double_occurred) return Representation::Double();
@ -2715,37 +2594,6 @@ Representation HPhi::RepresentationFromInputs() {
}
Representation HPhi::RepresentationFromUseRequirements() {
Representation all_uses_require = Representation::None();
bool all_uses_require_the_same = true;
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
// We check for observed_input_representation elsewhere.
Representation use_rep =
it.value()->RequiredInputRepresentation(it.index());
// No useful info from this use -> look at the next one.
if (use_rep.IsNone()) {
continue;
}
if (use_rep.Equals(all_uses_require)) {
continue;
}
// This use's representation contradicts what we've seen so far.
if (!all_uses_require.IsNone()) {
ASSERT(!use_rep.Equals(all_uses_require));
all_uses_require_the_same = false;
break;
}
// Otherwise, initialize observed representation.
all_uses_require = use_rep;
}
if (all_uses_require_the_same) {
return all_uses_require;
}
return Representation::None();
}
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
@ -2784,6 +2632,12 @@ void HCheckFunction::Verify() {
ASSERT(HasNoUses());
}
void HCheckPrototypeMaps::Verify() {
HInstruction::Verify();
ASSERT(HasNoUses());
}
#endif
} } // namespace v8::internal
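
The HPhi representation logic removed just above (3.15's RepresentationObservedByAllNonPhiUses and RepresentationFromUseRequirements) is at heart a unanimity vote over the representations a phi's non-phi uses observe or require. A simplified standalone sketch of that vote, with a plain enum standing in for V8's Representation class:

#include <array>

enum Rep { kNone, kInteger32, kDouble, kTagged, kNumReps };

// Given per-representation counts of non-phi uses, return the representation
// that every such use observed, or kNone if there is no unanimous choice.
// A single observation (total <= 1) is not considered decisive, mirroring the
// removed code above.
Rep RepresentationObservedByAllUses(const std::array<int, kNumReps>& use_counts) {
  int total = 0;
  for (int i = kInteger32; i < kNumReps; ++i) total += use_counts[i];
  if (total <= 1) return kNone;
  for (int i = kInteger32; i < kNumReps; ++i) {
    if (use_counts[i] == total) return static_cast<Rep>(i);
  }
  return kNone;
}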

666
deps/v8/src/hydrogen-instructions.h

File diff suppressed because it is too large

904
deps/v8/src/hydrogen.cc

File diff suppressed because it is too large

72
deps/v8/src/hydrogen.h

@ -125,10 +125,7 @@ class HBasicBlock: public ZoneObject {
void Goto(HBasicBlock* block, FunctionState* state = NULL);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
void AddSimulate(BailoutId ast_id,
RemovableSimulate removable = FIXED_SIMULATE) {
AddInstruction(CreateSimulate(ast_id, removable));
}
void AddSimulate(BailoutId ast_id) { AddInstruction(CreateSimulate(ast_id)); }
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
@ -169,7 +166,7 @@ class HBasicBlock: public ZoneObject {
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
HSimulate* CreateSimulate(BailoutId ast_id);
HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
@ -258,7 +255,6 @@ class HGraph: public ZoneObject {
void InitializeInferredTypes();
void InsertTypeConversions();
void MergeRemovableSimulates();
void InsertRepresentationChanges();
void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
@ -274,7 +270,6 @@ class HGraph: public ZoneObject {
void DehoistSimpleArrayIndexComputations();
void DeadCodeElimination();
void PropagateDeoptimizingMark();
void EliminateUnusedInstructions();
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
@ -617,25 +612,6 @@ class HEnvironment: public ZoneObject {
};
class HInferRepresentation BASE_EMBEDDED {
public:
explicit HInferRepresentation(HGraph* graph)
: graph_(graph),
worklist_(8, graph->zone()),
in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
void Analyze();
void AddToWorklist(HValue* current);
private:
Zone* zone() const { return graph_->zone(); }
HGraph* graph_;
ZoneList<HValue*> worklist_;
BitVector in_worklist_;
};
class HGraphBuilder;
enum ArgumentsAllowedFlag {
@ -903,8 +879,7 @@ class HGraphBuilder: public AstVisitor {
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
void AddSimulate(BailoutId ast_id,
RemovableSimulate removable = FIXED_SIMULATE);
void AddSimulate(BailoutId ast_id);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
@ -1049,6 +1024,10 @@ class HGraphBuilder: public AstVisitor {
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
void TraceRepresentation(Token::Value op,
TypeInfo info,
HValue* value,
Representation rep);
static Representation ToRepresentation(TypeInfo info);
void SetUpScope(Scope* scope);
@ -1185,7 +1164,8 @@ class HGraphBuilder: public AstVisitor {
HLoadNamedField* BuildLoadNamedField(HValue* object,
Handle<Map> map,
LookupResult* result);
LookupResult* result,
bool smi_and_map_check);
HInstruction* BuildLoadNamedGeneric(HValue* object,
Handle<String> name,
Property* expr);
@ -1206,14 +1186,12 @@ class HGraphBuilder: public AstVisitor {
ElementsKind elements_kind,
bool is_store);
void AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map);
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
Handle<Map> map,
LookupResult* lookup);
LookupResult* lookup,
bool smi_and_map_check);
HInstruction* BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value);
@ -1234,17 +1212,10 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildThisFunction();
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
void AddCheckConstantFunction(Handle<JSObject> holder,
HValue* receiver,
Handle<Map> receiver_map);
bool MatchRotateRight(HValue* left,
HValue* right,
HValue** operand,
HValue** shift_amount);
Handle<Map> receiver_map,
bool smi_and_map_check);
Zone* zone() const { return zone_; }
@ -1378,22 +1349,12 @@ class HStatistics: public Malloced {
return instance.get();
}
void IncrementSubtotals(int64_t create_graph,
int64_t optimize_graph,
int64_t generate_code) {
create_graph_ += create_graph;
optimize_graph_ += optimize_graph;
generate_code_ += generate_code;
}
private:
HStatistics()
: timing_(5),
names_(5),
sizes_(5),
create_graph_(0),
optimize_graph_(0),
generate_code_(0),
total_(0),
total_size_(0),
full_code_gen_(0),
source_size_(0) { }
@ -1401,9 +1362,7 @@ class HStatistics: public Malloced {
List<int64_t> timing_;
List<const char*> names_;
List<unsigned> sizes_;
int64_t create_graph_;
int64_t optimize_graph_;
int64_t generate_code_;
int64_t total_;
unsigned total_size_;
int64_t full_code_gen_;
double source_size_;
@ -1413,6 +1372,7 @@ class HStatistics: public Malloced {
class HPhase BASE_EMBEDDED {
public:
static const char* const kFullCodeGen;
static const char* const kTotal;
explicit HPhase(const char* name) { Begin(name, NULL, NULL, NULL); }
HPhase(const char* name, HGraph* graph) {

32
deps/v8/src/ia32/assembler-ia32-inl.h

@ -46,21 +46,12 @@ namespace v8 {
namespace internal {
static const byte kCallOpcode = 0xE8;
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
CPU::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == CODE_AGE_SEQUENCE) {
if (*pc_ == kCallOpcode) {
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
CPU::FlushICache(p, sizeof(uint32_t));
}
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
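
The rolled-back RelocInfo::apply above fixes up pc-relative call and jump targets when a code object moves in memory. A simplified standalone sketch of that fix-up for a single rel32 site, following the same sign convention as the *p -= delta line (buffer bookkeeping and instruction-cache flushing are omitted):

#include <cstdint>
#include <cstring>

// A call/jmp rel32 encodes target = address_after_instruction + disp32.
// If the code containing the instruction moves forward by `delta` bytes while
// the target stays fixed (e.g. a runtime entry), the displacement must shrink
// by the same amount, which is what *p -= delta does above.
void RelocateRel32(uint8_t* displacement_location, intptr_t delta) {
  int32_t disp;
  std::memcpy(&disp, displacement_location, sizeof(disp));
  disp -= static_cast<int32_t>(delta);
  std::memcpy(displacement_location, &disp, sizeof(disp));
}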
@ -178,21 +169,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
Assembler::target_address_at(pc_ + 1));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(*pc_ == kCallOpcode);
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
}
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@ -230,7 +206,7 @@ Object** RelocInfo::call_object_address() {
bool RelocInfo::IsPatchedReturnSequence() {
return *pc_ == kCallOpcode;
return *pc_ == 0xE8;
}
@ -251,9 +227,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
@ -281,8 +255,6 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&

107
deps/v8/src/ia32/assembler-ia32.cc

@ -169,7 +169,7 @@ void Displacement::init(Label* L, Type type) {
const int RelocInfo::kApplyMask =
RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
1 << RelocInfo::DEBUG_BREAK_SLOT;
bool RelocInfo::IsCodedSpecially() {
@ -312,19 +312,48 @@ Register Operand::reg() const {
static void InitCoverageLog();
#endif
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
positions_recorder_(this) {
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
if (isolate()->assembler_spare_buffer() != NULL) {
buffer = isolate()->assembler_spare_buffer();
isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
buffer_ = NewArray<byte>(buffer_size);
} else {
buffer_ = static_cast<byte*>(buffer);
}
buffer_size_ = buffer_size;
own_buffer_ = true;
} else {
// Use externally provided buffer instead.
ASSERT(buffer_size > 0);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
own_buffer_ = false;
}
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it; see CodePatcher::CodePatcher(...).
#ifdef DEBUG
if (own_buffer_) {
memset(buffer_, 0xCC, buffer_size_); // int3
memset(buffer_, 0xCC, buffer_size); // int3
}
#endif
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
// Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
#ifdef GENERATED_CODE_COVERAGE
InitCoverageLog();
@ -332,6 +361,18 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
}
Assembler::~Assembler() {
if (own_buffer_) {
if (isolate()->assembler_spare_buffer() == NULL &&
buffer_size_ == kMinimalBufferSize) {
isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
}
}
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
@ -1023,25 +1064,6 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
}
}
void Assembler::ror(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
ASSERT(is_uint5(imm8)); // illegal shift count
if (imm8 == 1) {
EMIT(0xD1);
EMIT(0xC8 | dst.code());
} else {
EMIT(0xC1);
EMIT(0xC8 | dst.code());
EMIT(imm8);
}
}
void Assembler::ror_cl(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0xD3);
EMIT(0xC8 | dst.code());
}
void Assembler::sar(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
@ -1479,7 +1501,7 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
ASSERT(0 <= cc && static_cast<int>(cc) < 16);
ASSERT(0 <= cc && cc < 16);
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
@ -1511,7 +1533,7 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
ASSERT((0 <= cc) && (cc < 16));
// 0000 1111 1000 tttn #32-bit disp.
EMIT(0x0F);
EMIT(0x80 | cc);
@ -1966,16 +1988,6 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
}
void Assembler::addsd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x58);
emit_sse_operand(dst, src);
}
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@ -1986,16 +1998,6 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
}
void Assembler::mulsd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
EMIT(0x59);
emit_sse_operand(dst, src);
}
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@ -2103,15 +2105,6 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
void Assembler::movmskps(Register dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x50);
emit_sse_operand(dst, src);
}
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@ -2401,7 +2394,7 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
}
void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);

35
deps/v8/src/ia32/assembler-ia32.h

@ -582,7 +582,15 @@ class Assembler : public AssemblerBase {
// upon destruction of the assembler.
// TODO(vitalyr): the assembler does not need an isolate.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
virtual ~Assembler() { }
~Assembler();
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
// Avoids using instructions that vary in size in unpredictable ways between
// the snapshot and the running VM. This is needed by the full compiler so
// that it can recompile code with debug support and fix the PC.
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@ -809,8 +817,6 @@ class Assembler : public AssemblerBase {
void rcl(Register dst, uint8_t imm8);
void rcr(Register dst, uint8_t imm8);
void ror(Register dst, uint8_t imm8);
void ror_cl(Register dst);
void sar(Register dst, uint8_t imm8);
void sar_cl(Register dst);
@ -990,10 +996,8 @@ class Assembler : public AssemblerBase {
void cvtsd2ss(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void xorps(XMMRegister dst, XMMRegister src);
@ -1015,7 +1019,6 @@ class Assembler : public AssemblerBase {
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
void movmskps(Register dst, XMMRegister src);
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
@ -1051,7 +1054,7 @@ class Assembler : public AssemblerBase {
void psllq(XMMRegister dst, XMMRegister src);
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
@ -1094,6 +1097,8 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
int pc_offset() const { return pc_ - buffer_; }
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
@ -1112,11 +1117,15 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
bool emit_debug_code() const { return emit_debug_code_; }
bool predictable_code_size() const { return predictable_code_size_ ; }
void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src);
@ -1177,10 +1186,22 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class EnsureSpace;
// Code buffer:
// The buffer into which code and relocation info are generated.
byte* buffer_;
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
// code generation
byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
PositionsRecorder positions_recorder_;
bool emit_debug_code_;
bool predictable_code_size_;
friend class PositionsRecorder;
};

36
deps/v8/src/ia32/builtins-ia32.cc

@ -538,42 +538,6 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// For now, we are relying on the fact that make_code_young doesn't do any
// garbage collection which allows us to save/restore the registers without
// worrying about which of them contain pointers. We also don't build an
// internal frame to make the code faster, since we shouldn't have to do stack
// crawls in MakeCodeYoung. This seems a bit fragile.
// Re-execute the code that was patched back to the young age when
// the stub returns.
__ sub(Operand(esp, 0), Immediate(5));
__ pushad();
__ mov(eax, Operand(esp, 8 * kPointerSize));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(1, ebx);
__ mov(Operand(esp, 0), eax);
__ CallCFunction(
ExternalReference::get_make_code_young_function(masm->isolate()), 1);
}
__ popad();
__ ret(0);
}
#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
} \
void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
MacroAssembler* masm) { \
GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{

628
deps/v8/src/ia32/code-stubs-ia32.cc

File diff suppressed because it is too large

90
deps/v8/src/ia32/code-stubs-ia32.h

@ -154,6 +154,96 @@ class UnaryOpStub: public CodeStub {
};
class BinaryOpStub: public CodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
BinaryOpStub(
int key,
BinaryOpIC::TypeInfo operands_type,
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
NO_HEAPNUMBER_RESULTS
};
Token::Value op_;
OverwriteMode mode_;
bool use_sse3_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class SSE3Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| SSE3Bits::encode(use_sse3_)
| OperandTypeInfoBits::encode(operands_type_)
| ResultTypeInfoBits::encode(result_type_);
}
void Generate(MacroAssembler* masm);
void GenerateGeneric(MacroAssembler* masm);
void GenerateSmiCode(MacroAssembler* masm,
Label* slow,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateUninitializedStub(MacroAssembler* masm);
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
};
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only

203
deps/v8/src/ia32/codegen-ia32.cc

@ -102,43 +102,6 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
UnaryMathFunction CreateExpFunction() {
if (!CpuFeatures::IsSupported(SSE2)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return &exp;
ExternalReference::InitializeMathExpData();
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// esp[1 * kPointerSize]: raw double input
// esp[0 * kPointerSize]: return address
{
CpuFeatures::Scope use_sse2(SSE2);
XMMRegister input = xmm1;
XMMRegister result = xmm2;
__ movdbl(input, Operand(esp, 1 * kPointerSize));
__ push(eax);
__ push(ebx);
MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
__ pop(ebx);
__ pop(eax);
__ movdbl(Operand(esp, 1 * kPointerSize), result);
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
}
CodeDesc desc;
masm.GetCode(&desc);
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
@ -769,7 +732,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Dispatch on the encoding: ASCII or two-byte.
Label ascii;
__ bind(&seq_string);
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii, Label::kNear);
@ -788,174 +751,12 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ movzx_b(result, FieldOperand(string,
index,
times_1,
SeqOneByteString::kHeaderSize));
__ bind(&done);
}
void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
String::Encoding encoding,
Register string,
Register index,
Register value) {
if (FLAG_debug_code) {
__ test(index, Immediate(kSmiTagMask));
__ Check(zero, "Non-smi index");
__ test(value, Immediate(kSmiTagMask));
__ Check(zero, "Non-smi value");
__ cmp(index, FieldOperand(string, String::kLengthOffset));
__ Check(less, "Index is too large");
__ cmp(index, Immediate(Smi::FromInt(0)));
__ Check(greater_equal, "Index is negative");
__ push(value);
__ mov(value, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
__ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, "Unexpected string type");
__ pop(value);
}
__ SmiUntag(value);
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
if (encoding == String::ONE_BYTE_ENCODING) {
__ SmiUntag(index);
__ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
value);
} else {
// No need to untag a smi for two-byte addressing.
__ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
value);
}
}
static Operand ExpConstant(int index) {
return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
}
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
XMMRegister input,
XMMRegister result,
XMMRegister double_scratch,
Register temp1,
Register temp2) {
ASSERT(!input.is(double_scratch));
ASSERT(!input.is(result));
ASSERT(!result.is(double_scratch));
ASSERT(!temp1.is(temp2));
ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
Label done;
__ movdbl(double_scratch, ExpConstant(0));
__ xorpd(result, result);
__ ucomisd(double_scratch, input);
__ j(above_equal, &done);
__ ucomisd(input, ExpConstant(1));
__ movdbl(result, ExpConstant(2));
__ j(above_equal, &done);
__ movdbl(double_scratch, ExpConstant(3));
__ movdbl(result, ExpConstant(4));
__ mulsd(double_scratch, input);
__ addsd(double_scratch, result);
__ movd(temp2, double_scratch);
__ subsd(double_scratch, result);
__ movdbl(result, ExpConstant(6));
__ mulsd(double_scratch, ExpConstant(5));
__ subsd(double_scratch, input);
__ subsd(result, double_scratch);
__ movsd(input, double_scratch);
__ mulsd(input, double_scratch);
__ mulsd(result, input);
__ mov(temp1, temp2);
__ mulsd(result, ExpConstant(7));
__ subsd(result, double_scratch);
__ add(temp1, Immediate(0x1ff800));
__ addsd(result, ExpConstant(8));
__ and_(temp2, Immediate(0x7ff));
__ shr(temp1, 11);
__ shl(temp1, 20);
__ movd(input, temp1);
__ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
__ movdbl(double_scratch, Operand::StaticArray(
temp2, times_8, ExternalReference::math_exp_log_table()));
__ por(input, double_scratch);
__ mulsd(result, input);
SeqAsciiString::kHeaderSize));
__ bind(&done);
}
#undef __
static const int kNoCodeAgeSequenceLength = 5;
static byte* GetNoCodeAgeSequence(uint32_t* length) {
static bool initialized = false;
static byte sequence[kNoCodeAgeSequenceLength];
*length = kNoCodeAgeSequenceLength;
if (!initialized) {
// The sequence of instructions that is patched out for aging code is the
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
patcher.masm()->push(ebp);
patcher.masm()->mov(ebp, esp);
patcher.masm()->push(esi);
patcher.masm()->push(edi);
initialized = true;
}
return sequence;
}
bool Code::IsYoungSequence(byte* sequence) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
bool result = (!memcmp(sequence, young_sequence, young_length));
ASSERT(result || *sequence == kCallOpcode);
return result;
}
void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(sequence)) {
*age = kNoAge;
*parity = NO_MARKING_PARITY;
} else {
sequence++; // Skip the kCallOpcode byte
Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
Assembler::kCallTargetAddressOffset;
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
}
void Code::PatchPlatformCodeAge(byte* sequence,
Code::Age age,
MarkingParity parity) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
if (age == kNoAge) {
memcpy(sequence, young_sequence, young_length);
CPU::FlushICache(sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(age, parity);
CodePatcher patcher(sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE);
}
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
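
The code-aging support deleted above relies on one invariant: a function's first five bytes are either the young prologue (push ebp; mov ebp, esp; push esi; push edi) or a five-byte call (opcode 0xE8) into an age stub. A minimal standalone sketch of that young/aged check on a raw byte sequence; the prologue bytes are the usual ia32 encodings, given here for illustration rather than captured from V8's CodePatcher:

#include <cstdint>
#include <cstring>

static const uint8_t kCallOpcode = 0xE8;
// push ebp; mov ebp, esp; push esi; push edi -- the boilerplate prologue
// shared by FUNCTION and OPTIMIZED_FUNCTION code, five bytes long.
static const uint8_t kYoungSequence[5] = {0x55, 0x89, 0xE5, 0x56, 0x57};

bool IsYoungSequence(const uint8_t* sequence) {
  return std::memcmp(sequence, kYoungSequence, sizeof(kYoungSequence)) == 0;
}

// An aged sequence starts with a call whose rel32 points at the age stub;
// the stub's entry can be recovered the same way GetCodeAgeAndParity did.
const uint8_t* AgeStubAddress(const uint8_t* sequence) {
  int32_t disp;
  std::memcpy(&disp, sequence + 1, sizeof(disp));
  return sequence + 5 + disp;  // rel32 is relative to the end of the call
}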

14
deps/v8/src/ia32/codegen-ia32.h

@ -88,20 +88,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
class MathExpGenerator : public AllStatic {
public:
static void EmitMathExp(MacroAssembler* masm,
XMMRegister input,
XMMRegister result,
XMMRegister double_scratch,
Register temp1,
Register temp2);
private:
DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
};
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_

20
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -210,6 +210,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x13;
static const byte kJaeInstruction = 0x73;
static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
@ -222,25 +224,30 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
// The back edge bookkeeping code matches the pattern:
// The stack check code matches the pattern:
//
// sub <profiling_counter>, <delta>
// jns ok
// cmp esp, <limit>
// jae ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
// sub <profiling_counter>, <delta> ;; Not changed
// cmp esp, <limit> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
if (FLAG_count_based_interrupts) {
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
} else {
ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
}
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
@ -265,8 +272,13 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (FLAG_count_based_interrupts) {
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
} else {
*(call_target_address - 3) = kJaeInstruction;
*(call_target_address - 2) = kJaeOffset;
}
Assembler::set_target_address_at(call_target_address,
check_code->entry());
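
The stack-check patching restored above only ever rewrites the two bytes immediately before the call's rel32 operand: a jns (or jae) plus its offset become a two-byte nop (0x66 0x90) when on-stack replacement is armed, and are put back when it is reverted. A standalone sketch of just that byte-level patching on a raw buffer, assuming the count-based-interrupt (jns) variant; call-target rewriting and cache flushing are left out:

#include <cstdint>

static const uint8_t kJnsInstruction = 0x79;
static const uint8_t kJnsOffset = 0x13;
static const uint8_t kNopByteOne = 0x66;
static const uint8_t kNopByteTwo = 0x90;

// call_target_address points at the rel32 operand of the call, as in
// PatchStackCheckCodeAt above; the conditional branch sits 3 bytes before it.
void PatchOutBranch(uint8_t* call_target_address) {
  call_target_address[-3] = kNopByteOne;
  call_target_address[-2] = kNopByteTwo;
}

void RestoreBranch(uint8_t* call_target_address) {
  call_target_address[-3] = kJnsInstruction;
  call_target_address[-2] = kJnsOffset;
}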

9
deps/v8/src/ia32/disasm-ia32.cc

@ -869,7 +869,6 @@ static const char* F0Mnem(byte f0byte) {
case 0xAF: return "imul";
case 0xA5: return "shld";
case 0xAD: return "shrd";
case 0xAC: return "shrd"; // 3-operand version.
case 0xAB: return "bts";
default: return NULL;
}
@ -1040,14 +1039,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (f0byte == 0x50) {
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movmskps %s,%s",
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||

Some files were not shown because too many files changed in this diff
