
Update v8 to 3.8.9

v0.7.4-release
isaacs, 13 years ago
commit 05471f5c2a
  1. deps/v8/ChangeLog (18)
  2. deps/v8/build/common.gypi (2)
  3. deps/v8/include/v8.h (14)
  4. deps/v8/include/v8stdint.h (3)
  5. deps/v8/src/accessors.cc (21)
  6. deps/v8/src/accessors.h (3)
  7. deps/v8/src/allocation.cc (9)
  8. deps/v8/src/allocation.h (3)
  9. deps/v8/src/api.cc (24)
  10. deps/v8/src/api.h (10)
  11. deps/v8/src/arm/assembler-arm-inl.h (1)
  12. deps/v8/src/arm/assembler-arm.h (12)
  13. deps/v8/src/arm/builtins-arm.cc (1)
  14. deps/v8/src/arm/deoptimizer-arm.cc (123)
  15. deps/v8/src/arm/disasm-arm.cc (34)
  16. deps/v8/src/arm/frames-arm.h (7)
  17. deps/v8/src/arm/ic-arm.cc (39)
  18. deps/v8/src/arm/lithium-arm.cc (24)
  19. deps/v8/src/arm/lithium-codegen-arm.cc (12)
  20. deps/v8/src/assembler.cc (41)
  21. deps/v8/src/assembler.h (10)
  22. deps/v8/src/ast.cc (11)
  23. deps/v8/src/ast.h (41)
  24. deps/v8/src/cpu-profiler.cc (7)
  25. deps/v8/src/d8.cc (29)
  26. deps/v8/src/d8.h (22)
  27. deps/v8/src/debug.cc (271)
  28. deps/v8/src/deoptimizer.cc (307)
  29. deps/v8/src/deoptimizer.h (66)
  30. deps/v8/src/frames.cc (25)
  31. deps/v8/src/full-codegen.cc (2)
  32. deps/v8/src/heap-inl.h (1)
  33. deps/v8/src/heap.cc (112)
  34. deps/v8/src/heap.h (42)
  35. deps/v8/src/hydrogen-instructions.cc (17)
  36. deps/v8/src/hydrogen-instructions.h (7)
  37. deps/v8/src/hydrogen.cc (74)
  38. deps/v8/src/hydrogen.h (17)
  39. deps/v8/src/ia32/assembler-ia32-inl.h (4)
  40. deps/v8/src/ia32/builtins-ia32.cc (3)
  41. deps/v8/src/ia32/deoptimizer-ia32.cc (124)
  42. deps/v8/src/ia32/frames-ia32.h (8)
  43. deps/v8/src/ia32/ic-ia32.cc (37)
  44. deps/v8/src/ia32/lithium-codegen-ia32.cc (12)
  45. deps/v8/src/ia32/lithium-ia32.cc (24)
  46. deps/v8/src/jsregexp.h (8)
  47. deps/v8/src/lithium-allocator.cc (12)
  48. deps/v8/src/lithium.cc (4)
  49. deps/v8/src/lithium.h (21)
  50. deps/v8/src/mark-compact-inl.h (13)
  51. deps/v8/src/mark-compact.cc (111)
  52. deps/v8/src/mark-compact.h (6)
  53. deps/v8/src/mips/assembler-mips-inl.h (1)
  54. deps/v8/src/mips/ic-mips.cc (41)
  55. deps/v8/src/objects-inl.h (3)
  56. deps/v8/src/objects.cc (53)
  57. deps/v8/src/objects.h (11)
  58. deps/v8/src/platform-freebsd.cc (15)
  59. deps/v8/src/platform-linux.cc (15)
  60. deps/v8/src/platform-macos.cc (17)
  61. deps/v8/src/platform-openbsd.cc (15)
  62. deps/v8/src/platform-solaris.cc (54)
  63. deps/v8/src/platform-win32.cc (15)
  64. deps/v8/src/platform.h (16)
  65. deps/v8/src/runtime.cc (124)
  66. deps/v8/src/serialize.cc (32)
  67. deps/v8/src/serialize.h (7)
  68. deps/v8/src/spaces.cc (18)
  69. deps/v8/src/spaces.h (2)
  70. deps/v8/src/type-info.h (3)
  71. deps/v8/src/v8globals.h (19)
  72. deps/v8/src/version.cc (2)
  73. deps/v8/src/x64/assembler-x64-inl.h (4)
  74. deps/v8/src/x64/builtins-x64.cc (1)
  75. deps/v8/src/x64/deoptimizer-x64.cc (129)
  76. deps/v8/src/x64/frames-x64.h (7)
  77. deps/v8/src/x64/ic-x64.cc (43)
  78. deps/v8/src/x64/lithium-codegen-x64.cc (12)
  79. deps/v8/src/x64/lithium-x64.cc (24)
  80. deps/v8/test/cctest/test-api.cc (119)
  81. deps/v8/test/cctest/test-compiler.cc (5)
  82. deps/v8/test/cctest/test-debug.cc (30)
  83. deps/v8/test/cctest/test-deoptimization.cc (4)
  84. deps/v8/test/cctest/test-mark-compact.cc (15)
  85. deps/v8/test/cctest/test-parsing.cc (4)
  86. deps/v8/test/mjsunit/compiler/regress-funarguments.js (20)
  87. deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js (174)
  88. deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js (160)
  89. deps/v8/test/mjsunit/regress/regress-1229.js (103)
  90. deps/v8/tools/js2c.py (5)
  91. deps/v8/tools/test.py (4)

deps/v8/ChangeLog (18)

@ -1,3 +1,21 @@
2012-01-26: Version 3.8.9

        Flush number string cache on GC (issue 1605).

        Provide access to function inferred name with
        v8::Function::GetInferredName in V8 public API.

        Fix building with Clang (issue 1912).

        Reduce the space used by the stack for the profiling thread.

        Fix misleading documentation of v8::Locker (issue 542).

        Introduce readbinary function in d8 to read binary files.

        Performance and stability improvements on all platforms.

2012-01-23: Version 3.8.8

        Limited number of loop iterations in Heap::ReserveSpace

deps/v8/build/common.gypi (2)

@ -295,7 +295,7 @@
'-O3',
],
'conditions': [
[ 'gcc_version==44', {
[ 'gcc_version==44 and clang==0', {
'cflags': [
# Avoid crashes with gcc 4.4 in the v8 test suite.
'-fno-tree-vrp',

deps/v8/include/v8.h (14)

@ -1731,6 +1731,14 @@ class Function : public Object {
V8EXPORT void SetName(Handle<String> name);
V8EXPORT Handle<Value> GetName() const;
/**
* Name inferred from variable or property assignment of this function.
* Used to facilitate debugging and profiling of JavaScript code written
* in an OO style, where many functions are anonymous but are assigned
* to object properties.
*/
V8EXPORT Handle<Value> GetInferredName() const;
/**
* Returns zero based line number of function body and
* kLineOffsetNotFound if no information available.
@ -2717,7 +2725,7 @@ class RetainedObjectInfo;
* default isolate is implicitly created and entered. The embedder
* can create additional isolates and use them in parallel in multiple
* threads. An isolate can be entered by at most one thread at any
* given time. The Locker/Unlocker API can be used to synchronize.
* given time. The Locker/Unlocker API must be used to synchronize.
*/
class V8EXPORT Isolate {
public:
@ -3559,7 +3567,9 @@ class V8EXPORT Context {
* accessing handles or holding onto object pointers obtained
* from V8 handles while in the particular V8 isolate. It is up
* to the user of V8 to ensure (perhaps with locking) that this
* constraint is not violated.
* constraint is not violated. In addition to any other synchronization
* mechanism that may be used, the v8::Locker and v8::Unlocker classes
* must be used to signal thread switches to V8.
*
* v8::Locker is a scoped lock object. While it's
* active (i.e. between its construction and destruction) the current thread is
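
The two API additions above lend themselves to a short embedding example. The following is a minimal sketch against the 3.8-era API, not code from this commit; the script source and names are invented for illustration.

#include <cstdio>
#include <v8.h>

// Sketch: print the name the parser inferred for an anonymous function that
// was assigned to an object property. Assumes the 3.8-era API where
// Context::New() returns a Persistent handle and Locker takes an Isolate*.
void ShowInferredName(v8::Isolate* isolate) {
  v8::Locker locker(isolate);   // signal the thread switch to V8
  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(
      "var obj = { method: function () { return 42; } }; obj.method"));
  v8::Handle<v8::Function> fun =
      v8::Handle<v8::Function>::Cast(script->Run());

  // GetName() is empty for this anonymous function, but the inferred name
  // reflects the property it was assigned to ("obj.method").
  v8::String::Utf8Value inferred(fun->GetInferredName());
  printf("inferred name: %s\n", *inferred);

  context.Dispose();
}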

deps/v8/include/v8stdint.h (3)

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -30,6 +30,7 @@
#ifndef V8STDINT_H_
#define V8STDINT_H_
#include <stddef.h>
#include <stdio.h>
#if defined(_WIN32) && !defined(__MINGW32__)

deps/v8/src/accessors.cc (21)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -26,15 +26,16 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "accessors.h"
#include "ast.h"
#include "contexts.h"
#include "deoptimizer.h"
#include "execution.h"
#include "factory.h"
#include "frames-inl.h"
#include "isolate.h"
#include "list-inl.h"
#include "safepoint-table.h"
#include "scopeinfo.h"
#include "property-details.h"
namespace v8 {
namespace internal {
@ -574,11 +575,12 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
Factory* factory = Isolate::Current()->factory();
int args_count = inlined_function->shared()->formal_parameter_count();
ScopedVector<SlotRef> args_slots(args_count);
SlotRef::ComputeSlotMappingForArguments(frame,
Vector<SlotRef> args_slots =
SlotRef::ComputeSlotMappingForArguments(
frame,
inlined_frame_index,
&args_slots);
inlined_function->shared()->formal_parameter_count());
int args_count = args_slots.length();
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
@ -587,6 +589,7 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
array->set(i, *value);
}
arguments->set_elements(*array);
args_slots.Dispose();
// Return the freshly allocated arguments object.
return *arguments;

deps/v8/src/accessors.h (3)

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,6 +29,7 @@
#define V8_ACCESSORS_H_
#include "allocation.h"
#include "v8globals.h"
namespace v8 {
namespace internal {

deps/v8/src/allocation.cc (9)

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -25,10 +25,11 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "../include/v8stdint.h"
#include "globals.h"
#include "checks.h"
#include "allocation.h"
#include <stdlib.h> // For free, malloc.
#include <string.h> // For memcpy.
#include "checks.h"
#include "utils.h"
namespace v8 {

deps/v8/src/allocation.h (3)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,7 +28,6 @@
#ifndef V8_ALLOCATION_H_
#define V8_ALLOCATION_H_
#include "checks.h"
#include "globals.h"
namespace v8 {

deps/v8/src/api.cc (24)

@ -25,34 +25,36 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "api.h"
#include "arguments.h"
#include <math.h> // For isnan.
#include <string.h> // For memcpy, strlen.
#include "../include/v8-debug.h"
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
#include "bootstrapper.h"
#include "compiler.h"
#include "conversions-inl.h"
#include "counters.h"
#include "debug.h"
#include "deoptimizer.h"
#include "execution.h"
#include "flags.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "messages.h"
#include "natives.h"
#include "parser.h"
#include "platform.h"
#include "profile-generator-inl.h"
#include "property-details.h"
#include "property.h"
#include "runtime-profiler.h"
#include "scanner-character-streams.h"
#include "serialize.h"
#include "snapshot.h"
#include "unicode-inl.h"
#include "v8threads.h"
#include "version.h"
#include "vm-state-inl.h"
#include "../include/v8-profiler.h"
#include "../include/v8-testing.h"
#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
@ -3622,6 +3624,12 @@ Handle<Value> Function::GetName() const {
}
Handle<Value> Function::GetInferredName() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name()));
}
ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {

deps/v8/src/api.h (10)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,10 +28,14 @@
#ifndef V8_API_H_
#define V8_API_H_
#include "apiutils.h"
#include "factory.h"
#include "v8.h"
#include "../include/v8-testing.h"
#include "apiutils.h"
#include "contexts.h"
#include "factory.h"
#include "isolate.h"
#include "list-inl.h"
namespace v8 {

deps/v8/src/arm/assembler-arm-inl.h (1)

@ -38,6 +38,7 @@
#define V8_ARM_ASSEMBLER_ARM_INL_H_
#include "arm/assembler-arm.h"
#include "cpu.h"
#include "debug.h"

deps/v8/src/arm/assembler-arm.h (12)

@ -300,11 +300,13 @@ const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// Aliases for double registers.
static const DwVfpRegister& kFirstCalleeSavedDoubleReg = d8;
static const DwVfpRegister& kLastCalleeSavedDoubleReg = d15;
static const DwVfpRegister& kDoubleRegZero = d14;
static const DwVfpRegister& kScratchDoubleReg = d15;
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.
#define kFirstCalleeSavedDoubleReg d8
#define kLastCalleeSavedDoubleReg d15
#define kDoubleRegZero d14
#define kScratchDoubleReg d15
// Coprocessor register
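
To make the motivation concrete, here is an illustrative reduction of the warning the comment refers to (not code from the tree; names are made up). A header-scope static const reference defines an unused internal-linkage variable in every translation unit that includes the header, which Clang diagnoses, while an object-like macro introduces no variable at all.

// regs-example.h, illustrative only.
struct ExampleDoubleRegister { int code; };
static const ExampleDoubleRegister example_d8 = { 8 };

// Old style: every .cc file that includes this header without using the
// alias gets a Clang diagnostic along the lines of
//   warning: unused variable 'kExampleCalleeSavedReg' [-Wunused-variable]
static const ExampleDoubleRegister& kExampleCalleeSavedReg = example_d8;

// New style: a macro leaves nothing behind for the warning to fire on.
#define kExampleScratchReg example_d8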

deps/v8/src/arm/builtins-arm.cc (1)

@ -1760,6 +1760,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&invoke);
__ Call(r3);
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Exit frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ Jump(lr);

deps/v8/src/arm/deoptimizer-arm.cc (123)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -211,12 +211,13 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
iterator.Skip(1); // Drop JS frame count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@ -252,9 +253,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
#ifdef DEBUG
output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
#endif
output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@ -342,15 +341,115 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index) {
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
// Arguments adaptor can not be topmost or bottommost.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
// frame's top and this frame's size.
uint32_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
input_offset -= (parameter_count * kPointerSize);
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
// A marker value is used in place of the context.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
top_address + output_offset, output_offset, context);
}
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
// Number of incoming arguments.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
ASSERT(0 == output_offset);
Builtins* builtins = isolate_->builtins();
Code* adaptor_trampoline =
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
uint32_t pc = reinterpret_cast<uint32_t>(
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc);
}
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@ -370,9 +469,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
#ifdef DEBUG
output_frame->SetKind(Code::FUNCTION);
#endif
output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);

deps/v8/src/arm/disasm-arm.cc (34)

@ -662,6 +662,15 @@ void Decoder::Format(Instruction* instr, const char* format) {
}
// The disassembler may end up decoding data inlined in the code. We do not want
// it to crash if the data does not resemble any known instruction.
#define VERIFY(condition) \
if(!(condition)) { \
Unknown(instr); \
return; \
}
// For currently unimplemented decodings the disassembler calls Unknown(instr)
// which will just print "unknown" for the instruction bits.
void Decoder::Unknown(Instruction* instr) {
@ -947,13 +956,13 @@ void Decoder::DecodeType2(Instruction* instr) {
void Decoder::DecodeType3(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
ASSERT(!instr->HasW());
VERIFY(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
break;
}
case ia_x: {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
VERIFY(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
} else {
@ -1074,8 +1083,8 @@ int Decoder::DecodeType7(Instruction* instr) {
// vmsr
// Dd = vsqrt(Dm)
void Decoder::DecodeTypeVFP(Instruction* instr) {
ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
ASSERT(instr->Bits(11, 9) == 0x5);
VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
VERIFY(instr->Bits(11, 9) == 0x5);
if (instr->Bit(4) == 0) {
if (instr->Opc1Value() == 0x7) {
@ -1166,7 +1175,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
Instruction* instr) {
ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
VERIFY((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0));
bool to_arm_register = (instr->VLValue() == 0x1);
@ -1180,8 +1189,8 @@ void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
void Decoder::DecodeVCMP(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
(instr->Opc3Value() & 0x1));
// Comparison.
@ -1203,8 +1212,8 @@ void Decoder::DecodeVCMP(Instruction* instr) {
void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
bool double_to_single = (instr->SzValue() == 1);
@ -1217,8 +1226,8 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
(((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
bool to_integer = (instr->Bit(18) == 1);
@ -1265,7 +1274,7 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
ASSERT(instr->TypeValue() == 6);
VERIFY(instr->TypeValue() == 6);
if (instr->CoprocessorValue() == 0xA) {
switch (instr->OpcodeValue()) {
@ -1347,6 +1356,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
}
#undef VERIFY
bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));

deps/v8/src/arm/frames-arm.h (7)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -136,6 +136,9 @@ class ExitFrameConstants : public AllStatic {
class StandardFrameConstants : public AllStatic {
public:
// Fixed part of the frame consists of return address, caller fp,
// context and function.
static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@ -161,6 +164,8 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
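
Putting these constants together with the slots that DoComputeArgumentsAdaptorFrame writes above, the adaptor frame on ARM works out as follows. This is an illustrative layout, assuming kPointerSize == 4; it is not a comment from the commit itself.

// Offsets are relative to the adaptor frame's fp:
//
//   fp + 4   caller's return address
//   fp + 0   caller's fp
//   fp - 4   context slot, holding Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)
//   fp - 8   the JSFunction whose arguments are being adapted
//   fp - 12  number of incoming arguments as a Smi (kLengthOffset)
//
// kFrameSize = kFixedFrameSize + kPointerSize = 4 * 4 + 4 = 20 bytes,
// i.e. the four standard slots plus the argument-count slot.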

deps/v8/src/arm/ic-arm.cc (39)

@ -1036,21 +1036,29 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load the key (consisting of map and symbol) from the cache and
// check for match.
Label try_second_entry, hit_on_first_entry, load_in_object_property;
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys =
ExternalReference::keyed_lookup_cache_keys(isolate);
__ mov(r4, Operand(cache_keys));
__ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
// Move r4 to second entry.
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
// Load map and move r4 to next entry.
__ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
__ cmp(r2, r5);
__ b(ne, &try_second_entry);
__ b(ne, &try_next_entry);
__ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol
__ cmp(r0, r5);
__ b(eq, &hit_on_first_entry);
__ b(eq, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
__ bind(&try_second_entry);
__ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol.
// Last entry: Load map and move r4 to symbol.
__ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
__ cmp(r2, r5);
__ b(ne, &slow);
__ ldr(r5, MemOperand(r4));
@ -1065,22 +1073,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(isolate);
// Hit on second entry.
// Hit on nth entry.
for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
__ bind(&hit_on_nth_entry[i]);
__ mov(r4, Operand(cache_field_offsets));
__ add(r3, r3, Operand(1));
if (i != 0) {
__ add(r3, r3, Operand(i));
}
__ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
__ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
__ sub(r5, r5, r6, SetCC);
__ b(ge, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
// Hit on first entry.
__ bind(&hit_on_first_entry);
__ mov(r4, Operand(cache_field_offsets));
__ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
__ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
__ sub(r5, r5, r6, SetCC);
__ b(ge, &property_array_property);
}
}
// Load in-object property.
__ bind(&load_in_object_property);

deps/v8/src/arm/lithium-arm.cc (24)

@ -1005,14 +1005,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
int value_count = hydrogen_env->length();
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
hydrogen_env->is_arguments_adaptor(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@ -1021,13 +1023,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
op = new LArgument((*argument_index_accumulator)++);
op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
if (!hydrogen_env->is_arguments_adaptor()) {
*argument_index_accumulator = argument_index;
}
return result;
}
@ -1917,12 +1923,11 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Representation representation(instr->representation());
ASSERT(
(representation.IsInteger32() &&
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(representation.IsDouble() &&
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
@ -1982,13 +1987,12 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(representation.IsInteger32() &&
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(representation.IsDouble() &&
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
@ -2244,6 +2248,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
@ -2254,7 +2259,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
HEnvironment* outer = current_block_->last_environment()->outer();
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}

deps/v8/src/arm/lithium-codegen-arm.cc (12)

@ -479,7 +479,11 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
translation->BeginFrame(environment->ast_id(), closure_id, height);
if (environment->is_arguments_adaptor()) {
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
} else {
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@ -612,10 +616,14 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
// |>------------ translation_size ------------<|
int frame_count = 0;
int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (!e->is_arguments_adaptor()) {
++jsframe_count;
}
}
Translation translation(&translations_, frame_count);
Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
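
As a concrete, hypothetical illustration of the two counts: deoptimizing a function with one inlined call that needed an arguments adaptor walks an environment chain of three frames, only two of which are JavaScript frames.

// environment chain:  inlined JS frame -> arguments adaptor -> outer JS frame
// frame_count   == 3  (every LEnvironment in the chain)
// jsframe_count == 2  (adaptor environments are not counted)
//
// WriteTranslation() then emits BeginJSFrame(...) twice and
// BeginArgumentsAdaptorFrame(...) once, and both counts are recorded in the
// translation's BEGIN entry by Translation(&translations_, 3, 2); the
// deoptimizer reads the first and skips the second in DoComputeOutputFrames.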

deps/v8/src/assembler.cc (41)

@ -30,25 +30,42 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
#include "v8.h"
#include "assembler.h"
#include "arguments.h"
#include <math.h> // For cos, log, pow, sin, tan, etc.
#include "api.h"
#include "builtins.h"
#include "counters.h"
#include "cpu.h"
#include "debug.h"
#include "deoptimizer.h"
#include "execution.h"
#include "ic-inl.h"
#include "incremental-marking.h"
#include "factory.h"
#include "ic.h"
#include "isolate.h"
#include "jsregexp.h"
#include "platform.h"
#include "regexp-macro-assembler.h"
#include "regexp-stack.h"
#include "runtime.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "store-buffer-inl.h"
#include "stub-cache.h"
#include "regexp-stack.h"
#include "ast.h"
#include "regexp-macro-assembler.h"
#include "platform.h"
#include "store-buffer.h"
#include "token.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/assembler-mips-inl.h"
#else
#error "Unknown architecture."
#endif
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32

deps/v8/src/assembler.h (10)

@ -30,19 +30,27 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
#include "v8.h"
#include "allocation.h"
#include "builtins.h"
#include "gdb-jit.h"
#include "isolate.h"
#include "runtime.h"
#include "token.h"
namespace v8 {
class ApiFunction;
namespace internal {
struct StatsCounter;
const unsigned kNoASTId = -1;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.

deps/v8/src/ast.cc (11)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -25,10 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "ast.h"
#include <math.h> // For isfinite.
#include "builtins.h"
#include "conversions.h"
#include "hashmap.h"
#include "parser.h"
#include "property-details.h"
#include "property.h"
#include "scopes.h"
#include "string-stream.h"
#include "type-info.h"

deps/v8/src/ast.h (41)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,14 +28,19 @@
#ifndef V8_AST_H_
#define V8_AST_H_
#include "allocation.h"
#include "execution.h"
#include "v8.h"
#include "assembler.h"
#include "factory.h"
#include "isolate.h"
#include "jsregexp.h"
#include "list-inl.h"
#include "runtime.h"
#include "small-pointer-list.h"
#include "smart-array-pointer.h"
#include "token.h"
#include "variables.h"
#include "zone-inl.h"
namespace v8 {
namespace internal {
@ -98,12 +103,28 @@ namespace internal {
EXPRESSION_NODE_LIST(V)
// Forward declarations
class BitVector;
class DefinitionInfo;
class AstVisitor;
class BreakableStatement;
class Expression;
class IterationStatement;
class MaterializedLiteral;
class Statement;
class TargetCollector;
class TypeFeedbackOracle;
class RegExpAlternative;
class RegExpAssertion;
class RegExpAtom;
class RegExpBackReference;
class RegExpCapture;
class RegExpCharacterClass;
class RegExpCompiler;
class RegExpDisjunction;
class RegExpEmpty;
class RegExpLookahead;
class RegExpQuantifier;
class RegExpText;
#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
@ -115,11 +136,6 @@ typedef ZoneList<Handle<String> > ZoneStringList;
typedef ZoneList<Handle<Object> > ZoneObjectList;
#define DECLARE_NODE_TYPE(type) \
virtual void Accept(AstVisitor* v); \
virtual AstNode::Type node_type() const { return AstNode::k##type; } \
class AstNode: public ZoneObject {
public:
#define DECLARE_TYPE_ENUM(type) k##type,
@ -190,6 +206,11 @@ class AstNode: public ZoneObject {
};
#define DECLARE_NODE_TYPE(type) \
virtual void Accept(AstVisitor* v); \
virtual AstNode::Type node_type() const { return AstNode::k##type; } \
class Statement: public AstNode {
public:
Statement() : statement_pos_(RelocInfo::kNoPosition) {}

deps/v8/src/cpu-profiler.cc (7)

@ -39,13 +39,14 @@
namespace v8 {
namespace internal {
static const int kEventsBufferSize = 256*KB;
static const int kTickSamplesBufferChunkSize = 64*KB;
static const int kEventsBufferSize = 256 * KB;
static const int kTickSamplesBufferChunkSize = 64 * KB;
static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 32 * KB;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: Thread("v8:ProfEvntProc"),
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),

deps/v8/src/d8.cc (29)

@ -120,6 +120,9 @@ ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";
const int MB = 1024 * 1024;
#ifndef V8_SHARED
bool CounterMap::Match(void* key1, void* key2) {
const char* name1 = reinterpret_cast<const char*>(key1);
@ -803,6 +806,8 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
global_template->Set(String::New("readbinary"),
FunctionTemplate::New(ReadBinary));
global_template->Set(String::New("readline"),
FunctionTemplate::New(ReadLine));
global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@ -1021,6 +1026,23 @@ static char* ReadChars(const char* name, int* size_out) {
}
Handle<Value> Shell::ReadBinary(const Arguments& args) {
String::Utf8Value filename(args[0]);
int size;
if (*filename == NULL) {
return ThrowException(String::New("Error loading file"));
}
char* chars = ReadChars(*filename, &size);
if (chars == NULL) {
return ThrowException(String::New("Error reading file"));
}
// We skip checking the string for UTF8 characters and use it raw as
// backing store for the external string with 8-bit characters.
BinaryResource* resource = new BinaryResource(chars, size);
return String::NewExternal(resource);
}
#ifndef V8_SHARED
static char* ReadToken(char* data, char token) {
char* next = i::OS::StrChr(data, token);
@ -1191,14 +1213,11 @@ Handle<String> SourceGroup::ReadFile(const char* name) {
#ifndef V8_SHARED
i::Thread::Options SourceGroup::GetThreadOptions() {
i::Thread::Options options;
options.name = "IsolateThread";
// On some systems (OSX 10.6) the stack size default is 0.5Mb or less
// which is not enough to parse the big literal expressions used in tests.
// The stack size should be at least StackGuard::kLimitSize + some
// OS-specific padding for thread startup code.
options.stack_size = 2 << 20; // 2 Mb seems to be enough
return options;
// OS-specific padding for thread startup code. 2Mbytes seems to be enough.
return i::Thread::Options("IsolateThread", 2 * MB);
}
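
The same two-argument Thread::Options constructor is what lets cpu-profiler.cc cap the profiler thread's stack at 32 KB earlier in this change. Below is a hedged sketch of how another internal thread could adopt it; the Worker class and its 128 KB figure are made up for illustration.

// Inside namespace v8::internal; illustrative only.
class Worker : public Thread {
 public:
  // Name the thread and cap its stack at 128 KB; a stack_size of 0 would
  // keep the platform default.
  Worker() : Thread(Thread::Options("d8:Worker", 128 * KB)) {}
  virtual void Run() {
    // ... thread body ...
  }
};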

deps/v8/src/d8.h (22)

@ -195,6 +195,27 @@ class SourceGroup {
};
class BinaryResource : public v8::String::ExternalAsciiStringResource {
public:
BinaryResource(const char* string, int length)
: data_(string),
length_(length) { }
~BinaryResource() {
delete[] data_;
data_ = NULL;
length_ = 0;
}
virtual const char* data() const { return data_; }
virtual size_t length() const { return length_; }
private:
const char* data_;
size_t length_;
};
class ShellOptions {
public:
ShellOptions() :
@ -286,6 +307,7 @@ class Shell : public i::AllStatic {
static Handle<Value> EnableProfiler(const Arguments& args);
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBinary(const Arguments& args);
static Handle<String> ReadFromStdin();
static Handle<Value> ReadLine(const Arguments& args) {
return ReadFromStdin();

deps/v8/src/debug.cc (271)

@ -1758,134 +1758,56 @@ static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
}
void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
Deoptimizer::DeoptimizeAll();
Handle<Code> lazy_compile =
Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
// Keep the list of activated functions in a handlified list as it
// is used both in GC and non-GC code.
List<Handle<JSFunction> > active_functions(100);
{
// We are going to iterate heap to find all functions without
// debug break slots.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
// Ensure no GC in this scope as we are comparing raw pointer
// values and performing a heap iteration.
AssertNoAllocation no_allocation;
static void CollectActiveFunctionsFromThread(
Isolate* isolate,
ThreadLocalTop* top,
List<Handle<JSFunction> >* active_functions,
Object* active_code_marker) {
// Find all non-optimized code functions with activation frames
// on the stack. This includes functions which have optimized
// activations (including inlined functions) on the stack as the
// non-optimized code is needed for the lazy deoptimization.
for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->is_optimized()) {
List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
frame->GetFunctions(&functions);
for (int i = 0; i < functions.length(); i++) {
if (!functions[i]->shared()->code()->has_debug_break_slots()) {
active_functions.Add(Handle<JSFunction>(functions[i]));
}
JSFunction* function = functions[i];
active_functions->Add(Handle<JSFunction>(function));
function->shared()->code()->set_gc_metadata(active_code_marker);
}
} else if (frame->function()->IsJSFunction()) {
JSFunction* function = JSFunction::cast(frame->function());
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
if (!frame->LookupCode()->has_debug_break_slots() ||
!function->shared()->code()->has_debug_break_slots()) {
active_functions.Add(Handle<JSFunction>(function));
}
active_functions->Add(Handle<JSFunction>(function));
function->shared()->code()->set_gc_metadata(active_code_marker);
}
}
}
// Sort the functions on the object pointer value to prepare for
// the binary search below.
active_functions.Sort(HandleObjectPointerCompare<JSFunction>);
// Scan the heap for all non-optimized functions which have no
// debug break slots.
HeapIterator iterator;
HeapObject* obj = NULL;
while (((obj = iterator.next()) != NULL)) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
if (function->shared()->allows_lazy_compilation() &&
function->shared()->script()->IsScript() &&
function->code()->kind() == Code::FUNCTION &&
!function->code()->has_debug_break_slots()) {
bool has_activation =
SortedListBSearch<Handle<JSFunction> >(
active_functions,
Handle<JSFunction>(function),
HandleObjectPointerCompare<JSFunction>) != -1;
if (!has_activation) {
function->set_code(*lazy_compile);
function->shared()->set_code(*lazy_compile);
}
}
}
}
}
static void RedirectActivationsToRecompiledCodeOnThread(
Isolate* isolate,
ThreadLocalTop* top) {
for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
// Now the non-GC scope is left, and the sorting of the functions
// in active_function is not ensured any more. The code below does
// not rely on it.
if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
// Now recompile all functions with activation frames and
// patch the return address to run in the new compiled code.
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
Handle<SharedFunctionInfo> shared(function->shared());
// If recompilation is not possible just skip it.
if (shared->is_toplevel() ||
!shared->allows_lazy_compilation() ||
shared->code()->kind() == Code::BUILTIN) {
continue;
}
// Make sure that the shared full code is compiled with debug
// break slots.
if (function->code() == *lazy_compile) {
function->set_code(shared->code());
}
if (!shared->code()->has_debug_break_slots()) {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
Handle<Code> current_code(function->shared()->code());
ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
ASSERT(current_code->kind() == Code::FUNCTION);
CompileFullCodeForDebugging(shared, current_code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
if (!shared->is_compiled()) {
shared->set_code(*current_code);
continue;
}
}
Handle<Code> new_code(shared->code());
JSFunction* function = JSFunction::cast(frame->function());
// Find the function and patch the return address.
for (JavaScriptFrameIterator it(isolate_); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
// If the current frame is for this function in its
// non-optimized form rewrite the return address to continue
// in the newly compiled full code with debug break slots.
if (!frame->is_optimized() &&
frame->function()->IsJSFunction() &&
frame->function() == *function) {
ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
Handle<Code> frame_code(frame->LookupCode());
if (frame_code->has_debug_break_slots()) continue;
Handle<Code> new_code(function->shared()->code());
if (new_code->kind() != Code::FUNCTION ||
!new_code->has_debug_break_slots()) {
continue;
}
intptr_t delta = frame->pc() - frame_code->instruction_start();
int debug_break_slot_count = 0;
int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
@ -1934,8 +1856,147 @@ void Debug::PrepareForBreakPoints() {
frame->set_pc(
new_code->instruction_start() + delta + debug_break_slot_bytes);
}
}
class ActiveFunctionsCollector : public ThreadVisitor {
public:
explicit ActiveFunctionsCollector(List<Handle<JSFunction> >* active_functions,
Object* active_code_marker)
: active_functions_(active_functions),
active_code_marker_(active_code_marker) { }
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
CollectActiveFunctionsFromThread(isolate,
top,
active_functions_,
active_code_marker_);
}
private:
List<Handle<JSFunction> >* active_functions_;
Object* active_code_marker_;
};
class ActiveFunctionsRedirector : public ThreadVisitor {
public:
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
RedirectActivationsToRecompiledCodeOnThread(isolate, top);
}
};
void Debug::PrepareForBreakPoints() {
// If preparing for the first break point make sure to deoptimize all
// functions as debugging does not work with optimized code.
if (!has_break_points_) {
Deoptimizer::DeoptimizeAll();
Handle<Code> lazy_compile =
Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
// Keep the list of activated functions in a handlified list as it
// is used both in GC and non-GC code.
List<Handle<JSFunction> > active_functions(100);
{
// We are going to iterate heap to find all functions without
// debug break slots.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
// Ensure no GC in this scope as we are going to use gc_metadata
// field in the Code object to mark active functions.
AssertNoAllocation no_allocation;
Object* active_code_marker = isolate_->heap()->the_hole_value();
CollectActiveFunctionsFromThread(isolate_,
isolate_->thread_local_top(),
&active_functions,
active_code_marker);
ActiveFunctionsCollector active_functions_collector(&active_functions,
active_code_marker);
isolate_->thread_manager()->IterateArchivedThreads(
&active_functions_collector);
// Scan the heap for all non-optimized functions which have no
// debug break slots and are not active or inlined into an active
// function and mark them for lazy compilation.
HeapIterator iterator;
HeapObject* obj = NULL;
while (((obj = iterator.next()) != NULL)) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
SharedFunctionInfo* shared = function->shared();
if (shared->allows_lazy_compilation() &&
shared->script()->IsScript() &&
function->code()->kind() == Code::FUNCTION &&
!function->code()->has_debug_break_slots() &&
shared->code()->gc_metadata() != active_code_marker) {
function->set_code(*lazy_compile);
function->shared()->set_code(*lazy_compile);
}
}
}
// Clear gc_metadata field.
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
function->shared()->code()->set_gc_metadata(Smi::FromInt(0));
}
}
// Now recompile all functions with activation frames and
// patch the return address to run in the new compiled code.
for (int i = 0; i < active_functions.length(); i++) {
Handle<JSFunction> function = active_functions[i];
if (function->code()->kind() == Code::FUNCTION &&
function->code()->has_debug_break_slots()) {
// Nothing to do. Function code already had debug break slots.
continue;
}
Handle<SharedFunctionInfo> shared(function->shared());
// If recompilation is not possible just skip it.
if (shared->is_toplevel() ||
!shared->allows_lazy_compilation() ||
shared->code()->kind() == Code::BUILTIN) {
continue;
}
// Make sure that the shared full code is compiled with debug
// break slots.
if (!shared->code()->has_debug_break_slots()) {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
Handle<Code> current_code(function->shared()->code());
ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
ASSERT(current_code->kind() == Code::FUNCTION);
CompileFullCodeForDebugging(shared, current_code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
if (!shared->is_compiled()) {
shared->set_code(*current_code);
continue;
}
}
// Keep function code in sync with shared function info.
function->set_code(shared->code());
}
RedirectActivationsToRecompiledCodeOnThread(isolate_,
isolate_->thread_local_top());
ActiveFunctionsRedirector active_functions_redirector;
isolate_->thread_manager()->IterateArchivedThreads(
&active_functions_redirector);
}
}

deps/v8/src/deoptimizer.cc (307)

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -104,10 +104,27 @@ Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
return result;
}
int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
if (jsframe_index == 0) return 0;
int frame_index = 0;
while (jsframe_index >= 0) {
FrameDescription* frame = output_[frame_index];
if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
jsframe_index--;
}
frame_index++;
}
return frame_index - 1;
}
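
A worked example of the mapping, using a hypothetical frame layout rather than one from the commit:

// Suppose deoptimization produced three output frames:
//
//   output_[0]  JAVA_SCRIPT        (the outermost function)
//   output_[1]  ARGUMENTS_ADAPTOR  (adaptor for a mismatched inlined call)
//   output_[2]  JAVA_SCRIPT        (the inlined function)
//
// ConvertJSFrameIndexToFrameIndex(0) == 0   // first JS frame
// ConvertJSFrameIndexToFrameIndex(1) == 2   // the adaptor at index 1 is skipped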
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
JavaScriptFrame* frame,
int frame_index,
int jsframe_index,
Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
ASSERT(frame->is_optimized());
@ -143,22 +160,40 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
// Create the GC safe output frame information and register it for GC
// handling.
ASSERT_LT(frame_index, deoptimizer->output_count());
ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
// Convert JS frame index into frame index.
int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
bool has_arguments_adaptor =
frame_index > 0 &&
deoptimizer->output_[frame_index - 1]->GetFrameType() ==
StackFrame::ARGUMENTS_ADAPTOR;
DeoptimizedFrameInfo* info =
new DeoptimizedFrameInfo(deoptimizer, frame_index);
new DeoptimizedFrameInfo(deoptimizer, frame_index, has_arguments_adaptor);
isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
// Get the "simulated" top and size for the requested frame.
Address top =
reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop());
uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize();
FrameDescription* parameters_frame =
deoptimizer->output_[
has_arguments_adaptor ? (frame_index - 1) : frame_index];
uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
Address parameters_top = reinterpret_cast<Address>(
parameters_frame->GetTop() + (parameters_frame->GetFrameSize() -
parameters_size));
uint32_t expressions_size = info->expression_count() * kPointerSize;
Address expressions_top = reinterpret_cast<Address>(
deoptimizer->output_[frame_index]->GetTop());
// Done with the GC-unsafe frame descriptions. This re-enables allocation.
deoptimizer->DeleteFrameDescriptions();
// Allocate a heap number for the doubles belonging to this frame.
deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
top, size, info);
parameters_top, parameters_size, expressions_top, expressions_size, info);
// Finished using the deoptimizer instance.
delete deoptimizer;
@ -313,6 +348,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
fp_to_sp_delta_(fp_to_sp_delta),
input_(NULL),
output_count_(0),
jsframe_count_(0),
output_(NULL),
frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
has_alignment_padding_(0),
@ -377,9 +413,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
#ifdef DEBUG
input_->SetKind(Code::OPTIMIZED_FUNCTION);
#endif
input_->SetFrameType(StackFrame::JAVA_SCRIPT);
}
@ -515,6 +549,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Read the number of output frames and allocate an array for their
// descriptions.
int count = iterator.Next();
iterator.Next(); // Drop JS frames count.
ASSERT(output_ == NULL);
output_ = new FrameDescription*[count];
for (int i = 0; i < count; ++i) {
@ -524,7 +559,21 @@ void Deoptimizer::DoComputeOutputFrames() {
// Translate each output frame.
for (int i = 0; i < count; ++i) {
DoComputeFrame(&iterator, i);
// Read the ast node id, function, and frame height for this output frame.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
switch (opcode) {
case Translation::JS_FRAME:
DoComputeJSFrame(&iterator, i);
jsframe_count_++;
break;
case Translation::ARGUMENTS_ADAPTOR_FRAME:
DoComputeArgumentsAdaptorFrame(&iterator, i);
break;
default:
UNREACHABLE();
break;
}
}
// Print some helpful diagnostic information.
@ -565,39 +614,52 @@ void Deoptimizer::MaterializeHeapNumbers() {
#ifdef ENABLE_DEBUGGER_SUPPORT
void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
Address top, uint32_t size, DeoptimizedFrameInfo* info) {
Address parameters_top,
uint32_t parameters_size,
Address expressions_top,
uint32_t expressions_size,
DeoptimizedFrameInfo* info) {
ASSERT_EQ(DEBUGGER, bailout_type_);
Address parameters_bottom = parameters_top + parameters_size;
Address expressions_bottom = expressions_top + expressions_size;
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
// Check if the heap number to materialize actually belongs to the frame
// being extracted.
Address slot = d.slot_address();
if (top <= slot && slot < top + size) {
if (parameters_top <= slot && slot < parameters_bottom) {
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
// Calculate the index with the bottom of the expression stack
// at index 0, and the fixed part (including incoming arguments)
// at negative indexes.
int index = static_cast<int>(
info->expression_count_ - (slot - top) / kPointerSize - 1);
int index = (info->parameters_count() - 1) -
static_cast<int>(slot - parameters_top) / kPointerSize;
if (FLAG_trace_deopt) {
PrintF("Materializing a new heap number %p [%e] in slot %p"
"for stack index %d\n",
"for parameter slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address(),
index);
}
if (index >=0) {
info->SetExpression(index, *num);
} else {
// Calculate parameter index subtracting one for the receiver.
int parameter_index =
index +
static_cast<int>(size) / kPointerSize -
info->expression_count_ - 1;
info->SetParameter(parameter_index, *num);
info->SetParameter(index, *num);
} else if (expressions_top <= slot && slot < expressions_bottom) {
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
int index = info->expression_count() - 1 -
static_cast<int>(slot - expressions_top) / kPointerSize;
if (FLAG_trace_deopt) {
PrintF("Materializing a new heap number %p [%e] in slot %p"
"for expression slot #%d\n",
reinterpret_cast<void*>(*num),
d.value(),
d.slot_address(),
index);
}
info->SetExpression(index, *num);
}
}
}
@ -622,7 +684,8 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
switch (opcode) {
case Translation::BEGIN:
case Translation::FRAME:
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
@ -691,7 +754,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
input_->GetOffsetFromSlotIndex(this, input_slot_index);
input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": ",
@ -710,7 +773,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::INT32_STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
input_->GetOffsetFromSlotIndex(this, input_slot_index);
input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
bool is_smi = Smi::IsValid(value);
if (FLAG_trace_deopt) {
@ -739,7 +802,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DOUBLE_STACK_SLOT: {
int input_slot_index = iterator->Next();
unsigned input_offset =
input_->GetOffsetFromSlotIndex(this, input_slot_index);
input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
@ -808,7 +871,8 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
switch (opcode) {
case Translation::BEGIN:
case Translation::FRAME:
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
@ -871,7 +935,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::STACK_SLOT: {
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(this, output_index);
output->GetOffsetFromSlotIndex(output_index);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
output_offset,
@ -890,7 +954,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(this, output_index);
output->GetOffsetFromSlotIndex(output_index);
int int32_value = input_object->IsSmi()
? Smi::cast(input_object)->value()
: DoubleToInt32(input_object->Number());
@ -922,7 +986,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int output_index = iterator->Next();
unsigned output_offset =
output->GetOffsetFromSlotIndex(this, output_index);
output->GetOffsetFromSlotIndex(output_index);
double double_value = input_object->Number();
uint64_t int_value = BitCast<uint64_t, double>(double_value);
int32_t lower = static_cast<int32_t>(int_value);
@ -1033,8 +1097,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, and all the incoming arguments.
static const unsigned kFixedSlotSize = 4 * kPointerSize;
return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
return ComputeIncomingArgumentSize(function) +
StandardFrameConstants::kFixedFrameSize;
}
@ -1154,49 +1218,62 @@ FrameDescription::FrameDescription(uint32_t frame_size,
}
unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
int slot_index) {
int FrameDescription::ComputeFixedSize() {
return StandardFrameConstants::kFixedFrameSize +
(ComputeParametersCount() + 1) * kPointerSize;
}
unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
if (slot_index >= 0) {
// Local or spill slots. Skip the fixed part of the frame
// including all arguments.
unsigned base =
GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
unsigned base = GetFrameSize() - ComputeFixedSize();
return base - ((slot_index + 1) * kPointerSize);
} else {
// Incoming parameter.
unsigned base = GetFrameSize() -
deoptimizer->ComputeIncomingArgumentSize(GetFunction());
int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
unsigned base = GetFrameSize() - arg_size;
return base - ((slot_index + 1) * kPointerSize);
}
}
int FrameDescription::ComputeParametersCount() {
switch (type_) {
case StackFrame::JAVA_SCRIPT:
return function_->shared()->formal_parameter_count();
case StackFrame::ARGUMENTS_ADAPTOR: {
// Last slot contains number of incoming arguments as a smi.
// Can't use GetExpression(0) because it would cause infinite recursion.
return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
}
default:
UNREACHABLE();
return 0;
}
}
Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) {
ASSERT_EQ(Code::FUNCTION, kind_);
Object* FrameDescription::GetParameter(int index) {
ASSERT(index >= 0);
ASSERT(index < ComputeParametersCount());
// The slot indexes for incoming arguments are negative.
unsigned offset = GetOffsetFromSlotIndex(deoptimizer,
index - ComputeParametersCount());
unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}
unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
ASSERT_EQ(Code::FUNCTION, kind_);
unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
unsigned FrameDescription::GetExpressionCount() {
ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
unsigned size = GetFrameSize() - ComputeFixedSize();
return size / kPointerSize;
}
Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) {
ASSERT_EQ(Code::FUNCTION, kind_);
unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index);
Object* FrameDescription::GetExpression(int index) {
ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
unsigned offset = GetOffsetFromSlotIndex(index);
return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
}
@ -1242,8 +1319,15 @@ Handle<ByteArray> TranslationBuffer::CreateByteArray() {
}
void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
buffer_->Add(FRAME);
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
buffer_->Add(literal_id);
buffer_->Add(height);
}
void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) {
buffer_->Add(JS_FRAME);
buffer_->Add(node_id);
buffer_->Add(literal_id);
buffer_->Add(height);
@ -1307,7 +1391,6 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case ARGUMENTS_OBJECT:
case DUPLICATE:
return 0;
case BEGIN:
case REGISTER:
case INT32_REGISTER:
case DOUBLE_REGISTER:
@ -1316,7 +1399,10 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case DOUBLE_STACK_SLOT:
case LITERAL:
return 1;
case FRAME:
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
return 2;
case JS_FRAME:
return 3;
}
UNREACHABLE();
@ -1330,8 +1416,10 @@ const char* Translation::StringFor(Opcode opcode) {
switch (opcode) {
case BEGIN:
return "BEGIN";
case FRAME:
return "FRAME";
case JS_FRAME:
return "JS_FRAME";
case ARGUMENTS_ADAPTOR_FRAME:
return "ARGUMENTS_ADAPTOR_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
@ -1385,7 +1473,8 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
switch (opcode) {
case Translation::BEGIN:
case Translation::FRAME:
case Translation::JS_FRAME:
case Translation::ARGUMENTS_ADAPTOR_FRAME:
// Peeled off before getting here.
break;
@ -1431,9 +1520,27 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
}
void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
int inlined_frame_index,
Vector<SlotRef>* args_slots) {
void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
TranslationIterator* it,
DeoptimizationInputData* data,
JavaScriptFrame* frame) {
// Process the translation commands for the arguments.
// Skip the translation command for the receiver.
it->Skip(Translation::NumberOfOperandsFor(
static_cast<Translation::Opcode>(it->Next())));
// Compute slots for arguments.
for (int i = 0; i < args_slots->length(); ++i) {
(*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
}
}
Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
JavaScriptFrame* frame,
int inlined_jsframe_index,
int formal_parameter_count) {
AssertNoAllocation no_gc;
int deopt_index = AstNode::kNoNumber;
DeoptimizationInputData* data =
@ -1442,51 +1549,73 @@ void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
int frame_count = it.Next();
USE(frame_count);
ASSERT(frame_count > inlined_frame_index);
int frames_to_skip = inlined_frame_index;
it.Next(); // Drop frame count.
int jsframe_count = it.Next();
USE(jsframe_count);
ASSERT(jsframe_count > inlined_jsframe_index);
int jsframes_to_skip = inlined_jsframe_index;
while (true) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
if (jsframes_to_skip == 0) {
ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
it.Skip(1); // literal id
int height = it.Next();
// We reached the arguments adaptor frame corresponding to the
// inlined function in question. Number of arguments is height - 1.
Vector<SlotRef> args_slots =
Vector<SlotRef>::New(height - 1); // Minus receiver.
ComputeSlotsForArguments(&args_slots, &it, data, frame);
return args_slots;
}
} else if (opcode == Translation::JS_FRAME) {
if (jsframes_to_skip == 0) {
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
if (opcode == Translation::FRAME) {
if (frames_to_skip == 0) {
// We reached the frame corresponding to the inlined function
// in question. Process the translation commands for the
// arguments.
//
// Skip the translation command for the receiver.
it.Skip(Translation::NumberOfOperandsFor(
static_cast<Translation::Opcode>(it.Next())));
// Compute slots for arguments.
for (int i = 0; i < args_slots->length(); ++i) {
(*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
}
return;
// arguments. The number of arguments is equal to the formal
// parameter count.
Vector<SlotRef> args_slots =
Vector<SlotRef>::New(formal_parameter_count);
ComputeSlotsForArguments(&args_slots, &it, data, frame);
return args_slots;
}
frames_to_skip--;
jsframes_to_skip--;
}
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
}
UNREACHABLE();
return Vector<SlotRef>();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
DeoptimizedFrameInfo::DeoptimizedFrameInfo(
Deoptimizer* deoptimizer, int frame_index) {
Deoptimizer* deoptimizer, int frame_index, bool has_arguments_adaptor) {
FrameDescription* output_frame = deoptimizer->output_[frame_index];
SetFunction(output_frame->GetFunction());
expression_count_ = output_frame->GetExpressionCount(deoptimizer);
expression_count_ = output_frame->GetExpressionCount();
expression_stack_ = new Object*[expression_count_];
for (int i = 0; i < expression_count_; i++) {
SetExpression(i, output_frame->GetExpression(i));
}
if (has_arguments_adaptor) {
output_frame = deoptimizer->output_[frame_index - 1];
ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
}
parameters_count_ = output_frame->ComputeParametersCount();
parameters_ = new Object*[parameters_count_];
for (int i = 0; i < parameters_count_; i++) {
SetParameter(i, output_frame->GetParameter(deoptimizer, i));
}
expression_stack_ = new Object*[expression_count_];
for (int i = 0; i < expression_count_; i++) {
SetExpression(i, output_frame->GetExpression(deoptimizer, i));
SetParameter(i, output_frame->GetParameter(i));
}
}
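For reference, a minimal standalone sketch (not part of this patch) of the new translation layout: BEGIN now carries both the total frame count and the JS frame count, JS_FRAME takes three operands, and ARGUMENTS_ADAPTOR_FRAME takes two and is emitted for inlined calls whose arity does not match. The counting loop mirrors how OptimizedFrame::GetInlineCount and SlotRef::ComputeSlotMappingForArguments now look only at JS_FRAME entries; all ids and heights are made-up placeholders, and the per-slot store commands are omitted.

#include <cstdio>
#include <vector>

enum Op { BEGIN, JS_FRAME, ARGUMENTS_ADAPTOR_FRAME };

int CountJSFrames(const std::vector<int>& stream) {
  size_t pos = 1;                       // Skip the BEGIN opcode.
  pos++;                                // Drop the total frame count.
  int jsframe_count = stream[pos++];    // New second operand of BEGIN.
  int seen = 0;
  while (pos < stream.size()) {
    Op op = static_cast<Op>(stream[pos++]);
    if (op == JS_FRAME) { seen++; pos += 3; }           // ast id, literal, height
    else if (op == ARGUMENTS_ADAPTOR_FRAME) pos += 2;   // literal, height
  }
  return (seen == jsframe_count) ? jsframe_count : -1;
}

int main() {
  // g() inlines a call f(1, 2) where f declares three formals: the adaptor
  // frame sits between the two JS frames; its height (3) is receiver + args.
  std::vector<int> stream = { BEGIN, 3, 2,
                              JS_FRAME, 10, 0, 4,
                              ARGUMENTS_ADAPTOR_FRAME, 1, 3,
                              JS_FRAME, 20, 1, 5 };
  std::printf("inlined JS frames: %d\n", CountJSFrames(stream));  // prints 2
  return 0;
}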

66
deps/v8/src/deoptimizer.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -119,6 +119,9 @@ class Deoptimizer : public Malloced {
int output_count() const { return output_count_; }
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
static Deoptimizer* New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
@ -131,7 +134,7 @@ class Deoptimizer : public Malloced {
// The returned object with information on the optimized frame needs to be
// freed before another one can be generated.
static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
int frame_index,
int jsframe_index,
Isolate* isolate);
static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate);
@ -196,7 +199,11 @@ class Deoptimizer : public Malloced {
void MaterializeHeapNumbers();
#ifdef ENABLE_DEBUGGER_SUPPORT
void MaterializeHeapNumbersForDebuggerInspectableFrame(
Address top, uint32_t size, DeoptimizedFrameInfo* info);
Address parameters_top,
uint32_t parameters_size,
Address expressions_top,
uint32_t expressions_size,
DeoptimizedFrameInfo* info);
#endif
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@ -257,8 +264,10 @@ class Deoptimizer : public Malloced {
int count_;
};
int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
private:
static const int kNumberOfEntries = 4096;
static const int kNumberOfEntries = 8192;
Deoptimizer(Isolate* isolate,
JSFunction* function,
@ -271,7 +280,9 @@ class Deoptimizer : public Malloced {
void DoComputeOutputFrames();
void DoComputeOsrOutputFrame();
void DoComputeFrame(TranslationIterator* iterator, int frame_index);
void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@ -319,6 +330,8 @@ class Deoptimizer : public Malloced {
FrameDescription* input_;
// Number of output frames.
int output_count_;
// Number of output JS frames.
int jsframe_count_;
// Array of output frame descriptions.
FrameDescription** output_;
@ -362,7 +375,7 @@ class FrameDescription {
JSFunction* GetFunction() const { return function_; }
unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index);
unsigned GetOffsetFromSlotIndex(int slot_index);
intptr_t GetFrameSlot(unsigned offset) {
return *GetFrameSlotPointer(offset);
@ -423,22 +436,20 @@ class FrameDescription {
void SetContinuation(intptr_t pc) { continuation_ = pc; }
#ifdef DEBUG
Code::Kind GetKind() const { return kind_; }
void SetKind(Code::Kind kind) { kind_ = kind; }
#endif
StackFrame::Type GetFrameType() const { return type_; }
void SetFrameType(StackFrame::Type type) { type_ = type; }
// Get the incoming arguments count.
int ComputeParametersCount();
// Get a parameter value for an unoptimized frame.
Object* GetParameter(Deoptimizer* deoptimizer, int index);
Object* GetParameter(int index);
// Get the expression stack height for an unoptimized frame.
unsigned GetExpressionCount(Deoptimizer* deoptimizer);
unsigned GetExpressionCount();
// Get the expression stack value for an unoptimized frame.
Object* GetExpression(Deoptimizer* deoptimizer, int index);
Object* GetExpression(int index);
static int registers_offset() {
return OFFSET_OF(FrameDescription, registers_);
@ -481,6 +492,7 @@ class FrameDescription {
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
StackFrame::Type type_;
Smi* state_;
#ifdef DEBUG
Code::Kind kind_;
@ -499,6 +511,8 @@ class FrameDescription {
return reinterpret_cast<intptr_t*>(
reinterpret_cast<Address>(this) + frame_content_offset() + offset);
}
int ComputeFixedSize();
};
@ -541,7 +555,8 @@ class Translation BASE_EMBEDDED {
public:
enum Opcode {
BEGIN,
FRAME,
JS_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
REGISTER,
INT32_REGISTER,
DOUBLE_REGISTER,
@ -556,17 +571,19 @@ class Translation BASE_EMBEDDED {
DUPLICATE
};
Translation(TranslationBuffer* buffer, int frame_count)
Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count)
: buffer_(buffer),
index_(buffer->CurrentIndex()) {
buffer_->Add(BEGIN);
buffer_->Add(frame_count);
buffer_->Add(jsframe_count);
}
int index() const { return index_; }
// Commands.
void BeginFrame(int node_id, int literal_id, unsigned height);
void BeginJSFrame(int node_id, int literal_id, unsigned height);
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void StoreRegister(Register reg);
void StoreInt32Register(Register reg);
void StoreDoubleRegister(DoubleRegister reg);
@ -656,9 +673,10 @@ class SlotRef BASE_EMBEDDED {
}
}
static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
static Vector<SlotRef> ComputeSlotMappingForArguments(
JavaScriptFrame* frame,
int inlined_frame_index,
Vector<SlotRef>* args_slots);
int formal_parameter_count);
private:
Address addr_;
@ -678,6 +696,12 @@ class SlotRef BASE_EMBEDDED {
static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
DeoptimizationInputData* data,
JavaScriptFrame* frame);
static void ComputeSlotsForArguments(
Vector<SlotRef>* args_slots,
TranslationIterator* iterator,
DeoptimizationInputData* data,
JavaScriptFrame* frame);
};
@ -686,9 +710,13 @@ class SlotRef BASE_EMBEDDED {
// needs to inspect a frame that is part of an optimized frame. The
// internally used FrameDescription objects are not GC safe so for use
// by the debugger frame information is copied to an object of this type.
// Represents parameters in unadapted form, so their number might not match
// the formal parameter count.
class DeoptimizedFrameInfo : public Malloced {
public:
DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index);
DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
int frame_index,
bool has_arguments_adaptor);
virtual ~DeoptimizedFrameInfo();
// GC support.
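A minimal standalone sketch of the slot-index arithmetic that FrameDescription::GetOffsetFromSlotIndex now performs without a Deoptimizer*: non-negative indexes address locals and spill slots below the fixed part, negative indexes address the incoming parameters. It assumes ia32 (kPointerSize == 4, a four-word fixed frame); the frame size and parameter count are made-up values.

#include <cassert>

const int kPointerSize = 4;                      // ia32 assumption
const int kFixedFrameSize = 4 * kPointerSize;    // pc, fp, context, function

unsigned OffsetFromSlotIndex(unsigned frame_size, int parameter_count,
                             int slot_index) {
  if (slot_index >= 0) {
    // Locals and spill slots: skip the fixed part including all arguments.
    unsigned fixed = kFixedFrameSize + (parameter_count + 1) * kPointerSize;
    return (frame_size - fixed) - (slot_index + 1) * kPointerSize;
  }
  // Incoming parameters: the receiver is slot -(parameter_count + 1).
  unsigned args = (parameter_count + 1) * kPointerSize;
  return (frame_size - args) - (slot_index + 1) * kPointerSize;
}

int main() {
  // A 40-byte frame with two declared parameters: slot 0 is the first local,
  // slots -1 and -2 are the parameters, slot -3 is the receiver.
  assert(OffsetFromSlotIndex(40, 2, 0) == 8);
  assert(OffsetFromSlotIndex(40, 2, -1) == 28);
  assert(OffsetFromSlotIndex(40, 2, -3) == 36);
  return 0;
}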

25
deps/v8/src/frames.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -813,17 +813,18 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
int frame_count = it.Next();
it.Next(); // Drop frame count.
int jsframe_count = it.Next();
// We create the summary in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
int i = frame_count;
int i = jsframe_count;
while (i > 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::FRAME) {
if (opcode == Translation::JS_FRAME) {
// We don't inline constructor calls, so only the first, outermost
// frame can be a constructor frame in case of inlining.
bool is_constructor = (i == frame_count) && IsConstructor();
bool is_constructor = (i == jsframe_count) && IsConstructor();
i--;
int ast_id = it.Next();
@ -918,8 +919,9 @@ int OptimizedFrame::GetInlineCount() {
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
USE(opcode);
int frame_count = it.Next();
return frame_count;
it.Next(); // Drop frame count.
int jsframe_count = it.Next();
return jsframe_count;
}
@ -934,14 +936,15 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
int frame_count = it.Next();
it.Next(); // Drop frame count.
int jsframe_count = it.Next();
// We insert the frames in reverse order because the frames
// in the deoptimization translation are ordered bottom-to-top.
while (frame_count > 0) {
while (jsframe_count > 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
if (opcode == Translation::FRAME) {
frame_count--;
if (opcode == Translation::JS_FRAME) {
jsframe_count--;
it.Next(); // Skip ast id.
int function_id = it.Next();
it.Next(); // Skip height.

2
deps/v8/src/full-codegen.cc

@ -370,6 +370,7 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
StateField::encode(state) | PcField::encode(masm_->pc_offset());
BailoutEntry entry = { id, pc_and_state };
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
// Assert that we don't have multiple bailout entries for the same node.
for (int i = 0; i < bailout_entries_.length(); i++) {
if (bailout_entries_.at(i).id == entry.id) {
@ -378,6 +379,7 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
UNREACHABLE();
}
}
}
#endif // DEBUG
bailout_entries_.Add(entry);
}

1
deps/v8/src/heap-inl.h

@ -505,7 +505,6 @@ Isolate* Heap::isolate() {
#define GC_GREEDY_CHECK() { }
#endif
// Calls the FUNCTION_CALL function and retries it up to three times
// to guarantee that any allocations performed during the call will
// succeed if there's enough memory.

112
deps/v8/src/heap.cc

@ -695,12 +695,18 @@ void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
(static_cast<double>(young_survivors_after_last_gc_) * 100) /
start_new_space_size;
if (survival_rate > kYoungSurvivalRateThreshold) {
if (survival_rate > kYoungSurvivalRateHighThreshold) {
high_survival_rate_period_length_++;
} else {
high_survival_rate_period_length_ = 0;
}
if (survival_rate < kYoungSurvivalRateLowThreshold) {
low_survival_rate_period_length_++;
} else {
low_survival_rate_period_length_ = 0;
}
double survival_rate_diff = survival_rate_ - survival_rate;
if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
@ -760,32 +766,6 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
if (!new_space_high_promotion_mode_active_ &&
new_space_.Capacity() == new_space_.MaximumCapacity() &&
IsStableOrIncreasingSurvivalTrend() &&
IsHighSurvivalRate()) {
// Stable high survival rates even though young generation is at
// maximum capacity indicates that most objects will be promoted.
// To decrease scavenger pauses and final mark-sweep pauses, we
// have to limit maximal capacity of the young generation.
new_space_high_promotion_mode_active_ = true;
if (FLAG_trace_gc) {
PrintF("Limited new space size due to high promotion rate: %d MB\n",
new_space_.InitialCapacity() / MB);
}
} else if (new_space_high_promotion_mode_active_ &&
IsDecreasingSurvivalTrend() &&
!IsHighSurvivalRate()) {
// Decreasing low survival rates might indicate that the above high
// promotion mode is over and we should allow the young generation
// to grow again.
new_space_high_promotion_mode_active_ = false;
if (FLAG_trace_gc) {
PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
new_space_.MaximumCapacity() / MB);
}
}
size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
if (high_survival_rate_during_scavenges &&
@ -815,6 +795,32 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
UpdateSurvivalRateTrend(start_new_space_size);
}
if (!new_space_high_promotion_mode_active_ &&
new_space_.Capacity() == new_space_.MaximumCapacity() &&
IsStableOrIncreasingSurvivalTrend() &&
IsHighSurvivalRate()) {
// Stable high survival rates even though the young generation is at
// maximum capacity indicate that most objects will be promoted.
// To decrease scavenger pauses and final mark-sweep pauses, we
// have to limit maximal capacity of the young generation.
new_space_high_promotion_mode_active_ = true;
if (FLAG_trace_gc) {
PrintF("Limited new space size due to high promotion rate: %d MB\n",
new_space_.InitialCapacity() / MB);
}
} else if (new_space_high_promotion_mode_active_ &&
IsStableOrDecreasingSurvivalTrend() &&
IsLowSurvivalRate()) {
// Decreasing low survival rates might indicate that the above high
// promotion mode is over and we should allow the young generation
// to grow again.
new_space_high_promotion_mode_active_ = false;
if (FLAG_trace_gc) {
PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
new_space_.MaximumCapacity() / MB);
}
}
if (new_space_high_promotion_mode_active_ &&
new_space_.Capacity() > new_space_.InitialCapacity()) {
new_space_.Shrink();
@ -1099,7 +1105,7 @@ void Heap::Scavenge() {
isolate_->descriptor_lookup_cache()->Clear();
// Used for updating survived_since_last_expansion_ at function end.
intptr_t survived_watermark = PromotedSpaceSize();
intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
CheckNewSpaceExpansionCriteria();
@ -1191,7 +1197,7 @@ void Heap::Scavenge() {
// Update how much has survived scavenge.
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
(PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
LOG(isolate_, ResourceEvent("scavenge", "end"));
@ -3302,7 +3308,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
}
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_next_code_flushing_candidate(undefined_value());
code->set_gc_metadata(Smi::FromInt(0));
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@ -5422,6 +5428,16 @@ intptr_t Heap::PromotedSpaceSize() {
}
intptr_t Heap::PromotedSpaceSizeOfObjects() {
return old_pointer_space_->SizeOfObjects()
+ old_data_space_->SizeOfObjects()
+ code_space_->SizeOfObjects()
+ map_space_->SizeOfObjects()
+ cell_space_->SizeOfObjects()
+ lo_space_->SizeOfObjects();
}
int Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
@ -6523,15 +6539,11 @@ int KeyedLookupCache::Hash(Map* map, String* name) {
int KeyedLookupCache::Lookup(Map* map, String* name) {
int index = (Hash(map, name) & kHashMask);
Key& key = keys_[index];
for (int i = 0; i < kEntriesPerBucket; i++) {
Key& key = keys_[index + i];
if ((key.map == map) && key.name->Equals(name)) {
return field_offsets_[index];
return field_offsets_[index + i];
}
ASSERT(kEntriesPerBucket == 2); // There are two entries to check.
// First entry in the bucket missed, check the second.
Key& key2 = keys_[index + 1];
if ((key2.map == map) && key2.name->Equals(name)) {
return field_offsets_[index + 1];
}
return kNotFound;
}
@ -6541,13 +6553,29 @@ void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
String* symbol;
if (HEAP->LookupSymbolIfExists(name, &symbol)) {
int index = (Hash(map, symbol) & kHashMask);
// After a GC there will be free slots, so we use them in order (this may
// help to get the most frequently used one in position 0).
for (int i = 0; i < kEntriesPerBucket; i++) {
Key& key = keys_[index];
Key& key2 = keys_[index + 1]; // Second entry in the bucket.
// Demote the first entry to the second in the bucket.
key2.map = key.map;
key2.name = key.name;
field_offsets_[index + 1] = field_offsets_[index];
Object* free_entry_indicator = NULL;
if (key.map == free_entry_indicator) {
key.map = map;
key.name = symbol;
field_offsets_[index + i] = field_offset;
return;
}
}
// No free entry found in this bucket, so we move them all down one and
// put the new entry at position zero.
for (int i = kEntriesPerBucket - 1; i > 0; i--) {
Key& key = keys_[index + i];
Key& key2 = keys_[index + i - 1];
key = key2;
field_offsets_[index + i] = field_offsets_[index + i - 1];
}
// Write the new first entry.
Key& key = keys_[index];
key.map = map;
key.name = symbol;
field_offsets_[index] = field_offset;
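A standalone sketch of the new bucket policy, using the constants introduced in heap.h below (kLength == 256, kEntriesPerBucket == 4, kHashMask == -4): Update first looks for a free slot in the 4-entry bucket, and only when the bucket is full shifts every entry down one place and writes the new pair at position zero. Map and Name here are stand-ins for the real heap types.

#include <cstddef>

typedef const void* Map;
typedef const char* Name;

const int kLength = 256;
const int kEntriesPerBucket = 4;
const int kHashMask = -kEntriesPerBucket;   // Zero the last two bits.

struct Key { Map map; Name name; };
static Key keys[kLength];
static int field_offsets[kLength];

void Update(int hash, Map map, Name name, int field_offset) {
  int index = hash & kHashMask;             // Bucket spans index .. index + 3.
  // Prefer a free slot so frequently used entries tend to stay near slot 0.
  for (int i = 0; i < kEntriesPerBucket; i++) {
    if (keys[index + i].map == NULL) {
      keys[index + i].map = map;
      keys[index + i].name = name;
      field_offsets[index + i] = field_offset;
      return;
    }
  }
  // Bucket full: demote every entry by one and write the new entry first.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    keys[index + i] = keys[index + i - 1];
    field_offsets[index + i] = field_offsets[index + i - 1];
  }
  keys[index].map = map;
  keys[index].name = name;
  field_offsets[index] = field_offset;
}

int main() {
  Update(137, "m1", "x", 8);    // lands in the bucket starting at 136
  Update(137, "m2", "x", 12);   // takes the next free slot, 137
  return 0;
}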

42
deps/v8/src/heap.h

@ -156,6 +156,7 @@ inline Heap* _inline_get_heap_();
V(Script, empty_script, EmptyScript) \
V(Smi, real_stack_limit, RealStackLimit) \
V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@ -1067,7 +1068,7 @@ class Heap {
// Heap root getters. We have versions with and without type::cast() here.
// You can't use type::cast during GC because the assert fails.
// TODO(1490): Try removing the unchecked accessors, now that GC marking does
// not corrupt the stack.
// not corrupt the map.
#define ROOT_ACCESSOR(type, name, camel_name) \
type* name() { \
return type::cast(roots_[k##camel_name##RootIndex]); \
@ -1381,6 +1382,7 @@ class Heap {
void CheckNewSpaceExpansionCriteria();
inline void IncrementYoungSurvivorsCounter(int survived) {
ASSERT(survived >= 0);
young_survivors_after_last_gc_ = survived;
survived_since_last_expansion_ += survived;
}
@ -1430,6 +1432,7 @@ class Heap {
// Returns the size of objects residing in non new spaces.
intptr_t PromotedSpaceSize();
intptr_t PromotedSpaceSizeOfObjects();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
void IncreaseTotalRegexpCodeGenerated(int size) {
@ -1517,6 +1520,11 @@ class Heap {
return seed;
}
void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}
private:
Heap();
@ -1799,11 +1807,13 @@ class Heap {
enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
static const int kYoungSurvivalRateThreshold = 90;
static const int kYoungSurvivalRateHighThreshold = 90;
static const int kYoungSurvivalRateLowThreshold = 10;
static const int kYoungSurvivalRateAllowedDeviation = 15;
int young_survivors_after_last_gc_;
int high_survival_rate_period_length_;
int low_survival_rate_period_length_;
double survival_rate_;
SurvivalRateTrend previous_survival_rate_trend_;
SurvivalRateTrend survival_rate_trend_;
@ -1836,18 +1846,28 @@ class Heap {
}
}
bool IsIncreasingSurvivalTrend() {
return survival_rate_trend() == INCREASING;
bool IsStableOrDecreasingSurvivalTrend() {
switch (survival_rate_trend()) {
case STABLE:
case DECREASING:
return true;
default:
return false;
}
}
bool IsDecreasingSurvivalTrend() {
return survival_rate_trend() == DECREASING;
bool IsIncreasingSurvivalTrend() {
return survival_rate_trend() == INCREASING;
}
bool IsHighSurvivalRate() {
return high_survival_rate_period_length_ > 0;
}
bool IsLowSurvivalRate() {
return low_survival_rate_period_length_ > 0;
}
void SelectScavengingVisitorsTable();
void StartIdleRound() {
@ -2135,13 +2155,17 @@ class KeyedLookupCache {
// Clear the cache.
void Clear();
static const int kLength = 128;
static const int kLength = 256;
static const int kCapacityMask = kLength - 1;
static const int kMapHashShift = 5;
static const int kHashMask = -2; // Zero the last bit.
static const int kEntriesPerBucket = 2;
static const int kHashMask = -4; // Zero the last two bits.
static const int kEntriesPerBucket = 4;
static const int kNotFound = -1;
// kEntriesPerBucket should be a power of 2.
STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
private:
KeyedLookupCache() {
for (int i = 0; i < kLength; ++i) {

17
deps/v8/src/hydrogen-instructions.cc

@ -1339,6 +1339,23 @@ Range* HShl::InferRange() {
}
Range* HLoadKeyedSpecializedArrayElement::InferRange() {
switch (elements_kind()) {
case EXTERNAL_PIXEL_ELEMENTS:
return new Range(0, 255);
case EXTERNAL_BYTE_ELEMENTS:
return new Range(-128, 127);
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
return new Range(0, 255);
case EXTERNAL_SHORT_ELEMENTS:
return new Range(-32768, 32767);
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
return new Range(0, 65535);
default:
return HValue::InferRange();
}
}
void HCompareGeneric::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));

7
deps/v8/src/hydrogen-instructions.h

@ -764,6 +764,7 @@ class HValue: public ZoneObject {
int flags_;
GVNFlagSet gvn_flags_;
private:
DISALLOW_COPY_AND_ASSIGN(HValue);
};
@ -1340,9 +1341,11 @@ class HStackCheck: public HTemplateInstruction<1> {
class HEnterInlined: public HTemplateInstruction<0> {
public:
HEnterInlined(Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
CallKind call_kind)
: closure_(closure),
arguments_count_(arguments_count),
function_(function),
call_kind_(call_kind) {
}
@ -1350,6 +1353,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> closure() const { return closure_; }
int arguments_count() const { return arguments_count_; }
FunctionLiteral* function() const { return function_; }
CallKind call_kind() const { return call_kind_; }
@ -1361,6 +1365,7 @@ class HEnterInlined: public HTemplateInstruction<0> {
private:
Handle<JSFunction> closure_;
int arguments_count_;
FunctionLiteral* function_;
CallKind call_kind_;
};
@ -3844,6 +3849,8 @@ class HLoadKeyedSpecializedArrayElement: public HTemplateInstruction<2> {
HValue* key() { return OperandAt(1); }
ElementsKind elements_kind() const { return elements_kind_; }
virtual Range* InferRange();
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
protected:

74
deps/v8/src/hydrogen.cc

@ -167,8 +167,7 @@ void HBasicBlock::Finish(HControlInstruction* end) {
void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
if (block->IsInlineReturnTarget()) {
AddInstruction(new(zone()) HLeaveInlined);
last_environment_ = last_environment()->outer();
if (drop_extra) last_environment_->Drop(1);
last_environment_ = last_environment()->DiscardInlined(drop_extra);
}
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(block);
@ -182,8 +181,7 @@ void HBasicBlock::AddLeaveInlined(HValue* return_value,
ASSERT(target->IsInlineReturnTarget());
ASSERT(return_value != NULL);
AddInstruction(new(zone()) HLeaveInlined);
last_environment_ = last_environment()->outer();
if (drop_extra) last_environment_->Drop(1);
last_environment_ = last_environment()->DiscardInlined(drop_extra);
last_environment()->Push(return_value);
AddSimulate(AstNode::kNoNumber);
HGoto* instr = new(zone()) HGoto(target);
@ -2076,6 +2074,7 @@ AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
for_typeof_(false) {
owner->set_ast_context(this); // Push.
#ifdef DEBUG
ASSERT(!owner->environment()->is_arguments_adaptor());
original_length_ = owner->environment()->length();
#endif
}
@ -2089,14 +2088,16 @@ AstContext::~AstContext() {
EffectContext::~EffectContext() {
ASSERT(owner()->HasStackOverflow() ||
owner()->current_block() == NULL ||
owner()->environment()->length() == original_length_);
(owner()->environment()->length() == original_length_ &&
!owner()->environment()->is_arguments_adaptor()));
}
ValueContext::~ValueContext() {
ASSERT(owner()->HasStackOverflow() ||
owner()->current_block() == NULL ||
owner()->environment()->length() == original_length_ + 1);
(owner()->environment()->length() == original_length_ + 1 &&
!owner()->environment()->is_arguments_adaptor()));
}
@ -4828,7 +4829,9 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
TraceInline(target, caller, "inline depth limit reached");
return false;
}
if (!env->outer()->is_arguments_adaptor()) {
current_level++;
}
env = env->outer();
}
@ -4876,11 +4879,8 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
return false;
}
// Don't inline functions that uses the arguments object or that
// have a mismatching number of parameters.
int arity = expr->arguments()->length();
if (function->scope()->arguments() != NULL ||
arity != target_shared->formal_parameter_count()) {
// Don't inline functions that use the arguments object.
if (function->scope()->arguments() != NULL) {
TraceInline(target, caller, "target requires special argument handling");
return false;
}
@ -4944,6 +4944,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner_env =
environment()->CopyForInlining(target,
expr->arguments()->length(),
function,
undefined,
call_kind);
@ -4963,6 +4964,7 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
body_entry->SetJoinId(expr->ReturnId());
set_current_block(body_entry);
AddInstruction(new(zone()) HEnterInlined(target,
expr->arguments()->length(),
function,
call_kind));
VisitDeclarations(target_info.scope()->declarations());
@ -6902,7 +6904,8 @@ HEnvironment::HEnvironment(HEnvironment* outer,
outer_(outer),
pop_count_(0),
push_count_(0),
ast_id_(AstNode::kNoNumber) {
ast_id_(AstNode::kNoNumber),
arguments_adaptor_(false) {
Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
}
@ -6916,11 +6919,28 @@ HEnvironment::HEnvironment(const HEnvironment* other)
outer_(NULL),
pop_count_(0),
push_count_(0),
ast_id_(other->ast_id()) {
ast_id_(other->ast_id()),
arguments_adaptor_(false) {
Initialize(other);
}
HEnvironment::HEnvironment(HEnvironment* outer,
Handle<JSFunction> closure,
int arguments)
: closure_(closure),
values_(arguments),
assigned_variables_(0),
parameter_count_(arguments),
local_count_(0),
outer_(outer),
pop_count_(0),
push_count_(0),
ast_id_(AstNode::kNoNumber),
arguments_adaptor_(true) {
}
void HEnvironment::Initialize(int parameter_count,
int local_count,
int stack_height) {
@ -6944,6 +6964,7 @@ void HEnvironment::Initialize(const HEnvironment* other) {
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
ast_id_ = other->ast_id_;
arguments_adaptor_ = other->arguments_adaptor_;
}
@ -7047,20 +7068,36 @@ HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
HEnvironment* HEnvironment::CopyForInlining(
Handle<JSFunction> target,
int arguments,
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind) const {
ASSERT(!is_arguments_adaptor());
Zone* zone = closure()->GetIsolate()->zone();
// Outer environment is a copy of this one without the arguments.
int arity = function->scope()->num_parameters();
HEnvironment* outer = Copy();
outer->Drop(arity + 1); // Including receiver.
outer->Drop(arguments + 1); // Including receiver.
outer->ClearHistory();
Zone* zone = closure()->GetIsolate()->zone();
if (arity != arguments) {
// Create artificial arguments adaptation environment.
outer = new(zone) HEnvironment(outer, target, arguments + 1);
for (int i = 0; i <= arguments; ++i) { // Include receiver.
outer->Push(ExpressionStackAt(arguments - i));
}
outer->ClearHistory();
}
HEnvironment* inner =
new(zone) HEnvironment(outer, function->scope(), target);
// Get the argument values from the original environment.
for (int i = 0; i <= arity; ++i) { // Include receiver.
HValue* push = ExpressionStackAt(arity - i);
HValue* push = (i <= arguments) ?
ExpressionStackAt(arguments - i) : undefined;
inner->SetValueAt(i, push);
}
// If the function we are inlining is a strict mode function or a
@ -7070,7 +7107,7 @@ HEnvironment* HEnvironment::CopyForInlining(
call_kind == CALL_AS_FUNCTION) {
inner->SetValueAt(0, undefined);
}
inner->SetValueAt(arity + 1, outer->LookupContext());
inner->SetValueAt(arity + 1, LookupContext());
for (int i = arity + 2; i < inner->length(); ++i) {
inner->SetValueAt(i, undefined);
}
@ -7086,7 +7123,7 @@ void HEnvironment::PrintTo(StringStream* stream) {
if (i == parameter_count()) stream->Add("specials\n");
if (i == parameter_count() + specials_count()) stream->Add("locals\n");
if (i == parameter_count() + specials_count() + local_count()) {
stream->Add("expressions");
stream->Add("expressions\n");
}
HValue* val = values_.at(i);
stream->Add("%d: ", i);
@ -7097,6 +7134,7 @@ void HEnvironment::PrintTo(StringStream* stream) {
}
stream->Add("\n");
}
PrintF("\n");
}
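A standalone sketch of what HEnvironment::CopyForInlining now produces when the call site's argument count differs from the callee's formal parameter count: with two actual arguments and three formals, an artificial adaptor environment keeps the receiver plus the actual arguments, and the inner environment pads the missing formal with undefined (the context-slot handling is omitted). The vectors stand in for the HEnvironment value lists.

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Caller's expression stack at the call site, topmost value first.
  std::vector<std::string> stack = { "a2", "a1", "receiver" };
  int arguments = 2;   // actual arguments pushed for the call
  int arity = 3;       // formal parameter count of the inlined target

  // Artificial arguments adaptation environment: receiver plus actuals.
  std::vector<std::string> adaptor;
  for (int i = 0; i <= arguments; ++i) adaptor.push_back(stack[arguments - i]);

  // Inner environment: arity + 1 parameter slots, padded with undefined.
  std::vector<std::string> inner;
  for (int i = 0; i <= arity; ++i)
    inner.push_back(i <= arguments ? stack[arguments - i] : "undefined");

  for (size_t i = 0; i < inner.size(); ++i)
    std::printf("inner[%zu] = %s\n", i, inner[i].c_str());
  // Prints: receiver, a1, a2, undefined
  return 0;
}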

17
deps/v8/src/hydrogen.h

@ -343,6 +343,17 @@ class HEnvironment: public ZoneObject {
Scope* scope,
Handle<JSFunction> closure);
bool is_arguments_adaptor() const {
return arguments_adaptor_;
}
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_->is_arguments_adaptor() ?
outer_->outer_ : outer_;
if (drop_extra) outer->Drop(1);
return outer;
}
// Simple accessors.
Handle<JSFunction> closure() const { return closure_; }
const ZoneList<HValue*>* values() const { return &values_; }
@ -427,6 +438,7 @@ class HEnvironment: public ZoneObject {
// environment is the outer environment but the top expression stack
// elements are moved to an inner environment as parameters.
HEnvironment* CopyForInlining(Handle<JSFunction> target,
int arguments,
FunctionLiteral* function,
HConstant* undefined,
CallKind call_kind) const;
@ -450,6 +462,10 @@ class HEnvironment: public ZoneObject {
private:
explicit HEnvironment(const HEnvironment* other);
// Create an argument adaptor environment.
HEnvironment(HEnvironment* outer, Handle<JSFunction> closure, int arguments);
// True if index is included in the expression stack part of the environment.
bool HasExpressionAt(int index) const;
@ -478,6 +494,7 @@ class HEnvironment: public ZoneObject {
int pop_count_;
int push_count_;
int ast_id_;
bool arguments_adaptor_;
};
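A tiny standalone sketch of the DiscardInlined helper above: when leaving an inlined environment that was entered through an artificial arguments-adaptor environment, both are popped in one step, and drop_extra removes one extra value from the resulting outer environment. Env is a stand-in for HEnvironment.

struct Env {
  Env* outer;
  bool is_arguments_adaptor;
  int length;   // stand-in for the value count; Drop(1) just decrements it

  Env* DiscardInlined(bool drop_extra) {
    // Skip the adaptor environment, if the inlined call needed one.
    Env* result = outer->is_arguments_adaptor ? outer->outer : outer;
    if (drop_extra) result->length--;        // models outer->Drop(1)
    return result;
  }
};

int main() {
  Env caller  = { 0, false, 10 };
  Env adaptor = { &caller, true, 3 };
  Env inlined = { &adaptor, false, 5 };
  Env* back = inlined.DiscardInlined(true);
  return (back == &caller && caller.length == 9) ? 0 : 1;
}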

4
deps/v8/src/ia32/assembler-ia32-inl.h

@ -30,13 +30,15 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// A light-weight IA32 Assembler.
#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
#define V8_IA32_ASSEMBLER_IA32_INL_H_
#include "ia32/assembler-ia32.h"
#include "cpu.h"
#include "debug.h"

3
deps/v8/src/ia32/builtins-ia32.cc

@ -537,7 +537,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
// Pass deoptimization type to the runtime system.
__ push(Immediate(Smi::FromInt(static_cast<int>(type))));
__ CallRuntime(Runtime::kNotifyDeoptimized, 1);
@ -1644,6 +1644,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ call(edx);
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ ret(0);

124
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -299,12 +299,13 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
iterator.Next(); // Drop JS frames count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@ -340,9 +341,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
#ifdef DEBUG
output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
#endif
output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@ -437,13 +436,112 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index) {
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
// Arguments adaptor can not be topmost or bottommost.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
// frame's top and this frame's size.
uint32_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
input_offset -= (parameter_count * kPointerSize);
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
// A marker value is used in place of the context.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
top_address + output_offset, output_offset, context);
}
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
// Number of incoming arguments.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
ASSERT(0 == output_offset);
Builtins* builtins = isolate_->builtins();
Code* adaptor_trampoline =
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
uint32_t pc = reinterpret_cast<uint32_t>(
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc);
}
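// For reference, a sketch of the frame this function materializes, assuming
// ia32 (kPointerSize == 4) and a hypothetical height of 3 (receiver + 2 args);
// offsets are relative to the frame's top address:
//
//   output_frame_size = height * kPointerSize
//                     + ArgumentsAdaptorFrameConstants::kFrameSize
//                     = 3 * 4 + 20 = 32 bytes
//
//   [top + 28]  receiver                 \
//   [top + 24]  argument a1               > translated parameter slots
//   [top + 20]  argument a2              /
//   [top + 16]  caller's pc
//   [top + 12]  caller's fp              <- this frame's fp
//   [top +  8]  Smi(ARGUMENTS_ADAPTOR)   ;  sentinel in the context slot
//   [top +  4]  function
//   [top +  0]  Smi(argc) == Smi(height - 1) == Smi(2)
//
// The frame's pc is the ArgumentsAdaptorTrampoline at the offset recorded by
// SetArgumentsAdaptorDeoptPCOffset during code generation.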
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@ -463,9 +561,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
#ifdef DEBUG
output_frame->SetKind(Code::FUNCTION);
#endif
output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);

8
deps/v8/src/ia32/frames-ia32.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -95,9 +95,11 @@ class ExitFrameConstants : public AllStatic {
class StandardFrameConstants : public AllStatic {
public:
// Fixed part of the frame consists of return address, caller fp,
// context and function.
// StandardFrame::IterateExpressions assumes that kContextOffset is the last
// object pointer.
static const int kFixedFrameSize = 4; // Currently unused.
static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@ -123,6 +125,8 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
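With the fixed frame size now expressed in bytes, the ia32 numbers above work out as follows (a worked restatement, assuming kPointerSize == 4):

// kFixedFrameSize                               = 4 * 4  = 16
//   (return address, caller fp, context, function)
// kExpressionsOffset                            = -3 * 4 = -12  (from fp)
// kMarkerOffset                                 = -2 * 4 = -8
// kContextOffset                                = -1 * 4 = -4
// ArgumentsAdaptorFrameConstants::kLengthOffset = kExpressionsOffset = -12
// ArgumentsAdaptorFrameConstants::kFrameSize    = 16 + 4 = 20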

37
deps/v8/src/ia32/ic-ia32.cc

@ -538,20 +538,30 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load the key (consisting of map and symbol) from the cache and
// check for match.
Label try_second_entry, hit_on_first_entry, load_in_object_property;
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys =
ExternalReference::keyed_lookup_cache_keys(masm->isolate());
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
__ mov(edi, ecx);
__ shl(edi, kPointerSizeLog2 + 1);
if (i != 0) {
__ add(edi, Immediate(kPointerSize * i * 2));
}
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &try_second_entry);
__ j(not_equal, &try_next_entry);
__ add(edi, Immediate(kPointerSize));
__ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(equal, &hit_on_first_entry);
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
__ bind(&try_second_entry);
__ lea(edi, Operand(ecx, 1));
__ shl(edi, kPointerSizeLog2 + 1);
__ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ add(edi, Immediate(kPointerSize));
@ -566,22 +576,21 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
// Hit on second entry.
__ add(ecx, Immediate(1));
// Hit on nth entry.
for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
__ bind(&hit_on_nth_entry[i]);
if (i != 0) {
__ add(ecx, Immediate(i));
}
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
__ sub(edi, ecx);
__ j(above_equal, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
// Hit on first entry.
__ bind(&hit_on_first_entry);
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
__ sub(edi, ecx);
__ j(above_equal, &property_array_property);
}
}
// Load in-object property.
__ bind(&load_in_object_property);
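The probe above is an unrolled walk over a bucket of (map, symbol) pairs stored flat in the cache_keys array. A standalone sketch of the addressing it computes, assuming 4-byte pointers; index is the hashed bucket start (held in ecx) and i the entry within the bucket.

#include <cassert>

const int kPointerSize = 4;
const int kPointerSizeLog2 = 2;

// Byte offset of the map word for entry i of the bucket starting at index;
// the matching symbol word follows one pointer later. Mirrors
//   mov edi, ecx; shl edi, kPointerSizeLog2 + 1; add edi, kPointerSize * i * 2
int MapWordOffset(int index, int i) {
  return (index << (kPointerSizeLog2 + 1)) + kPointerSize * i * 2;
}

int main() {
  // The bucket starting at index 8 occupies pairs at byte offsets 64, 72, 80
  // and 88; each symbol word sits kPointerSize bytes after its map word.
  assert(MapWordOffset(8, 0) == 64);
  assert(MapWordOffset(8, 3) == 88);
  return 0;
}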

12
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -394,7 +394,11 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
translation->BeginFrame(environment->ast_id(), closure_id, height);
if (environment->is_arguments_adaptor()) {
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
} else {
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@ -543,10 +547,14 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
// |>------------ translation_size ------------<|
int frame_count = 0;
int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (!e->is_arguments_adaptor()) {
++jsframe_count;
}
}
Translation translation(&translations_, frame_count);
Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();

24
deps/v8/src/ia32/lithium-ia32.cc

@ -1013,15 +1013,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
int value_count = hydrogen_env->length();
LEnvironment* result =
new(zone()) LEnvironment(hydrogen_env->closure(),
hydrogen_env->is_arguments_adaptor(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@ -1030,13 +1032,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
op = new(zone()) LArgument((*argument_index_accumulator)++);
op = new(zone()) LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
if (!hydrogen_env->is_arguments_adaptor()) {
*argument_index_accumulator = argument_index;
}
return result;
}
@ -2002,12 +2008,11 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Representation representation(instr->representation());
ASSERT(
(representation.IsInteger32() &&
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(representation.IsDouble() &&
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
@ -2070,13 +2075,12 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(representation.IsInteger32() &&
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(representation.IsDouble() &&
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
@ -2380,6 +2384,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
@ -2390,7 +2395,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
HEnvironment* outer = current_block_->last_environment()->outer();
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}

8
deps/v8/src/jsregexp.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -35,9 +35,11 @@
namespace v8 {
namespace internal {
class NodeVisitor;
class RegExpCompiler;
class RegExpMacroAssembler;
class RegExpNode;
class RegExpTree;
class RegExpImpl {
public:

12
deps/v8/src/lithium-allocator.cc

@ -161,9 +161,8 @@ LiveRange::LiveRange(int id)
next_(NULL),
current_interval_(NULL),
last_processed_use_(NULL),
spill_start_index_(kMaxInt) {
spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
}
spill_operand_(new LOperand()),
spill_start_index_(kMaxInt) { }
void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
@ -184,14 +183,15 @@ void LiveRange::MakeSpilled() {
bool LiveRange::HasAllocatedSpillOperand() const {
return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
ASSERT(spill_operand_ != NULL);
return !spill_operand_->IsIgnored();
}
void LiveRange::SetSpillOperand(LOperand* operand) {
ASSERT(!operand->IsUnallocated());
ASSERT(spill_operand_ != NULL);
ASSERT(spill_operand_->IsUnallocated());
ASSERT(spill_operand_->IsIgnored());
spill_operand_->ConvertTo(operand->kind(), operand->index());
}
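
With the IGNORE policy gone, a live range's spill operand now starts out as a plain operand of INVALID kind and is converted in place once a slot is assigned. A small self-contained sketch of that protocol (simplified types, not the real LOperand/LiveRange):

#include <cassert>

struct Operand {
  enum Kind { INVALID, UNALLOCATED, STACK_SLOT };
  Kind kind = INVALID;
  int index = 0;
  bool IsIgnored() const { return kind == INVALID; }        // mirrors LOperand::IsIgnored
  void ConvertTo(Kind k, int i) { kind = k; index = i; }
};

int main() {
  Operand spill;                          // plays the role of spill_operand_
  assert(spill.IsIgnored());              // no spill slot assigned yet
  spill.ConvertTo(Operand::STACK_SLOT, 3);
  assert(!spill.IsIgnored());             // HasAllocatedSpillOperand() would now hold
  return 0;
}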
@ -1643,7 +1643,7 @@ void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
int LAllocator::max_initial_value_ids() {
return LUnallocated::kMaxVirtualRegisters / 32;
return LUnallocated::kMaxVirtualRegisters / 16;
}

4
deps/v8/src/lithium.cc

@ -36,6 +36,7 @@ void LOperand::PrintTo(StringStream* stream) {
LUnallocated* unalloc = NULL;
switch (kind()) {
case INVALID:
stream->Add("(0)");
break;
case UNALLOCATED:
unalloc = LUnallocated::cast(this);
@ -70,9 +71,6 @@ void LOperand::PrintTo(StringStream* stream) {
case LUnallocated::ANY:
stream->Add("(-)");
break;
case LUnallocated::IGNORE:
stream->Add("(0)");
break;
}
break;
case CONSTANT_OPERAND:

21
deps/v8/src/lithium.h

@ -59,6 +59,7 @@ class LOperand: public ZoneObject {
bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
bool IsArgument() const { return kind() == ARGUMENT; }
bool IsUnallocated() const { return kind() == UNALLOCATED; }
bool IsIgnored() const { return kind() == INVALID; }
bool Equals(LOperand* other) const { return value_ == other->value_; }
int VirtualRegister();
@ -89,8 +90,7 @@ class LUnallocated: public LOperand {
FIXED_SLOT,
MUST_HAVE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT,
IGNORE
SAME_AS_FIRST_INPUT
};
// Lifetime of operand inside the instruction.
@ -121,9 +121,9 @@ class LUnallocated: public LOperand {
// The superclass has a KindField. Some policies have a signed fixed
// index in the upper bits.
static const int kPolicyWidth = 4;
static const int kPolicyWidth = 3;
static const int kLifetimeWidth = 1;
static const int kVirtualRegisterWidth = 17;
static const int kVirtualRegisterWidth = 18;
static const int kPolicyShift = kKindFieldWidth;
static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
@ -143,12 +143,10 @@ class LUnallocated: public LOperand {
kVirtualRegisterWidth> {
};
static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
static const int kMaxFixedIndex = 63;
static const int kMinFixedIndex = -64;
bool HasIgnorePolicy() const { return policy() == IGNORE; }
bool HasNoPolicy() const { return policy() == NONE; }
bool HasAnyPolicy() const {
return policy() == ANY;
}
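
A quick check of the repacked bit-fields above: removing IGNORE appears to let the policy field shrink from 4 to 3 bits, the freed bit goes to the virtual-register field (17 to 18 bits), and the register limit itself is unchanged, since 1 << (17 + 1) and 1 << 18 are both 262144. A tiny standalone check:

#include <cstdio>

int main() {
  const int kOldMax = 1 << (17 + 1);  // old: 1 << (kVirtualRegisterWidth + 1)
  const int kNewMax = 1 << 18;        // new: 1 << kVirtualRegisterWidth
  std::printf("%d %d\n", kOldMax, kNewMax);  // both print 262144
  return 0;
}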
@ -234,9 +232,7 @@ class LMoveOperands BASE_EMBEDDED {
}
bool IsIgnored() const {
return destination_ != NULL &&
destination_->IsUnallocated() &&
LUnallocated::cast(destination_)->HasIgnorePolicy();
return destination_ != NULL && destination_->IsIgnored();
}
// We clear both operands to indicate move that's been eliminated.
@ -443,12 +439,14 @@ class LPointerMap: public ZoneObject {
class LEnvironment: public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
bool is_arguments_adaptor,
int ast_id,
int parameter_count,
int argument_count,
int value_count,
LEnvironment* outer)
: closure_(closure),
is_arguments_adaptor_(is_arguments_adaptor),
arguments_stack_height_(argument_count),
deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
translation_index_(-1),
@ -505,8 +503,11 @@ class LEnvironment: public ZoneObject {
void PrintTo(StringStream* stream);
bool is_arguments_adaptor() const { return is_arguments_adaptor_; }
private:
Handle<JSFunction> closure_;
bool is_arguments_adaptor_;
int arguments_stack_height_;
int deoptimization_index_;
int translation_index_;

13
deps/v8/src/mark-compact-inl.h

@ -66,6 +66,19 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
}
bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
MarkBit mark = Marking::MarkBitFrom(object);
bool old_mark = mark.Get();
if (!old_mark) SetMark(object, mark);
return old_mark;
}
void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
}
void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
ASSERT(!mark_bit.Get());
ASSERT(Marking::MarkBitFrom(obj) == mark_bit);

111
deps/v8/src/mark-compact.cc

@ -710,16 +710,17 @@ class CodeFlusher {
SharedFunctionInfo* candidate) {
Code* code = candidate->code();
return reinterpret_cast<SharedFunctionInfo**>(
code->address() + Code::kNextCodeFlushingCandidateOffset);
code->address() + Code::kGCMetadataOffset);
}
static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
return *GetNextCandidateField(candidate);
return reinterpret_cast<SharedFunctionInfo*>(
candidate->code()->gc_metadata());
}
static void SetNextCandidate(SharedFunctionInfo* candidate,
SharedFunctionInfo* next_candidate) {
*GetNextCandidateField(candidate) = next_candidate;
candidate->code()->set_gc_metadata(next_candidate);
}
Isolate* isolate_;
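
The CodeFlusher hunk above replaces the dedicated next-code-flushing-candidate field with the code object's general gc_metadata slot; the candidate list is still an intrusive singly-linked list, just threaded through that slot. A standalone sketch with stand-in types (not the real Code/SharedFunctionInfo):

#include <cstdio>

struct Code { void* gc_metadata; };
struct SharedFunctionInfo { const char* name; Code code; };

static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* c) {
  return static_cast<SharedFunctionInfo*>(c->code.gc_metadata);
}
static void SetNextCandidate(SharedFunctionInfo* c, SharedFunctionInfo* next) {
  c->code.gc_metadata = next;
}

int main() {
  SharedFunctionInfo a = {"a", {nullptr}};
  SharedFunctionInfo b = {"b", {nullptr}};
  SetNextCandidate(&a, &b);       // a -> b
  SetNextCandidate(&b, nullptr);  // end of list
  for (SharedFunctionInfo* c = &a; c != nullptr; c = GetNextCandidate(c)) {
    std::printf("%s\n", c->name);  // prints a, then b
  }
  return 0;
}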
@ -1672,6 +1673,16 @@ void MarkCompactCollector::MarkMapContents(Map* map) {
}
void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
int offset) {
Object** slot = HeapObject::RawField(accessors, offset);
HeapObject* accessor = HeapObject::cast(*slot);
if (accessor->IsMap()) return;
RecordSlot(slot, slot, accessor);
MarkObjectAndPush(accessor);
}
void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
@ -1699,27 +1710,37 @@ void MarkCompactCollector::MarkDescriptorArray(
PropertyDetails details(Smi::cast(contents->get(i + 1)));
Object** slot = contents->data_start() + i;
Object* value = *slot;
if (!value->IsHeapObject()) continue;
if (!(*slot)->IsHeapObject()) continue;
HeapObject* value = HeapObject::cast(*slot);
RecordSlot(slot, slot, *slot);
if (details.IsProperty()) {
HeapObject* object = HeapObject::cast(value);
MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
if (!mark.Get()) {
SetMark(HeapObject::cast(object), mark);
marking_deque_.PushBlack(object);
switch (details.type()) {
case NORMAL:
case FIELD:
case CONSTANT_FUNCTION:
case HANDLER:
case INTERCEPTOR:
MarkObjectAndPush(value);
break;
case CALLBACKS:
if (!value->IsAccessorPair()) {
MarkObjectAndPush(value);
} else if (!MarkObjectWithoutPush(value)) {
MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
}
} else if (details.type() == ELEMENTS_TRANSITION && value->IsFixedArray()) {
break;
case ELEMENTS_TRANSITION:
// For maps with multiple elements transitions, the transition maps are
// stored in a FixedArray. Keep the fixed array alive but not the maps
// that it refers to.
HeapObject* object = HeapObject::cast(value);
MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
if (!mark.Get()) {
SetMark(HeapObject::cast(object), mark);
}
if (value->IsFixedArray()) MarkObjectWithoutPush(value);
break;
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
case NULL_DESCRIPTOR:
break;
}
}
// The DescriptorArray descriptors contains a pointer to its contents array,
@ -2290,14 +2311,19 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
}
// Clear dead prototype transitions.
ClearNonLivePrototypeTransitions(map);
ClearNonLiveMapTransitions(map, map_mark);
}
}
void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
int number_of_transitions = map->NumberOfProtoTransitions();
FixedArray* prototype_transitions = map->prototype_transitions();
int new_number_of_transitions = 0;
const int header = Map::kProtoTransitionHeaderSize;
const int proto_offset =
header + Map::kProtoTransitionPrototypeOffset;
const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
const int map_offset = header + Map::kProtoTransitionMapOffset;
const int step = Map::kProtoTransitionElementsPerEntry;
for (int i = 0; i < number_of_transitions; i++) {
@ -2336,42 +2362,41 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
i++) {
prototype_transitions->set_undefined(heap_, header + i);
}
}
void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
MarkBit map_mark) {
// Follow the chain of back pointers to find the prototype.
Map* current = map;
while (current->IsMap()) {
current = reinterpret_cast<Map*>(current->prototype());
ASSERT(current->IsHeapObject());
Map* real_prototype = map;
while (real_prototype->IsMap()) {
real_prototype = reinterpret_cast<Map*>(real_prototype->prototype());
ASSERT(real_prototype->IsHeapObject());
}
Object* real_prototype = current;
// Follow back pointers, setting them to prototype,
// clearing map transitions when necessary.
current = map;
bool on_dead_path = !map_mark.Get();
Object* next;
// Follow back pointers, setting them to prototype, clearing map transitions
// when necessary.
Map* current = map;
bool current_is_alive = map_mark.Get();
bool on_dead_path = !current_is_alive;
while (current->IsMap()) {
next = current->prototype();
Object* next = current->prototype();
// There should never be a dead map above a live map.
MarkBit current_mark = Marking::MarkBitFrom(current);
bool is_alive = current_mark.Get();
ASSERT(on_dead_path || is_alive);
ASSERT(on_dead_path || current_is_alive);
// A live map above a dead map indicates a dead transition.
// This test will always be false on the first iteration.
if (on_dead_path && is_alive) {
// A live map above a dead map indicates a dead transition. This test will
// always be false on the first iteration.
if (on_dead_path && current_is_alive) {
on_dead_path = false;
current->ClearNonLiveTransitions(heap(), real_prototype);
}
*HeapObject::RawField(current, Map::kPrototypeOffset) =
real_prototype;
if (is_alive) {
Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
RecordSlot(slot, slot, real_prototype);
}
*slot = real_prototype;
if (current_is_alive) RecordSlot(slot, slot, real_prototype);
current = reinterpret_cast<Map*>(next);
}
current_is_alive = Marking::MarkBitFrom(current).Get();
}
}

6
deps/v8/src/mark-compact.h

@ -628,6 +628,9 @@ class MarkCompactCollector {
// This is for non-incremental marking.
INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
INLINE(bool MarkObjectWithoutPush(HeapObject* object));
INLINE(void MarkObjectAndPush(HeapObject* value));
// Marks the object black. This is for non-incremental marking.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
@ -645,6 +648,7 @@ class MarkCompactCollector {
// Mark a Map and its DescriptorArray together, skipping transitions.
void MarkMapContents(Map* map);
void MarkAccessorPairSlot(HeapObject* accessors, int offset);
void MarkDescriptorArray(DescriptorArray* descriptors);
// Mark the heap roots and all objects reachable from them.
@ -692,6 +696,8 @@ class MarkCompactCollector {
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
void ClearNonLiveTransitions();
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
// Marking detaches initial maps from SharedFunctionInfo objects
// to make this reference weak. We need to reattach initial maps

1
deps/v8/src/mips/assembler-mips-inl.h

@ -37,6 +37,7 @@
#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
#include "mips/assembler-mips.h"
#include "cpu.h"
#include "debug.h"

41
deps/v8/src/mips/ic-mips.cc

@ -1038,21 +1038,27 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load the key (consisting of map and symbol) from the cache and
// check for match.
Label try_second_entry, hit_on_first_entry, load_in_object_property;
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys =
ExternalReference::keyed_lookup_cache_keys(isolate);
__ li(t0, Operand(cache_keys));
__ sll(at, a3, kPointerSizeLog2 + 1);
__ addu(t0, t0, at);
__ lw(t1, MemOperand(t0));
__ Branch(&try_second_entry, ne, a2, Operand(t1));
__ lw(t1, MemOperand(t0, kPointerSize));
__ Branch(&hit_on_first_entry, eq, a0, Operand(t1));
__ bind(&try_second_entry);
__ lw(t1, MemOperand(t0, kPointerSize * 2));
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
__ lw(t1, MemOperand(t0, kPointerSize * i * 2));
__ Branch(&try_next_entry, ne, a2, Operand(t1));
__ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
__ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
__ bind(&try_next_entry);
}
__ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
__ Branch(&slow, ne, a2, Operand(t1));
__ lw(t1, MemOperand(t0, kPointerSize * 3));
__ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
__ Branch(&slow, ne, a0, Operand(t1));
// Get field offset.
@ -1063,25 +1069,20 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(isolate);
// Hit on second entry.
// Hit on nth entry.
for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
__ bind(&hit_on_nth_entry[i]);
__ li(t0, Operand(cache_field_offsets));
__ sll(at, a3, kPointerSizeLog2);
__ addu(at, t0, at);
__ lw(t1, MemOperand(at, kPointerSize));
__ lw(t1, MemOperand(at, kPointerSize * i));
__ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
__ Subu(t1, t1, t2);
__ Branch(&property_array_property, ge, t1, Operand(zero_reg));
if (i != 0) {
__ Branch(&load_in_object_property);
// Hit on first entry.
__ bind(&hit_on_first_entry);
__ li(t0, Operand(cache_field_offsets));
__ sll(at, a3, kPointerSizeLog2);
__ addu(at, t0, at);
__ lw(t1, MemOperand(at));
__ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
__ Subu(t1, t1, t2);
__ Branch(&property_array_property, ge, t1, Operand(zero_reg));
}
}
// Load in-object property.
__ bind(&load_in_object_property);
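
The ic-mips.cc hunk above (and the matching ia32/x64 changes) unrolls the keyed-lookup-cache probe over KeyedLookupCache::kEntriesPerBucket entries instead of hard-coding two, with the last entry falling through to the slow path. A plain C++ sketch of the probe the generated code implements (kEntriesPerBucket = 4 is an assumption here; the real value lives in KeyedLookupCache):

#include <cstdio>

static const int kEntriesPerBucket = 4;  // assumption, see KeyedLookupCache

struct Entry { const void* map; const void* key; int field_offset; };

static int Probe(const Entry bucket[kEntriesPerBucket],
                 const void* map, const void* key) {
  for (int i = 0; i < kEntriesPerBucket; i++) {
    if (bucket[i].map == map && bucket[i].key == key) {
      return bucket[i].field_offset;  // hit on the i-th entry
    }
  }
  return -1;  // miss: the generated code branches to the slow path here
}

int main() {
  int m = 0, k = 0;  // dummies standing in for a Map and a symbol
  Entry bucket[kEntriesPerBucket] = {
      {nullptr, nullptr, 0}, {&m, &k, 12}, {nullptr, nullptr, 0}, {nullptr, nullptr, 0}};
  std::printf("offset = %d\n", Probe(bucket, &m, &k));  // prints 12
  return 0;
}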

3
deps/v8/src/objects-inl.h

@ -4042,8 +4042,7 @@ INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
ACCESSORS(Code, next_code_flushing_candidate,
Object, kNextCodeFlushingCandidateOffset)
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
byte* Code::instruction_start() {

53
deps/v8/src/objects.cc

@ -4954,15 +4954,43 @@ class IntrusiveMapTransitionIterator {
Map* Next() {
ASSERT(IsIterating());
FixedArray* contents = ContentArray();
// Attention, tricky index manipulation ahead: Every entry in the contents
// array consists of a value/details pair, so the index is typically even.
// An exception is made for CALLBACKS entries: An even index means we look
// at its getter, and an odd index means we look at its setter.
int index = Smi::cast(*ContentHeader())->value();
while (index < contents->length()) {
int next_index = index + 2;
PropertyDetails details(Smi::cast(contents->get(index + 1)));
if (details.IsTransition()) {
*ContentHeader() = Smi::FromInt(next_index);
PropertyDetails details(Smi::cast(contents->get(index | 1)));
switch (details.type()) {
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
case ELEMENTS_TRANSITION:
// We definitely have a map transition.
*ContentHeader() = Smi::FromInt(index + 2);
return static_cast<Map*>(contents->get(index));
case CALLBACKS: {
// We might have a map transition in a getter or in a setter.
AccessorPair* accessors =
static_cast<AccessorPair*>(contents->get(index & ~1));
Object* accessor =
((index & 1) == 0) ? accessors->getter() : accessors->setter();
index++;
if (accessor->IsMap()) {
*ContentHeader() = Smi::FromInt(index);
return static_cast<Map*>(accessor);
}
break;
}
case NORMAL:
case FIELD:
case CONSTANT_FUNCTION:
case HANDLER:
case INTERCEPTOR:
case NULL_DESCRIPTOR:
// We definitely have no map transition.
index += 2;
break;
}
index = next_index;
}
*ContentHeader() = descriptor_array_->GetHeap()->fixed_array_map();
return NULL;
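
The iterator above borrows the low bit of the index to walk into AccessorPair entries: an even index means the getter, an odd index the setter, while (index | 1) still addresses the details slot and (index & ~1) the pair itself. A tiny standalone illustration of that index arithmetic:

#include <cstdio>

int main() {
  for (int index = 4; index <= 5; ++index) {
    int pair_slot = index & ~1;    // slot holding the AccessorPair value
    int details_slot = index | 1;  // slot holding the PropertyDetails
    const char* which = (index & 1) == 0 ? "getter" : "setter";
    std::printf("index %d -> pair@%d details@%d (%s)\n",
                index, pair_slot, details_slot, which);
  }
  return 0;
}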
@ -8113,8 +8141,11 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
static_cast<Translation::Opcode>(iterator.Next());
ASSERT(Translation::BEGIN == opcode);
int frame_count = iterator.Next();
PrintF(out, " %s {count=%d}\n", Translation::StringFor(opcode),
frame_count);
int jsframe_count = iterator.Next();
PrintF(out, " %s {frame count=%d, js frame count=%d}\n",
Translation::StringFor(opcode),
frame_count,
jsframe_count);
while (iterator.HasNext() &&
Translation::BEGIN !=
@ -8126,7 +8157,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
UNREACHABLE();
break;
case Translation::FRAME: {
case Translation::JS_FRAME: {
int ast_id = iterator.Next();
int function_id = iterator.Next();
JSFunction* function =
@ -8138,6 +8169,12 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
case Translation::ARGUMENTS_ADAPTOR_FRAME: {
unsigned height = iterator.Next();
PrintF(out, "{arguments adaptor, height=%d}", height);
break;
}
case Translation::DUPLICATE:
break;

11
deps/v8/src/objects.h

@ -4050,11 +4050,10 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
// [code_flushing_candidate]: Field only used during garbage
// collection to hold code flushing candidates. The contents of this
// [gc_metadata]: Field used to hold GC related metadata. The contents of this
// field do not have to be traced during garbage collection since
// it is only used by the garbage collector itself.
DECL_ACCESSORS(next_code_flushing_candidate, Object)
DECL_ACCESSORS(gc_metadata, Object)
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
@ -4278,10 +4277,8 @@ class Code: public HeapObject {
static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
static const int kDeoptimizationDataOffset =
kHandlerTableOffset + kPointerSize;
static const int kNextCodeFlushingCandidateOffset =
kDeoptimizationDataOffset + kPointerSize;
static const int kFlagsOffset =
kNextCodeFlushingCandidateOffset + kPointerSize;
static const int kGCMetadataOffset = kDeoptimizationDataOffset + kPointerSize;
static const int kFlagsOffset = kGCMetadataOffset + kPointerSize;
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlagsSize = 2 * kIntSize;

15
deps/v8/src/platform-freebsd.cc

@ -464,15 +464,8 @@ class Thread::PlatformData : public Malloced {
Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(options.stack_size) {
set_name(options.name);
}
Thread::Thread(const char* name)
: data_(new PlatformData),
stack_size_(0) {
set_name(name);
stack_size_(options.stack_size()) {
set_name(options.name());
}
@ -717,8 +710,10 @@ class SignalSender : public Thread {
FULL_INTERVAL
};
static const int kSignalSenderStackSize = 32 * KB;
explicit SignalSender(int interval)
: Thread("SignalSender"),
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {

15
deps/v8/src/platform-linux.cc

@ -720,15 +720,8 @@ class Thread::PlatformData : public Malloced {
Thread::Thread(const Options& options)
: data_(new PlatformData()),
stack_size_(options.stack_size) {
set_name(options.name);
}
Thread::Thread(const char* name)
: data_(new PlatformData()),
stack_size_(0) {
set_name(name);
stack_size_(options.stack_size()) {
set_name(options.name());
}
@ -1035,8 +1028,10 @@ class SignalSender : public Thread {
FULL_INTERVAL
};
static const int kSignalSenderStackSize = 32 * KB;
explicit SignalSender(int interval)
: Thread("SignalSender"),
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
vm_tgid_(getpid()),
interval_(interval) {}

17
deps/v8/src/platform-macos.cc

@ -473,17 +473,11 @@ class Thread::PlatformData : public Malloced {
pthread_t thread_; // Thread handle for pthread.
};
Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(options.stack_size) {
set_name(options.name);
}
Thread::Thread(const char* name)
Thread::Thread(const Options& options)
: data_(new PlatformData),
stack_size_(0) {
set_name(name);
stack_size_(options.stack_size()) {
set_name(options.name());
}
@ -736,10 +730,13 @@ class Sampler::PlatformData : public Malloced {
thread_act_t profiled_thread_;
};
class SamplerThread : public Thread {
public:
static const int kSamplerThreadStackSize = 32 * KB;
explicit SamplerThread(int interval)
: Thread("SamplerThread"),
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {

15
deps/v8/src/platform-openbsd.cc

@ -512,15 +512,8 @@ class Thread::PlatformData : public Malloced {
Thread::Thread(const Options& options)
: data_(new PlatformData()),
stack_size_(options.stack_size) {
set_name(options.name);
}
Thread::Thread(const char* name)
: data_(new PlatformData()),
stack_size_(0) {
set_name(name);
stack_size_(options.stack_size()) {
set_name(options.name());
}
@ -789,8 +782,10 @@ class SignalSender : public Thread {
FULL_INTERVAL
};
static const int kSignalSenderStackSize = 32 * KB;
explicit SignalSender(int interval)
: Thread("SignalSender"),
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
vm_tgid_(getpid()),
interval_(interval) {}

54
deps/v8/src/platform-solaris.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -53,8 +53,8 @@
#include "v8.h"
#include "platform.h"
#include "vm-state-inl.h"
#include "v8threads.h"
#include "vm-state-inl.h"
// It seems there is a bug in some Solaris distributions (experienced in
@ -84,33 +84,6 @@ namespace internal {
static const pthread_t kNoThread = (pthread_t) 0;
static void* GetRandomMmapAddr() {
Isolate* isolate = Isolate::UncheckedCurrent();
// Note that the current isolate isn't set up in a call path via
// CpuFeatures::Probe. We don't care about randomization in this case because
// the code page is immediately freed.
if (isolate != NULL) {
#ifdef V8_TARGET_ARCH_X64
uint64_t rnd1 = V8::RandomPrivate(isolate);
uint64_t rnd2 = V8::RandomPrivate(isolate);
uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
uint32_t raw_addr = V8::RandomPrivate(isolate);
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc).
raw_addr &= 0x3ffff000;
raw_addr += 0x20000000;
#endif
return reinterpret_cast<void*>(raw_addr);
}
return NULL;
}
double ceiling(double x) {
return ceil(x);
}
@ -167,7 +140,7 @@ double OS::LocalTimeOffset() {
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
// and verification). The estimate is conservative, i.e., not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@ -363,7 +336,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
size_t request_size = RoundUp(size + alignment,
static_cast<intptr_t>(OS::AllocateAlignment()));
void* reservation = mmap(GetRandomMmapAddr(),
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
@ -429,7 +402,7 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(GetRandomMmapAddr(),
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
@ -480,17 +453,11 @@ class Thread::PlatformData : public Malloced {
pthread_t thread_; // Thread handle for pthread.
};
Thread::Thread(const Options& options)
: data_(new PlatformData()),
stack_size_(options.stack_size) {
set_name(options.name);
}
Thread::Thread(const char* name)
Thread::Thread(const Options& options)
: data_(new PlatformData()),
stack_size_(0) {
set_name(name);
stack_size_(options.stack_size()) {
set_name(options.name());
}
@ -737,8 +704,10 @@ class SignalSender : public Thread {
FULL_INTERVAL
};
static const int kSignalSenderStackSize = 32 * KB;
explicit SignalSender(int interval)
: Thread("SignalSender"),
: Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
interval_(interval) {}
static void InstallSignalHandler() {
@ -870,6 +839,7 @@ class SignalSender : public Thread {
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
private:
DISALLOW_COPY_AND_ASSIGN(SignalSender);
};

15
deps/v8/src/platform-win32.cc

@ -1526,16 +1526,9 @@ class Thread::PlatformData : public Malloced {
// handle until it is started.
Thread::Thread(const Options& options)
: stack_size_(options.stack_size) {
: stack_size_(options.stack_size()) {
data_ = new PlatformData(kNoThread);
set_name(options.name);
}
Thread::Thread(const char* name)
: stack_size_(0) {
data_ = new PlatformData(kNoThread);
set_name(name);
set_name(options.name());
}
@ -1901,8 +1894,10 @@ class Sampler::PlatformData : public Malloced {
class SamplerThread : public Thread {
public:
static const int kSamplerThreadStackSize = 32 * KB;
explicit SamplerThread(int interval)
: Thread("SamplerThread"),
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
static void AddActiveSampler(Sampler* sampler) {

16
deps/v8/src/platform.h

@ -412,16 +412,22 @@ class Thread {
LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
};
struct Options {
Options() : name("v8:<unknown>"), stack_size(0) {}
class Options {
public:
Options() : name_("v8:<unknown>"), stack_size_(0) {}
Options(const char* name, int stack_size = 0)
: name_(name), stack_size_(stack_size) {}
const char* name() const { return name_; }
int stack_size() const { return stack_size_; }
const char* name;
int stack_size;
private:
const char* name_;
int stack_size_;
};
// Create new thread.
explicit Thread(const Options& options);
explicit Thread(const char* name);
virtual ~Thread();
// Start new thread by calling the Run() method in the new thread.
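
The platform.h hunk above turns Thread::Options into a small class with a (name, stack_size) constructor and accessors, which is what lets the SignalSender/SamplerThread constructors in the platform files pass an explicit stack size. A minimal sketch of the new usage (the class body is copied from the hunk; Thread itself is not modeled):

#include <cstdio>

class Options {
 public:
  Options() : name_("v8:<unknown>"), stack_size_(0) {}
  Options(const char* name, int stack_size = 0)
      : name_(name), stack_size_(stack_size) {}
  const char* name() const { return name_; }
  int stack_size() const { return stack_size_; }
 private:
  const char* name_;
  int stack_size_;
};

int main() {
  static const int KB = 1024;
  Options sender("SignalSender", 32 * KB);  // what the SignalSender ctor now passes
  std::printf("%s, %d bytes\n", sender.name(), sender.stack_size());
  return 0;
}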

124
deps/v8/src/runtime.cc

@ -8126,13 +8126,15 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
List<JSFunction*> functions(2);
frame->GetFunctions(&functions);
if (functions.length() > 1) {
int inlined_frame_index = functions.length() - 1;
JSFunction* inlined_function = functions[inlined_frame_index];
int args_count = inlined_function->shared()->formal_parameter_count();
ScopedVector<SlotRef> args_slots(args_count);
SlotRef::ComputeSlotMappingForArguments(frame,
inlined_frame_index,
&args_slots);
int inlined_jsframe_index = functions.length() - 1;
JSFunction* inlined_function = functions[inlined_jsframe_index];
Vector<SlotRef> args_slots =
SlotRef::ComputeSlotMappingForArguments(
frame,
inlined_jsframe_index,
inlined_function->shared()->formal_parameter_count());
int args_count = args_slots.length();
*total_argc = prefix_argc + args_count;
SmartArrayPointer<Handle<Object> > param_data(
@ -8141,6 +8143,9 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
Handle<Object> val = args_slots[i].GetValue();
param_data[prefix_argc + i] = val;
}
args_slots.Dispose();
return param_data;
} else {
it.AdvanceToArgumentsFrame();
@ -8486,14 +8491,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(isolate->heap()->IsAllocationAllowed());
int frames = deoptimizer->output_count();
int jsframes = deoptimizer->jsframe_count();
deoptimizer->MaterializeHeapNumbers();
delete deoptimizer;
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = NULL;
for (int i = 0; i < frames - 1; i++) it.Advance();
for (int i = 0; i < jsframes - 1; i++) it.Advance();
frame = it.frame();
RUNTIME_ASSERT(frame->function()->IsJSFunction());
@ -10703,13 +10708,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) {
class FrameInspector {
public:
FrameInspector(JavaScriptFrame* frame,
int inlined_frame_index,
int inlined_jsframe_index,
Isolate* isolate)
: frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
// Calculate the deoptimized frame.
if (frame->is_optimized()) {
deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
frame, inlined_frame_index, isolate);
frame, inlined_jsframe_index, isolate);
}
has_adapted_arguments_ = frame_->has_adapted_arguments();
is_optimized_ = frame_->is_optimized();
@ -10825,8 +10830,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
return heap->undefined_value();
}
int inlined_frame_index = 0; // Inlined frame index in optimized frame.
int count = 0;
JavaScriptFrameIterator it(isolate, id);
for (; !it.done(); it.Advance()) {
@ -10835,11 +10838,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
if (it.done()) return heap->undefined_value();
if (it.frame()->is_optimized()) {
inlined_frame_index =
bool is_optimized = it.frame()->is_optimized();
int inlined_jsframe_index = 0; // Inlined frame index in optimized frame.
if (is_optimized) {
inlined_jsframe_index =
it.frame()->GetInlineCount() - (index - count) - 1;
}
FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate);
FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@ -10853,12 +10859,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
// Check for constructor frame. Inlined frames cannot be construct calls.
bool inlined_frame =
it.frame()->is_optimized() && inlined_frame_index != 0;
bool inlined_frame = is_optimized && inlined_jsframe_index != 0;
bool constructor = !inlined_frame && it.frame()->IsConstructor();
// Get scope info and read from it for local variable information.
Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
ASSERT(*scope_info != ScopeInfo::Empty());
@ -10895,7 +10900,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// Check whether this frame is positioned at return. If not top
// frame or if the frame is optimized it cannot be at a return.
bool at_return = false;
if (!it.frame()->is_optimized() && index == 0) {
if (!is_optimized && index == 0) {
at_return = isolate->debug()->IsBreakAtReturn(it.frame());
}
@ -10935,7 +10940,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
// the provided parameters whereas the function frame always has the number
// of arguments matching the function's parameters. The rest of the
// information (except for what is collected above) is the same.
if (it.frame()->has_adapted_arguments()) {
if ((inlined_jsframe_index == 0) && it.frame()->has_adapted_arguments()) {
it.AdvanceToArgumentsFrame();
frame_inspector.SetArgumentsFrame(it.frame());
}
@ -10946,11 +10951,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
if (argument_count < frame_inspector.GetParametersCount()) {
argument_count = frame_inspector.GetParametersCount();
}
#ifdef DEBUG
if (it.frame()->is_optimized()) {
ASSERT_EQ(argument_count, frame_inspector.GetParametersCount());
}
#endif
// Calculate the size of the result.
int details_size = kFrameDetailsFirstDynamicIndex +
@ -10992,9 +10992,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
if (*save->context() == *isolate->debug()->debug_context()) {
flags |= 1 << 0;
}
if (it.frame()->is_optimized()) {
if (is_optimized) {
flags |= 1 << 1;
flags |= inlined_frame_index << 2;
flags |= inlined_jsframe_index << 2;
}
details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
@ -11011,7 +11011,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
// Parameter value.
if (i < it.frame()->ComputeParametersCount()) {
if (i < frame_inspector.GetParametersCount()) {
// Get the value from the stack.
details->set(details_index++, frame_inspector.GetParameter(i));
} else {
@ -11084,14 +11084,13 @@ static bool CopyContextLocalsToScopeObject(
// Create a plain JSObject which materializes the local scope for the specified
// frame.
static Handle<JSObject> MaterializeLocalScope(
static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
Isolate* isolate,
JavaScriptFrame* frame,
int inlined_frame_index) {
Handle<JSFunction> function(JSFunction::cast(frame->function()));
FrameInspector* frame_inspector) {
Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
// Allocate and initialize a JSObject with all the arguments, stack locals
// heap locals and extension properties of the debugged function.
@ -11100,11 +11099,15 @@ static Handle<JSObject> MaterializeLocalScope(
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
Handle<Object> value(
i < frame_inspector->GetParametersCount() ?
frame_inspector->GetParameter(i) : isolate->heap()->undefined_value());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
SetProperty(local_scope,
Handle<String>(scope_info->ParameterName(i)),
Handle<Object>(frame_inspector.GetParameter(i)),
value,
NONE,
kNonStrictMode),
Handle<JSObject>());
@ -11116,7 +11119,7 @@ static Handle<JSObject> MaterializeLocalScope(
isolate,
SetProperty(local_scope,
Handle<String>(scope_info->StackLocalName(i)),
Handle<Object>(frame_inspector.GetExpression(i)),
Handle<Object>(frame_inspector->GetExpression(i)),
NONE,
kNonStrictMode),
Handle<JSObject>());
@ -11163,6 +11166,17 @@ static Handle<JSObject> MaterializeLocalScope(
}
static Handle<JSObject> MaterializeLocalScope(
Isolate* isolate,
JavaScriptFrame* frame,
int inlined_jsframe_index) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
return MaterializeLocalScopeWithFrameInspector(isolate,
frame,
&frame_inspector);
}
// Create a plain JSObject which materializes the closure content for the
// context.
static Handle<JSObject> MaterializeClosure(Isolate* isolate,
@ -11268,10 +11282,10 @@ class ScopeIterator {
ScopeIterator(Isolate* isolate,
JavaScriptFrame* frame,
int inlined_frame_index)
int inlined_jsframe_index)
: isolate_(isolate),
frame_(frame),
inlined_frame_index_(inlined_frame_index),
inlined_jsframe_index_(inlined_jsframe_index),
function_(JSFunction::cast(frame->function())),
context_(Context::cast(frame->context())),
nested_scope_chain_(4) {
@ -11428,7 +11442,7 @@ class ScopeIterator {
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
ASSERT(nested_scope_chain_.length() == 1);
return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
return MaterializeLocalScope(isolate_, frame_, inlined_jsframe_index_);
case ScopeIterator::ScopeTypeWith:
// Return the with object.
return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
@ -11524,7 +11538,7 @@ class ScopeIterator {
private:
Isolate* isolate_;
JavaScriptFrame* frame_;
int inlined_frame_index_;
int inlined_jsframe_index_;
Handle<JSFunction> function_;
Handle<Context> context_;
List<Handle<ScopeInfo> > nested_scope_chain_;
@ -11586,7 +11600,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
if (!maybe_check->ToObject(&check)) return maybe_check;
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
// Get the frame where the debugging is performed.
@ -11596,7 +11610,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
// Find the requested scope.
int n = 0;
ScopeIterator it(isolate, frame, inlined_frame_index);
ScopeIterator it(isolate, frame, inlined_jsframe_index);
for (; !it.Done() && n < index; it.Next()) {
n++;
}
@ -11994,12 +12008,12 @@ static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
Handle<JSFunction> function,
Handle<Context> base,
JavaScriptFrame* frame,
int inlined_frame_index) {
int inlined_jsframe_index) {
HandleScope scope(isolate);
List<Handle<ScopeInfo> > scope_chain;
List<Handle<Context> > context_chain;
ScopeIterator it(isolate, frame, inlined_frame_index);
ScopeIterator it(isolate, frame, inlined_jsframe_index);
for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
it.Type() != ScopeIterator::ScopeTypeLocal ; it.Next()) {
ASSERT(!it.Done());
@ -12056,8 +12070,7 @@ static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
// Runtime_DebugEvaluate.
static Handle<Object> GetArgumentsObject(Isolate* isolate,
JavaScriptFrame* frame,
int inlined_frame_index,
Handle<JSFunction> function,
FrameInspector* frame_inspector,
Handle<ScopeInfo> scope_info,
Handle<Context> function_context) {
// Try to find the value of 'arguments' to pass as parameter. If it is not
@ -12081,9 +12094,8 @@ static Handle<Object> GetArgumentsObject(Isolate* isolate,
}
}
FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
int length = frame_inspector.GetParametersCount();
Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
int length = frame_inspector->GetParametersCount();
Handle<JSObject> arguments =
isolate->factory()->NewArgumentsObject(function, length);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
@ -12091,7 +12103,7 @@ static Handle<Object> GetArgumentsObject(Isolate* isolate,
AssertNoAllocation no_gc;
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < length; i++) {
array->set(i, frame_inspector.GetParameter(i), mode);
array->set(i, frame_inspector->GetParameter(i), mode);
}
arguments->set_elements(*array);
return arguments;
@ -12127,7 +12139,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
}
}
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_ARG_CHECKED(String, source, 3);
CONVERT_BOOLEAN_CHECKED(disable_break, args[4]);
Handle<Object> additional_context(args[5]);
@ -12139,7 +12151,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
StackFrame::Id id = UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator it(isolate, id);
JavaScriptFrame* frame = it.frame();
Handle<JSFunction> function(JSFunction::cast(frame->function()));
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
Handle<ScopeInfo> scope_info(function->shared()->scope_info());
// Traverse the saved contexts chain to find the active context for the
@ -12166,8 +12179,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
#endif
// Materialize the content of the local scope into a JSObject.
Handle<JSObject> local_scope = MaterializeLocalScope(
isolate, frame, inlined_frame_index);
Handle<JSObject> local_scope = MaterializeLocalScopeWithFrameInspector(
isolate, frame, &frame_inspector);
RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
// Allocate a new context for the debug evaluation and set the extension
@ -12187,7 +12200,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
go_between,
context,
frame,
inlined_frame_index);
inlined_jsframe_index);
if (additional_context->IsJSObject()) {
Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
@ -12227,8 +12240,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<Object> arguments = GetArgumentsObject(isolate,
frame,
inlined_frame_index,
function,
&frame_inspector,
scope_info,
function_context);

32
deps/v8/src/serialize.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -1081,36 +1081,6 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
}
#ifdef DEBUG
void Deserializer::Synchronize(const char* tag) {
int data = source_->Get();
// If this assert fails then that indicates that you have a mismatch between
// the number of GC roots when serializing and deserializing.
ASSERT_EQ(kSynchronize, data);
do {
int character = source_->Get();
if (character == 0) break;
if (FLAG_debug_serialization) {
PrintF("%c", character);
}
} while (true);
if (FLAG_debug_serialization) {
PrintF("\n");
}
}
void Serializer::Synchronize(const char* tag) {
sink_->Put(kSynchronize, tag);
int character;
do {
character = *tag++;
sink_->PutSection(character, "TagCharacter");
} while (character != 0);
}
#endif
Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),

7
deps/v8/src/serialize.h

@ -341,10 +341,6 @@ class Deserializer: public SerializerDeserializer {
// Deserialize a single object and the objects reachable from it.
void DeserializePartial(Object** root);
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
private:
virtual void VisitPointers(Object** start, Object** end);
@ -485,9 +481,6 @@ class Serializer : public SerializerDeserializer {
SerializationAddressMapper* address_mapper() { return &address_mapper_; }
void PutRoot(
int index, HeapObject* object, HowToCode how, WhereToPoint where);
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
protected:
static const int kInvalidRootIndex = -1;

18
deps/v8/src/spaces.cc

@ -922,14 +922,13 @@ bool NewSpace::SetUp(int reserved_semispace_capacity,
2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
if (!to_space_.SetUp(chunk_base_,
to_space_.SetUp(chunk_base_,
initial_semispace_capacity,
maximum_semispace_capacity)) {
return false;
}
if (!from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
maximum_semispace_capacity);
from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
initial_semispace_capacity,
maximum_semispace_capacity)) {
maximum_semispace_capacity);
if (!to_space_.Commit()) {
return false;
}
@ -1162,7 +1161,7 @@ void NewSpace::Verify() {
// -----------------------------------------------------------------------------
// SemiSpace implementation
bool SemiSpace::SetUp(Address start,
void SemiSpace::SetUp(Address start,
int initial_capacity,
int maximum_capacity) {
// Creates a space in the young generation. The constructor does not
@ -1181,8 +1180,6 @@ bool SemiSpace::SetUp(Address start,
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
age_mark_ = start_;
return Commit();
}
@ -1232,6 +1229,9 @@ bool SemiSpace::Uncommit() {
bool SemiSpace::GrowTo(int new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ASSERT(new_capacity <= maximum_capacity_);
ASSERT(new_capacity > capacity_);

2
deps/v8/src/spaces.h

@ -1834,7 +1834,7 @@ class SemiSpace : public Space {
current_page_(NULL) { }
// Sets up the semispace using the given chunk.
bool SetUp(Address start, int initial_capacity, int maximum_capacity);
void SetUp(Address start, int initial_capacity, int maximum_capacity);
// Tear down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.

3
deps/v8/src/type-info.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -223,6 +223,7 @@ class CaseClause;
class CompareOperation;
class CompilationInfo;
class CountOperation;
class Expression;
class Property;
class SmallMapList;
class UnaryOperation;

19
deps/v8/src/v8globals.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -107,14 +107,12 @@ const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
// (sorted alphabetically)
class AccessorInfo;
class Allocation;
class Arguments;
class Assembler;
class AssertNoAllocation;
class BreakableStatement;
class Code;
class CodeGenerator;
class CodeStub;
@ -124,10 +122,8 @@ class Debugger;
class DebugInfo;
class Descriptor;
class DescriptorArray;
class Expression;
class ExternalReference;
class FixedArray;
class FunctionLiteral;
class FunctionTemplateInfo;
class MemoryChunk;
class SeededNumberDictionary;
@ -138,7 +134,6 @@ class Heap;
class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class JSArray;
class JSFunction;
class JSObject;
@ -149,31 +144,19 @@ class Map;
class MapSpace;
class MarkCompactCollector;
class NewSpace;
class NodeVisitor;
class Object;
class MaybeObject;
class OldSpace;
class Property;
class Foreign;
class RegExpNode;
struct RegExpCompileData;
class RegExpTree;
class RegExpCompiler;
class RegExpVisitor;
class Scope;
class ScopeInfo;
class Script;
class Slot;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
class SplayTree;
class Statement;
class String;
class Struct;
class SwitchStatement;
class AstVisitor;
class Variable;
class VariableProxy;
class RelocInfo;
class Deserializer;
class MessageLocation;

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 8
#define BUILD_NUMBER 8
#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

4
deps/v8/src/x64/assembler-x64-inl.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,6 +28,8 @@
#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_
#include "x64/assembler-x64.h"
#include "cpu.h"
#include "debug.h"
#include "v8memory.h"

1
deps/v8/src/x64/builtins-x64.cc

@ -1547,6 +1547,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&invoke);
__ call(rdx);
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ ret(0);

129
deps/v8/src/x64/deoptimizer-x64.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -206,12 +206,13 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
iterator.Skip(1); // Drop JS frame count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@ -247,9 +248,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
#ifdef DEBUG
output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
#endif
output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@ -338,13 +337,117 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
int frame_index) {
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
if (FLAG_trace_deopt) {
PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
// Arguments adaptor can not be topmost or bottommost.
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
ASSERT(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous
// frame's top and this frame's size.
intptr_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
DoTranslateCommand(iterator, frame_index, output_offset);
}
input_offset -= (parameter_count * kPointerSize);
// Read caller's PC from the previous frame.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
}
// A marker value is used in place of the context.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
intptr_t context = reinterpret_cast<intptr_t>(
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
output_frame->SetFrameSlot(output_offset, context);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; context (adaptor sentinel)\n",
top_address + output_offset, output_offset, context);
}
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; function\n",
top_address + output_offset, output_offset, value);
}
// Number of incoming arguments.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
ASSERT(0 == output_offset);
Builtins* builtins = isolate_->builtins();
Code* adaptor_trampoline =
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
intptr_t pc_value = reinterpret_cast<intptr_t>(
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc_value);
}
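
DoComputeArgumentsAdaptorFrame above materializes a synthetic adaptor frame during deoptimization. A short sketch of the slot layout it writes, in the order the hunk lays the slots down (highest frame offset first, ending at offset 0); reading argc = height - 1 as "height includes the receiver" is an assumption:

#include <cstdio>

int main() {
  const int height = 3;  // example height taken from the translation
  const char* layout[] = {
      "parameter 0 .. parameter height-1 (translated values)",
      "caller's pc",
      "caller's fp            <- becomes this frame's fp",
      "context slot = Smi(ARGUMENTS_ADAPTOR) sentinel",
      "function (the adapted JSFunction)",
      "argc = Smi(height - 1)",
  };
  std::printf("adaptor frame for height=%d (argc=%d):\n", height, height - 1);
  for (const char* slot : layout) std::printf("  %s\n", slot);
  return 0;
}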
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@ -364,9 +467,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
#ifdef DEBUG
output_frame->SetKind(Code::FUNCTION);
#endif
output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);

7
deps/v8/src/x64/frames-x64.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -87,6 +87,9 @@ class ExitFrameConstants : public AllStatic {
class StandardFrameConstants : public AllStatic {
public:
// Fixed part of the frame consists of return address, caller fp,
// context and function.
static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@ -112,6 +115,8 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};

43
deps/v8/src/x64/ic-x64.cc

@ -467,43 +467,50 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load the key (consisting of map and symbol) from the cache and
// check for match.
Label try_second_entry, hit_on_first_entry, load_in_object_property;
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys
= ExternalReference::keyed_lookup_cache_keys(masm->isolate());
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
__ movq(rdi, rcx);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
__ j(not_equal, &try_second_entry);
__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
__ j(equal, &hit_on_first_entry);
int off = kPointerSize * i * 2;
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &try_next_entry);
__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
__ bind(&try_second_entry);
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, kPointerSize * 2));
int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
__ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize * 3));
__ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
ExternalReference cache_field_offsets
= ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
// Hit on second entry.
// Hit on nth entry.
for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
__ bind(&hit_on_nth_entry[i]);
if (i != 0) {
__ addl(rcx, Immediate(i));
}
__ LoadAddress(kScratchRegister, cache_field_offsets);
__ addl(rcx, Immediate(1));
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
__ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
__ subq(rdi, rcx);
__ j(above_equal, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
// Hit on first entry.
__ bind(&hit_on_first_entry);
__ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
__ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
__ subq(rdi, rcx);
__ j(above_equal, &property_array_property);
}
}
// Load in-object property.
__ bind(&load_in_object_property);
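
The rewritten stub replaces the two hard-coded cache probes with a loop over every entry in the bucket; a hit on entry i advances the field-offset index by i before the offset is loaded. A hedged C++ sketch of that probe, with stand-in types and an assumed bucket size:

// Sketch only: types and the bucket size are stand-ins, not the real
// KeyedLookupCache layout. Each entry holds a (map, symbol) key pair.
struct CacheKey {
  const void* map;
  const void* symbol;
};

const int kEntriesPerBucket = 4;  // assumed; the stub reads KeyedLookupCache::kEntriesPerBucket

// Returns the entry index on a hit, or -1 when the stub would fall through
// to its slow path. A hit on entry i means the field-offset index is
// advanced by i, as in the generated code above.
int ProbeBucket(const CacheKey* bucket, const void* map, const void* symbol) {
  for (int i = 0; i < kEntriesPerBucket; ++i) {
    if (bucket[i].map == map && bucket[i].symbol == symbol) return i;
  }
  return -1;
}
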

12
deps/v8/src/x64/lithium-codegen-x64.cc

@ -368,7 +368,11 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
translation->BeginFrame(environment->ast_id(), closure_id, height);
if (environment->is_arguments_adaptor()) {
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
} else {
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
}
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@ -504,10 +508,14 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
// |>------------ translation_size ------------<|
int frame_count = 0;
int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (!e->is_arguments_adaptor()) {
++jsframe_count;
}
}
Translation translation(&translations_, frame_count);
Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
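
Because adaptor frames are now described by their own translation records, the deoptimization environment is summarized by two counts: all frames in the chain versus JavaScript frames only. A small sketch of that counting step, using a stand-in for LEnvironment:

// Sketch: Env is a stripped-down stand-in for LEnvironment. frame_count
// covers the whole environment chain; jsframe_count skips the synthetic
// arguments-adaptor frames, mirroring the loop added above.
struct Env {
  Env* outer;
  bool is_arguments_adaptor;
};

void CountFrames(const Env* env, int* frame_count, int* jsframe_count) {
  *frame_count = 0;
  *jsframe_count = 0;
  for (const Env* e = env; e != nullptr; e = e->outer) {
    ++*frame_count;
    if (!e->is_arguments_adaptor) ++*jsframe_count;
  }
}
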

24
deps/v8/src/x64/lithium-x64.cc

@ -1000,14 +1000,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
int value_count = hydrogen_env->length();
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
hydrogen_env->is_arguments_adaptor(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@ -1016,13 +1018,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
op = new LArgument((*argument_index_accumulator)++);
op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
if (!hydrogen_env->is_arguments_adaptor()) {
*argument_index_accumulator = argument_index;
}
return result;
}
@ -1912,12 +1918,11 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
Representation representation(instr->representation());
ASSERT(
(representation.IsInteger32() &&
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(representation.IsDouble() &&
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
@ -1976,13 +1981,12 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
Representation representation(instr->value()->representation());
ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(representation.IsInteger32() &&
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(representation.IsDouble() &&
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
@ -2245,6 +2249,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
@ -2255,7 +2260,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
HEnvironment* outer = current_block_->last_environment()->outer();
HEnvironment* outer = current_block_->last_environment()->
DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}
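
In CreateEnvironment above, argument indices are assigned from a local copy of the accumulator, which is written back only for non-adaptor environments, so an adaptor frame does not consume the surrounding JS frame's argument slots. A hedged sketch of that pattern with illustrative names:

// Sketch: the helper and its parameters are illustrative only.
int AssignArgumentIndices(int* argument_index_accumulator,
                          int push_argument_count,
                          bool is_arguments_adaptor) {
  int argument_index = *argument_index_accumulator;  // work on a local copy
  int first_index = argument_index;
  for (int i = 0; i < push_argument_count; ++i) {
    ++argument_index;  // one LArgument slot per pushed argument
  }
  if (!is_arguments_adaptor) {
    *argument_index_accumulator = argument_index;  // commit only for JS frames
  }
  return first_index;
}
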

119
deps/v8/test/cctest/test-api.cc

@ -1189,7 +1189,6 @@ THREADED_TEST(GlobalPrototype) {
templ->Set("x", v8_num(200));
templ->SetAccessor(v8_str("m"), GetM);
LocalContext env(0, templ);
v8::Handle<v8::Object> obj(env->Global());
v8::Handle<Script> script(v8_compile("dummy()"));
v8::Handle<Value> result(script->Run());
CHECK_EQ(13.4, result->NumberValue());
@ -1847,7 +1846,7 @@ THREADED_TEST(DeepCrossLanguageRecursion) {
env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
call_recursively_script = v8_compile("callScriptRecursively()");
v8::Handle<Value> result(call_recursively_script->Run());
call_recursively_script->Run();
call_recursively_script = v8::Handle<Script>();
env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
@ -4476,7 +4475,7 @@ THREADED_TEST(ExtensibleOnUndetectable) {
source = v8_str("undetectable.y = 2000;");
script = Script::Compile(source);
Local<Value> result(script->Run());
script->Run();
ExpectBoolean("undetectable.y == undefined", true);
}
@ -4829,9 +4828,10 @@ THREADED_TEST(NativeFunctionDeclarationError) {
const char* extension_names[] = { name };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context(Context::New(&extensions));
ASSERT(context.IsEmpty());
CHECK(context.IsEmpty());
}
THREADED_TEST(NativeFunctionDeclarationErrorEscape) {
v8::HandleScope handle_scope;
const char* name = "nativedeclerresc";
@ -4843,7 +4843,7 @@ THREADED_TEST(NativeFunctionDeclarationErrorEscape) {
const char* extension_names[] = { name };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Handle<Context> context(Context::New(&extensions));
ASSERT(context.IsEmpty());
CHECK(context.IsEmpty());
}
@ -5009,7 +5009,7 @@ TEST(RegexpOutOfMemory) {
Local<Script> script =
Script::Compile(String::New(js_code_causing_huge_string_flattening));
last_location = NULL;
Local<Value> result(script->Run());
script->Run();
CHECK(false); // Should not return.
}
@ -5787,7 +5787,6 @@ THREADED_TEST(ErrorConstruction) {
v8::Handle<String> message = v8_str("message");
v8::Handle<Value> range_error = v8::Exception::RangeError(foo);
CHECK(range_error->IsObject());
v8::Handle<v8::Object> range_obj(range_error.As<v8::Object>());
CHECK(range_error.As<v8::Object>()->Get(message)->Equals(foo));
v8::Handle<Value> reference_error = v8::Exception::ReferenceError(foo);
CHECK(reference_error->IsObject());
@ -7357,7 +7356,7 @@ THREADED_TEST(CallKnownGlobalReceiver) {
// Create new environment reusing the global object.
LocalContext env(NULL, instance_template, global_object);
env->Global()->Set(v8_str("foo"), foo);
Local<Value> value(Script::Compile(v8_str("foo()"))->Run());
Script::Compile(v8_str("foo()"))->Run();
}
}
@ -7683,6 +7682,7 @@ THREADED_TEST(Constructor) {
context->Global()->Set(v8_str("Fun"), cons);
Local<v8::Object> inst = cons->NewInstance();
i::Handle<i::JSObject> obj(v8::Utils::OpenHandle(*inst));
CHECK(obj->IsJSObject());
Local<Value> value = CompileRun("(new Fun()).constructor === Fun");
CHECK(value->BooleanValue());
}
@ -8154,6 +8154,7 @@ THREADED_TEST(CallAsFunction) {
{ Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
Local<ObjectTemplate> instance_template(t->InstanceTemplate());
USE(instance_template);
Local<v8::Object> instance = t->GetFunction()->NewInstance();
context->Global()->Set(v8_str("obj2"), instance);
v8::TryCatch try_catch;
@ -8783,10 +8784,10 @@ THREADED_TEST(InterceptorStoreIC) {
0, 0, 0, v8_str("data"));
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"for (var i = 0; i < 1000; i++) {"
" o.x = 42;"
"}"));
"}");
}
@ -9254,11 +9255,11 @@ THREADED_TEST(InterceptorCallICFastApi_TrivialSignature) {
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"var result = 0;"
"for (var i = 0; i < 100; i++) {"
" result = o.method(41);"
"}"));
"}");
CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
CHECK_EQ(100, interceptor_call_count);
}
@ -9281,14 +9282,14 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature) {
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"o.foo = 17;"
"var receiver = {};"
"receiver.__proto__ = o;"
"var result = 0;"
"for (var i = 0; i < 100; i++) {"
" result = receiver.method(41);"
"}"));
"}");
CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
CHECK_EQ(100, interceptor_call_count);
}
@ -9311,7 +9312,7 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"o.foo = 17;"
"var receiver = {};"
"receiver.__proto__ = o;"
@ -9323,7 +9324,7 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
" saved_result = result;"
" receiver = {method: function(x) { return x - 1 }};"
" }"
"}"));
"}");
CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
CHECK_GE(interceptor_call_count, 50);
@ -9347,7 +9348,7 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"o.foo = 17;"
"var receiver = {};"
"receiver.__proto__ = o;"
@ -9359,7 +9360,7 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
" saved_result = result;"
" o.method = function(x) { return x - 1 };"
" }"
"}"));
"}");
CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
CHECK_GE(interceptor_call_count, 50);
@ -9384,7 +9385,7 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::TryCatch try_catch;
v8::Handle<Value> value(CompileRun(
CompileRun(
"o.foo = 17;"
"var receiver = {};"
"receiver.__proto__ = o;"
@ -9396,7 +9397,7 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
" saved_result = result;"
" receiver = 333;"
" }"
"}"));
"}");
CHECK(try_catch.HasCaught());
CHECK_EQ(v8_str("TypeError: Object 333 has no method 'method'"),
try_catch.Exception()->ToString());
@ -9423,7 +9424,7 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::TryCatch try_catch;
v8::Handle<Value> value(CompileRun(
CompileRun(
"o.foo = 17;"
"var receiver = {};"
"receiver.__proto__ = o;"
@ -9435,7 +9436,7 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
" saved_result = result;"
" receiver = {method: receiver.method};"
" }"
"}"));
"}");
CHECK(try_catch.HasCaught());
CHECK_EQ(v8_str("TypeError: Illegal invocation"),
try_catch.Exception()->ToString());
@ -9453,15 +9454,16 @@ THREADED_TEST(CallICFastApi_TrivialSignature) {
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
USE(templ);
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"var result = 0;"
"for (var i = 0; i < 100; i++) {"
" result = o.method(41);"
"}"));
"}");
CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
}
@ -9476,18 +9478,19 @@ THREADED_TEST(CallICFastApi_SimpleSignature) {
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"o.foo = 17;"
"var receiver = {};"
"receiver.__proto__ = o;"
"var result = 0;"
"for (var i = 0; i < 100; i++) {"
" result = receiver.method(41);"
"}"));
"}");
CHECK_EQ(42, context->Global()->Get(v8_str("result"))->Int32Value());
}
@ -9502,11 +9505,12 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss1) {
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"o.foo = 17;"
"var receiver = {};"
"receiver.__proto__ = o;"
@ -9518,7 +9522,7 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss1) {
" saved_result = result;"
" receiver = {method: function(x) { return x - 1 }};"
" }"
"}"));
"}");
CHECK_EQ(40, context->Global()->Get(v8_str("result"))->Int32Value());
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
@ -9533,12 +9537,13 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss2) {
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
context->Global()->Set(v8_str("o"), fun->NewInstance());
v8::TryCatch try_catch;
v8::Handle<Value> value(CompileRun(
CompileRun(
"o.foo = 17;"
"var receiver = {};"
"receiver.__proto__ = o;"
@ -9550,7 +9555,7 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss2) {
" saved_result = result;"
" receiver = 333;"
" }"
"}"));
"}");
CHECK(try_catch.HasCaught());
CHECK_EQ(v8_str("TypeError: Object 333 has no method 'method'"),
try_catch.Exception()->ToString());
@ -9578,7 +9583,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"proto = new Object();"
"proto.y = function(x) { return x + 1; };"
"proto.z = function(x) { return x - 1; };"
@ -9588,7 +9593,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { method = 'z'; };"
" result += o[method](41);"
"}"));
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
@ -9604,7 +9609,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
context->Global()->Set(v8_str("proto1"), templ->NewInstance());
keyed_call_ic_function =
v8_compile("function f(x) { return x - 1; }; f")->Run();
v8::Handle<Value> value(CompileRun(
CompileRun(
"o = new Object();"
"proto2 = new Object();"
"o.y = function(x) { return x + 1; };"
@ -9616,7 +9621,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { method = 'y'; };"
" result += o[method](41);"
"}"));
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
@ -9629,7 +9634,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
templ->SetNamedPropertyHandler(NoBlockGetterX);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"function inc(x) { return x + 1; };"
"inc(1);"
"function dec(x) { return x - 1; };"
@ -9642,7 +9647,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { method = 'y'; };"
" result += o[method](41);"
"}"));
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
@ -9655,7 +9660,7 @@ THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
LocalContext context;
context->Global()->Set(v8_str("o"), templ_o->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"function len(x) { return x.length; };"
"o.__proto__ = this;"
"var m = 'parseFloat';"
@ -9666,7 +9671,7 @@ THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
" saved_result = result;"
" };"
" result = o[m]('239');"
"}"));
"}");
CHECK_EQ(3, context->Global()->Get(v8_str("result"))->Int32Value());
CHECK_EQ(239, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
@ -9679,7 +9684,7 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
LocalContext context;
context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"var o = new Object();"
"o.__proto__ = proto;"
"o.method = function(x) { return x + 1; };"
@ -9688,7 +9693,7 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { o.method = function(x) { return x - 1; }; };"
" result += o[m](41);"
"}"));
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
@ -9701,7 +9706,7 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
LocalContext context;
context->Global()->Set(v8_str("o"), templ_o->NewInstance());
v8::Handle<Value> value(CompileRun(
CompileRun(
"var proto = new Object();"
"o.__proto__ = proto;"
"proto.method = function(x) { return x + 1; };"
@ -9710,7 +9715,7 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
"for (var i = 0; i < 10; i++) {"
" if (i == 5) { proto.method = function(x) { return x - 1; }; };"
" result += o[m](41);"
"}"));
"}");
CHECK_EQ(42*5 + 40*5, context->Global()->Get(v8_str("result"))->Int32Value());
}
@ -10627,6 +10632,7 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
env->Enter();
v8::Handle<Value> value = NestedScope(env);
v8::Handle<String> str(value->ToString());
CHECK(!str.IsEmpty());
env->Exit();
env.Dispose();
}
@ -10635,6 +10641,7 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
THREADED_TEST(ExternalAllocatedMemory) {
v8::HandleScope outer;
v8::Persistent<Context> env(Context::New());
CHECK(!env.IsEmpty());
const int kSize = 1024*1024;
CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(kSize), kSize);
CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(-kSize), 0);
@ -10973,6 +10980,7 @@ THREADED_TEST(AccessControlRepeatedContextCreation) {
i::FunctionTemplateInfo::cast(internal_template->constructor()));
CHECK(!constructor->access_check_info()->IsUndefined());
v8::Persistent<Context> context0(Context::New(NULL, global_template));
CHECK(!context0.IsEmpty());
CHECK(!constructor->access_check_info()->IsUndefined());
}
@ -13048,11 +13056,6 @@ static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
const int kLargeElementCount = kXSize * kYSize * 4;
ElementType* large_array_data =
static_cast<ElementType*>(malloc(kLargeElementCount * element_size));
i::Handle<ExternalArrayClass> large_array(
i::Handle<ExternalArrayClass>::cast(
FACTORY->NewExternalArray(kLargeElementCount,
array_type,
array_data)));
v8::Handle<v8::Object> large_obj = v8::Object::New();
// Set the elements to be the external array.
large_obj->SetIndexedPropertiesToExternalArrayData(large_array_data,
@ -13453,8 +13456,8 @@ TEST(CaptureStackTrace) {
v8::Handle<v8::String> overview_src = v8::String::New(overview_source);
v8::Handle<Value> overview_result(
v8::Script::New(overview_src, origin)->Run());
ASSERT(!overview_result.IsEmpty());
ASSERT(overview_result->IsObject());
CHECK(!overview_result.IsEmpty());
CHECK(overview_result->IsObject());
// Test getting DETAILED information.
const char *detailed_source =
@ -13473,8 +13476,8 @@ TEST(CaptureStackTrace) {
v8::Handle<v8::Script> detailed_script(
v8::Script::New(detailed_src, &detailed_origin));
v8::Handle<Value> detailed_result(detailed_script->Run());
ASSERT(!detailed_result.IsEmpty());
ASSERT(detailed_result->IsObject());
CHECK(!detailed_result.IsEmpty());
CHECK(detailed_result->IsObject());
}
@ -13894,6 +13897,7 @@ static v8::Handle<Value> SpaghettiIncident(const v8::Arguments& args) {
v8::HandleScope scope;
v8::TryCatch tc;
v8::Handle<v8::String> str(args[0]->ToString());
USE(str);
if (tc.HasCaught())
return tc.ReThrow();
return v8::Undefined();
@ -14038,6 +14042,17 @@ THREADED_TEST(ScriptOrigin) {
CHECK_EQ(0, script_origin_g.ResourceLineOffset()->Int32Value());
}
THREADED_TEST(FunctionGetInferredName) {
v8::HandleScope scope;
LocalContext env;
v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
v8::Handle<v8::String> script = v8::String::New(
"var foo = { bar : { baz : function() {}}}; var f = foo.bar.baz;");
v8::Script::Compile(script, &origin)->Run();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("f")));
CHECK_EQ("foo.bar.baz", *v8::String::AsciiValue(f->GetInferredName()));
}
THREADED_TEST(ScriptLineNumber) {
v8::HandleScope scope;
@ -15192,7 +15207,7 @@ TEST(RegExp) {
// RegExps are objects on which you can set properties.
re->Set(v8_str("property"), v8::Integer::New(32));
v8::Handle<v8::Value> value(CompileRun("re.property"));
ASSERT_EQ(32, value->Int32Value());
CHECK_EQ(32, value->Int32Value());
v8::TryCatch try_catch;
re = v8::RegExp::New(v8_str("foo["), v8::RegExp::kNone);

5
deps/v8/test/cctest/test-compiler.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -270,8 +270,7 @@ TEST(UncaughtThrow) {
CHECK(!fun.is_null());
bool has_pending_exception;
Handle<JSObject> global(Isolate::Current()->context()->global());
Handle<Object> result(
Execution::Call(fun, global, 0, NULL, &has_pending_exception));
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(has_pending_exception);
CHECK_EQ(42.0, Isolate::Current()->pending_exception()->
ToObjectChecked()->Number());

30
deps/v8/test/cctest/test-debug.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -856,7 +856,7 @@ static void DebugEventRemoveBreakPoint(v8::DebugEvent event,
if (event == v8::Break) {
break_point_hit_count++;
v8::Handle<v8::Function> fun(v8::Handle<v8::Function>::Cast(data));
CHECK(data->IsFunction());
ClearBreakPoint(debug_event_remove_break_point);
}
}
@ -1447,8 +1447,7 @@ TEST(BreakPointSurviveGC) {
// Test IC store break point with garbage collection.
{
v8::Local<v8::Function> bar(
CompileFunction(&env, "function foo(){}", "foo"));
CompileFunction(&env, "function foo(){}", "foo");
foo = CompileFunction(&env, "function foo(){bar=0;}", "foo");
SetBreakPoint(foo, 0);
}
@ -1456,8 +1455,7 @@ TEST(BreakPointSurviveGC) {
// Test IC load break point with garbage collection.
{
v8::Local<v8::Function> bar(
CompileFunction(&env, "function foo(){}", "foo"));
CompileFunction(&env, "function foo(){}", "foo");
foo = CompileFunction(&env, "bar=1;function foo(){var x=bar;}", "foo");
SetBreakPoint(foo, 0);
}
@ -1465,8 +1463,7 @@ TEST(BreakPointSurviveGC) {
// Test IC call break point with garbage collection.
{
v8::Local<v8::Function> bar(
CompileFunction(&env, "function foo(){}", "foo"));
CompileFunction(&env, "function foo(){}", "foo");
foo = CompileFunction(&env,
"function bar(){};function foo(){bar();}",
"foo");
@ -1476,8 +1473,7 @@ TEST(BreakPointSurviveGC) {
// Test return break point with garbage collection.
{
v8::Local<v8::Function> bar(
CompileFunction(&env, "function foo(){}", "foo"));
CompileFunction(&env, "function foo(){}", "foo");
foo = CompileFunction(&env, "function foo(){}", "foo");
SetBreakPoint(foo, 0);
}
@ -1485,8 +1481,7 @@ TEST(BreakPointSurviveGC) {
// Test non IC break point with garbage collection.
{
v8::Local<v8::Function> bar(
CompileFunction(&env, "function foo(){}", "foo"));
CompileFunction(&env, "function foo(){}", "foo");
foo = CompileFunction(&env, "function foo(){var bar=0;}", "foo");
SetBreakPoint(foo, 0);
}
@ -3751,8 +3746,7 @@ TEST(BreakOnException) {
v8::internal::Isolate::Current()->TraceException(false);
// Create functions for testing break on exception.
v8::Local<v8::Function> throws(
CompileFunction(&env, "function throws(){throw 1;}", "throws"));
CompileFunction(&env, "function throws(){throw 1;}", "throws");
v8::Local<v8::Function> caught =
CompileFunction(&env,
"function caught(){try {throws();} catch(e) {};}",
@ -5549,8 +5543,6 @@ TEST(DebuggerUnload) {
// Get the test functions again.
v8::Local<v8::Function> foo(v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("foo"))));
v8::Local<v8::Function> bar(v8::Local<v8::Function>::Cast(
env->Global()->Get(v8::String::New("foo"))));
foo->Call(env->Global(), 0, NULL);
CHECK_EQ(0, break_point_hit_count);
@ -6028,6 +6020,8 @@ TEST(DebugGetLoadedScripts) {
EmptyExternalStringResource source_ext_str;
v8::Local<v8::String> source = v8::String::NewExternal(&source_ext_str);
v8::Handle<v8::Script> evil_script(v8::Script::Compile(source));
// "use" evil_script to make the compiler happy.
(void) evil_script;
Handle<i::ExternalTwoByteString> i_source(
i::ExternalTwoByteString::cast(*v8::Utils::OpenHandle(*source)));
// This situation can happen if source was an external string disposed
@ -6675,7 +6669,7 @@ static void BreakMessageHandler(const v8::Debug::Message& message) {
break_point_hit_count++;
v8::HandleScope scope;
v8::Handle<v8::String> json(message.GetJSON());
message.GetJSON();
SendContinueCommand();
} else if (message.IsEvent() && message.GetEvent() == v8::AfterCompile) {
@ -6686,7 +6680,7 @@ static void BreakMessageHandler(const v8::Debug::Message& message) {
isolate->stack_guard()->DebugBreak();
// Force serialization to trigger some internal JS execution.
v8::Handle<v8::String> json(message.GetJSON());
message.GetJSON();
// Restore previous state.
if (is_debug_break) {

4
deps/v8/test/cctest/test-deoptimization.cc

@ -1,4 +1,4 @@
// Copyright 2007-2010 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -237,7 +237,7 @@ TEST(DeoptimizeRecursive) {
v8::Local<v8::Function> fun =
v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
Handle<v8::internal::JSFunction> f(v8::Utils::OpenHandle(*fun));
CHECK(!fun.IsEmpty());
}

15
deps/v8/test/cctest/test-mark-compact.cc

@ -526,12 +526,25 @@ static intptr_t MemoryInUse() {
TEST(BootUpMemoryUse) {
intptr_t initial_memory = MemoryInUse();
FLAG_crankshaft = false; // Avoid flakiness.
// Only Linux has the proc filesystem and only if it is mapped. If it's not
// there we just skip the test.
if (initial_memory >= 0) {
InitializeVM();
intptr_t booted_memory = MemoryInUse();
CHECK_LE(booted_memory - initial_memory, 16 * 1024 * 1024);
if (sizeof(initial_memory) == 8) {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(booted_memory - initial_memory, 6654 * 1024); // 6444.
} else {
CHECK_LE(booted_memory - initial_memory, 6777 * 1024); // 6596.
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
CHECK_LE(booted_memory - initial_memory, 6500 * 1024); // 6365.
} else {
CHECK_LE(booted_memory - initial_memory, 6654 * 1024); // 6424
}
}
}
}

4
deps/v8/test/cctest/test-parsing.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -230,7 +230,7 @@ TEST(Preparsing) {
CHECK_EQ(11, error_location.end_pos);
// Should not crash.
const char* message = pre_impl->BuildMessage();
i::Vector<const char*> args(pre_impl->BuildArgs());
pre_impl->BuildArgs();
CHECK_GT(strlen(message), 0);
}

20
deps/v8/test/mjsunit/compiler/regress-funarguments.js

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
// Test function.arguments.
function A() {}
@ -60,13 +62,16 @@ function hej(x) {
return o.g(x, "z");
}
function stress() {
for (var i=0; i<5000000; i++) o.g(i, "g");
for (var j=0; j<5000000; j++) hej(j);
function opt() {
for (var k=0; k<2; k++) {
for (var i=0; i<5; i++) o.g(i, "g");
for (var j=0; j<5; j++) hej(j);
}
%OptimizeFunctionOnNextCall(o.g);
%OptimizeFunctionOnNextCall(hej);
}
stress();
opt();
assertArrayEquals([0, "g"], o.g(0, "g"));
assertArrayEquals([1, "f"], o.g(1, "g"));
assertArrayEquals([0, "h"], hej(0));
@ -74,8 +79,7 @@ assertArrayEquals([1, "f"], hej(1));
o = new B();
stress();
opt();
assertArrayEquals([0, "f"], o.g(0, "g"));
assertArrayEquals([1, "g"], o.g(1, "g"));
assertArrayEquals([0, "f"], hej(0));

174
deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -34,6 +34,27 @@ var exception = false;
var testingConstructCall = false;
var input = [
{a: 1, b: 2},
{a: 3, b: 4},
{a: 5, b: 6},
{a: 7, b: 8},
{a: 9, b: 10}
];
var expected = [
{ locals: {a0: 1.01, b0: 2.02}, args: { names: ["i", "x0", "y0"], values: [0, 3.03, 4.04] } },
{ locals: {a1: 3.03, b1: 4.04}, args: { names: ["i", "x1", "y1"], values: [1, 5.05, 6.06] } },
{ locals: {a2: 5.05, b2: 6.06}, args: { names: ["i"], values: [2] } },
{ locals: {a3: 7.07, b3: 8.08}, args: { names: ["i", "x3", "y3", "z3"],
values: [3, 9.09, 10.10, undefined] }
},
{ locals: {a4: 9.09, b4: 10.10}, args: { names: ["i", "x4", "y4"], values: [4, 11.11, 12.12] } }
];
function arraySum(arr) {
return arr.reduce(function (a, b) { return a + b; }, 0);
}
function listener(event, exec_state, event_data, data) {
try {
@ -44,42 +65,63 @@ function listener(event, exec_state, event_data, data) {
for (var i = 0; i < exec_state.frameCount(); i++) {
var frame = exec_state.frame(i);
if (i < exec_state.frameCount() - 1) {
var expected_a = i * 2 + 1 + (i * 2 + 1) / 100;
var expected_b = i * 2 + 2 + (i * 2 + 2) / 100;
var expected_x = (i + 1) * 2 + 1 + ((i + 1) * 2 + 1) / 100;
var expected_y = (i + 1) * 2 + 2 + ((i + 1) * 2 + 2) / 100;
// All frames except the bottom one has normal variables a and b.
var a = ('a' === frame.localName(0)) ? 0 : 1;
var b = 1 - a;
assertEquals('a', frame.localName(a));
assertEquals('b', frame.localName(b));
assertEquals(expected_a, frame.localValue(a).value());
assertEquals(expected_b, frame.localValue(b).value());
// All frames except the bottom one has arguments variables x and y.
assertEquals('x', frame.argumentName(0));
assertEquals('y', frame.argumentName(1));
assertEquals(expected_x, frame.argumentValue(0).value());
assertEquals(expected_y, frame.argumentValue(1).value());
var expected_args = expected[i].args;
var expected_locals = expected[i].locals;
// All frames except the bottom one have expected locals.
var locals = {};
for (var j = 0; j < frame.localCount(); j++) {
locals[frame.localName(j)] = frame.localValue(j).value();
}
assertPropertiesEqual(expected_locals, locals);
// All frames except the bottom one have expected arguments.
for (var j = 0; j < expected_args.names.length; j++) {
assertEquals(expected_args.names[j], frame.argumentName(j));
assertEquals(expected_args.values[j], frame.argumentValue(j).value());
}
// All frames except the bottom one have two scopes.
assertEquals(2, frame.scopeCount());
assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
assertEquals(expected_a, frame.scope(0).scopeObject().value()['a']);
assertEquals(expected_b, frame.scope(0).scopeObject().value()['b']);
assertEquals(expected_x, frame.scope(0).scopeObject().value()['x']);
assertEquals(expected_y, frame.scope(0).scopeObject().value()['y']);
Object.keys(expected_locals).forEach(function (name) {
assertEquals(expected_locals[name], frame.scope(0).scopeObject().value()[name]);
});
for (var j = 0; j < expected_args.names.length; j++) {
var arg_name = expected_args.names[j];
var arg_value = expected_args.values[j];
assertEquals(arg_value, frame.scope(0).scopeObject().value()[arg_name]);
}
// Evaluate in the inlined frame.
assertEquals(expected_a, frame.evaluate('a').value());
assertEquals(expected_x, frame.evaluate('x').value());
assertEquals(expected_x, frame.evaluate('arguments[0]').value());
assertEquals(expected_a + expected_b + expected_x + expected_y,
frame.evaluate('a + b + x + y').value());
assertEquals(expected_x + expected_y,
frame.evaluate('arguments[0] + arguments[1]').value());
Object.keys(expected_locals).forEach(function (name) {
assertEquals(expected_locals[name], frame.evaluate(name).value());
});
for (var j = 0; j < expected_args.names.length; j++) {
var arg_name = expected_args.names[j];
var arg_value = expected_args.values[j];
assertEquals(arg_value, frame.evaluate(arg_name).value());
assertEquals(arg_value, frame.evaluate('arguments['+j+']').value());
}
var expected_args_sum = arraySum(expected_args.values);
var expected_locals_sum =
arraySum(Object.keys(expected_locals).
map(function (k) { return expected_locals[k]; }));
assertEquals(expected_locals_sum + expected_args_sum,
frame.evaluate(Object.keys(expected_locals).join('+') + ' + ' +
expected_args.names.join('+')).value());
var arguments_sum = expected_args.names.map(function(_, idx) {
return "arguments[" + idx + "]";
}).join('+');
assertEquals(expected_args_sum,
frame.evaluate(arguments_sum).value());
} else {
// The bottom frame only has the global scope.
assertEquals(1, frame.scopeCount());
@ -121,62 +163,64 @@ function listener(event, exec_state, event_data, data) {
listenerComplete = true;
}
} catch (e) {
exception = e
exception = e.toString() + e.stack;
};
};
f();f();f();
for (var i = 0; i < 4; i++) f(input.length - 1, 11.11, 12.12);
%OptimizeFunctionOnNextCall(f);
f();
f(input.length - 1, 11.11, 12.12);
// Add the debug event listener.
Debug.setListener(listener);
function h(x, y) {
var a = 1;
var b = 2;
a = a + a / 100;
b = b + b / 100;
function h(i, x0, y0) {
var a0 = input[i].a;
var b0 = input[i].b;
a0 = a0 + a0 / 100;
b0 = b0 + b0 / 100;
debugger; // Breakpoint.
};
function g3(x, y) {
var a = 3;
var b = 4;
a = a + a / 100;
b = b + b / 100;
h(a, b);
return a+b;
function g3(i, x1, y1) {
var a1 = input[i].a;
var b1 = input[i].b;
a1 = a1 + a1 / 100;
b1 = b1 + b1 / 100;
h(i - 1, a1, b1);
return a1+b1;
};
function g2(x, y) {
var a = 5;
var b = 6;
a = a + a / 100;
b = b + b / 100;
g3(a, b);
function g2(i) {
var a2 = input[i].a;
var b2 = input[i].b;
a2 = a2 + a2 / 100;
b2 = b2 + b2 / 100;
g3(i - 1, a2, b2);
};
function g1(x, y) {
var a = 7;
var b = 8;
a = a + a / 100;
b = b + b / 100;
g2(a, b);
function g1(i, x3, y3, z3) {
var a3 = input[i].a;
var b3 = input[i].b;
a3 = a3 + a3 / 100;
b3 = b3 + b3 / 100;
g2(i - 1, a3, b3);
};
function f(x, y) {
var a = 9;
var b = 10;
a = a + a / 100;
b = b + b / 100;
g1(a, b);
function f(i, x4, y4) {
var a4 = input[i].a;
var b4 = input[i].b;
a4 = a4 + a4 / 100;
b4 = b4 + b4 / 100;
g1(i - 1, a4, b4);
};
// Test calling f normally and as a constructor.
f(11.11, 12.12);
f(input.length - 1, 11.11, 12.12);
f(input.length - 1, 11.11, 12.12, "");
testingConstructCall = true;
new f(11.11, 12.12);
new f(input.length - 1, 11.11, 12.12);
new f(input.length - 1, 11.11, 12.12, "");
// Make sure that the debug event listener was invoked.
assertFalse(exception, "exception in listener " + exception)

160
deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -34,6 +34,17 @@ var exception = false;
var testingConstructCall = false;
var expected = [
{ locals: {a0: 1, b0: 2}, args: { names: ["i", "x0", "y0"], values: [0, 3, 4] } },
{ locals: {a1: 3, b1: 4}, args: { names: ["i", "x1", "y1"], values: [1, 5, 6] } },
{ locals: {a2: 5, b2: 6}, args: { names: ["i"], values: [2] } },
{ locals: {a3: 7, b3: 8}, args: { names: ["i", "x3", "y3", "z3"], values: [3, 9, 10, undefined] } },
{ locals: {a4: 9, b4: 10}, args: { names: ["i", "x4", "y4"], values: [4, 11, 12] } }
];
function arraySum(arr) {
return arr.reduce(function (a, b) { return a + b; }, 0);
}
function listener(event, exec_state, event_data, data) {
try {
@ -44,42 +55,63 @@ function listener(event, exec_state, event_data, data) {
for (var i = 0; i < exec_state.frameCount(); i++) {
var frame = exec_state.frame(i);
if (i < exec_state.frameCount() - 1) {
var expected_a = i * 2 + 1;
var expected_b = i * 2 + 2;
var expected_x = (i + 1) * 2 + 1;
var expected_y = (i + 1) * 2 + 2;
// All frames except the bottom one has normal variables a and b.
var a = ('a' === frame.localName(0)) ? 0 : 1;
var b = 1 - a;
assertEquals('a', frame.localName(a));
assertEquals('b', frame.localName(b));
assertEquals(expected_a, frame.localValue(a).value());
assertEquals(expected_b, frame.localValue(b).value());
// All frames except the bottom one has arguments variables x and y.
assertEquals('x', frame.argumentName(0));
assertEquals('y', frame.argumentName(1));
assertEquals(expected_x, frame.argumentValue(0).value());
assertEquals(expected_y, frame.argumentValue(1).value());
var expected_args = expected[i].args;
var expected_locals = expected[i].locals;
// All frames except the bottom one have expected locals.
var locals = {};
for (var j = 0; j < frame.localCount(); j++) {
locals[frame.localName(j)] = frame.localValue(j).value();
}
assertPropertiesEqual(expected_locals, locals);
// All frames except the bottom one have expected arguments.
for (var j = 0; j < expected_args.names.length; j++) {
assertEquals(expected_args.names[j], frame.argumentName(j));
assertEquals(expected_args.values[j], frame.argumentValue(j).value());
}
// All frames except the bottom one have two scopes.
assertEquals(2, frame.scopeCount());
assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
assertEquals(expected_a, frame.scope(0).scopeObject().value()['a']);
assertEquals(expected_b, frame.scope(0).scopeObject().value()['b']);
assertEquals(expected_x, frame.scope(0).scopeObject().value()['x']);
assertEquals(expected_y, frame.scope(0).scopeObject().value()['y']);
Object.keys(expected_locals).forEach(function (name) {
assertEquals(expected_locals[name], frame.scope(0).scopeObject().value()[name]);
});
for (var j = 0; j < expected_args.names.length; j++) {
var arg_name = expected_args.names[j];
var arg_value = expected_args.values[j];
assertEquals(arg_value, frame.scope(0).scopeObject().value()[arg_name]);
}
// Evaluate in the inlined frame.
assertEquals(expected_a, frame.evaluate('a').value());
assertEquals(expected_x, frame.evaluate('x').value());
assertEquals(expected_x, frame.evaluate('arguments[0]').value());
assertEquals(expected_a + expected_b + expected_x + expected_y,
frame.evaluate('a + b + x + y').value());
assertEquals(expected_x + expected_y,
frame.evaluate('arguments[0] + arguments[1]').value());
Object.keys(expected_locals).forEach(function (name) {
assertEquals(expected_locals[name], frame.evaluate(name).value());
});
for (var j = 0; j < expected_args.names.length; j++) {
var arg_name = expected_args.names[j];
var arg_value = expected_args.values[j];
assertEquals(arg_value, frame.evaluate(arg_name).value());
assertEquals(arg_value, frame.evaluate('arguments['+j+']').value());
}
var expected_args_sum = arraySum(expected_args.values);
var expected_locals_sum =
arraySum(Object.keys(expected_locals).
map(function (k) { return expected_locals[k]; }));
assertEquals(expected_locals_sum + expected_args_sum,
frame.evaluate(Object.keys(expected_locals).join('+') + ' + ' +
expected_args.names.join('+')).value());
var arguments_sum = expected_args.names.map(function(_, idx) {
return "arguments[" + idx + "]";
}).join('+');
assertEquals(expected_args_sum,
frame.evaluate(arguments_sum).value());
} else {
// The bottom frame only has the global scope.
assertEquals(1, frame.scopeCount());
@ -121,51 +153,53 @@ function listener(event, exec_state, event_data, data) {
listenerComplete = true;
}
} catch (e) {
exception = e.stack;
exception = e.toString() + e.stack;
};
};
f();f();f();
for (var i = 0; i < 4; i++) f(expected.length - 1, 11, 12);
%OptimizeFunctionOnNextCall(f);
f();
f(expected.length - 1, 11, 12);
// Add the debug event listener.
Debug.setListener(listener);
function h(x, y) {
var a = 1;
var b = 2;
function h(i, x0, y0) {
var a0 = expected[i].locals.a0;
var b0 = expected[i].locals.b0;
debugger; // Breakpoint.
};
function g3(x, y) {
var a = 3;
var b = 4;
h(a, b);
};
function g2(x, y) {
var a = 5;
var b = 6;
g3(a, b);
};
function g1(x, y) {
var a = 7;
var b = 8;
g2(a, b);
};
function f(x, y) {
var a = 9;
var b = 10;
g1(a, b);
};
}
function g3(i, x1, y1) {
var a1 = expected[i].locals.a1;
var b1 = expected[i].locals.b1;
h(i - 1, a1, b1);
}
function g2(i) {
var a2 = expected[i].locals.a2;
var b2 = expected[i].locals.b2;
g3(i - 1, a2, b2);
}
function g1(i, x3, y3, z3) {
var a3 = expected[i].locals.a3;
var b3 = expected[i].locals.b3;
g2(i - 1, a3, b3);
}
function f(i, x4, y4) {
var a4 = expected[i].locals.a4;
var b4 = expected[i].locals.b4;
g1(i - 1, a4, b4);
}
// Test calling f normally and as a constructor.
f(11, 12);
f(expected.length - 1, 11, 12);
f(expected.length - 1, 11, 12, 0);
testingConstructCall = true;
new f(11, 12);
new f(expected.length - 1, 11, 12);
new f(expected.length - 1, 11, 12, 0);
// Make sure that the debug event listener was invoked.
assertFalse(exception, "exception in listener " + exception)

103
deps/v8/test/mjsunit/regress/regress-1229.js

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,59 +29,116 @@
// Check that %NewObjectFromBound works correctly when called from optimized
// frame.
function foo(x, y, z) {
function foo1(x, y, z) {
assertEquals(1, x);
assertEquals(2, y);
assertEquals(3, z);
}
var foob = foo.bind({}, 1);
function foo2(x, y, z) {
assertEquals(1, x);
assertEquals(2, y);
assertEquals(undefined, z);
}
function f(y, z) {
return %NewObjectFromBound(foob);
function foo3(x, y, z) {
assertEquals(1, x);
assertEquals(2, y);
assertEquals(3, z);
}
var foob1 = foo1.bind({}, 1);
var foob2 = foo2.bind({}, 1);
var foob3 = foo3.bind({}, 1);
function f1(y, z) {
return %NewObjectFromBound(foob1);
}
function f2(y, z) {
return %NewObjectFromBound(foob2);
}
function f3(y, z) {
return %NewObjectFromBound(foob3);
}
// Check that %NewObjectFromBound looks at correct frame for inlined function.
function g(z, y) {
return f(y, z); /* f should be inlined into g, note rotated arguments */
function g1(z, y) {
return f1(y, z); /* f should be inlined into g, note rotated arguments */
}
function g2(z, y, x) {
return f2(y); /* f should be inlined into g, note argument count mismatch */
}
function g3(z, y, x) {
return f3(x, y, z); /* f should be inlined into g, note argument count mismatch */
}
// Check that %NewObjectFromBound looks at correct frame for inlined function.
function ff(x) { }
function h(z2, y2) {
function h1(z2, y2) {
var local_z = z2 >> 1;
ff(local_z);
var local_y = y2 >> 1;
ff(local_y);
return f(local_y, local_z); /* f should be inlined into h */
return f1(local_y, local_z); /* f should be inlined into h */
}
for (var i = 0; i < 5; i++) f(2, 3);
%OptimizeFunctionOnNextCall(f);
f(2, 3);
function h2(z2, y2, x2) {
var local_z = z2 >> 1;
ff(local_z);
var local_y = y2 >> 1;
ff(local_y);
return f2(local_y); /* f should be inlined into h */
}
function h3(z2, y2, x2) {
var local_z = z2 >> 1;
ff(local_z);
var local_y = y2 >> 1;
ff(local_y);
var local_x = x2 >> 1;
ff(local_x);
return f3(local_x, local_y, local_z); /* f should be inlined into h */
}
for (var i = 0; i < 5; i++) g(3, 2);
%OptimizeFunctionOnNextCall(g);
g(3, 2);
for (var i = 0; i < 5; i++) h(6, 4);
%OptimizeFunctionOnNextCall(h);
h(6, 4);
function invoke(f, args) {
for (var i = 0; i < 5; i++) f.apply(this, args);
%OptimizeFunctionOnNextCall(f);
f.apply(this, args);
}
invoke(f1, [2, 3]);
invoke(f2, [2]);
invoke(f3, [2, 3, 4]);
invoke(g1, [3, 2]);
invoke(g2, [3, 2, 4]);
invoke(g3, [4, 3, 2]);
invoke(h1, [6, 4]);
invoke(h2, [6, 4, 8]);
invoke(h3, [8, 6, 4]);
// Check that %_IsConstructCall returns correct value when inlined
var NON_CONSTRUCT_MARKER = {};
var CONSTRUCT_MARKER = {};
function baz() {
function baz(x) {
return (!%_IsConstructCall()) ? NON_CONSTRUCT_MARKER : CONSTRUCT_MARKER;
}
function bar(x, y, z) {
var non_construct = baz(0); /* baz should be inlined */
assertEquals(non_construct, NON_CONSTRUCT_MARKER);
var non_construct = baz(); /* baz should be inlined */
assertEquals(non_construct, NON_CONSTRUCT_MARKER);
var construct = new baz();
var non_construct = baz(0, 0); /* baz should be inlined */
assertEquals(non_construct, NON_CONSTRUCT_MARKER);
var construct = new baz(0);
assertEquals(construct, CONSTRUCT_MARKER);
}
for (var i = 0; i < 5; i++) new bar(1, 2, 3);
%OptimizeFunctionOnNextCall(bar);
bar(1, 2, 3);
invoke(bar, [1, 2, 3]);

5
deps/v8/tools/js2c.py

@ -128,12 +128,13 @@ def ExpandMacros(lines, macros):
end = pattern_match.end()
assert lines[end - 1] == '('
last_match = end
arg_index = 0
arg_index = [0] # Wrap state into array, to work around Python "scoping"
mapping = { }
def add_arg(str):
# Remember to expand recursively in the arguments
replacement = ExpandMacros(str.strip(), macros)
mapping[macro.args[arg_index]] = replacement
mapping[macro.args[arg_index[0]]] = replacement
arg_index[0] += 1
while end < len(lines) and height > 0:
# We don't count commas at higher nesting levels.
if lines[end] == ',' and height == 1:

4
deps/v8/tools/test.py

@ -1211,6 +1211,7 @@ def BuildOptions():
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--mips-arch-variant", help="mips architecture variant: mips32r1/mips32r2", default="mips32r2");
result.add_option("--shell", help="Path to V8 shell", default="d8")
result.add_option("--isolates", help="Whether to test isolates", default=False, action="store_true")
result.add_option("--store-unexpected-output",
@ -1272,6 +1273,9 @@ def ProcessOptions(options):
if options.snapshot:
options.scons_flags.append("snapshot=on")
global VARIANT_FLAGS
if options.mips_arch_variant:
options.scons_flags.append("mips_arch_variant=" + options.mips_arch_variant)
if options.stress_only:
VARIANT_FLAGS = [['--stress-opt', '--always-opt']]
if options.nostress:
