Browse Source

Upgrade V8 to 3.8.6

v0.7.4-release
Ryan Dahl 13 years ago
parent
commit
60040a4f36
  1. 19
      deps/v8/ChangeLog
  2. 20
      deps/v8/include/v8.h
  3. 34
      deps/v8/src/api.cc
  4. 9
      deps/v8/src/arm/assembler-arm-inl.h
  5. 6
      deps/v8/src/arm/assembler-arm.cc
  6. 10
      deps/v8/src/arm/assembler-arm.h
  7. 13
      deps/v8/src/arm/builtins-arm.cc
  8. 44
      deps/v8/src/arm/code-stubs-arm.cc
  9. 2
      deps/v8/src/arm/cpu-arm.cc
  10. 7
      deps/v8/src/arm/deoptimizer-arm.cc
  11. 2
      deps/v8/src/arm/full-codegen-arm.cc
  12. 42
      deps/v8/src/arm/lithium-arm.cc
  13. 14
      deps/v8/src/arm/lithium-arm.h
  14. 78
      deps/v8/src/arm/lithium-codegen-arm.cc
  15. 10
      deps/v8/src/arm/lithium-codegen-arm.h
  16. 21
      deps/v8/src/arm/lithium-gap-resolver-arm.cc
  17. 119
      deps/v8/src/arm/macro-assembler-arm.cc
  18. 34
      deps/v8/src/arm/macro-assembler-arm.h
  19. 8
      deps/v8/src/arm/simulator-arm.cc
  20. 44
      deps/v8/src/arm/stub-cache-arm.cc
  21. 2
      deps/v8/src/assembler.h
  22. 44
      deps/v8/src/atomicops_internals_x86_macosx.h
  23. 152
      deps/v8/src/bootstrapper.cc
  24. 2
      deps/v8/src/bootstrapper.h
  25. 2
      deps/v8/src/builtins.cc
  26. 2
      deps/v8/src/builtins.h
  27. 6
      deps/v8/src/code-stubs.cc
  28. 2
      deps/v8/src/compiler.cc
  29. 4
      deps/v8/src/cpu-profiler.cc
  30. 6
      deps/v8/src/cpu-profiler.h
  31. 2
      deps/v8/src/cpu.h
  32. 2
      deps/v8/src/d8-debug.cc
  33. 189
      deps/v8/src/d8.cc
  34. 3
      deps/v8/src/d8.h
  35. 2
      deps/v8/src/debug-debugger.js
  36. 12
      deps/v8/src/debug.cc
  37. 6
      deps/v8/src/debug.h
  38. 19
      deps/v8/src/elements.cc
  39. 4
      deps/v8/src/execution.cc
  40. 2
      deps/v8/src/execution.h
  41. 48
      deps/v8/src/factory.cc
  42. 17
      deps/v8/src/factory.h
  43. 8
      deps/v8/src/flag-definitions.h
  44. 5
      deps/v8/src/frames.cc
  45. 4
      deps/v8/src/full-codegen.cc
  46. 44
      deps/v8/src/gdb-jit.cc
  47. 162
      deps/v8/src/handles.cc
  48. 65
      deps/v8/src/handles.h
  49. 2
      deps/v8/src/heap-inl.h
  50. 2
      deps/v8/src/heap-profiler.cc
  51. 2
      deps/v8/src/heap-profiler.h
  52. 103
      deps/v8/src/heap.cc
  53. 31
      deps/v8/src/heap.h
  54. 25
      deps/v8/src/hydrogen-instructions.cc
  55. 45
      deps/v8/src/hydrogen-instructions.h
  56. 99
      deps/v8/src/hydrogen.cc
  57. 2
      deps/v8/src/hydrogen.h
  58. 6
      deps/v8/src/ia32/assembler-ia32.cc
  59. 2
      deps/v8/src/ia32/builtins-ia32.cc
  60. 37
      deps/v8/src/ia32/code-stubs-ia32.cc
  61. 2
      deps/v8/src/ia32/cpu-ia32.cc
  62. 2
      deps/v8/src/ia32/deoptimizer-ia32.cc
  63. 2
      deps/v8/src/ia32/full-codegen-ia32.cc
  64. 94
      deps/v8/src/ia32/lithium-codegen-ia32.cc
  65. 9
      deps/v8/src/ia32/lithium-codegen-ia32.h
  66. 55
      deps/v8/src/ia32/lithium-ia32.cc
  67. 19
      deps/v8/src/ia32/lithium-ia32.h
  68. 124
      deps/v8/src/ia32/macro-assembler-ia32.cc
  69. 20
      deps/v8/src/ia32/macro-assembler-ia32.h
  70. 42
      deps/v8/src/ia32/stub-cache-ia32.cc
  71. 6
      deps/v8/src/ic.cc
  72. 2
      deps/v8/src/incremental-marking.cc
  73. 5
      deps/v8/src/incremental-marking.h
  74. 4
      deps/v8/src/inspector.cc
  75. 8
      deps/v8/src/inspector.h
  76. 59
      deps/v8/src/isolate.cc
  77. 7
      deps/v8/src/isolate.h
  78. 5
      deps/v8/src/json-parser.h
  79. 2
      deps/v8/src/jsregexp.cc
  80. 4
      deps/v8/src/lithium-allocator.cc
  81. 14
      deps/v8/src/lithium.h
  82. 4
      deps/v8/src/liveedit.cc
  83. 4
      deps/v8/src/liveobjectlist-inl.h
  84. 48
      deps/v8/src/liveobjectlist.cc
  85. 10
      deps/v8/src/liveobjectlist.h
  86. 6
      deps/v8/src/log.cc
  87. 6
      deps/v8/src/log.h
  88. 1
      deps/v8/src/mark-compact.cc
  89. 2
      deps/v8/src/mips/assembler-mips-inl.h
  90. 6
      deps/v8/src/mips/assembler-mips.cc
  91. 13
      deps/v8/src/mips/builtins-mips.cc
  92. 37
      deps/v8/src/mips/code-stubs-mips.cc
  93. 4
      deps/v8/src/mips/constants-mips.h
  94. 2
      deps/v8/src/mips/cpu-mips.cc
  95. 7
      deps/v8/src/mips/deoptimizer-mips.cc
  96. 2
      deps/v8/src/mips/full-codegen-mips.cc
  97. 25
      deps/v8/src/mips/lithium-codegen-mips.cc
  98. 8
      deps/v8/src/mips/lithium-codegen-mips.h
  99. 4
      deps/v8/src/mips/lithium-gap-resolver-mips.cc
  100. 40
      deps/v8/src/mips/lithium-mips.cc

19
deps/v8/ChangeLog

@ -1,3 +1,22 @@
2012-01-16: Version 3.8.6
Add primitive WebGL array support to d8.
Improve heap size estimation (issue 1893).
Hash collision DOS workaround extended from string keys
to numeric keys.
Provide an API for iterating through all external strings referenced
from the JS heap.
Adjust position recorded for call expressions. http://crbug.com/109195
Fix GC crash related to instanceof. http://crbug.com/109448
Performance improvements and bug fixes.
2012-01-05: Version 3.8.5
Fix broken test that assumes that no GC can clear the regexp cache (GC

20
deps/v8/include/v8.h

@ -2848,6 +2848,17 @@ class V8EXPORT StartupDataDecompressor { // NOLINT
*/
typedef bool (*EntropySource)(unsigned char* buffer, size_t length);
/**
* Interface for iterating though all external resources in the heap.
*/
class V8EXPORT ExternalResourceVisitor { // NOLINT
public:
virtual ~ExternalResourceVisitor() {}
virtual void VisitExternalString(Handle<String> string) {}
};
/**
* Container class for static utility functions.
*/
@ -3203,6 +3214,13 @@ class V8EXPORT V8 {
*/
static void GetHeapStatistics(HeapStatistics* heap_statistics);
/**
* Iterates through all external resources referenced from current isolate
* heap. This method is not expected to be used except for debugging purposes
* and may be quite slow.
*/
static void VisitExternalResources(ExternalResourceVisitor* visitor);
/**
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
@ -3816,7 +3834,7 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kJSObjectType = 0xa6;
static const int kJSObjectType = 0xa7;
static const int kFirstNonstringType = 0x80;
static const int kForeignType = 0x85;

34
deps/v8/src/api.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -2165,6 +2165,11 @@ bool Value::IsInt32() const {
if (obj->IsSmi()) return true;
if (obj->IsNumber()) {
double value = obj->Number();
static const i::DoubleRepresentation minus_zero(-0.0);
i::DoubleRepresentation rep(value);
if (rep.bits == minus_zero.bits) {
return false;
}
return i::FastI2D(i::FastD2I(value)) == value;
}
return false;
@ -2177,6 +2182,11 @@ bool Value::IsUint32() const {
if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
if (obj->IsNumber()) {
double value = obj->Number();
static const i::DoubleRepresentation minus_zero(-0.0);
i::DoubleRepresentation rep(value);
if (rep.bits == minus_zero.bits) {
return false;
}
return i::FastUI2D(i::FastD2UI(value)) == value;
}
return false;
@ -2739,7 +2749,7 @@ bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetElement(
i::Handle<i::Object> obj = i::JSObject::SetElement(
self,
index,
value_obj,
@ -2845,7 +2855,7 @@ Local<Value> v8::Object::GetPrototype() {
return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> result = i::GetPrototype(self);
i::Handle<i::Object> result(self->GetPrototype());
return Utils::ToLocal(result);
}
@ -2999,7 +3009,7 @@ bool v8::Object::Delete(v8::Handle<String> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
return i::DeleteProperty(self, key_obj)->IsTrue();
return i::JSObject::DeleteProperty(self, key_obj)->IsTrue();
}
@ -3020,7 +3030,7 @@ bool v8::Object::Delete(uint32_t index) {
ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
return i::DeleteElement(self, index)->IsTrue();
return i::JSObject::DeleteElement(self, index)->IsTrue();
}
@ -3225,7 +3235,7 @@ int v8::Object::GetIdentityHash() {
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
return i::GetIdentityHash(self);
return i::JSObject::GetIdentityHash(self);
}
@ -3238,7 +3248,8 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj);
i::Handle<i::Object> result =
i::JSObject::SetHiddenProperty(self, key_obj, value_obj);
return *result == *self;
}
@ -4038,6 +4049,13 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
}
void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::V8::VisitExternalResources");
isolate->heap()->VisitExternalResources(visitor);
}
bool v8::V8::IdleNotification(int hint) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
@ -5542,7 +5560,7 @@ void Debug::DisableAgent() {
void Debug::ProcessDebugMessages() {
i::Execution::ProcessDebugMesssages(true);
i::Execution::ProcessDebugMessages(true);
}
Local<Context> Debug::GetDebugContext() {

9
deps/v8/src/arm/assembler-arm-inl.h

@ -32,7 +32,7 @@
// The original source code covered by the above license above has been modified
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
#define V8_ARM_ASSEMBLER_ARM_INL_H_
@ -46,6 +46,13 @@ namespace v8 {
namespace internal {
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kScratchDoubleReg));
return reg.code();
}
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.

6
deps/v8/src/arm/assembler-arm.cc

@ -317,7 +317,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
own_buffer_ = false;
}
// Setup buffer pointers.
// Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@ -349,7 +349,7 @@ void Assembler::GetCode(CodeDesc* desc) {
CheckConstPool(true, false);
ASSERT(num_pending_reloc_info_ == 0);
// Setup code descriptor.
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@ -2446,7 +2446,7 @@ void Assembler::GrowBuffer() {
}
CHECK_GT(desc.buffer_size, 0); // no overflow
// Setup new buffer.
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();

10
deps/v8/src/arm/assembler-arm.h

@ -32,7 +32,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
@ -176,14 +176,11 @@ struct DwVfpRegister {
static const int kNumAllocatableRegisters = kNumRegisters -
kNumReservedRegisters;
static int ToAllocationIndex(DwVfpRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
inline static int ToAllocationIndex(DwVfpRegister reg);
static DwVfpRegister FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return from_code(index + 1);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
@ -307,6 +304,7 @@ const DwVfpRegister d15 = { 15 };
static const DwVfpRegister& kFirstCalleeSavedDoubleReg = d8;
static const DwVfpRegister& kLastCalleeSavedDoubleReg = d15;
static const DwVfpRegister& kDoubleRegZero = d14;
static const DwVfpRegister& kScratchDoubleReg = d15;
// Coprocessor register

13
deps/v8/src/arm/builtins-arm.cc

@ -333,7 +333,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r5,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r3, r4);
// Setup return value, remove receiver from stack and return.
// Set up return value, remove receiver from stack and return.
__ mov(r0, r2);
__ add(sp, sp, Operand(kPointerSize));
__ Jump(lr);
@ -376,7 +376,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
true,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, r2, r4);
// Setup return value, remove receiver and argument from stack and return.
// Set up return value, remove receiver and argument from stack and return.
__ mov(r0, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Jump(lr);
@ -951,10 +951,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// sp[4]: number of arguments (smi-tagged)
__ ldr(r3, MemOperand(sp, 4 * kPointerSize));
// Setup pointer to last argument.
// Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Setup number of arguments for function call below
// Set up number of arguments for function call below
__ mov(r0, Operand(r3, LSR, kSmiTagSize));
// Copy arguments and receiver to the expression stack.
@ -1082,10 +1082,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Set up the context from the function argument.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Set up the roots register.
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
__ mov(r10, Operand(roots_array_start));
__ InitializeRootRegister();
// Push the function and the receiver onto the stack.
__ push(r1);

44
deps/v8/src/arm/code-stubs-arm.cc

@ -156,13 +156,13 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
// Setup the object header.
// Set up the object header.
__ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// Setup the fixed slots.
// Set up the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
@ -207,7 +207,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Load the serialized scope info from the stack.
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
// Setup the object header.
// Set up the object header.
__ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
@ -229,7 +229,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Setup the fixed slots.
// Set up the fixed slots.
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
@ -717,7 +717,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
// Get the absolute value of the object (as an unsigned integer).
__ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
// Get mantisssa[51:20].
// Get mantissa[51:20].
// Get the position of the first set bit.
__ CountLeadingZeros(dst1, int_scratch, scratch2);
@ -951,7 +951,7 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// non zero bits left. So we need the (30 - exponent) last bits of the
// 31 higher bits of the mantissa to be null.
// Because bits [21:0] are null, we can check instead that the
// (32 - exponent) last bits of the 32 higher bits of the mantisssa are null.
// (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
// Get the 32 higher bits of the mantissa in dst.
__ Ubfx(dst,
@ -3842,7 +3842,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
// Set up argc and the builtin function in callee-saved registers.
__ mov(r4, Operand(r0));
__ mov(r5, Operand(r1));
@ -3919,7 +3919,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r2: receiver
// r3: argc
// Setup argv in r4.
// Set up argv in r4.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
if (CpuFeatures::IsSupported(VFP3)) {
offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
@ -3942,7 +3942,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ ldr(r5, MemOperand(r5));
__ Push(r8, r7, r6, r5);
// Setup frame pointer for the frame to be pushed.
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
@ -4081,7 +4081,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
const Register inline_site = r9;
const Register scratch = r2;
const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
Label slow, loop, is_instance, is_not_instance, not_js_object;
@ -4132,7 +4132,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ sub(inline_site, lr, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValueLocation(inline_site, scratch);
__ str(map, MemOperand(scratch));
__ ldr(scratch, MemOperand(scratch));
__ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
@ -4401,7 +4402,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ str(r3, FieldMemOperand(r0, i));
}
// Setup the callee in-object property.
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ ldr(r3, MemOperand(sp, 2 * kPointerSize));
const int kCalleeOffset = JSObject::kHeaderSize +
@ -4414,7 +4415,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Heap::kArgumentsLengthIndex * kPointerSize;
__ str(r2, FieldMemOperand(r0, kLengthOffset));
// Setup the elements pointer in the allocated arguments object.
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
// it will point to the backing store.
__ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
@ -4509,7 +4510,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Ret();
// Do the runtime call to allocate the arguments object.
// r2 = argument count (taggged)
// r2 = argument count (tagged)
__ bind(&runtime);
__ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@ -4582,7 +4583,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the parameters pointer from the stack.
__ ldr(r2, MemOperand(sp, 1 * kPointerSize));
// Setup the elements pointer in the allocated arguments object and
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
@ -4594,7 +4595,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Copy the fixed array slots.
Label loop;
// Setup r4 to point to the first array slot.
// Set up r4 to point to the first array slot.
__ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
// Pre-decrement r2 with kPointerSize on each iteration.
@ -5209,7 +5210,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// of the original receiver from the call site).
__ bind(&non_function);
__ str(r1, MemOperand(sp, argc_ * kPointerSize));
__ mov(r0, Operand(argc_)); // Setup the number of arguments.
__ mov(r0, Operand(argc_)); // Set up the number of arguments.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ SetCallKind(r5, CALL_AS_METHOD);
@ -5730,7 +5731,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
// hash = character + (character << 10);
__ LoadRoot(hash, Heap::kStringHashSeedRootIndex);
__ LoadRoot(hash, Heap::kHashSeedRootIndex);
// Untag smi seed and add the character.
__ add(hash, character, Operand(hash, LSR, kSmiTagSize));
// hash += hash << 10;
@ -5759,13 +5760,12 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// hash ^= hash >> 11;
__ eor(hash, hash, Operand(hash, LSR, 11));
// hash += hash << 15;
__ add(hash, hash, Operand(hash, LSL, 15), SetCC);
__ add(hash, hash, Operand(hash, LSL, 15));
uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
__ and_(hash, hash, Operand(kHashShiftCutOffMask));
__ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
// if (hash == 0) hash = 27;
__ mov(hash, Operand(27), LeaveCC, eq);
__ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
}

2
deps/v8/src/arm/cpu-arm.cc

@ -41,7 +41,7 @@
namespace v8 {
namespace internal {
void CPU::Setup() {
void CPU::SetUp() {
CpuFeatures::Probe();
}

7
deps/v8/src/arm/deoptimizer-arm.cc

@ -319,7 +319,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
// Set up the frame pointer and the context pointer.
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
@ -723,10 +723,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(ip); // remove sp
__ pop(ip); // remove lr
// Set up the roots register.
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate);
__ mov(r10, Operand(roots_array_start));
__ InitializeRootRegister();
__ pop(ip); // remove pc
__ pop(r7); // get continuation, leave pc on stack

2
deps/v8/src/arm/full-codegen-arm.cc

@ -1009,7 +1009,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
__ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Setup the four remaining stack slots.
// Set up the four remaining stack slots.
__ push(r0); // Map.
__ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ mov(r0, Operand(Smi::FromInt(0)));

42
deps/v8/src/arm/lithium-arm.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -1038,14 +1038,23 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
if (v->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
HValue* value = instr->value();
if (value->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
}
return AssignEnvironment(new LBranch(UseRegister(v)));
LBranch* result = new LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment.
Representation rep = value->representation();
HType type = value->type();
if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
return AssignEnvironment(result);
}
return result;
}
@ -1344,7 +1353,12 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else {
left = UseRegisterAtStart(instr->LeastConstantOperand());
}
return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
LMulI* mul = new LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
@ -1413,6 +1427,15 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
LOperand* global_object = UseFixed(instr->global_object(), r0);
LRandom* result = new LRandom(global_object);
return MarkAsCall(DefineFixedDouble(result, d7), instr);
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
@ -1529,7 +1552,7 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
return new LClassOfTestAndBranch(UseRegister(instr->value()),
TempRegister());
}
@ -1556,7 +1579,7 @@ LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
return AssignEnvironment(DefineAsRegister(result));
return DefineAsRegister(result);
}
@ -1874,7 +1897,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
return AssignEnvironment(DefineAsRegister(result));
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
}

14
deps/v8/src/arm/lithium-arm.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -141,6 +141,7 @@ class LCodeGen;
V(Parameter) \
V(Power) \
V(PushArgument) \
V(Random) \
V(RegExpLiteral) \
V(Return) \
V(ShiftI) \
@ -1026,6 +1027,17 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
class LRandom: public LTemplateInstruction<1, 1, 0> {
public:
explicit LRandom(LOperand* global_object) {
inputs_[0] = global_object;
}
DECLARE_CONCRETE_INSTRUCTION(Random, "random")
DECLARE_HYDROGEN_ACCESSOR(Random)
};
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)

78
deps/v8/src/arm/lithium-codegen-arm.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -1994,7 +1994,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input. Only input and temp2 may alias.
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String>class_name,
@ -2002,7 +2002,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
Register temp,
Register temp2) {
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
ASSERT(!input.is(temp2));
ASSERT(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
if (class_name->IsEqualTo(CStrVector("Function"))) {
@ -2141,7 +2143,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
__ mov(ip, Operand(factory()->the_hole_value()));
Handle<JSGlobalPropertyCell> cell =
factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
// We use Factory::the_hole_value() on purpose instead of loading from the
@ -2901,7 +2906,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ Call(ip);
// Setup deoptimization.
// Set up deoptimization.
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
// Restore context.
@ -3190,6 +3195,30 @@ void LCodeGen::DoPower(LPower* instr) {
}
void LCodeGen::DoRandom(LRandom* instr) {
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(d7));
ASSERT(ToRegister(instr->InputAt(0)).is(r0));
__ PrepareCallCFunction(1, scratch0());
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
__ orr(r1, r1, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
__ mov(r0, Operand(0, RelocInfo::NONE));
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
}
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@ -3874,6 +3903,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
DoubleRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env) {
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
@ -3909,6 +3939,14 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Heap number to double register conversion.
__ sub(ip, input_reg, Operand(kHeapObjectTag));
__ vldr(result_reg, ip, HeapNumber::kValueOffset);
if (deoptimize_on_minus_zero) {
__ vmov(ip, result_reg.low());
__ cmp(ip, Operand(0));
__ b(ne, &done);
__ vmov(ip, result_reg.high());
__ cmp(ip, Operand(HeapNumber::kSignMask));
DeoptimizeIf(eq, env);
}
__ jmp(&done);
// Smi to double register conversion
@ -4042,6 +4080,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
instr->hydrogen()->deoptimize_on_minus_zero(),
instr->environment());
}
@ -4155,14 +4194,26 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
}
void LCodeGen::DoCheckMapCommon(Register reg,
Register scratch,
Handle<Map> map,
CompareMapMode mode,
LEnvironment* env) {
Label success;
__ CompareMap(reg, scratch, map, &success, mode);
DeoptimizeIf(ne, env);
__ bind(&success);
}
void LCodeGen::DoCheckMap(LCheckMap* instr) {
Register scratch = scratch0();
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch, Operand(instr->hydrogen()->map()));
DeoptimizeIf(ne, instr->environment());
Handle<Map> map = instr->hydrogen()->map();
DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
instr->environment());
}
@ -4231,9 +4282,9 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
__ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
__ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
DeoptimizeIf(ne, instr->environment());
DoCheckMapCommon(temp1, temp2,
Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
@ -4241,8 +4292,9 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
// Check the holder map.
__ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
__ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
DoCheckMapCommon(temp1, temp2,
Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
DeoptimizeIf(ne, instr->environment());
}

10
deps/v8/src/arm/lithium-codegen-arm.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -119,6 +119,9 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
CompareMapMode mode, LEnvironment* env);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
@ -153,7 +156,7 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return r9; }
DwVfpRegister double_scratch0() { return d15; }
DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@ -270,6 +273,7 @@ class LCodeGen BASE_EMBEDDED {
void EmitNumberUntagD(Register input,
DoubleRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
@ -408,7 +412,7 @@ class LDeferredCode: public ZoneObject {
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }

21
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -34,7 +34,6 @@ namespace v8 {
namespace internal {
static const Register kSavedValueRegister = { 9 };
static const DoubleRegister kSavedDoubleValueRegister = { 0 };
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
@ -172,9 +171,9 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
@ -193,11 +192,9 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(saved_destination_),
kSavedDoubleValueRegister);
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ vstr(kSavedDoubleValueRegister,
cgen_->ToMemOperand(saved_destination_));
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
@ -235,8 +232,8 @@ void LGapResolver::EmitMove(int index) {
// ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kSavedDoubleValueRegister.low(), source_operand);
__ vstr(kSavedDoubleValueRegister.low(), destination_operand);
__ vldr(kScratchDoubleReg.low(), source_operand);
__ vstr(kScratchDoubleReg.low(), destination_operand);
} else {
__ ldr(ip, source_operand);
__ str(ip, destination_operand);
@ -297,8 +294,8 @@ void LGapResolver::EmitMove(int index) {
__ ldr(kSavedValueRegister, source_high_operand);
__ str(kSavedValueRegister, destination_high_operand);
} else {
__ vldr(kSavedDoubleValueRegister, source_operand);
__ vstr(kSavedDoubleValueRegister, destination_operand);
__ vldr(kScratchDoubleReg, source_operand);
__ vstr(kScratchDoubleReg, destination_operand);
}
}
} else {

119
deps/v8/src/arm/macro-assembler-arm.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -396,14 +396,14 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
ldr(destination, MemOperand(roots, index << kPointerSizeLog2), cond);
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond) {
str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
@ -496,13 +496,10 @@ void MacroAssembler::RecordWrite(Register object,
// registers are cp.
ASSERT(!address.is(cp) && !value.is(cp));
if (FLAG_debug_code) {
Label ok;
if (emit_debug_code()) {
ldr(ip, MemOperand(address));
cmp(ip, value);
b(eq, &ok);
stop("Wrong address or value passed to RecordWrite");
bind(&ok);
Check(eq, "Wrong address or value passed to RecordWrite");
}
Label done;
@ -551,7 +548,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
Label done;
if (FLAG_debug_code) {
if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok);
stop("Remembered set pointer is in new space");
@ -820,12 +817,12 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Setup the frame structure on the stack.
// Set up the frame structure on the stack.
ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
Push(lr, fp);
mov(fp, Operand(sp)); // Setup new frame pointer.
mov(fp, Operand(sp)); // Set up new frame pointer.
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(2 * kPointerSize));
if (emit_debug_code()) {
@ -1414,6 +1411,35 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
SmiUntag(scratch);
// Xor original key with a seed.
eor(t0, t0, Operand(scratch));
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
mvn(scratch, Operand(t0));
add(t0, scratch, Operand(t0, LSL, 15));
// hash = hash ^ (hash >> 12);
eor(t0, t0, Operand(t0, LSR, 12));
// hash = hash + (hash << 2);
add(t0, t0, Operand(t0, LSL, 2));
// hash = hash ^ (hash >> 4);
eor(t0, t0, Operand(t0, LSR, 4));
// hash = hash * 2057;
mov(scratch, Operand(t0, LSL, 11));
add(t0, t0, Operand(t0, LSL, 3));
add(t0, t0, scratch);
// hash = hash ^ (hash >> 16);
eor(t0, t0, Operand(t0, LSR, 16));
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
@ -1443,26 +1469,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// t2 - used for the index into the dictionary.
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
mvn(t1, Operand(t0));
add(t0, t1, Operand(t0, LSL, 15));
// hash = hash ^ (hash >> 12);
eor(t0, t0, Operand(t0, LSR, 12));
// hash = hash + (hash << 2);
add(t0, t0, Operand(t0, LSL, 2));
// hash = hash ^ (hash >> 4);
eor(t0, t0, Operand(t0, LSR, 4));
// hash = hash * 2057;
mov(t1, Operand(2057));
mul(t0, t0, t1);
// hash = hash ^ (hash >> 16);
eor(t0, t0, Operand(t0, LSR, 16));
GetNumberHash(t0, t1);
// Compute the capacity mask.
ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
sub(t1, t1, Operand(1));
@ -1473,17 +1483,17 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
mov(t2, t0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(t2, t2, Operand(t1));
// Scale the index by multiplying by the element size.
ASSERT(NumberDictionary::kEntrySize == 3);
ASSERT(SeededNumberDictionary::kEntrySize == 3);
add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
// Check if the key is identical to the name.
add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
cmp(key, Operand(ip));
if (i != kProbes - 1) {
b(eq, &done);
@ -1496,14 +1506,14 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Check that the value is a normal property.
// t2: elements + (index * kPointerSize)
const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ldr(t1, FieldMemOperand(t2, kDetailsOffset));
tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
b(ne, miss);
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
ldr(result, FieldMemOperand(t2, kValueOffset));
}
@ -1992,18 +2002,49 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
}
void MacroAssembler::CompareMap(Register obj,
Register scratch,
Handle<Map> map,
Label* early_success,
CompareMapMode mode) {
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
cmp(scratch, Operand(map));
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
Map* transitioned_fast_element_map(
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
b(eq, early_success);
cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
}
Map* transitioned_double_map(
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
b(eq, early_success);
cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
}
}
}
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
SmiCheckType smi_check_type) {
SmiCheckType smi_check_type,
CompareMapMode mode) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
mov(ip, Operand(map));
cmp(scratch, ip);
Label success;
CompareMap(obj, scratch, map, &success, mode);
b(ne, fail);
bind(&success);
}
@ -3460,7 +3501,7 @@ void MacroAssembler::EnsureNotWhite(
tst(mask_scratch, load_scratch);
b(ne, &done);
if (FLAG_debug_code) {
if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
// LSL may overflow, making the check conservative.

34
deps/v8/src/arm/macro-assembler-arm.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -52,7 +52,7 @@ inline Operand SmiUntagOperand(Register object) {
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
const Register roots = { 10 }; // Roots array pointer.
const Register kRootRegister = { 10 }; // Roots array pointer.
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
@ -499,10 +499,16 @@ class MacroAssembler: public Assembler {
Register map,
Register scratch);
void InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
}
// ---------------------------------------------------------------------------
// JavaScript invokes
// Setup call kind marking in ecx. The method takes ecx as an
// Set up call kind marking in ecx. The method takes ecx as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
@ -584,6 +590,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* miss);
void GetNumberHash(Register t0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,
@ -790,15 +797,26 @@ class MacroAssembler: public Assembler {
Register scratch4,
Label* fail);
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
// to be a heap object)
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
// set with result of map compare. If multiple map compares are required, the
// compare sequences branches to early_success.
void CompareMap(Register obj,
Register scratch,
Handle<Map> map,
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specificed map.
void CheckMap(Register obj,
Register scratch,
Handle<Map> map,
Label* fail,
SmiCheckType smi_check_type);
SmiCheckType smi_check_type,
CompareMapMode mode = REQUIRE_EXACT_MAP);
void CheckMap(Register obj,

8
deps/v8/src/arm/simulator-arm.cc

@ -741,7 +741,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
// Setup simulator support first. Some of this information is needed to
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
stack_ = reinterpret_cast<char*>(malloc(stack_size));
@ -750,7 +750,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
break_pc_ = NULL;
break_instr_ = 0;
// Setup architecture state.
// Set up architecture state.
// All registers are initialized to zero to start with.
for (int i = 0; i < num_registers; i++) {
registers_[i] = 0;
@ -3324,7 +3324,7 @@ void Simulator::Execute() {
int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
// Setup arguments
// Set up arguments
// First four arguments passed in registers.
ASSERT(argument_count >= 4);
@ -3367,7 +3367,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
int32_t r10_val = get_register(r10);
int32_t r11_val = get_register(r11);
// Setup the callee-saved registers with a known value. To be able to check
// Set up the callee-saved registers with a known value. To be able to check
// that they are preserved properly across JS execution.
int32_t callee_saved_value = icount_;
set_register(r4, callee_saved_value);

44
deps/v8/src/arm/stub-cache-arm.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -376,13 +376,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// r0 : value
Label exit;
// Check that the receiver isn't a smi.
__ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the receiver hasn't changed.
__ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
__ cmp(scratch, Operand(Handle<Map>(object->map())));
__ b(ne, miss_label);
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@ -1019,10 +1015,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
Handle<Map> current_map(current->map());
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Operand(current_map));
// Branch on the result of the map check.
__ b(ne, miss);
__ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
ALLOW_ELEMENT_TRANSITION_MAPS);
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@ -1053,9 +1048,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Operand(Handle<Map>(current->map())));
__ b(ne, miss);
__ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@ -1150,7 +1144,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
__ EnterExitFrame(false, kApiStackSpace);
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object **args_) as the data.
// scratch2 (internal::Object** args_) as the data.
__ str(scratch2, MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
@ -2411,7 +2405,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
// Setup the context (function already in r1).
// Set up the context (function already in r1).
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
@ -2472,13 +2466,9 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -----------------------------------
Label miss;
// Check that the object isn't a smi.
__ JumpIfSmi(r1, &miss);
// Check that the map of the object hasn't changed.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r3, Operand(Handle<Map>(object->map())));
__ b(ne, &miss);
__ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@ -2520,13 +2510,9 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
// -----------------------------------
Label miss;
// Check that the object isn't a smi.
__ JumpIfSmi(r1, &miss);
// Check that the map of the object hasn't changed.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r3, Operand(Handle<Map>(receiver->map())));
__ b(ne, &miss);
__ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {

2
deps/v8/src/assembler.h

@ -371,7 +371,7 @@ class RelocInfo BASE_EMBEDDED {
// routines expect to access these pointers indirectly. The following
// location provides a place for these pointers to exist natually
// when accessed via the Iterator.
Object *reconstructed_obj_ptr_;
Object* reconstructed_obj_ptr_;
// External-reference pointers are also split across instruction-pairs
// in mips, but are accessed via indirect pointers. This location
// provides a place for that pointer to exist naturally. Its address

44
deps/v8/src/atomicops_internals_x86_macosx.h

@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
@ -49,7 +49,7 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
return prev_value;
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 old_value;
do {
@ -59,12 +59,12 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
return old_value;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}
@ -73,7 +73,7 @@ inline void MemoryBarrier() {
OSMemoryBarrier();
}
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev_value;
@ -87,7 +87,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
return prev_value;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
return Acquire_CompareAndSwap(ptr, old_value, new_value);
@ -97,12 +97,12 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
@ -111,13 +111,13 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
@ -126,7 +126,7 @@ inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
// 64-bit implementation on 64-bit platform
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
@ -140,7 +140,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
return prev_value;
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 old_value;
do {
@ -150,17 +150,17 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
return old_value;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev_value;
@ -174,7 +174,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
return prev_value;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
// The lib kern interface does not distinguish between
@ -186,12 +186,12 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}
@ -200,13 +200,13 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}
inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
@ -264,7 +264,7 @@ inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
old_value, new_value);
}
inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
NoBarrier_Store(
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
@ -279,7 +279,7 @@ inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}
inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
return NoBarrier_Load(
reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

152
deps/v8/src/bootstrapper.cc

@ -264,13 +264,13 @@ class Genesis BASE_EMBEDDED {
Handle<Map> CreateStrictModeFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function,
Handle<FixedArray> arguments_callbacks,
Handle<FixedArray> caller_callbacks);
Handle<AccessorPair> arguments_callbacks,
Handle<AccessorPair> caller_callbacks);
Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
PrototypePropertyMode propertyMode,
Handle<FixedArray> arguments,
Handle<FixedArray> caller);
Handle<AccessorPair> arguments,
Handle<AccessorPair> caller);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@ -378,7 +378,9 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
} else {
attributes = DONT_ENUM;
}
SetLocalPropertyNoThrow(target, symbol, function, attributes);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
target, symbol, function, attributes));
if (is_ecma_native) {
function->shared()->set_instance_class_name(*symbol);
}
@ -538,8 +540,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
PrototypePropertyMode prototypeMode,
Handle<FixedArray> arguments,
Handle<FixedArray> caller) {
Handle<AccessorPair> arguments,
Handle<AccessorPair> caller) {
Handle<DescriptorArray> descriptors =
factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
? 4
@ -600,7 +602,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
throw_type_error_function->shared()->set_code(*code);
throw_type_error_function->shared()->DontAdaptArguments();
PreventExtensions(throw_type_error_function);
JSObject::PreventExtensions(throw_type_error_function);
}
return throw_type_error_function;
}
@ -609,8 +611,8 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
Handle<Map> Genesis::CreateStrictModeFunctionMap(
PrototypePropertyMode prototype_mode,
Handle<JSFunction> empty_function,
Handle<FixedArray> arguments_callbacks,
Handle<FixedArray> caller_callbacks) {
Handle<AccessorPair> arguments_callbacks,
Handle<AccessorPair> caller_callbacks) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
Handle<DescriptorArray> descriptors =
ComputeStrictFunctionInstanceDescriptor(prototype_mode,
@ -627,8 +629,8 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Create the callbacks arrays for ThrowTypeError functions.
// The get/set callacks are filled in after the maps are created below.
Factory* factory = empty->GetIsolate()->factory();
Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
Handle<AccessorPair> arguments(factory->NewAccessorPair());
Handle<AccessorPair> caller(factory->NewAccessorPair());
// Allocate map for the strict mode function instances.
Handle<Map> strict_mode_function_instance_map =
@ -663,11 +665,11 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
Handle<JSFunction> throw_function =
GetThrowTypeErrorFunction();
// Complete the callback fixed arrays.
arguments->set(0, *throw_function);
arguments->set(1, *throw_function);
caller->set(0, *throw_function);
caller->set(1, *throw_function);
// Complete the callbacks.
arguments->set_getter(*throw_function);
arguments->set_setter(*throw_function);
caller->set_getter(*throw_function);
caller->set_setter(*throw_function);
}
@ -753,11 +755,10 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(js_global_function->instance_prototype()));
SetLocalPropertyNoThrow(
prototype,
factory()->constructor_symbol(),
isolate()->object_function(),
NONE);
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
prototype, factory()->constructor_symbol(),
isolate()->object_function(), NONE));
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
FunctionTemplateInfo::cast(js_global_template->constructor()));
@ -834,7 +835,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
factory()->LookupAsciiSymbol("global"),
inner_global,
attributes);
// Setup the reference from the global object to the builtins object.
// Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
TransferNamedProperties(inner_global_from_snapshot, inner_global);
TransferIndexedProperties(inner_global_from_snapshot, inner_global);
@ -863,8 +864,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Heap* heap = isolate->heap();
Handle<String> object_name = Handle<String>(heap->Object_symbol());
SetLocalPropertyNoThrow(inner_global, object_name,
isolate->object_function(), DONT_ENUM);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
inner_global, object_name,
isolate->object_function(), DONT_ENUM));
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
@ -1046,14 +1049,15 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{ // -- J S O N
Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
Handle<JSFunction> cons = factory->NewFunction(
name,
factory->the_hole_value());
Handle<JSFunction> cons = factory->NewFunction(name,
factory->the_hole_value());
cons->SetInstancePrototype(global_context()->initial_object_prototype());
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
global, name, json_object, DONT_ENUM));
global_context()->set_json_object(*json_object);
}
@ -1083,12 +1087,14 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
global_context()->set_arguments_boilerplate(*result);
// Note: length must be added as the first property and
// callee must be added as the second property.
SetLocalPropertyNoThrow(result, factory->length_symbol(),
factory->undefined_value(),
DONT_ENUM);
SetLocalPropertyNoThrow(result, factory->callee_symbol(),
factory->undefined_value(),
DONT_ENUM);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->length_symbol(),
factory->undefined_value(), DONT_ENUM));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->callee_symbol(),
factory->undefined_value(), DONT_ENUM));
#ifdef DEBUG
LookupResult lookup(isolate);
@ -1136,17 +1142,17 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
// Create the ThrowTypeError functions.
Handle<FixedArray> callee = factory->NewFixedArray(2, TENURED);
Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
Handle<AccessorPair> callee = factory->NewAccessorPair();
Handle<AccessorPair> caller = factory->NewAccessorPair();
Handle<JSFunction> throw_function =
GetThrowTypeErrorFunction();
// Install the ThrowTypeError functions.
callee->set(0, *throw_function);
callee->set(1, *throw_function);
caller->set(0, *throw_function);
caller->set(1, *throw_function);
callee->set_getter(*throw_function);
callee->set_setter(*throw_function);
caller->set_getter(*throw_function);
caller->set_setter(*throw_function);
// Create the descriptor array for the arguments object.
Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
@ -1183,9 +1189,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
global_context()->set_strict_mode_arguments_boilerplate(*result);
// Add length property only for strict mode boilerplate.
SetLocalPropertyNoThrow(result, factory->length_symbol(),
factory->undefined_value(),
DONT_ENUM);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
result, factory->length_symbol(),
factory->undefined_value(), DONT_ENUM));
#ifdef DEBUG
LookupResult lookup(isolate);
@ -1353,7 +1360,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
if (cache != NULL) cache->Add(name, function_info);
}
// Setup the function context. Conceptually, we should clone the
// Set up the function context. Conceptually, we should clone the
// function before overwriting the context but since we're in a
// single-threaded environment it is not strictly necessary.
ASSERT(top_context->IsGlobalContext());
@ -1440,7 +1447,7 @@ bool Genesis::InstallNatives() {
builtins->set_global_context(*global_context());
builtins->set_global_receiver(*builtins);
// Setup the 'global' properties of the builtins object. The
// Set up the 'global' properties of the builtins object. The
// 'global' property that refers to the global object is the only
// way to get from code running in the builtins context to the
// global object.
@ -1448,9 +1455,11 @@ bool Genesis::InstallNatives() {
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
Handle<Object> global_obj(global_context()->global());
SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
builtins, global_symbol, global_obj, attributes));
// Setup the reference from the global object to the builtins object.
// Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
// Create a bridge function that has context in the global context.
@ -1674,7 +1683,7 @@ bool Genesis::InstallNatives() {
InstallNativeFunctions();
// Store the map for the string prototype after the natives has been compiled
// and the String function has been setup.
// and the String function has been set up.
Handle<JSFunction> string_function(global_context()->string_function());
ASSERT(JSObject::cast(
string_function->initial_map()->prototype())->HasFastProperties());
@ -1911,25 +1920,28 @@ bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
Factory* factory = global_context->GetIsolate()->factory();
Isolate* isolate = global_context->GetIsolate();
Factory* factory = isolate->factory();
HandleScope scope;
Handle<JSGlobalObject> js_global(
JSGlobalObject::cast(global_context->global()));
Handle<JSGlobalObject> global(JSGlobalObject::cast(global_context->global()));
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives_string =
factory->LookupAsciiSymbol(FLAG_expose_natives_as);
SetLocalPropertyNoThrow(js_global, natives_string,
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
global, natives,
Handle<JSObject>(global->builtins()),
DONT_ENUM));
}
Handle<Object> Error = GetProperty(js_global, "Error");
Handle<Object> Error = GetProperty(global, "Error");
if (Error->IsJSObject()) {
Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
name,
Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
NONE);
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
Handle<JSObject>::cast(Error), name,
stack_trace_limit, NONE));
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -1948,7 +1960,9 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
Handle<String> debug_string =
factory->LookupAsciiSymbol(FLAG_expose_debug_as);
Handle<Object> global_proxy(debug->debug_context()->global_proxy());
SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
global, debug_string, global_proxy, DONT_ENUM));
}
#endif
}
@ -2164,7 +2178,9 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<String> key = Handle<String>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
SetLocalPropertyNoThrow(to, key, value, details.attributes());
CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
to, key, value, details.attributes()));
break;
}
case CONSTANT_FUNCTION: {
@ -2172,7 +2188,9 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<JSFunction> fun =
Handle<JSFunction>(descs->GetConstantFunction(i));
SetLocalPropertyNoThrow(to, key, fun, details.attributes());
CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
to, key, fun, details.attributes()));
break;
}
case CALLBACKS: {
@ -2187,7 +2205,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<Object> callbacks(descs->GetCallbacksObject(i));
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
SetNormalizedProperty(to, key, callbacks, d);
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
case MAP_TRANSITION:
@ -2224,7 +2242,9 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
}
PropertyDetails details = properties->DetailsAt(i);
SetLocalPropertyNoThrow(to, key, value, details.attributes());
CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
to, key, value, details.attributes()));
}
}
}

2
deps/v8/src/bootstrapper.h

@ -88,7 +88,7 @@ class SourceCodeCache BASE_EMBEDDED {
// context.
class Bootstrapper {
public:
// Requires: Heap::Setup has been called.
// Requires: Heap::SetUp has been called.
void Initialize(bool create_heap_objects);
void TearDown();

2
deps/v8/src/builtins.cc

@ -1719,7 +1719,7 @@ void Builtins::InitBuiltinFunctionTable() {
#undef DEF_FUNCTION_PTR_A
}
void Builtins::Setup(bool create_heap_objects) {
void Builtins::SetUp(bool create_heap_objects) {
ASSERT(!initialized_);
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();

2
deps/v8/src/builtins.h

@ -265,7 +265,7 @@ class Builtins {
// Generate all builtin code objects. Should be called once during
// isolate initialization.
void Setup(bool create_heap_objects);
void SetUp(bool create_heap_objects);
void TearDown();
// Garbage collection support.

6
deps/v8/src/code-stubs.cc

@ -40,7 +40,7 @@ namespace internal {
bool CodeStub::FindCodeInCache(Code** code_out) {
Heap* heap = Isolate::Current()->heap();
int index = heap->code_stubs()->FindEntry(GetKey());
if (index != NumberDictionary::kNotFound) {
if (index != UnseededNumberDictionary::kNotFound) {
*code_out = Code::cast(heap->code_stubs()->ValueAt(index));
return true;
}
@ -132,9 +132,9 @@ Handle<Code> CodeStub::GetCode() {
AddToSpecialCache(new_object);
} else {
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
Handle<UnseededNumberDictionary> dict =
factory->DictionaryAtNumberPut(
Handle<NumberDictionary>(heap->code_stubs()),
Handle<UnseededNumberDictionary>(heap->code_stubs()),
GetKey(),
new_object);
heap->public_set_code_stubs(*dict);

2
deps/v8/src/compiler.cc

@ -628,7 +628,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// scope info. Please note, that the order of the shared function
// info initialization is important since set_scope_info might
// trigger a GC, causing the ASSERT below to be invalid if the code
// was flushed. By settting the code object last we avoid this.
// was flushed. By setting the code object last we avoid this.
Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
shared->set_scope_info(*scope_info);
shared->set_code(*code);

4
deps/v8/src/cpu-profiler.cc

@ -493,7 +493,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
if (isolate->heap()->HasBeenSetup()) {
if (isolate->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
bool saved_log_code_flag = FLAG_log_code;
FLAG_log_code = true;
@ -562,7 +562,7 @@ void CpuProfiler::StopProcessor() {
}
void CpuProfiler::Setup() {
void CpuProfiler::SetUp() {
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() == NULL) {
isolate->set_cpu_profiler(new CpuProfiler());

6
deps/v8/src/cpu-profiler.h

@ -204,7 +204,7 @@ namespace internal {
// TODO(isolates): isolatify this class.
class CpuProfiler {
public:
static void Setup();
static void SetUp();
static void TearDown();
static void StartProfiling(const char* title);
@ -230,11 +230,11 @@ class CpuProfiler {
Code* code, String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo *shared,
SharedFunctionInfo* shared,
String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo *shared,
SharedFunctionInfo* shared,
String* source, int line);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);

2
deps/v8/src/cpu.h

@ -53,7 +53,7 @@ namespace internal {
class CPU : public AllStatic {
public:
// Initializes the cpu architecture support. Called once at VM startup.
static void Setup();
static void SetUp();
static bool SupportsCrankshaft();

2
deps/v8/src/d8-debug.cc

@ -169,7 +169,7 @@ void RemoteDebugger::Run() {
bool ok;
// Make sure that socket support is initialized.
ok = i::Socket::Setup();
ok = i::Socket::SetUp();
if (!ok) {
printf("Unable to initialize socket support %d\n", i::Socket::LastError());
return;

189
deps/v8/src/d8.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -281,63 +281,161 @@ Handle<Value> Shell::Load(const Arguments& args) {
return Undefined();
}
static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
if (value_in->IsUint32()) {
return value_in->Uint32Value();
}
Local<Value> number = value_in->ToNumber();
if (try_catch->HasCaught()) return 0;
ASSERT(number->IsNumber());
Local<Int32> int32 = number->ToInt32();
if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
int32_t raw_value = int32->Int32Value();
if (try_catch->HasCaught()) return 0;
if (raw_value < 0) {
ThrowException(String::New("Array length must not be negative."));
return 0;
}
static const int kMaxLength = 0x3fffffff;
#ifndef V8_SHARED
ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
#endif // V8_SHARED
if (raw_value > static_cast<int32_t>(kMaxLength)) {
ThrowException(
String::New("Array length exceeds maximum length."));
}
return static_cast<size_t>(raw_value);
}
const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
Handle<Value> Shell::CreateExternalArray(const Arguments& args,
ExternalArrayType type,
size_t element_size) {
TryCatch try_catch;
bool is_array_buffer_construct = element_size == 0;
if (is_array_buffer_construct) {
type = v8::kExternalByteArray;
element_size = 1;
}
ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
element_size == 8);
if (args.Length() != 1) {
if (args.Length() == 0) {
return ThrowException(
String::New("Array constructor needs one parameter."));
String::New("Array constructor must have at least one "
"parameter."));
}
static const int kMaxLength = 0x3fffffff;
#ifndef V8_SHARED
ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
#endif // V8_SHARED
size_t length = 0;
TryCatch try_catch;
if (args[0]->IsUint32()) {
length = args[0]->Uint32Value();
} else {
Local<Number> number = args[0]->ToNumber();
if (number.IsEmpty()) {
ASSERT(try_catch.HasCaught());
return try_catch.Exception();
bool first_arg_is_array_buffer =
args[0]->IsObject() &&
args[0]->ToObject()->Get(
String::New(kArrayBufferMarkerPropName))->IsTrue();
// Currently, only the following constructors are supported:
// TypedArray(unsigned long length)
// TypedArray(ArrayBuffer buffer,
// optional unsigned long byteOffset,
// optional unsigned long length)
if (args.Length() > 3) {
return ThrowException(
String::New("Array constructor from ArrayBuffer must "
"have 1-3 parameters."));
}
Local<Value> length_value = (args.Length() < 3)
? (first_arg_is_array_buffer
? args[0]->ToObject()->Get(String::New("length"))
: args[0])
: args[2];
size_t length = convertToUint(length_value, &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
void* data = NULL;
size_t offset = 0;
Handle<Object> array = Object::New();
if (first_arg_is_array_buffer) {
Handle<Object> derived_from = args[0]->ToObject();
data = derived_from->GetIndexedPropertiesExternalArrayData();
size_t array_buffer_length = convertToUint(
derived_from->Get(String::New("length")),
&try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
if (data == NULL && array_buffer_length != 0) {
return ThrowException(
String::New("ArrayBuffer doesn't have data"));
}
ASSERT(number->IsNumber());
Local<Int32> int32 = number->ToInt32();
if (int32.IsEmpty()) {
if (try_catch.HasCaught()) {
return try_catch.Exception();
if (args.Length() > 1) {
offset = convertToUint(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.Exception();
// The given byteOffset must be a multiple of the element size of the
// specific type, otherwise an exception is raised.
if (offset % element_size != 0) {
return ThrowException(
String::New("offset must be multiple of element_size"));
}
}
int32_t raw_length = int32->Int32Value();
if (try_catch.HasCaught()) {
return try_catch.Exception();
if (offset > array_buffer_length) {
return ThrowException(
String::New("byteOffset must be less than ArrayBuffer length."));
}
if (raw_length < 0) {
return ThrowException(String::New("Array length must not be negative."));
if (args.Length() == 2) {
// If length is not explicitly specified, the length of the ArrayBuffer
// minus the byteOffset must be a multiple of the element size of the
// specific type, or an exception is raised.
length = array_buffer_length - offset;
}
if (args.Length() != 3) {
if (length % element_size != 0) {
return ThrowException(
String::New("ArrayBuffer length minus the byteOffset must be a "
"multiple of the element size"));
}
length /= element_size;
}
if (raw_length > static_cast<int32_t>(kMaxLength)) {
// If a given byteOffset and length references an area beyond the end of
// the ArrayBuffer an exception is raised.
if (offset + (length * element_size) > array_buffer_length) {
return ThrowException(
String::New("Array length exceeds maximum length."));
String::New("length references an area beyond the end of the "
"ArrayBuffer"));
}
length = static_cast<size_t>(raw_length);
}
if (length > static_cast<size_t>(kMaxLength)) {
return ThrowException(String::New("Array length exceeds maximum length."));
// Hold a reference to the ArrayBuffer so its buffer doesn't get collected.
array->Set(String::New(kArrayBufferReferencePropName), args[0], ReadOnly);
}
void* data = calloc(length, element_size);
if (data == NULL) {
return ThrowException(String::New("Memory allocation failed."));
if (is_array_buffer_construct) {
array->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
}
Handle<Object> array = Object::New();
Persistent<Object> persistent_array = Persistent<Object>::New(array);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
array->SetIndexedPropertiesToExternalArrayData(data, type,
static_cast<int>(length));
if (data == NULL && length != 0) {
data = calloc(length, element_size);
if (data == NULL) {
return ThrowException(String::New("Memory allocation failed."));
}
}
array->SetIndexedPropertiesToExternalArrayData(
reinterpret_cast<uint8_t*>(data) + offset, type,
static_cast<int>(length));
array->Set(String::New("length"),
Int32::New(static_cast<int32_t>(length)), ReadOnly);
array->Set(String::New("BYTES_PER_ELEMENT"),
@ -347,11 +445,22 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
free(data);
HandleScope scope;
Handle<String> prop_name = String::New(kArrayBufferReferencePropName);
Handle<Object> converted_object = object->ToObject();
Local<Value> prop_value = converted_object->Get(prop_name);
if (data != NULL && !prop_value->IsObject()) {
free(data);
}
object.Dispose();
}
Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
return CreateExternalArray(args, v8::kExternalByteArray, 0);
}
Handle<Value> Shell::Int8Array(const Arguments& args) {
return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
}
@ -693,6 +802,8 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
FunctionTemplate::New(DisableProfiler));
// Bind the handlers for external arrays.
global_template->Set(String::New("ArrayBuffer"),
FunctionTemplate::New(ArrayBuffer));
global_template->Set(String::New("Int8Array"),
FunctionTemplate::New(Int8Array));
global_template->Set(String::New("Uint8Array"),

3
deps/v8/src/d8.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -289,6 +289,7 @@ class Shell : public i::AllStatic {
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadLine(const Arguments& args);
static Handle<Value> Load(const Arguments& args);
static Handle<Value> ArrayBuffer(const Arguments& args);
static Handle<Value> Int8Array(const Arguments& args);
static Handle<Value> Uint8Array(const Arguments& args);
static Handle<Value> Int16Array(const Arguments& args);

2
deps/v8/src/debug-debugger.js

@ -1547,7 +1547,7 @@ DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
}
}
// Setup the VM for stepping.
// Set up the VM for stepping.
this.exec_state_.prepareStep(action, count);
}

12
deps/v8/src/debug.cc

@ -682,7 +682,7 @@ void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
}
void Debug::Setup(bool create_heap_objects) {
void Debug::SetUp(bool create_heap_objects) {
ThreadInit();
if (create_heap_objects) {
// Get code to handle debug break on return.
@ -827,8 +827,8 @@ bool Debug::Load() {
Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate_,
SetProperty(global, key, Handle<Object>(global->builtins()),
NONE, kNonStrictMode),
JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
NONE, kNonStrictMode),
false);
// Compile the JavaScript for the debugger in the debugger context.
@ -1213,7 +1213,7 @@ void Debug::ClearAllBreakPoints() {
void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
PrepareForBreakPoints();
// Make sure the function has setup the debug info.
// Make sure the function has set up the debug info.
if (!EnsureDebugInfo(shared)) {
// Return if we failed to retrieve the debug info.
return;
@ -2855,7 +2855,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
command.Dispose();
// Return from debug event processing if either the VM is put into the
// runnning state (through a continue command) or auto continue is active
// running state (through a continue command) or auto continue is active
// and there are no more commands queued.
if (running && !HasCommands()) {
return;
@ -3065,7 +3065,7 @@ bool Debugger::StartAgent(const char* name, int port,
v8::Debug::DebugBreak();
}
if (Socket::Setup()) {
if (Socket::SetUp()) {
if (agent_ == NULL) {
agent_ = new DebuggerAgent(name, port);
agent_->Start();

6
deps/v8/src/debug.h

@ -178,7 +178,9 @@ class ScriptCache : private HashMap {
private:
// Calculate the hash value from the key (script id).
static uint32_t Hash(int key) { return ComputeIntegerHash(key); }
static uint32_t Hash(int key) {
return ComputeIntegerHash(key, v8::internal::kZeroHashSeed);
}
// Scripts match if their keys (script id) match.
static bool ScriptMatch(void* key1, void* key2) { return key1 == key2; }
@ -222,7 +224,7 @@ class DebugInfoListNode {
// DebugInfo.
class Debug {
public:
void Setup(bool create_heap_objects);
void SetUp(bool create_heap_objects);
bool Load();
void Unload();
bool IsLoaded() { return !debug_context_.is_null(); }

19
deps/v8/src/elements.cc

@ -549,11 +549,11 @@ class PixelElementsAccessor
class DictionaryElementsAccessor
: public ElementsAccessorBase<DictionaryElementsAccessor,
NumberDictionary> {
SeededNumberDictionary> {
public:
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
static MaybeObject* SetLengthWithoutNormalize(NumberDictionary* dict,
static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
JSArray* array,
Object* length_object,
uint32_t length) {
@ -619,9 +619,10 @@ class DictionaryElementsAccessor
if (is_arguments) {
backing_store = FixedArray::cast(backing_store->get(1));
}
NumberDictionary* dictionary = NumberDictionary::cast(backing_store);
SeededNumberDictionary* dictionary =
SeededNumberDictionary::cast(backing_store);
int entry = dictionary->FindEntry(key);
if (entry != NumberDictionary::kNotFound) {
if (entry != SeededNumberDictionary::kNotFound) {
Object* result = dictionary->DeleteProperty(entry, mode);
if (result == heap->true_value()) {
MaybeObject* maybe_elements = dictionary->Shrink(key);
@ -654,7 +655,7 @@ class DictionaryElementsAccessor
protected:
friend class ElementsAccessorBase<DictionaryElementsAccessor,
NumberDictionary>;
SeededNumberDictionary>;
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
@ -662,12 +663,12 @@ class DictionaryElementsAccessor
return DeleteCommon(obj, key, mode);
}
static MaybeObject* Get(NumberDictionary* backing_store,
static MaybeObject* Get(SeededNumberDictionary* backing_store,
uint32_t key,
JSObject* obj,
Object* receiver) {
int entry = backing_store->FindEntry(key);
if (entry != NumberDictionary::kNotFound) {
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = backing_store->ValueAt(entry);
PropertyDetails details = backing_store->DetailsAt(entry);
if (details.type() == CALLBACKS) {
@ -682,7 +683,7 @@ class DictionaryElementsAccessor
return obj->GetHeap()->the_hole_value();
}
static uint32_t GetKeyForIndex(NumberDictionary* dict,
static uint32_t GetKeyForIndex(SeededNumberDictionary* dict,
uint32_t index) {
Object* key = dict->KeyAt(index);
return Smi::cast(key)->value();
@ -895,7 +896,7 @@ MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, BackingStoreClass>::
if (length->IsNumber()) {
uint32_t value;
if (length->ToArrayIndex(&value)) {
NumberDictionary* dictionary;
SeededNumberDictionary* dictionary;
MaybeObject* maybe_object = array->NormalizeElements();
if (!maybe_object->To(&dictionary)) return maybe_object;
Object* new_length;

4
deps/v8/src/execution.cc

@ -845,13 +845,13 @@ Object* Execution::DebugBreakHelper() {
// Clear the debug break request flag.
isolate->stack_guard()->Continue(DEBUGBREAK);
ProcessDebugMesssages(debug_command_only);
ProcessDebugMessages(debug_command_only);
// Return to continue execution.
return isolate->heap()->undefined_value();
}
void Execution::ProcessDebugMesssages(bool debug_command_only) {
void Execution::ProcessDebugMessages(bool debug_command_only) {
Isolate* isolate = Isolate::Current();
// Clear the debug command request flag.
isolate->stack_guard()->Continue(DEBUGCOMMAND);

2
deps/v8/src/execution.h

@ -136,7 +136,7 @@ class Execution : public AllStatic {
Handle<Object> is_global);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Object* DebugBreakHelper();
static void ProcessDebugMesssages(bool debug_command_only);
static void ProcessDebugMessages(bool debug_command_only);
#endif
// If the stack guard is triggered, but it is not an actual

48
deps/v8/src/factory.cc

@ -77,11 +77,21 @@ Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) {
}
Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) {
Handle<SeededNumberDictionary> Factory::NewSeededNumberDictionary(
int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
NumberDictionary::Allocate(at_least_space_for),
NumberDictionary);
SeededNumberDictionary::Allocate(at_least_space_for),
SeededNumberDictionary);
}
Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary(
int at_least_space_for) {
ASSERT(0 <= at_least_space_for);
CALL_HEAP_FUNCTION(isolate(),
UnseededNumberDictionary::Allocate(at_least_space_for),
UnseededNumberDictionary);
}
@ -131,6 +141,13 @@ Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
}
Handle<AccessorPair> Factory::NewAccessorPair() {
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateAccessorPair(),
AccessorPair);
}
// Symbols are created in the old generation (data space).
Handle<String> Factory::LookupSymbol(Vector<const char> string) {
CALL_HEAP_FUNCTION(isolate(),
@ -698,7 +715,7 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
// Allocate the function
Handle<JSFunction> function = NewFunction(name, the_hole_value());
// Setup the code pointer in both the shared function info and in
// Set up the code pointer in both the shared function info and in
// the function itself.
function->shared()->set_code(*code);
function->set_code(*code);
@ -729,7 +746,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
// Allocate the function.
Handle<JSFunction> function = NewFunction(name, prototype);
// Setup the code pointer in both the shared function info and in
// Set up the code pointer in both the shared function info and in
// the function itself.
function->shared()->set_code(*code);
function->set_code(*code);
@ -751,7 +768,10 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
// property that refers to the function.
SetPrototypeProperty(function, prototype);
// Currently safe because it is only invoked from Genesis.
SetLocalPropertyNoThrow(prototype, constructor_symbol(), function, DONT_ENUM);
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
prototype, constructor_symbol(),
function, DONT_ENUM));
return function;
}
@ -1061,13 +1081,23 @@ Handle<String> Factory::Uint32ToString(uint32_t value) {
}
Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
Handle<NumberDictionary> dictionary,
Handle<SeededNumberDictionary> Factory::DictionaryAtNumberPut(
Handle<SeededNumberDictionary> dictionary,
uint32_t key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(isolate(),
dictionary->AtNumberPut(key, *value),
SeededNumberDictionary);
}
Handle<UnseededNumberDictionary> Factory::DictionaryAtNumberPut(
Handle<UnseededNumberDictionary> dictionary,
uint32_t key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(isolate(),
dictionary->AtNumberPut(key, *value),
NumberDictionary);
UnseededNumberDictionary);
}

17
deps/v8/src/factory.h

@ -54,7 +54,11 @@ class Factory {
int size,
PretenureFlag pretenure = NOT_TENURED);
Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for);
Handle<SeededNumberDictionary> NewSeededNumberDictionary(
int at_least_space_for);
Handle<UnseededNumberDictionary> NewUnseededNumberDictionary(
int at_least_space_for);
Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
@ -69,6 +73,8 @@ class Factory {
Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
int deopt_entry_count,
PretenureFlag pretenure);
// Allocates a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
Handle<String> LookupSymbol(Vector<const char> str);
Handle<String> LookupSymbol(Handle<String> str);
@ -430,8 +436,13 @@ class Factory {
Handle<Object> stack_trace,
Handle<Object> stack_frames);
Handle<NumberDictionary> DictionaryAtNumberPut(
Handle<NumberDictionary>,
Handle<SeededNumberDictionary> DictionaryAtNumberPut(
Handle<SeededNumberDictionary>,
uint32_t key,
Handle<Object> value);
Handle<UnseededNumberDictionary> DictionaryAtNumberPut(
Handle<UnseededNumberDictionary>,
uint32_t key,
Handle<Object> value);

8
deps/v8/src/flag-definitions.h

@ -349,13 +349,13 @@ DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
DEFINE_bool(preallocate_message_memory, false,
"preallocate some memory to build stack traces.")
DEFINE_bool(randomize_string_hashes,
DEFINE_bool(randomize_hashes,
true,
"randomize string hashes to avoid predictable hash collisions "
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
DEFINE_int(string_hash_seed,
DEFINE_int(hash_seed,
0,
"Fixed seed to use to string hashing (0 means random)"
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
// v8.cc

5
deps/v8/src/frames.cc

@ -485,7 +485,7 @@ Code* ExitFrame::unchecked_code() const {
void ExitFrame::ComputeCallerState(State* state) const {
// Setup the caller state.
// Set up the caller state.
state->sp = caller_sp();
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address
@ -1303,7 +1303,8 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
isolate_->counters()->pc_to_code()->Increment();
ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
uint32_t hash = ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)),
v8::internal::kZeroHashSeed);
uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
InnerPointerToCodeCacheEntry* entry = cache(index);
if (entry->inner_pointer == inner_pointer) {

4
deps/v8/src/full-codegen.cc

@ -1178,7 +1178,7 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
}
ExitFinallyBlock(); // Return to the calling code.
// Setup try handler.
// Set up try handler.
__ bind(&try_entry);
__ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER, stmt->index());
{ TryFinally try_body(this, &finally_entry);
@ -1284,7 +1284,7 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(
bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
Expression *sub_expr;
Expression* sub_expr;
Handle<String> check;
if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
EmitLiteralCompareTypeof(expr, sub_expr, check);

44
deps/v8/src/gdb-jit.cc

@ -1556,23 +1556,23 @@ class DebugLineSection : public DebugSection {
class UnwindInfoSection : public DebugSection {
public:
explicit UnwindInfoSection(CodeDescription *desc);
virtual bool WriteBody(Writer *w);
explicit UnwindInfoSection(CodeDescription* desc);
virtual bool WriteBody(Writer* w);
int WriteCIE(Writer *w);
void WriteFDE(Writer *w, int);
int WriteCIE(Writer* w);
void WriteFDE(Writer* w, int);
void WriteFDEStateOnEntry(Writer *w);
void WriteFDEStateAfterRBPPush(Writer *w);
void WriteFDEStateAfterRBPSet(Writer *w);
void WriteFDEStateAfterRBPPop(Writer *w);
void WriteFDEStateOnEntry(Writer* w);
void WriteFDEStateAfterRBPPush(Writer* w);
void WriteFDEStateAfterRBPSet(Writer* w);
void WriteFDEStateAfterRBPPop(Writer* w);
void WriteLength(Writer *w,
void WriteLength(Writer* w,
Writer::Slot<uint32_t>* length_slot,
int initial_position);
private:
CodeDescription *desc_;
CodeDescription* desc_;
// DWARF3 Specification, Table 7.23
enum CFIInstructions {
@ -1623,7 +1623,7 @@ class UnwindInfoSection : public DebugSection {
};
void UnwindInfoSection::WriteLength(Writer *w,
void UnwindInfoSection::WriteLength(Writer* w,
Writer::Slot<uint32_t>* length_slot,
int initial_position) {
uint32_t align = (w->position() - initial_position) % kPointerSize;
@ -1639,7 +1639,7 @@ void UnwindInfoSection::WriteLength(Writer *w,
}
UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
UnwindInfoSection::UnwindInfoSection(CodeDescription* desc)
#ifdef __ELF
: ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
#else
@ -1648,7 +1648,7 @@ UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
#endif
desc_(desc) { }
int UnwindInfoSection::WriteCIE(Writer *w) {
int UnwindInfoSection::WriteCIE(Writer* w) {
Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
uint32_t cie_position = w->position();
@ -1668,7 +1668,7 @@ int UnwindInfoSection::WriteCIE(Writer *w) {
}
void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) {
void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) {
// The only FDE for this function. The CFA is the current RBP.
Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
int fde_position = w->position();
@ -1686,7 +1686,7 @@ void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) {
}
void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) {
void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
// The first state, just after the control has been transferred to the the
// function.
@ -1713,7 +1713,7 @@ void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) {
}
void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) {
void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) {
// The second state, just after RBP has been pushed.
// RBP / CFA for this function is now the current RSP, so just set the
@ -1734,7 +1734,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) {
}
void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) {
void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) {
// The third state, after the RBP has been set.
// The CFA can now directly be set to RBP.
@ -1749,7 +1749,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) {
}
void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) {
void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
// The fourth (final) state. The RBP has been popped (just before issuing a
// return).
@ -1769,7 +1769,7 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) {
}
bool UnwindInfoSection::WriteBody(Writer *w) {
bool UnwindInfoSection::WriteBody(Writer* w) {
uint32_t cie_position = WriteCIE(w);
WriteFDE(w, cie_position);
return true;
@ -1810,8 +1810,8 @@ extern "C" {
struct JITDescriptor {
uint32_t version_;
uint32_t action_flag_;
JITCodeEntry *relevant_entry_;
JITCodeEntry *first_entry_;
JITCodeEntry* relevant_entry_;
JITCodeEntry* first_entry_;
};
// GDB will place breakpoint into this function.
@ -1998,7 +1998,7 @@ void GDBJITInterface::AddCode(Handle<String> name,
}
}
static void AddUnwindInfo(CodeDescription *desc) {
static void AddUnwindInfo(CodeDescription* desc) {
#ifdef V8_TARGET_ARCH_X64
if (desc->tag() == GDBJITInterface::FUNCTION) {
// To avoid propagating unwinding information through

162
deps/v8/src/handles.cc

@ -208,42 +208,6 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
}
void NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties) {
CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
object->NormalizeProperties(
mode,
expected_additional_properties));
}
Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->NormalizeElements(),
NumberDictionary);
}
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields) {
CALL_HEAP_FUNCTION_VOID(
object->GetIsolate(),
object->TransformToFastProperties(unused_property_fields));
}
Handle<NumberDictionary> NumberDictionarySet(
Handle<NumberDictionary> dictionary,
uint32_t index,
Handle<Object> value,
PropertyDetails details) {
CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
dictionary->Set(index, *value, details),
NumberDictionary);
}
void FlattenString(Handle<String> string) {
CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
}
@ -265,17 +229,6 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
}
Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->SetProperty(*key, *value, attributes, strict_mode),
Object);
}
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
@ -303,16 +256,6 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
}
Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyDetails details) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->SetNormalizedProperty(*key, *value, details),
Object);
}
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key) {
Isolate* isolate = object->GetIsolate();
@ -322,30 +265,6 @@ Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
}
Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
CALL_HEAP_FUNCTION(
object->GetIsolate(),
object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
Object);
}
void SetLocalPropertyNoThrow(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
ASSERT(!isolate->has_pending_exception());
CHECK(!SetLocalPropertyIgnoreAttributes(
object, key, value, attributes).is_null());
CHECK(!isolate->has_pending_exception());
}
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
@ -389,12 +308,6 @@ Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
}
Handle<Object> GetPrototype(Handle<Object> obj) {
Handle<Object> result(obj->GetPrototype());
return result;
}
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
const bool skip_hidden_prototypes = false;
CALL_HEAP_FUNCTION(obj->GetIsolate(),
@ -402,44 +315,6 @@ Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
}
Handle<Object> PreventExtensions(Handle<JSObject> object) {
CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
}
Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
Handle<String> key,
Handle<Object> value) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
obj->SetHiddenProperty(*key, *value),
Object);
}
int GetIdentityHash(Handle<JSReceiver> obj) {
CALL_AND_RETRY(obj->GetIsolate(),
obj->GetIdentityHash(ALLOW_CREATION),
return Smi::cast(__object__)->value(),
return 0);
}
Handle<Object> DeleteElement(Handle<JSObject> obj,
uint32_t index) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
obj->DeleteElement(index, JSObject::NORMAL_DELETION),
Object);
}
Handle<Object> DeleteProperty(Handle<JSObject> obj,
Handle<String> prop) {
CALL_HEAP_FUNCTION(obj->GetIsolate(),
obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
Object);
}
Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
@ -457,43 +332,6 @@ Handle<String> SubString(Handle<String> str,
}
Handle<Object> SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode) {
if (object->HasExternalArrayElements()) {
if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number = Execution::ToNumber(value, &has_exception);
if (has_exception) return Handle<Object>();
value = number;
}
}
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->SetElement(index, *value, strict_mode, true),
Object);
}
Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode) {
ASSERT(!object->HasExternalArrayElements());
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->SetElement(index, *value, strict_mode, false),
Object);
}
Handle<Object> TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->TransitionElementsKind(to_kind),
Object);
}
Handle<JSObject> Copy(Handle<JSObject> obj) {
Isolate* isolate = obj->GetIsolate();
CALL_HEAP_FUNCTION(isolate,

65
deps/v8/src/handles.h

@ -167,18 +167,6 @@ class HandleScope {
// an object of expected type, or the handle is an error if running out
// of space or encountering an internal error.
void NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode,
int expected_additional_properties);
Handle<NumberDictionary> NormalizeElements(Handle<JSObject> object);
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
MUST_USE_RESULT Handle<NumberDictionary> NumberDictionarySet(
Handle<NumberDictionary> dictionary,
uint32_t index,
Handle<Object> value,
PropertyDetails details);
// Flattens a string.
void FlattenString(Handle<String> str);
@ -186,12 +174,6 @@ void FlattenString(Handle<String> str);
// string.
Handle<String> FlattenGetString(Handle<String> str);
Handle<Object> SetProperty(Handle<JSReceiver> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode);
Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
@ -203,40 +185,9 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> value,
PropertyAttributes attributes);
Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyDetails details);
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key);
Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes);
// Used to set local properties on the object we totally control
// and which therefore has no accessors and alikes.
void SetLocalPropertyNoThrow(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes = NONE);
MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode);
Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
StrictModeFlag strict_mode);
Handle<Object> TransitionElementsKind(Handle<JSObject> object,
ElementsKind to_kind);
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name);
@ -248,21 +199,8 @@ Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<String> name,
PropertyAttributes* attributes);
Handle<Object> GetPrototype(Handle<Object> obj);
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
// Sets a hidden property on an object. Returns obj on success, undefined
// if trying to set the property on a detached proxy.
Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
Handle<String> key,
Handle<Object> value);
int GetIdentityHash(Handle<JSReceiver> obj);
Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index);
Handle<JSObject> Copy(Handle<JSObject> obj);
@ -316,7 +254,6 @@ Handle<String> SubString(Handle<String> str,
int end,
PretenureFlag pretenure = NOT_TENURED);
// Sets the expected number of properties for the function's instances.
void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
@ -335,8 +272,6 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy(
Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> prototype);
Handle<Object> PreventExtensions(Handle<JSObject> object);
Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
Handle<Object> key);

2
deps/v8/src/heap-inl.h

@ -463,7 +463,7 @@ MaybeObject* Heap::PrepareForCompare(String* str) {
int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
ASSERT(HasBeenSetup());
ASSERT(HasBeenSetUp());
int amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes >= 0) {
// Avoid overflow.

2
deps/v8/src/heap-profiler.cc

@ -51,7 +51,7 @@ void HeapProfiler::ResetSnapshots() {
}
void HeapProfiler::Setup() {
void HeapProfiler::SetUp() {
Isolate* isolate = Isolate::Current();
if (isolate->heap_profiler() == NULL) {
isolate->set_heap_profiler(new HeapProfiler());

2
deps/v8/src/heap-profiler.h

@ -48,7 +48,7 @@ class HeapSnapshotsCollection;
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
class HeapProfiler {
public:
static void Setup();
static void SetUp();
static void TearDown();
static HeapSnapshot* TakeSnapshot(const char* name,

103
deps/v8/src/heap.cc

@ -176,7 +176,7 @@ Heap::Heap()
intptr_t Heap::Capacity() {
if (!HasBeenSetup()) return 0;
if (!HasBeenSetUp()) return 0;
return new_space_.Capacity() +
old_pointer_space_->Capacity() +
@ -188,7 +188,7 @@ intptr_t Heap::Capacity() {
intptr_t Heap::CommittedMemory() {
if (!HasBeenSetup()) return 0;
if (!HasBeenSetUp()) return 0;
return new_space_.CommittedMemory() +
old_pointer_space_->CommittedMemory() +
@ -200,14 +200,14 @@ intptr_t Heap::CommittedMemory() {
}
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetup()) return 0;
if (!HasBeenSetUp()) return 0;
return isolate()->memory_allocator()->SizeExecutable();
}
intptr_t Heap::Available() {
if (!HasBeenSetup()) return 0;
if (!HasBeenSetUp()) return 0;
return new_space_.Available() +
old_pointer_space_->Available() +
@ -218,7 +218,7 @@ intptr_t Heap::Available() {
}
bool Heap::HasBeenSetup() {
bool Heap::HasBeenSetUp() {
return old_pointer_space_ != NULL &&
old_data_space_ != NULL &&
code_space_ != NULL &&
@ -1354,6 +1354,28 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
}
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
AssertNoAllocation no_allocation;
class VisitorAdapter : public ObjectVisitor {
public:
explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
: visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if ((*p)->IsExternalString()) {
visitor_->VisitExternalString(Utils::ToLocal(
Handle<String>(String::cast(*p))));
}
}
}
private:
v8::ExternalResourceVisitor* visitor_;
} visitor_adapter(visitor);
external_string_table_.Iterate(&visitor_adapter);
}
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
public:
static inline void VisitPointer(Heap* heap, Object** p) {
@ -1869,6 +1891,19 @@ MaybeObject* Heap::AllocatePolymorphicCodeCache() {
}
MaybeObject* Heap::AllocateAccessorPair() {
Object* result;
{ MaybeObject* maybe_result = AllocateStruct(ACCESSOR_PAIR_TYPE);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
AccessorPair* accessors = AccessorPair::cast(result);
// Later we will have to distinguish between undefined and the hole...
// accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
// accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
return accessors;
}
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{type, size, k##camel_name##MapRootIndex},
@ -2428,18 +2463,18 @@ bool Heap::CreateInitialObjects() {
// Allocate the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
{ MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
{ MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_code_stubs(NumberDictionary::cast(obj));
set_code_stubs(UnseededNumberDictionary::cast(obj));
// Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
// is set to avoid expanding the dictionary during bootstrapping.
{ MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
{ MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_non_monomorphic_cache(NumberDictionary::cast(obj));
set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
{ MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
if (!maybe_obj->ToObject(&obj)) return false;
@ -3794,7 +3829,7 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
}
Map* new_map = Map::cast(obj);
// Setup the global object as a normalized object.
// Set up the global object as a normalized object.
global->set_map(new_map);
global->map()->clear_instance_descriptors();
global->set_properties(dictionary);
@ -4727,7 +4762,7 @@ bool Heap::IdleGlobalGC() {
#ifdef DEBUG
void Heap::Print() {
if (!HasBeenSetup()) return;
if (!HasBeenSetUp()) return;
isolate()->PrintStack();
AllSpaces spaces;
for (Space* space = spaces.next(); space != NULL; space = spaces.next())
@ -4792,7 +4827,7 @@ bool Heap::Contains(HeapObject* value) {
bool Heap::Contains(Address addr) {
if (OS::IsOutsideAllocatedSpace(addr)) return false;
return HasBeenSetup() &&
return HasBeenSetUp() &&
(new_space_.ToSpaceContains(addr) ||
old_pointer_space_->Contains(addr) ||
old_data_space_->Contains(addr) ||
@ -4810,7 +4845,7 @@ bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
bool Heap::InSpace(Address addr, AllocationSpace space) {
if (OS::IsOutsideAllocatedSpace(addr)) return false;
if (!HasBeenSetup()) return false;
if (!HasBeenSetUp()) return false;
switch (space) {
case NEW_SPACE:
@ -4835,7 +4870,7 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
#ifdef DEBUG
void Heap::Verify() {
ASSERT(HasBeenSetup());
ASSERT(HasBeenSetUp());
store_buffer()->Verify();
@ -5262,7 +5297,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
bool Heap::ConfigureHeap(int max_semispace_size,
intptr_t max_old_gen_size,
intptr_t max_executable_size) {
if (HasBeenSetup()) return false;
if (HasBeenSetUp()) return false;
if (max_semispace_size > 0) {
if (max_semispace_size < Page::kPageSize) {
@ -5551,7 +5586,7 @@ class HeapDebugUtils {
#endif
bool Heap::Setup(bool create_heap_objects) {
bool Heap::SetUp(bool create_heap_objects) {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
debug_utils_ = new HeapDebugUtils(this);
@ -5581,12 +5616,12 @@ bool Heap::Setup(bool create_heap_objects) {
MarkMapPointersAsEncoded(false);
// Setup memory allocator.
if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
// Set up memory allocator.
if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
return false;
// Setup new space.
if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
// Set up new space.
if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
return false;
}
@ -5597,7 +5632,7 @@ bool Heap::Setup(bool create_heap_objects) {
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
if (old_pointer_space_ == NULL) return false;
if (!old_pointer_space_->Setup()) return false;
if (!old_pointer_space_->SetUp()) return false;
// Initialize old data space.
old_data_space_ =
@ -5606,14 +5641,14 @@ bool Heap::Setup(bool create_heap_objects) {
OLD_DATA_SPACE,
NOT_EXECUTABLE);
if (old_data_space_ == NULL) return false;
if (!old_data_space_->Setup()) return false;
if (!old_data_space_->SetUp()) return false;
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
// On 64-bit platform(s), we put all code objects in a 2 GB range of
// virtual address space, so that they can call each other with near calls.
if (code_range_size_ > 0) {
if (!isolate_->code_range()->Setup(code_range_size_)) {
if (!isolate_->code_range()->SetUp(code_range_size_)) {
return false;
}
}
@ -5621,7 +5656,7 @@ bool Heap::Setup(bool create_heap_objects) {
code_space_ =
new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
if (!code_space_->Setup()) return false;
if (!code_space_->SetUp()) return false;
// Initialize map space.
map_space_ = new MapSpace(this,
@ -5629,28 +5664,28 @@ bool Heap::Setup(bool create_heap_objects) {
FLAG_max_map_space_pages,
MAP_SPACE);
if (map_space_ == NULL) return false;
if (!map_space_->Setup()) return false;
if (!map_space_->SetUp()) return false;
// Initialize global property cell space.
cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
if (cell_space_ == NULL) return false;
if (!cell_space_->Setup()) return false;
if (!cell_space_->SetUp()) return false;
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
if (!lo_space_->SetUp()) return false;
// Setup the seed that is used to randomize the string hash function.
ASSERT(string_hash_seed() == 0);
if (FLAG_randomize_string_hashes) {
if (FLAG_string_hash_seed == 0) {
set_string_hash_seed(
// Set up the seed that is used to randomize the string hash function.
ASSERT(hash_seed() == 0);
if (FLAG_randomize_hashes) {
if (FLAG_hash_seed == 0) {
set_hash_seed(
Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
} else {
set_string_hash_seed(Smi::FromInt(FLAG_string_hash_seed));
set_hash_seed(Smi::FromInt(FLAG_hash_seed));
}
}
@ -5668,7 +5703,7 @@ bool Heap::Setup(bool create_heap_objects) {
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
store_buffer()->Setup();
store_buffer()->SetUp();
return true;
}

31
deps/v8/src/heap.h

@ -96,7 +96,7 @@ inline Heap* _inline_get_heap_();
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(Object, termination_exception, TerminationException) \
V(Smi, string_hash_seed, StringHashSeed) \
V(Smi, hash_seed, HashSeed) \
V(Map, string_map, StringMap) \
V(Map, symbol_map, SymbolMap) \
V(Map, cons_string_map, ConsStringMap) \
@ -146,8 +146,8 @@ inline Heap* _inline_get_heap_();
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(Foreign, prototype_accessors, PrototypeAccessors) \
V(NumberDictionary, code_stubs, CodeStubs) \
V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
V(UnseededNumberDictionary, code_stubs, CodeStubs) \
V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
@ -434,7 +434,7 @@ class ExternalStringTable {
class Heap {
public:
// Configure heap size before setup. Return false if the heap has been
// setup already.
// set up already.
bool ConfigureHeap(int max_semispace_size,
intptr_t max_old_gen_size,
intptr_t max_executable_size);
@ -443,7 +443,7 @@ class Heap {
// Initializes the global object heap. If create_heap_objects is true,
// also creates the basic non-mutable objects.
// Returns whether it succeeded.
bool Setup(bool create_heap_objects);
bool SetUp(bool create_heap_objects);
// Destroys all memory allocated by the heap.
void TearDown();
@ -453,8 +453,8 @@ class Heap {
// jslimit_/real_jslimit_ variable in the StackGuard.
void SetStackLimits();
// Returns whether Setup has been called.
bool HasBeenSetup();
// Returns whether SetUp has been called.
bool HasBeenSetUp();
// Returns the maximum amount of memory reserved for the heap. For
// the young generation, we reserve 4 times the amount needed for a
@ -615,6 +615,9 @@ class Heap {
// Allocates an empty PolymorphicCodeCache.
MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
// Allocates a pre-tenured empty AccessorPair.
MUST_USE_RESULT MaybeObject* AllocateAccessorPair();
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
@ -1136,7 +1139,7 @@ class Heap {
inline AllocationSpace TargetSpaceId(InstanceType type);
// Sets the stub_cache_ (only used when expanding the dictionary).
void public_set_code_stubs(NumberDictionary* value) {
void public_set_code_stubs(UnseededNumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
}
@ -1148,7 +1151,7 @@ class Heap {
}
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
void public_set_non_monomorphic_cache(NumberDictionary* value) {
void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
}
@ -1409,6 +1412,8 @@ class Heap {
void ProcessWeakReferences(WeakObjectRetainer* retainer);
void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
// mark or if we've already filled the bottom 1/16th of the to space,
@ -1506,9 +1511,9 @@ class Heap {
return idle_notification_will_schedule_next_gc_;
}
uint32_t StringHashSeed() {
uint32_t seed = static_cast<uint32_t>(string_hash_seed()->value());
ASSERT(FLAG_randomize_string_hashes || seed == 0);
uint32_t HashSeed() {
uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
ASSERT(FLAG_randomize_hashes || seed == 0);
return seed;
}
@ -1911,7 +1916,7 @@ class Heap {
PromotionQueue promotion_queue_;
// Flag is set when the heap has been configured. The heap can be repeatedly
// configured through the API until it is setup.
// configured through the API until it is set up.
bool configured_;
ExternalStringTable external_string_table_;

25
deps/v8/src/hydrogen-instructions.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -788,6 +788,29 @@ HValue* HTypeof::Canonicalize() {
}
HValue* HBitwise::Canonicalize() {
if (!representation().IsInteger32()) return this;
// If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
if (left()->IsConstant() &&
HConstant::cast(left())->HasInteger32Value() &&
HConstant::cast(left())->Integer32Value() == nop_constant) {
return right();
}
if (right()->IsConstant() &&
HConstant::cast(right())->HasInteger32Value() &&
HConstant::cast(right())->Integer32Value() == nop_constant) {
return left();
}
return this;
}
HValue* HChange::Canonicalize() {
return (from().Equals(to())) ? value() : this;
}
void HTypeof::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}

45
deps/v8/src/hydrogen-instructions.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -146,6 +146,7 @@ class LChunkBuilder;
V(Parameter) \
V(Power) \
V(PushArgument) \
V(Random) \
V(RegExpLiteral) \
V(Return) \
V(Sar) \
@ -1130,12 +1131,16 @@ class HChange: public HUnaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
Representation from() { return value()->representation(); }
Representation to() { return representation(); }
bool deoptimize_on_undefined() const {
return CheckFlag(kDeoptimizeOnUndefined);
}
bool deoptimize_on_minus_zero() const {
return CheckFlag(kBailoutOnMinusZero);
}
virtual Representation RequiredInputRepresentation(int index) {
return from();
}
@ -1921,8 +1926,11 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
class HCheckMap: public HTemplateInstruction<2> {
public:
HCheckMap(HValue* value, Handle<Map> map, HValue* typecheck = NULL)
: map_(map) {
HCheckMap(HValue* value, Handle<Map> map,
HValue* typecheck = NULL,
CompareMapMode mode = REQUIRE_EXACT_MAP)
: map_(map),
mode_(mode) {
SetOperandAt(0, value);
// If callers don't depend on a typecheck, they can pass in NULL. In that
// case we use a copy of the |value| argument as a dummy value.
@ -1930,6 +1938,9 @@ class HCheckMap: public HTemplateInstruction<2> {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
has_element_transitions_ =
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL) != NULL ||
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL) != NULL;
}
virtual Representation RequiredInputRepresentation(int index) {
@ -1940,17 +1951,24 @@ class HCheckMap: public HTemplateInstruction<2> {
HValue* value() { return OperandAt(0); }
Handle<Map> map() const { return map_; }
CompareMapMode mode() const { return mode_; }
DECLARE_CONCRETE_INSTRUCTION(CheckMap)
protected:
virtual bool DataEquals(HValue* other) {
HCheckMap* b = HCheckMap::cast(other);
return map_.is_identical_to(b->map());
// Two CheckMaps instructions are DataEqual if their maps are identical and
// they have the same mode. The mode comparison can be ignored if the map
// has no elements transitions.
return map_.is_identical_to(b->map()) &&
(b->mode() == mode() || !has_element_transitions_);
}
private:
bool has_element_transitions_;
Handle<Map> map_;
CompareMapMode mode_;
};
@ -2985,6 +3003,23 @@ class HPower: public HTemplateInstruction<2> {
};
// Hydrogen instruction producing a pseudo-random number; used to inline
// Math.random / %RandomHeapNumber (see TryInlineBuiltinFunction and
// GenerateRandomHeapNumber in hydrogen.cc).  Takes the global object as
// its single operand and produces an untagged double.
class HRandom: public HTemplateInstruction<1> {
 public:
  explicit HRandom(HValue* global_object) {
    SetOperandAt(0, global_object);
    set_representation(Representation::Double());
  }

  // The global object through which the random-number state is reached.
  HValue* global_object() { return OperandAt(0); }

  // The single input (the global object) must be a tagged pointer.
  virtual Representation RequiredInputRepresentation(int index) {
    return Representation::Tagged();
  }

  DECLARE_CONCRETE_INSTRUCTION(Random)
};
class HAdd: public HArithmeticBinaryOperation {
public:
HAdd(HValue* context, HValue* left, HValue* right)
@ -3138,6 +3173,8 @@ class HBitwise: public HBitwiseBinaryOperation {
virtual bool IsCommutative() const { return true; }
virtual HValue* Canonicalize();
static HInstruction* NewHBitwise(Zone* zone,
Token::Value op,
HValue* context,

99
deps/v8/src/hydrogen.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -628,7 +628,11 @@ HGraph::HGraph(CompilationInfo* info)
Handle<Code> HGraph::Compile(CompilationInfo* info) {
int values = GetMaximumValueID();
if (values > LAllocator::max_initial_value_ids()) {
if (FLAG_trace_bailout) PrintF("Function is too big\n");
if (FLAG_trace_bailout) {
SmartArrayPointer<char> name(
info->shared_info()->DebugName()->ToCString());
PrintF("Function @\"%s\" is too big.\n", *name);
}
return Handle<Code>::null();
}
@ -2301,7 +2305,7 @@ HGraph* HGraphBuilder::CreateGraph() {
Bailout("function with illegal redeclaration");
return NULL;
}
SetupScope(scope);
SetUpScope(scope);
// Add an edge to the body entry. This is warty: the graph's start
// environment will be used by the Lithium translation as the initial
@ -2465,7 +2469,7 @@ HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) {
}
void HGraphBuilder::SetupScope(Scope* scope) {
void HGraphBuilder::SetUpScope(Scope* scope) {
HConstant* undefined_constant = new(zone()) HConstant(
isolate()->factory()->undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
@ -3572,7 +3576,8 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
bool smi_and_map_check) {
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(object));
AddInstruction(new(zone()) HCheckMap(object, type));
AddInstruction(new(zone()) HCheckMap(object, type, NULL,
ALLOW_ELEMENT_TRANSITION_MAPS));
}
int index = ComputeStoredFieldIndex(type, name, lookup);
@ -4117,7 +4122,8 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
bool smi_and_map_check) {
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(object));
AddInstruction(new(zone()) HCheckMap(object, type));
AddInstruction(new(zone()) HCheckMap(object, type, NULL,
ALLOW_ELEMENT_TRANSITION_MAPS));
}
int index = lookup->GetLocalFieldIndexFromMap(*type);
@ -4157,7 +4163,8 @@ HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
true);
} else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
AddInstruction(new(zone()) HCheckNonSmi(obj));
AddInstruction(new(zone()) HCheckMap(obj, map));
AddInstruction(new(zone()) HCheckMap(obj, map, NULL,
ALLOW_ELEMENT_TRANSITION_MAPS));
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
return new(zone()) HConstant(function, Representation::Tagged());
} else {
@ -4652,7 +4659,8 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr,
// its prototypes.
if (smi_and_map_check) {
AddInstruction(new(zone()) HCheckNonSmi(receiver));
AddInstruction(new(zone()) HCheckMap(receiver, receiver_map));
AddInstruction(new(zone()) HCheckMap(receiver, receiver_map, NULL,
ALLOW_ELEMENT_TRANSITION_MAPS));
}
if (!expr->holder().is_null()) {
AddInstruction(new(zone()) HCheckPrototypeMaps(
@ -5124,6 +5132,69 @@ bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
return true;
}
break;
case kMathRandom:
if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr, receiver, receiver_map, true);
Drop(1);
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
AddInstruction(global_object);
HRandom* result = new(zone()) HRandom(global_object);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
case kMathMax:
case kMathMin:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr, receiver, receiver_map, true);
HValue* right = Pop();
HValue* left = Pop();
// Do not inline if the return representation is not certain.
if (!left->representation().Equals(right->representation())) {
Push(left);
Push(right);
return false;
}
Pop(); // Pop receiver.
Token::Value op = (id == kMathMin) ? Token::LT : Token::GT;
HCompareIDAndBranch* compare = NULL;
if (left->representation().IsTagged()) {
HChange* left_cvt =
new(zone()) HChange(left, Representation::Double(), false, true);
left_cvt->SetFlag(HValue::kBailoutOnMinusZero);
AddInstruction(left_cvt);
HChange* right_cvt =
new(zone()) HChange(right, Representation::Double(), false, true);
right_cvt->SetFlag(HValue::kBailoutOnMinusZero);
AddInstruction(right_cvt);
compare = new(zone()) HCompareIDAndBranch(left_cvt, right_cvt, op);
compare->SetInputRepresentation(Representation::Double());
} else {
compare = new(zone()) HCompareIDAndBranch(left, right, op);
compare->SetInputRepresentation(left->representation());
}
HBasicBlock* return_left = graph()->CreateBasicBlock();
HBasicBlock* return_right = graph()->CreateBasicBlock();
compare->SetSuccessorAt(0, return_left);
compare->SetSuccessorAt(1, return_right);
current_block()->Finish(compare);
set_current_block(return_left);
Push(left);
set_current_block(return_right);
Push(right);
HBasicBlock* join = CreateJoin(return_left, return_right, expr->id());
set_current_block(join);
ast_context()->ReturnValue(Pop());
return true;
}
break;
default:
// Not yet supported for inlining.
break;
@ -6195,9 +6266,11 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Handle<Map> map = oracle()->GetCompareMap(expr);
if (!map.is_null()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(new(zone()) HCheckMap(left, map));
AddInstruction(new(zone()) HCheckMap(left, map, NULL,
ALLOW_ELEMENT_TRANSITION_MAPS));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(new(zone()) HCheckMap(right, map));
AddInstruction(new(zone()) HCheckMap(right, map, NULL,
ALLOW_ELEMENT_TRANSITION_MAPS));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
@ -6569,7 +6642,11 @@ void HGraphBuilder::GenerateLog(CallRuntime* call) {
// Fast support for Math.random().
void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
  // Inline %RandomHeapNumber as an HRandom instruction on the global
  // object instead of bailing out to the runtime.  (A stale unconditional
  // `return Bailout(...)` left over from the previous revision made this
  // code unreachable; it is removed here.)
  HValue* context = environment()->LookupContext();
  HGlobalObject* global_object = new(zone()) HGlobalObject(context);
  AddInstruction(global_object);
  HRandom* result = new(zone()) HRandom(global_object);
  return ast_context()->ReturnInstruction(result, call->id());
}

2
deps/v8/src/hydrogen.h

@ -870,7 +870,7 @@ class HGraphBuilder: public AstVisitor {
Representation rep);
static Representation ToRepresentation(TypeInfo info);
void SetupScope(Scope* scope);
void SetUpScope(Scope* scope);
virtual void VisitStatements(ZoneList<Statement*>* statements);
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);

6
deps/v8/src/ia32/assembler-ia32.cc

@ -350,7 +350,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
#endif
// Setup buffer pointers.
// Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@ -377,7 +377,7 @@ void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Setup code descriptor.
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@ -2457,7 +2457,7 @@ void Assembler::GrowBuffer() {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
// Setup new buffer.
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();
desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());

2
deps/v8/src/ia32/builtins-ia32.cc

@ -333,7 +333,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(ebx);
__ push(ebx);
// Setup pointer to last argument.
// Set up pointer to last argument.
__ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.

37
deps/v8/src/ia32/code-stubs-ia32.cc

@ -128,14 +128,14 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Get the function from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
// Setup the object header.
// Set up the object header.
Factory* factory = masm->isolate()->factory();
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
factory->function_context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
// Set up the fixed slots.
__ Set(ebx, Immediate(0)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
__ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
@ -179,7 +179,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Get the serialized scope info from the stack.
__ mov(ebx, Operand(esp, 2 * kPointerSize));
// Setup the object header.
// Set up the object header.
Factory* factory = masm->isolate()->factory();
__ mov(FieldOperand(eax, HeapObject::kMapOffset),
factory->block_context_map());
@ -202,7 +202,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Setup the fixed slots.
// Set up the fixed slots.
__ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
__ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
__ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
@ -3379,7 +3379,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ mov(FieldOperand(eax, i), edx);
}
// Setup the callee in-object property.
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ mov(edx, Operand(esp, 4 * kPointerSize));
__ mov(FieldOperand(eax, JSObject::kHeaderSize +
@ -3392,7 +3392,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Heap::kArgumentsLengthIndex * kPointerSize),
ecx);
// Setup the elements pointer in the allocated arguments object.
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
@ -3571,7 +3571,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the parameters pointer from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
// Setup the elements pointer in the allocated arguments object and
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
@ -4950,7 +4950,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
// Setup frame.
// Set up frame.
__ push(ebp);
__ mov(ebp, esp);
@ -5081,8 +5081,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
static const int kDeltaToCmpImmediate = 2;
static const int kDeltaToMov = 8;
static const int kDeltaToMovImmediate = 9;
static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
ExternalReference roots_array_start =
@ -5147,12 +5147,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ mov(scratch, Operand(esp, 0 * kPointerSize));
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
__ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
__ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
__ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
__ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
__ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
}
__ mov(Operand(scratch, kDeltaToCmpImmediate), map);
__ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
__ mov(Operand(scratch, 0), map);
}
// Loop through the prototype chain of the object looking for the function
@ -6037,7 +6038,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
if (Serializer::enabled()) {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
__ mov(scratch, Immediate(Heap::kStringHashSeedRootIndex));
__ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
__ mov(scratch, Operand::StaticArray(scratch,
times_pointer_size,
roots_array_start));
@ -6046,7 +6047,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
__ shl(scratch, 10);
__ add(hash, scratch);
} else {
int32_t seed = masm->isolate()->heap()->StringHashSeed();
int32_t seed = masm->isolate()->heap()->HashSeed();
__ lea(scratch, Operand(character, seed));
__ shl(scratch, 10);
__ lea(hash, Operand(scratch, character, times_1, seed));
@ -6091,14 +6092,12 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ shl(scratch, 15);
__ add(hash, scratch);
uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
__ and_(hash, kHashShiftCutOffMask);
__ and_(hash, String::kHashBitMask);
// if (hash == 0) hash = 27;
Label hash_not_zero;
__ test(hash, hash);
__ j(not_zero, &hash_not_zero, Label::kNear);
__ mov(hash, Immediate(27));
__ mov(hash, Immediate(StringHasher::kZeroHash));
__ bind(&hash_not_zero);
}

2
deps/v8/src/ia32/cpu-ia32.cc

@ -41,7 +41,7 @@
namespace v8 {
namespace internal {
void CPU::Setup() {
void CPU::SetUp() {
CpuFeatures::Probe();
}

2
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -406,7 +406,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
// Set up the frame pointer and the context pointer.
// All OSR stack frames are dynamically aligned to an 8-byte boundary.
int frame_pointer = input_->GetRegister(ebp.code());
if ((frame_pointer & 0x4) == 0) {

2
deps/v8/src/ia32/full-codegen-ia32.cc

@ -967,7 +967,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
__ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Setup the four remaining stack slots.
// Set up the four remaining stack slots.
__ push(eax); // Map.
__ push(edx); // Enumeration cache.
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));

94
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -1831,7 +1831,7 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch(
// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input. Only input and temp2 may alias.
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String>class_name,
@ -1839,7 +1839,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
Register temp,
Register temp2) {
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
ASSERT(!input.is(temp2));
ASSERT(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
if (class_name->IsEqualTo(CStrVector("Function"))) {
@ -1899,12 +1900,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
if (input.is(temp)) {
// Swap.
Register swapper = temp;
temp = temp2;
temp2 = swapper;
}
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
@ -1979,7 +1975,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register map = ToRegister(instr->TempAt(0));
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
__ cmp(map, factory()->the_hole_value()); // Patched to cached map.
Handle<JSGlobalPropertyCell> cache_cell =
factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
__ cmp(map, Operand::Cell(cache_cell)); // Patched to cached map.
__ j(not_equal, &cache_miss, Label::kNear);
__ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
__ jmp(&done);
@ -3016,6 +3014,29 @@ void LCodeGen::DoPower(LPower* instr) {
}
// Generates code for LRandom: calls the C random_uint32 function and turns
// the 32 random bits into a double in [0, 1).
void LCodeGen::DoRandom(LRandom* instr) {
  // Having marked this instruction as a call we can use any
  // registers.
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  ASSERT(ToRegister(instr->InputAt(0)).is(eax));

  // Pass the global context (loaded from the global object in eax) as the
  // single argument to the C function.
  __ PrepareCallCFunction(1, ebx);
  __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
  __ mov(Operand(esp, 0), eax);
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);

  // Convert 32 random bits in eax to 0.(32 random bits) in a double
  // by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
  __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
  __ movd(xmm2, ebx);
  __ movd(xmm1, eax);
  __ cvtss2sd(xmm2, xmm2);
  // XOR merges the random mantissa bits with the exponent/high bits of
  // 1.0 x 2^20; subtracting 1.0 x 2^20 then removes the bias, leaving the
  // fractional random value in xmm1.
  __ xorps(xmm1, xmm2);
  __ subsd(xmm1, xmm2);
}
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
@ -3678,8 +3699,10 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env) {
Label load_smi, done;
@ -3708,6 +3731,15 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
}
// Heap number to XMM conversion.
__ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = xmm0;
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
__ movmskpd(temp_reg, result_reg);
__ test_b(temp_reg, 1);
DeoptimizeIf(not_zero, env);
}
__ jmp(&done, Label::kNear);
// Smi to XMM conversion
@ -3830,14 +3862,23 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
LOperand* temp = instr->TempAt(0);
ASSERT(temp == NULL || temp->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
EmitNumberUntagD(input_reg, result_reg,
bool deoptimize_on_minus_zero =
instr->hydrogen()->deoptimize_on_minus_zero();
Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
deoptimize_on_minus_zero,
instr->environment());
}
@ -4033,13 +4074,23 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
}
// Shared map-check helper: compares the map of |reg| against |map|
// (optionally accepting element-transition maps, depending on |mode|) and
// deoptimizes into |env| when no match is found.
void LCodeGen::DoCheckMapCommon(Register reg,
                                Handle<Map> map,
                                CompareMapMode mode,
                                LEnvironment* env) {
  Label map_matches;
  __ CompareMap(reg, map, &map_matches, mode);
  DeoptimizeIf(not_equal, env);
  __ bind(&map_matches);
}
// Generates code for LCheckMap by delegating to DoCheckMapCommon with the
// instruction's map and compare mode.
// The stale inline `cmp` + `DeoptimizeIf` pair (superseded by the
// DoCheckMapCommon call, which also honors the compare mode) is removed —
// it performed the map check a second time with stricter semantics.
void LCodeGen::DoCheckMap(LCheckMap* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  Handle<Map> map = instr->hydrogen()->map();
  DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
}
@ -4102,9 +4153,9 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Handle<Map>(current_prototype->map()));
DeoptimizeIf(not_equal, instr->environment());
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
@ -4112,9 +4163,8 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
}
// Check the holder map.
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Handle<Map>(current_prototype->map()));
DeoptimizeIf(not_equal, instr->environment());
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
@ -4139,7 +4189,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
DeoptimizeIf(not_equal, instr->environment());
}
// Setup the parameters to the stub/runtime call.
// Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
@ -4247,7 +4297,7 @@ void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
// Setup the parameters to the stub/runtime call.
// Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));

9
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -110,6 +110,9 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoCheckMapCommon(Register reg, Handle<Map> map,
CompareMapMode mode, LEnvironment* env);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
@ -265,8 +268,10 @@ class LCodeGen BASE_EMBEDDED {
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
Register temp,
XMMRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
@ -380,7 +385,7 @@ class LDeferredCode: public ZoneObject {
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }

55
deps/v8/src/ia32/lithium-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -1047,22 +1047,31 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
if (v->EmitAtUses()) {
ASSERT(v->IsConstant());
ASSERT(!v->representation().IsDouble());
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
HValue* value = instr->value();
if (value->EmitAtUses()) {
ASSERT(value->IsConstant());
ASSERT(!value->representation().IsDouble());
HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new(zone()) LGoto(successor->block_id());
}
// Untagged integers or doubles, smis and booleans don't require a
// deoptimization environment nor a temp register.
Representation rep = value->representation();
HType type = value->type();
if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
return new(zone()) LBranch(UseRegister(value), NULL);
}
ToBooleanStub::Types expected = instr->expected_input_types();
// We need a temporary register when we have to access the map *or* we have
// no type info yet, in which case we handle all cases (including the ones
// involving maps).
bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
LOperand* temp = needs_temp ? TempRegister() : NULL;
return AssignEnvironment(new(zone()) LBranch(UseRegister(v), temp));
return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
}
@ -1388,7 +1397,11 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
temp = TempRegister();
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
return AssignEnvironment(DefineSameAsFirst(mul));
if (instr->CheckFlag(HValue::kCanOverflow) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
AssignEnvironment(mul);
}
return DefineSameAsFirst(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
} else {
@ -1456,6 +1469,15 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
// Lowers HRandom to LRandom.  The instruction is implemented as a C call
// (see LCodeGen::DoRandom), so it uses fixed registers: the tagged global
// object in eax and the double result in xmm1.
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
  ASSERT(instr->representation().IsDouble());
  ASSERT(instr->global_object()->representation().IsTagged());
  LOperand* global_object = UseFixed(instr->global_object(), eax);
  LRandom* result = new(zone()) LRandom(global_object);
  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
@ -1588,9 +1610,9 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
// Lowers HClassOfTestAndBranch: the value in a register plus two temp
// registers for EmitClassOfTest.
// The duplicated return statement left over from the previous revision
// (which used UseTempRegister and made the new return unreachable) is
// removed; the value no longer needs to be a trashable temp now that
// EmitClassOfTest does not clobber its input.
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
    HClassOfTestAndBranch* instr) {
  ASSERT(instr->value()->representation().IsTagged());
  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
                                           TempRegister(),
                                           TempRegister());
}
@ -1616,7 +1638,7 @@ LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
// Lowers HValueOf: the object in a register (result same as first input)
// plus one temp register.
// The stale `return AssignEnvironment(...)` from the previous revision is
// removed — it shadowed the intended return and attached a deoptimization
// environment this instruction no longer needs.
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
  LOperand* object = UseRegister(instr->value());
  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
  return DefineSameAsFirst(result);
}
@ -1660,7 +1682,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
// Temp register only necessary for minus zero check.
LOperand* temp = instr->deoptimize_on_minus_zero()
? TempRegister()
: NULL;
LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
@ -1956,7 +1982,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
return AssignEnvironment(DefineAsRegister(result));
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
}

19
deps/v8/src/ia32/lithium-ia32.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -135,6 +135,7 @@ class LCodeGen;
V(OuterContext) \
V(Parameter) \
V(Power) \
V(Random) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@ -1043,6 +1044,17 @@ class LPower: public LTemplateInstruction<1, 2, 0> {
};
// Lithium counterpart of HRandom: one input (the global object, fixed to
// eax by LChunkBuilder::DoRandom), one double result (fixed to xmm1),
// no temps.
class LRandom: public LTemplateInstruction<1, 1, 0> {
 public:
  explicit LRandom(LOperand* global_object) {
    inputs_[0] = global_object;
  }

  DECLARE_CONCRETE_INSTRUCTION(Random, "random")
  DECLARE_HYDROGEN_ACCESSOR(Random)
};
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@ -1612,10 +1624,11 @@ class LSmiTag: public LTemplateInstruction<1, 1, 0> {
};
class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
public:
explicit LNumberUntagD(LOperand* value) {
explicit LNumberUntagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")

124
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -487,15 +487,48 @@ void MacroAssembler::StoreNumberToDoubleElements(
}
void MacroAssembler::CompareMap(Register obj,
Handle<Map> map,
Label* early_success,
CompareMapMode mode) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
Map* transitioned_fast_element_map(
map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
ASSERT(transitioned_fast_element_map == NULL ||
map->elements_kind() != FAST_ELEMENTS);
if (transitioned_fast_element_map != NULL) {
j(equal, early_success, Label::kNear);
cmp(FieldOperand(obj, HeapObject::kMapOffset),
Handle<Map>(transitioned_fast_element_map));
}
Map* transitioned_double_map(
map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
ASSERT(transitioned_double_map == NULL ||
map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
if (transitioned_double_map != NULL) {
j(equal, early_success, Label::kNear);
cmp(FieldOperand(obj, HeapObject::kMapOffset),
Handle<Map>(transitioned_double_map));
}
}
}
void MacroAssembler::CheckMap(Register obj,
Handle<Map> map,
Label* fail,
SmiCheckType smi_check_type) {
SmiCheckType smi_check_type,
CompareMapMode mode) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
Label success;
CompareMap(obj, map, &success, mode);
j(not_equal, fail);
bind(&success);
}
@ -616,7 +649,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFramePrologue() {
// Setup the frame structure on the stack.
// Set up the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
@ -668,7 +701,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
void MacroAssembler::EnterExitFrame(bool save_doubles) {
EnterExitFramePrologue();
// Setup argc and argv in callee-saved registers.
// Set up argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
mov(edi, eax);
lea(esi, Operand(ebp, eax, times_4, offset));
@ -959,6 +992,50 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// Note: r0 will contain hash code
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// Xor original key with a seed.
if (Serializer::enabled()) {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(scratch, Immediate(Heap::kHashSeedRootIndex));
mov(scratch,
Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
SmiUntag(scratch);
xor_(r0, scratch);
} else {
int32_t seed = isolate()->heap()->HashSeed();
xor_(r0, Immediate(seed));
}
// hash = ~hash + (hash << 15);
mov(scratch, r0);
not_(r0);
shl(scratch, 15);
add(r0, scratch);
// hash = hash ^ (hash >> 12);
mov(scratch, r0);
shr(scratch, 12);
xor_(r0, scratch);
// hash = hash + (hash << 2);
lea(r0, Operand(r0, r0, times_4, 0));
// hash = hash ^ (hash >> 4);
mov(scratch, r0);
shr(scratch, 4);
xor_(r0, scratch);
// hash = hash * 2057;
imul(r0, r0, 2057);
// hash = hash ^ (hash >> 16);
mov(scratch, r0);
shr(scratch, 16);
xor_(r0, scratch);
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
@ -984,33 +1061,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
mov(r1, r0);
not_(r0);
shl(r1, 15);
add(r0, r1);
// hash = hash ^ (hash >> 12);
mov(r1, r0);
shr(r1, 12);
xor_(r0, r1);
// hash = hash + (hash << 2);
lea(r0, Operand(r0, r0, times_4, 0));
// hash = hash ^ (hash >> 4);
mov(r1, r0);
shr(r1, 4);
xor_(r0, r1);
// hash = hash * 2057;
imul(r0, r0, 2057);
// hash = hash ^ (hash >> 16);
mov(r1, r0);
shr(r1, 16);
xor_(r0, r1);
GetNumberHash(r0, r1);
// Compute capacity mask.
mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
shr(r1, kSmiTagSize); // convert smi to int
dec(r1);
@ -1021,19 +1075,19 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
add(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(NumberDictionary::kEntrySize == 3);
ASSERT(SeededNumberDictionary::kEntrySize == 3);
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
cmp(key, FieldOperand(elements,
r2,
times_pointer_size,
NumberDictionary::kElementsStartOffset));
SeededNumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
j(equal, &done);
} else {
@ -1044,7 +1098,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
bind(&done);
// Check that the value is a normal propety.
const int kDetailsOffset =
NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
@ -1052,7 +1106,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}

20
deps/v8/src/ia32/macro-assembler-ia32.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -251,7 +251,7 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
// Setup call kind marking in ecx. The method takes ecx as an
// Set up call kind marking in ecx. The method takes ecx as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
@ -356,13 +356,24 @@ class MacroAssembler: public Assembler {
Label* fail,
bool specialize_for_processor);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
// result of map compare. If multiple map compares are required, the compare
// sequences branches to early_success.
void CompareMap(Register obj,
Handle<Map> map,
Label* early_success,
CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object)
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
// against maps that are ElementsKind transition maps of the specificed map.
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
SmiCheckType smi_check_type);
SmiCheckType smi_check_type,
CompareMapMode mode = REQUIRE_EXACT_MAP);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
@ -486,6 +497,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* miss);
void GetNumberHash(Register r0, Register scratch);
void LoadFromNumberDictionary(Label* miss,
Register elements,

42
deps/v8/src/ia32/stub-cache-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -695,13 +695,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Register name_reg,
Register scratch,
Label* miss_label) {
// Check that the object isn't a smi.
__ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
__ j(not_equal, miss_label);
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@ -878,13 +874,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (in_new_space) {
// Save the map in scratch1 for later.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Immediate(current_map));
} else {
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(current_map));
}
// Branch on the result of the map check.
__ j(not_equal, miss);
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
ALLOW_ELEMENT_TRANSITION_MAPS);
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@ -916,9 +909,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
// Check the holder map.
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
Immediate(Handle<Map>(holder->map())));
__ j(not_equal, miss);
__ CheckMap(reg, Handle<Map>(holder->map()),
miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@ -2338,7 +2330,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
__ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
}
// Setup the context (function already in edi).
// Set up the context (function already in edi).
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
@ -2403,13 +2395,9 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
// -----------------------------------
Label miss;
// Check that the object isn't a smi.
__ JumpIfSmi(edx, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss);
__ CheckMap(edx, Handle<Map>(object->map()),
&miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@ -2453,13 +2441,9 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
// -----------------------------------
Label miss;
// Check that the object isn't a smi.
__ JumpIfSmi(edx, &miss);
// Check that the map of the object hasn't changed.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
Immediate(Handle<Map>(receiver->map())));
__ j(not_equal, &miss);
__ CheckMap(edx, Handle<Map>(receiver->map()),
&miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (receiver->IsJSGlobalProxy()) {

6
deps/v8/src/ic.cc

@ -1267,7 +1267,8 @@ MaybeObject* StoreIC::Store(State state,
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<Object> result = SetElement(receiver, index, value, strict_mode);
Handle<Object> result =
JSObject::SetElement(receiver, index, value, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}
@ -1644,7 +1645,8 @@ MaybeObject* KeyedStoreIC::Store(State state,
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
Handle<Object> result = SetElement(receiver, index, value, strict_mode);
Handle<Object> result =
JSObject::SetElement(receiver, index, value, strict_mode);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
return *value;
}

2
deps/v8/src/incremental-marking.cc

@ -418,7 +418,7 @@ void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
static void PatchIncrementalMarkingRecordWriteStubs(
Heap* heap, RecordWriteStub::Mode mode) {
NumberDictionary* stubs = heap->code_stubs();
UnseededNumberDictionary* stubs = heap->code_stubs();
int capacity = stubs->Capacity();
for (int i = 0; i < capacity; i++) {

5
deps/v8/src/incremental-marking.h

@ -56,6 +56,7 @@ class IncrementalMarking {
}
bool should_hurry() { return should_hurry_; }
void set_should_hurry(bool val) { should_hurry_ = val; }
inline bool IsStopped() { return state() == STOPPED; }
@ -219,10 +220,6 @@ class IncrementalMarking {
void UncommitMarkingDeque();
private:
void set_should_hurry(bool val) {
should_hurry_ = val;
}
int64_t SpaceLeftInOldSpace();
void ResetStepCounters();

4
deps/v8/src/inspector.cc

@ -38,11 +38,11 @@ namespace internal {
//============================================================================
// The Inspector.
void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) {
void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) {
// Dump the object pointer.
OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
if (obj->IsHeapObject()) {
HeapObject *hobj = HeapObject::cast(obj);
HeapObject* hobj = HeapObject::cast(obj);
OS::FPrint(out, " size %d :", hobj->Size());
}

8
deps/v8/src/inspector.h

@ -41,14 +41,14 @@ namespace internal {
class Inspector {
public:
static void DumpObjectType(FILE* out, Object *obj, bool print_more);
static void DumpObjectType(FILE* out, Object *obj) {
static void DumpObjectType(FILE* out, Object* obj, bool print_more);
static void DumpObjectType(FILE* out, Object* obj) {
DumpObjectType(out, obj, false);
}
static void DumpObjectType(Object *obj, bool print_more) {
static void DumpObjectType(Object* obj, bool print_more) {
DumpObjectType(stdout, obj, print_more);
}
static void DumpObjectType(Object *obj) {
static void DumpObjectType(Object* obj) {
DumpObjectType(stdout, obj, false);
}
};

59
deps/v8/src/isolate.cc

@ -570,7 +570,7 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
// Create a JSObject to hold the information for the StackFrame.
Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());
Handle<JSFunction> fun = frames[i].function();
Handle<Script> script(Script::cast(fun->shared()->script()));
@ -591,16 +591,24 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
// tag.
column_offset += script->column_offset()->value();
}
SetLocalPropertyNoThrow(stackFrame, column_key,
Handle<Smi>(Smi::FromInt(column_offset + 1)));
CHECK_NOT_EMPTY_HANDLE(
this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, column_key,
Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE));
}
SetLocalPropertyNoThrow(stackFrame, line_key,
Handle<Smi>(Smi::FromInt(line_number + 1)));
CHECK_NOT_EMPTY_HANDLE(
this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, line_key,
Handle<Smi>(Smi::FromInt(line_number + 1)), NONE));
}
if (options & StackTrace::kScriptName) {
Handle<Object> script_name(script->name(), this);
SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, script_key, script_name, NONE));
}
if (options & StackTrace::kScriptNameOrSourceURL) {
@ -616,8 +624,10 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
if (caught_exception) {
result = factory()->undefined_value();
}
SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
result);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, script_name_or_source_url_key,
result, NONE));
}
if (options & StackTrace::kFunctionName) {
@ -625,23 +635,30 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
if (fun_name->ToBoolean()->IsFalse()) {
fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
}
SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, function_key, fun_name, NONE));
}
if (options & StackTrace::kIsEval) {
int type = Smi::cast(script->compilation_type())->value();
Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
factory()->true_value() : factory()->false_value();
SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, eval_key, is_eval, NONE));
}
if (options & StackTrace::kIsConstructor) {
Handle<Object> is_constructor = (frames[i].is_constructor()) ?
factory()->true_value() : factory()->false_value();
SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, constructor_key,
is_constructor, NONE));
}
FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
frames_seen++;
}
it.Advance();
@ -1734,10 +1751,10 @@ bool Isolate::Init(Deserializer* des) {
regexp_stack_->isolate_ = this;
// Enable logging before setting up the heap
logger_->Setup();
logger_->SetUp();
CpuProfiler::Setup();
HeapProfiler::Setup();
CpuProfiler::SetUp();
HeapProfiler::SetUp();
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
@ -1754,10 +1771,10 @@ bool Isolate::Init(Deserializer* des) {
stack_guard_.InitThread(lock);
}
// Setup the object heap.
// SetUp the object heap.
const bool create_heap_objects = (des == NULL);
ASSERT(!heap_.HasBeenSetup());
if (!heap_.Setup(create_heap_objects)) {
ASSERT(!heap_.HasBeenSetUp());
if (!heap_.SetUp(create_heap_objects)) {
V8::SetFatalError();
return false;
}
@ -1765,7 +1782,7 @@ bool Isolate::Init(Deserializer* des) {
InitializeThreadLocal();
bootstrapper_->Initialize(create_heap_objects);
builtins_.Setup(create_heap_objects);
builtins_.SetUp(create_heap_objects);
// Only preallocate on the first initialization.
if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
@ -1784,7 +1801,7 @@ bool Isolate::Init(Deserializer* des) {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
debug_->Setup(create_heap_objects);
debug_->SetUp(create_heap_objects);
#endif
stub_cache_->Initialize(create_heap_objects);
@ -1805,7 +1822,7 @@ bool Isolate::Init(Deserializer* des) {
deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
runtime_profiler_->Setup();
runtime_profiler_->SetUp();
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.

7
deps/v8/src/isolate.h

@ -122,6 +122,13 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
} \
} while (false)
#define CHECK_NOT_EMPTY_HANDLE(isolate, call) \
do { \
ASSERT(!(isolate)->has_pending_exception()); \
CHECK(!(call).is_null()); \
CHECK(!(isolate)->has_pending_exception()); \
} while (false)
#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())

5
deps/v8/src/json-parser.h

@ -303,11 +303,12 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
uint32_t index;
if (key->AsArrayIndex(&index)) {
SetOwnElement(json_object, index, value, kNonStrictMode);
JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
} else if (key->Equals(isolate()->heap()->Proto_symbol())) {
SetPrototype(json_object, value);
} else {
SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
JSObject::SetLocalPropertyIgnoreAttributes(
json_object, key, value, NONE);
}
} while (MatchSkipWhiteSpace(','));
if (c0_ != '}') {

2
deps/v8/src/jsregexp.cc

@ -2636,7 +2636,7 @@ void TextNode::MakeCaseIndependent(bool is_ascii) {
TextElement elm = elms_->at(i);
if (elm.type == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.data.u_char_class;
// None of the standard character classses is different in the case
// None of the standard character classes is different in the case
// independent case and it slows us down if we don't know that.
if (cc->is_standard()) continue;
ZoneList<CharacterRange>* ranges = cc->ranges();

4
deps/v8/src/lithium-allocator.cc

@ -49,13 +49,13 @@ namespace internal {
#define DEFINE_OPERAND_CACHE(name, type) \
name name::cache[name::kNumCachedOperands]; \
void name::SetupCache() { \
void name::SetUpCache() { \
for (int i = 0; i < kNumCachedOperands; i++) { \
cache[i].ConvertTo(type, i); \
} \
} \
static bool name##_initialize() { \
name::SetupCache(); \
name::SetUpCache(); \
return true; \
} \
static bool name##_cache_initialized = name##_initialize();

14
deps/v8/src/lithium.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -265,7 +265,7 @@ class LConstantOperand: public LOperand {
return reinterpret_cast<LConstantOperand*>(op);
}
static void SetupCache();
static void SetUpCache();
private:
static const int kNumCachedOperands = 128;
@ -300,7 +300,7 @@ class LStackSlot: public LOperand {
return reinterpret_cast<LStackSlot*>(op);
}
static void SetupCache();
static void SetUpCache();
private:
static const int kNumCachedOperands = 128;
@ -324,7 +324,7 @@ class LDoubleStackSlot: public LOperand {
return reinterpret_cast<LDoubleStackSlot*>(op);
}
static void SetupCache();
static void SetUpCache();
private:
static const int kNumCachedOperands = 128;
@ -348,7 +348,7 @@ class LRegister: public LOperand {
return reinterpret_cast<LRegister*>(op);
}
static void SetupCache();
static void SetUpCache();
private:
static const int kNumCachedOperands = 16;
@ -372,7 +372,7 @@ class LDoubleRegister: public LOperand {
return reinterpret_cast<LDoubleRegister*>(op);
}
static void SetupCache();
static void SetUpCache();
private:
static const int kNumCachedOperands = 16;
@ -523,8 +523,6 @@ class LEnvironment: public ZoneObject {
LOperand** spilled_double_registers_;
LEnvironment* outer_;
friend class LCodegen;
};

4
deps/v8/src/liveedit.cc

@ -54,7 +54,7 @@ void SetElementNonStrict(Handle<JSObject> object,
// are element setters causing exceptions and the debugger context has none
// of these.
Handle<Object> no_failure;
no_failure = SetElement(object, index, value, kNonStrictMode);
no_failure = JSObject::SetElement(object, index, value, kNonStrictMode);
ASSERT(!no_failure.is_null());
USE(no_failure);
}
@ -1228,7 +1228,7 @@ class RelocInfoBuffer {
V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
}
// Setup new buffer.
// Set up new buffer.
byte* new_buffer = NewArray<byte>(new_buffer_size);
// Copy the data.

4
deps/v8/src/liveobjectlist-inl.h

@ -59,7 +59,7 @@ void LiveObjectList::IterateElements(ObjectVisitor* v) {
}
void LiveObjectList::ProcessNonLive(HeapObject *obj) {
void LiveObjectList::ProcessNonLive(HeapObject* obj) {
// Only do work if we have at least one list to process.
if (last()) DoProcessNonLive(obj);
}
@ -93,7 +93,7 @@ LiveObjectList* LiveObjectList::FindLolForId(int id,
template <typename T>
inline LiveObjectList::Element*
LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) {
LiveObjectList *lol = last();
LiveObjectList* lol = last();
while (lol != NULL) {
Element* elements = lol->elements_;
for (int i = 0; i < lol->obj_count_; i++) {

48
deps/v8/src/liveobjectlist.cc

@ -165,7 +165,7 @@ const char* GetObjectTypeDesc(HeapObject* heap_obj) {
}
bool IsOfType(LiveObjectType type, HeapObject *obj) {
bool IsOfType(LiveObjectType type, HeapObject* obj) {
// Note: there are types that are more general (e.g. JSObject) that would
// have passed the Is##type_() test for more specialized types (e.g.
// JSFunction). If we find a more specialized match but we're looking for
@ -211,7 +211,7 @@ static AllocationSpace FindSpaceFor(String* space_str) {
}
static bool InSpace(AllocationSpace space, HeapObject *heap_obj) {
static bool InSpace(AllocationSpace space, HeapObject* heap_obj) {
Heap* heap = ISOLATE->heap();
if (space != LO_SPACE) {
return heap->InSpace(heap_obj, space);
@ -498,7 +498,7 @@ static void GenerateObjectDesc(HeapObject* obj,
length);
} else if (obj->IsString()) {
String *str = String::cast(obj);
String* str = String::cast(obj);
// Only grab up to 160 chars in case they are double byte.
// We'll only dump 80 of them after we compact them.
const int kMaxCharToDump = 80;
@ -842,7 +842,7 @@ class LiveObjectSummary {
bool found_root_;
bool found_weak_root_;
LolFilter *filter_;
LolFilter* filter_;
};
@ -857,8 +857,8 @@ class SummaryWriter {
// A summary writer for filling in a summary of lol lists and diffs.
class LolSummaryWriter: public SummaryWriter {
public:
LolSummaryWriter(LiveObjectList *older_lol,
LiveObjectList *newer_lol)
LolSummaryWriter(LiveObjectList* older_lol,
LiveObjectList* newer_lol)
: older_(older_lol), newer_(newer_lol) {
}
@ -944,7 +944,7 @@ LiveObjectList::~LiveObjectList() {
int LiveObjectList::GetTotalObjCountAndSize(int* size_p) {
int size = 0;
int count = 0;
LiveObjectList *lol = this;
LiveObjectList* lol = this;
do {
// Only compute total size if requested i.e. when size_p is not null.
if (size_p != NULL) {
@ -1183,7 +1183,7 @@ MaybeObject* LiveObjectList::Capture() {
// only time we'll actually delete the lol is when we Reset() or if the lol is
// invisible, and its element count reaches 0.
bool LiveObjectList::Delete(int id) {
LiveObjectList *lol = last();
LiveObjectList* lol = last();
while (lol != NULL) {
if (lol->id() == id) {
break;
@ -1246,8 +1246,8 @@ MaybeObject* LiveObjectList::Dump(int older_id,
newer_id = temp;
}
LiveObjectList *newer_lol = FindLolForId(newer_id, last());
LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
LiveObjectList* newer_lol = FindLolForId(newer_id, last());
LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
// If the id is defined, and we can't find a LOL for it, then we have an
// invalid id.
@ -1365,8 +1365,8 @@ MaybeObject* LiveObjectList::Summarize(int older_id,
newer_id = temp;
}
LiveObjectList *newer_lol = FindLolForId(newer_id, last());
LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
LiveObjectList* newer_lol = FindLolForId(newer_id, last());
LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
// If the id is defined, and we can't find a LOL for it, then we have an
// invalid id.
@ -1626,7 +1626,7 @@ MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
// Deletes all captured lols.
void LiveObjectList::Reset() {
LiveObjectList *lol = last();
LiveObjectList* lol = last();
// Just delete the last. Each lol will delete it's prev automatically.
delete lol;
@ -1715,8 +1715,8 @@ class LolVisitor: public ObjectVisitor {
inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
LolFilter* filter,
LiveObjectSummary *summary,
void (*SetRootFound)(LiveObjectSummary *s),
LiveObjectSummary* summary,
void (*SetRootFound)(LiveObjectSummary* s),
int start,
int dump_limit,
int* total_count,
@ -1762,12 +1762,12 @@ inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
}
inline void SetFoundRoot(LiveObjectSummary *summary) {
inline void SetFoundRoot(LiveObjectSummary* summary) {
summary->set_found_root();
}
inline void SetFoundWeakRoot(LiveObjectSummary *summary) {
inline void SetFoundWeakRoot(LiveObjectSummary* summary) {
summary->set_found_weak_root();
}
@ -1779,7 +1779,7 @@ int LiveObjectList::GetRetainers(Handle<HeapObject> target,
int dump_limit,
int* total_count,
LolFilter* filter,
LiveObjectSummary *summary,
LiveObjectSummary* summary,
JSFunction* arguments_function,
Handle<Object> error) {
HandleScope scope;
@ -2267,7 +2267,7 @@ Object* LiveObjectList::GetPath(int obj_id1,
}
void LiveObjectList::DoProcessNonLive(HeapObject *obj) {
void LiveObjectList::DoProcessNonLive(HeapObject* obj) {
// We should only be called if we have at least one lol to search.
ASSERT(last() != NULL);
Element* element = last()->Find(obj);
@ -2284,7 +2284,7 @@ void LiveObjectList::IterateElementsPrivate(ObjectVisitor* v) {
int count = lol->obj_count_;
for (int i = 0; i < count; i++) {
HeapObject** p = &elements[i].obj_;
v->VisitPointer(reinterpret_cast<Object **>(p));
v->VisitPointer(reinterpret_cast<Object** >(p));
}
lol = lol->prev_;
}
@ -2389,11 +2389,11 @@ void LiveObjectList::GCEpiloguePrivate() {
PurgeDuplicates();
// After the GC, sweep away all free'd Elements and compact.
LiveObjectList *prev = NULL;
LiveObjectList *next = NULL;
LiveObjectList* prev = NULL;
LiveObjectList* next = NULL;
// Iterating from the youngest lol to the oldest lol.
for (LiveObjectList *lol = last(); lol; lol = prev) {
for (LiveObjectList* lol = last(); lol; lol = prev) {
Element* elements = lol->elements_;
prev = lol->prev(); // Save the prev.
@ -2446,7 +2446,7 @@ void LiveObjectList::GCEpiloguePrivate() {
const int kMaxUnusedSpace = 64;
if (diff > kMaxUnusedSpace) { // Threshold for shrinking.
// Shrink the list.
Element *new_elements = NewArray<Element>(new_count);
Element* new_elements = NewArray<Element>(new_count);
memcpy(new_elements, elements, new_count * sizeof(Element));
DeleteArray<Element>(elements);

10
deps/v8/src/liveobjectlist.h

@ -77,7 +77,7 @@ class LiveObjectList {
inline static void GCEpilogue();
inline static void GCPrologue();
inline static void IterateElements(ObjectVisitor* v);
inline static void ProcessNonLive(HeapObject *obj);
inline static void ProcessNonLive(HeapObject* obj);
inline static void UpdateReferencesForScavengeGC();
// Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
@ -125,7 +125,7 @@ class LiveObjectList {
static void GCEpiloguePrivate();
static void IterateElementsPrivate(ObjectVisitor* v);
static void DoProcessNonLive(HeapObject *obj);
static void DoProcessNonLive(HeapObject* obj);
static int CompareElement(const Element* a, const Element* b);
@ -138,7 +138,7 @@ class LiveObjectList {
int dump_limit,
int* total_count,
LolFilter* filter,
LiveObjectSummary *summary,
LiveObjectSummary* summary,
JSFunction* arguments_function,
Handle<Object> error);
@ -151,7 +151,7 @@ class LiveObjectList {
bool is_tracking_roots);
static bool NeedLOLProcessing() { return (last() != NULL); }
static void NullifyNonLivePointer(HeapObject **p) {
static void NullifyNonLivePointer(HeapObject** p) {
// Mask out the low bit that marks this as a heap object. We'll use this
// cleared bit as an indicator that this pointer needs to be collected.
//
@ -202,7 +202,7 @@ class LiveObjectList {
int id_;
int capacity_;
int obj_count_;
Element *elements_;
Element* elements_;
// Statics for managing all the lists.
static uint32_t next_element_id_;

6
deps/v8/src/log.cc

@ -1615,7 +1615,7 @@ void Logger::LogAccessorCallbacks() {
}
bool Logger::Setup() {
bool Logger::SetUp() {
// Tests and EnsureInitialize() can call this twice in a row. It's harmless.
if (is_initialized_) return true;
is_initialized_ = true;
@ -1708,9 +1708,9 @@ FILE* Logger::TearDown() {
void Logger::EnableSlidingStateWindow() {
// If the ticker is NULL, Logger::Setup has not been called yet. In
// If the ticker is NULL, Logger::SetUp has not been called yet. In
// that case, we set the sliding_state_window flag so that the
// sliding window computation will be started when Logger::Setup is
// sliding window computation will be started when Logger::SetUp is
// called.
if (ticker_ == NULL) {
FLAG_sliding_state_window = true;

6
deps/v8/src/log.h

@ -150,14 +150,14 @@ class Logger {
#undef DECLARE_ENUM
// Acquires resources for logging if the right flags are set.
bool Setup();
bool SetUp();
void EnsureTickerStarted();
void EnsureTickerStopped();
Sampler* sampler();
// Frees resources acquired in Setup.
// Frees resources acquired in SetUp.
// When a temporary file is used for the log, returns its stream descriptor,
// leaving the file open.
FILE* TearDown();
@@ -411,7 +411,7 @@ class Logger {
NameMap* address_to_name_map_;
// Guards against multiple calls to TearDown() that can happen in some tests.
// 'true' between Setup() and TearDown().
// 'true' between SetUp() and TearDown().
bool is_initialized_;
// Support for 'incremental addresses' in compressed logs:

1
deps/v8/src/mark-compact.cc

@@ -3641,6 +3641,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
reinterpret_cast<intptr_t>(p));
}
space->MarkPageForLazySweeping(p);
continue;
}

2
deps/v8/src/mips/assembler-mips-inl.h

@@ -133,7 +133,7 @@ Object* RelocInfo::target_object() {
}
Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
Assembler::target_address_at(pc_)));

6
deps/v8/src/mips/assembler-mips.cc

@@ -301,7 +301,7 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
own_buffer_ = false;
}
// Setup buffer pointers.
// Set up buffer pointers.
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -337,7 +337,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Setup code descriptor.
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
@@ -1970,7 +1970,7 @@ void Assembler::GrowBuffer() {
}
CHECK_GT(desc.buffer_size, 0); // No overflow.
// Setup new buffer.
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
desc.instr_size = pc_offset();

13
deps/v8/src/mips/builtins-mips.cc

@@ -339,7 +339,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
t1,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, a3, t0);
// Setup return value, remove receiver from stack and return.
// Set up return value, remove receiver from stack and return.
__ mov(v0, a2);
__ Addu(sp, sp, Operand(kPointerSize));
__ Ret();
@@ -382,7 +382,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
call_generic_code);
__ IncrementCounter(counters->array_function_native(), 1, a2, t0);
// Setup return value, remove receiver and argument from stack and return.
// Set up return value, remove receiver and argument from stack and return.
__ mov(v0, a3);
__ Addu(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -981,10 +981,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// sp[4]: number of arguments (smi-tagged)
__ lw(a3, MemOperand(sp, 4 * kPointerSize));
// Setup pointer to last argument.
// Set up pointer to last argument.
__ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Setup number of arguments for function call below.
// Set up number of arguments for function call below.
__ srl(a0, a3, kSmiTagSize);
// Copy arguments and receiver to the expression stack.
@@ -1114,10 +1114,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Set up the context from the function argument.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Set up the roots register.
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
__ li(s6, Operand(roots_array_start));
__ InitializeRootRegister();
// Push the function and the receiver onto the stack.
__ Push(a1, a2);

37
deps/v8/src/mips/code-stubs-mips.cc

@@ -157,13 +157,13 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Load the function from the stack.
__ lw(a3, MemOperand(sp, 0));
// Setup the object header.
// Set up the object header.
__ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
__ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
__ li(a2, Operand(Smi::FromInt(length)));
__ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
// Setup the fixed slots.
// Set up the fixed slots.
__ li(a1, Operand(Smi::FromInt(0)));
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
@@ -208,7 +208,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
// Load the serialized scope info from the stack.
__ lw(a1, MemOperand(sp, 1 * kPointerSize));
// Setup the object header.
// Set up the object header.
__ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
__ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
__ li(a2, Operand(Smi::FromInt(length)));
@@ -229,7 +229,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
// Setup the fixed slots.
// Set up the fixed slots.
__ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
__ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
__ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
@@ -726,7 +726,7 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
__ Subu(int_scratch, zero_reg, int_scratch);
__ bind(&skip_sub);
// Get mantisssa[51:20].
// Get mantissa[51:20].
// Get the position of the first set bit.
__ clz(dst1, int_scratch);
@@ -971,7 +971,7 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// non zero bits left. So we need the (30 - exponent) last bits of the
// 31 higher bits of the mantissa to be null.
// Because bits [21:0] are null, we can check instead that the
// (32 - exponent) last bits of the 32 higher bits of the mantisssa are null.
// (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
// Get the 32 higher bits of the mantissa in dst.
__ Ext(dst,
@@ -4005,7 +4005,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles_);
// Setup argc and the builtin function in callee-saved registers.
// Set up argc and the builtin function in callee-saved registers.
__ mov(s0, a0);
__ mov(s2, a1);
@@ -4097,7 +4097,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
isolate)));
__ lw(t0, MemOperand(t0));
__ Push(t3, t2, t1, t0);
// Setup frame pointer for the frame to be pushed.
// Set up frame pointer for the frame to be pushed.
__ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
// Registers:
@@ -4584,7 +4584,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ sw(a3, FieldMemOperand(v0, i));
}
// Setup the callee in-object property.
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ lw(a3, MemOperand(sp, 2 * kPointerSize));
const int kCalleeOffset = JSObject::kHeaderSize +
@@ -4597,7 +4597,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Heap::kArgumentsLengthIndex * kPointerSize;
__ sw(a2, FieldMemOperand(v0, kLengthOffset));
// Setup the elements pointer in the allocated arguments object.
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, t0 will point there, otherwise
// it will point to the backing store.
__ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
@@ -4699,7 +4699,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ Ret();
// Do the runtime call to allocate the arguments object.
// a2 = argument count (taggged)
// a2 = argument count (tagged)
__ bind(&runtime);
__ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
__ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -4774,7 +4774,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the parameters pointer from the stack.
__ lw(a2, MemOperand(sp, 1 * kPointerSize));
// Setup the elements pointer in the allocated arguments object and
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
@@ -4786,7 +4786,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Copy the fixed array slots.
Label loop;
// Setup t0 to point to the first array slot.
// Set up t0 to point to the first array slot.
__ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
// Pre-decrement a2 with kPointerSize on each iteration.
@@ -5425,7 +5425,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// of the original receiver from the call site).
__ bind(&non_function);
__ sw(a1, MemOperand(sp, argc_ * kPointerSize));
__ li(a0, Operand(argc_)); // Setup the number of arguments.
__ li(a0, Operand(argc_)); // Set up the number of arguments.
__ mov(a2, zero_reg);
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
__ SetCallKind(t1, CALL_AS_METHOD);
@@ -5927,7 +5927,7 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character) {
// hash = seed + character + ((seed + character) << 10);
__ LoadRoot(hash, Heap::kStringHashSeedRootIndex);
__ LoadRoot(hash, Heap::kHashSeedRootIndex);
// Untag smi seed and add the character.
__ SmiUntag(hash);
__ addu(hash, hash, character);
@@ -5954,7 +5954,7 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
Register hash) {
Register hash) {
// hash += hash << 3;
__ sll(at, hash, 3);
__ addu(hash, hash, at);
@@ -5965,12 +5965,11 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
__ sll(at, hash, 15);
__ addu(hash, hash, at);
uint32_t kHashShiftCutOffMask = (1 << (32 - String::kHashShift)) - 1;
__ li(at, Operand(kHashShiftCutOffMask));
__ li(at, Operand(String::kHashBitMask));
__ and_(hash, hash, at);
// if (hash == 0) hash = 27;
__ ori(at, zero_reg, 27);
__ ori(at, zero_reg, StringHasher::kZeroHash);
__ movz(hash, at, hash);
}

4
deps/v8/src/mips/constants-mips.h

@@ -125,7 +125,7 @@ class Registers {
struct RegisterAlias {
int reg;
const char *name;
const char* name;
};
static const int32_t kMaxValue = 0x7fffffff;
@@ -147,7 +147,7 @@ class FPURegisters {
struct RegisterAlias {
int creg;
const char *name;
const char* name;
};
private:

2
deps/v8/src/mips/cpu-mips.cc

@@ -47,7 +47,7 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
void CPU::SetUp() {
CpuFeatures::Probe();
}

7
deps/v8/src/mips/deoptimizer-mips.cc

@@ -326,7 +326,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
// Set up the frame pointer and the context pointer.
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
@@ -733,10 +733,7 @@ void Deoptimizer::EntryGenerator::Generate() {
}
}
// Set up the roots register.
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate);
__ li(roots, Operand(roots_array_start));
__ InitializeRootRegister();
__ pop(at); // Get continuation, leave pc on stack.
__ pop(ra);

2
deps/v8/src/mips/full-codegen-mips.cc

@@ -1017,7 +1017,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
__ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Setup the four remaining stack slots.
// Set up the four remaining stack slots.
__ push(v0); // Map.
__ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
__ li(a0, Operand(Smi::FromInt(0)));

25
deps/v8/src/mips/lithium-codegen-mips.cc

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -2794,7 +2794,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(at);
// Setup deoptimization.
// Set up deoptimization.
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
// Restore context.
@@ -3095,6 +3095,27 @@ void LCodeGen::DoPower(LPower* instr) {
}
void LCodeGen::DoRandom(LRandom* instr) {
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(f0));
ASSERT(ToRegister(instr->InputAt(0)).is(a0));
__ PrepareCallCFunction(1, a1);
__ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
__ li(a2, Operand(0x41300000));
// Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
__ Move(f12, v0, a2);
// Move 0x4130000000000000 to FPU.
__ Move(f14, zero_reg, a2);
// Subtract to get the result.
__ sub_d(f0, f12, f14);
}
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::LOG,

8
deps/v8/src/mips/lithium-codegen-mips.h

@@ -148,9 +148,9 @@ class LCodeGen BASE_EMBEDDED {
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return lithiumScratchReg; }
Register scratch1() { return lithiumScratchReg2; }
DoubleRegister double_scratch0() { return lithiumScratchDouble; }
Register scratch0() { return kLithiumScratchReg; }
Register scratch1() { return kLithiumScratchReg2; }
DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -423,7 +423,7 @@ class LDeferredCode: public ZoneObject {
virtual void Generate() = 0;
virtual LInstruction* instr() = 0;
void SetExit(Label *exit) { external_exit_ = exit; }
void SetExit(Label* exit) { external_exit_ = exit; }
Label* entry() { return &entry_; }
Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
int instruction_index() const { return instruction_index_; }

4
deps/v8/src/mips/lithium-gap-resolver-mips.cc

@@ -33,8 +33,8 @@
namespace v8 {
namespace internal {
static const Register kSavedValueRegister = lithiumScratchReg;
static const DoubleRegister kSavedDoubleValueRegister = lithiumScratchDouble;
static const Register kSavedValueRegister = kLithiumScratchReg;
static const DoubleRegister kSavedDoubleValueRegister = kLithiumScratchDouble;
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner),

40
deps/v8/src/mips/lithium-mips.cc

@@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1038,14 +1038,23 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
if (v->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
HValue* value = instr->value();
if (value->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
}
return AssignEnvironment(new LBranch(UseRegister(v)));
LBranch* result = new LBranch(UseRegister(value));
// Tagged values that are not known smis or booleans require a
// deoptimization environment.
Representation rep = value->representation();
HType type = value->type();
if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
return AssignEnvironment(result);
}
return result;
}
@@ -1345,7 +1354,12 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
} else {
left = UseRegisterAtStart(instr->LeastConstantOperand());
}
return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
LMulI* mul = new LMulI(left, right, temp);
if (instr->CheckFlag(HValue::kCanOverflow) ||
instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
AssignEnvironment(mul);
}
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
@@ -1414,6 +1428,15 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
}
LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->global_object()->representation().IsTagged());
LOperand* global_object = UseFixed(instr->global_object(), a0);
LRandom* result = new LRandom(global_object);
return MarkAsCall(DefineFixedDouble(result, f0), instr);
}
LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Representation r = instr->GetInputRepresentation();
ASSERT(instr->left()->representation().IsTagged());
@@ -1558,7 +1581,7 @@ LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
return AssignEnvironment(DefineAsRegister(result));
return DefineAsRegister(result);
}
@@ -1877,7 +1900,8 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
return AssignEnvironment(DefineAsRegister(result));
if (instr->RequiresHoleCheck()) AssignEnvironment(result);
return DefineAsRegister(result);
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save