diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 602ad80488..cdadfba3a2 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,18 @@ +2010-07-14: Version 2.2.24 + + Added API for capturing stack traces for uncaught exceptions. + + Fixed crash bug when preparsing from a non-external V8 string + (issue 775). + + Fixed JSON.parse bug causing input not to be converted to string + (issue 764). + + Added ES5 Object.freeze and Object.isFrozen. + + Performance improvements on all platforms. + + 2010-07-07: Version 2.2.23 API change: Convert Unicode code points outside the basic multilingual @@ -11,6 +26,7 @@ Performance improvements on all platforms. + 2010-07-05: Version 2.2.22 Added ES5 Object.isExtensible and Object.preventExtensions. diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index ca4a247fe8..9e4cebb73a 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -693,6 +693,13 @@ class V8EXPORT Message { */ Handle GetScriptData() const; + /** + * Exception stack trace. By default stack traces are not captured for + * uncaught exceptions. SetCaptureStackTraceForUncaughtExceptions allows + * to change this option. + */ + Handle GetStackTrace() const; + /** * Returns the number, 1-based, of the line where the error occurred. */ @@ -2458,6 +2465,15 @@ class V8EXPORT V8 { */ static void RemoveMessageListeners(MessageCallback that); + /** + * Tells V8 to capture current stack trace when uncaught exception occurs + * and report it to the message listeners. The option is off by default. + */ + static void SetCaptureStackTraceForUncaughtExceptions( + bool capture, + int frame_limit = 10, + StackTrace::StackTraceOptions options = StackTrace::kOverview); + /** * Sets V8 flags from a string. */ diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index e41db94730..9fbfe56dae 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -549,7 +549,7 @@ Object* Accessors::FunctionGetArguments(Object* object, void*) { if (frame->function() != *function) continue; // If there is an arguments variable in the stack, we return that. 
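The two API additions above (v8::Message::GetStackTrace and v8::V8::SetCaptureStackTraceForUncaughtExceptions) are opt-in: unless capture is enabled, GetStackTrace returns an empty handle. A minimal embedder sketch of how the pieces fit together, assuming the V8 2.2 public API declared in include/v8.h (the listener name and the output format are illustrative, not part of this patch):

    #include <v8.h>
    #include <cstdio>

    // Message listener invoked for uncaught exceptions; prints the captured frames.
    static void OnUncaughtException(v8::Handle<v8::Message> message,
                                    v8::Handle<v8::Value> /*data*/) {
      v8::Handle<v8::StackTrace> stack = message->GetStackTrace();
      if (stack.IsEmpty()) return;  // Capture was not enabled.
      for (int i = 0; i < stack->GetFrameCount(); i++) {
        v8::Handle<v8::StackFrame> frame = stack->GetFrame(i);
        v8::String::Utf8Value script(frame->GetScriptName());
        v8::String::Utf8Value function(frame->GetFunctionName());
        std::printf("  at %s (%s:%d)\n",
                    *function ? *function : "<anonymous>",
                    *script ? *script : "<unknown>",
                    frame->GetLineNumber());
      }
    }

    int main() {
      // Off by default; capture up to 10 overview frames for uncaught exceptions.
      v8::V8::SetCaptureStackTraceForUncaughtExceptions(true, 10,
                                                        v8::StackTrace::kOverview);
      v8::V8::AddMessageListener(OnUncaughtException);
      // ... create a HandleScope and Context, run a script that throws ...
      return 0;
    }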
- int index = ScopeInfo<>::StackSlotIndex(frame->code(), + int index = ScopeInfo<>::StackSlotIndex(function->shared()->scope_info(), Heap::arguments_symbol()); if (index >= 0) { Handle arguments = Handle(frame->GetExpression(index)); diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 0f64dd45ec..07d9eb0ac7 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -1438,6 +1438,22 @@ v8::Handle Message::GetScriptData() const { } +v8::Handle Message::GetStackTrace() const { + if (IsDeadCheck("v8::Message::GetStackTrace()")) { + return Local(); + } + ENTER_V8; + HandleScope scope; + i::Handle obj = + i::Handle::cast(Utils::OpenHandle(this)); + i::Handle stackFramesObj = GetProperty(obj, "stackFrames"); + if (!stackFramesObj->IsJSArray()) return v8::Handle(); + i::Handle stackTrace = + i::Handle::cast(stackFramesObj); + return scope.Close(Utils::StackTraceToLocal(stackTrace)); +} + + static i::Handle CallV8HeapFunction(const char* name, i::Handle recv, int argc, @@ -1583,7 +1599,9 @@ Local StackTrace::CurrentStackTrace(int frame_limit, StackTraceOptions options) { if (IsDeadCheck("v8::StackTrace::CurrentStackTrace()")) Local(); ENTER_V8; - return i::Top::CaptureCurrentStackTrace(frame_limit, options); + i::Handle stackTrace = + i::Top::CaptureCurrentStackTrace(frame_limit, options); + return Utils::StackTraceToLocal(stackTrace); } @@ -3782,6 +3800,17 @@ void V8::RemoveMessageListeners(MessageCallback that) { } +void V8::SetCaptureStackTraceForUncaughtExceptions( + bool capture, + int frame_limit, + StackTrace::StackTraceOptions options) { + i::Top::SetCaptureStackTraceForUncaughtExceptions( + capture, + frame_limit, + options); +} + + void V8::SetCounterFunction(CounterLookupCallback callback) { if (IsDeadCheck("v8::V8::SetCounterFunction()")) return; i::StatsTable::SetCounterFunction(callback); diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index f5ff43a656..0dc6b77106 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -1801,11 +1801,119 @@ void Assembler::vstr(const DwVfpRegister src, } +static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { + uint64_t i; + memcpy(&i, &d, 8); + + *lo = i & 0xffffffff; + *hi = i >> 32; +} + +// Only works for little endian floating point formats. +// We don't support VFP on the mixed endian floating point platform. +static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { + ASSERT(CpuFeatures::IsEnabled(VFP3)); + + // VMOV can accept an immediate of the form: + // + // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7 + // + // The immediate is encoded using an 8-bit quantity, comprised of two + // 4-bit fields. For an 8-bit immediate of the form: + // + // [abcdefgh] + // + // where a is the MSB and h is the LSB, an immediate 64-bit double can be + // created of the form: + // + // [aBbbbbbb,bbcdefgh,00000000,00000000, + // 00000000,00000000,00000000,00000000] + // + // where B = ~b. + // + + uint32_t lo, hi; + DoubleAsTwoUInt32(d, &lo, &hi); + + // The most obvious constraint is the long block of zeroes. + if ((lo != 0) || ((hi & 0xffff) != 0)) { + return false; + } + + // Bits 62:55 must be all clear or all set. + if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) { + return false; + } + + // Bit 63 must be NOT bit 62. + if (((hi ^ (hi << 1)) & (0x40000000)) == 0) { + return false; + } + + // Create the encoded immediate in the form: + // [00000000,0000abcd,00000000,0000efgh] + *encoding = (hi >> 16) & 0xf; // Low nybble. 
+ *encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble. + *encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble. + + return true; +} + + +void Assembler::vmov(const DwVfpRegister dst, + double imm, + const Condition cond) { + // Dd = immediate + // Instruction details available in ARM DDI 0406B, A8-640. + ASSERT(CpuFeatures::IsEnabled(VFP3)); + + uint32_t enc; + if (FitsVMOVDoubleImmediate(imm, &enc)) { + // The double can be encoded in the instruction. + emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc); + } else { + // Synthesise the double from ARM immediates. This could be implemented + // using vldr from a constant pool. + uint32_t lo, hi; + DoubleAsTwoUInt32(imm, &lo, &hi); + + if (lo == hi) { + // If the lo and hi parts of the double are equal, the literal is easier + // to create. This is the case with 0.0. + mov(ip, Operand(lo)); + vmov(dst, ip, ip); + } else { + // Move the low part of the double into the lower of the corresponsing S + // registers of D register dst. + mov(ip, Operand(lo)); + vmov(dst.low(), ip, cond); + + // Move the high part of the double into the higher of the corresponsing S + // registers of D register dst. + mov(ip, Operand(hi)); + vmov(dst.high(), ip, cond); + } + } +} + + +void Assembler::vmov(const SwVfpRegister dst, + const SwVfpRegister src, + const Condition cond) { + // Sd = Sm + // Instruction details available in ARM DDI 0406B, A8-642. + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | 0xB*B20 | + dst.code()*B12 | 0x5*B9 | B6 | src.code()); +} + + void Assembler::vmov(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { // Dd = Dm // Instruction details available in ARM DDI 0406B, A8-642. + ASSERT(CpuFeatures::IsEnabled(VFP3)); emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code()); } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 6a4fb23e85..226fb87403 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -130,6 +130,20 @@ struct DwVfpRegister { // Supporting d0 to d15, can be later extended to d31. bool is_valid() const { return 0 <= code_ && code_ < 16; } bool is(DwVfpRegister reg) const { return code_ == reg.code_; } + SwVfpRegister low() const { + SwVfpRegister reg; + reg.code_ = code_ * 2; + + ASSERT(reg.is_valid()); + return reg; + } + SwVfpRegister high() const { + SwVfpRegister reg; + reg.code_ = (code_ * 2) + 1; + + ASSERT(reg.is_valid()); + return reg; + } int code() const { ASSERT(is_valid()); return code_; @@ -931,6 +945,12 @@ class Assembler : public Malloced { int offset, // Offset must be a multiple of 4. const Condition cond = al); + void vmov(const DwVfpRegister dst, + double imm, + const Condition cond = al); + void vmov(const SwVfpRegister dst, + const SwVfpRegister src, + const Condition cond = al); void vmov(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond = al); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index fa6efcd3c1..6e0604bc6e 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -4343,9 +4343,7 @@ void CodeGenerator::GenerateMathPow(ZoneList* args) { __ bind(&powi); // Load 1.0 into d0. - __ mov(scratch2, Operand(0x3ff00000)); - __ mov(scratch1, Operand(0)); - __ vmov(d0, scratch1, scratch2); + __ vmov(d0, 1.0); // Get the absolute untagged value of the exponent and use that for the // calculation. 
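As a quick cross-check of the encoding logic added above (a worked example, not part of the patch): 1.0 has the bit pattern 0x3FF0000000000000, so lo == 0 and the low 16 bits of hi are zero, the run of 'b' bits is all ones with B = 0, and FitsVMOVDoubleImmediate succeeds with abcd = 0111 and efgh = 0000, i.e. the 8-bit immediate 0x70 that the ARM ARM lists for vmov.f64 Dd, #1.0. A small host-side sketch of the same computation (the helper name is hypothetical):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Returns true and the 8-bit abcdefgh immediate if d is encodable in a VFP vmov.
    static bool EncodeVmovImmediate(double d, uint8_t* abcdefgh) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      if (bits & 0x0000FFFFFFFFFFFFULL) return false;          // low 48 mantissa bits must be zero
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      uint32_t b_run = (hi >> 22) & 0xFF;                      // the eight 'b' bits
      if (b_run != 0 && b_run != 0xFF) return false;           // must be all clear or all set
      if (((hi >> 30) & 1) == ((hi >> 29) & 1)) return false;  // B must be the inverse of b
      *abcdefgh = static_cast<uint8_t>(((hi >> 24) & 0x80) |   // a
                                       ((hi >> 16) & 0x7F));   // b, cdefgh
      return true;
    }

    int main() {
      uint8_t enc;
      if (EncodeVmovImmediate(1.0, &enc)) std::printf("1.0  -> 0x%02x\n", enc);   // 0x70
      if (EncodeVmovImmediate(-2.0, &enc)) std::printf("-2.0 -> 0x%02x\n", enc);  // 0x80
      if (!EncodeVmovImmediate(0.1, &enc)) std::printf("0.1 is not encodable\n");
      return 0;
    }

Values outside the +/- m * 2^(-n) range (16 <= m <= 31, 0 <= n <= 7), such as 0.1, fail these checks, and Assembler::vmov falls back to synthesising the double from two ARM immediates as shown above.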
@@ -4405,9 +4403,7 @@ void CodeGenerator::GenerateMathPow(ZoneList* args) { AVOID_NANS_AND_INFINITIES); // Load 1.0 into d2. - __ mov(scratch2, Operand(0x3ff00000)); - __ mov(scratch1, Operand(0)); - __ vmov(d2, scratch1, scratch2); + __ vmov(d2, 1.0); // Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x). __ vdiv(d0, d2, d0); @@ -4874,12 +4870,8 @@ void CodeGenerator::GenerateRandomHeapNumber( __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - // To allocate a heap number, and ensure that it is not a smi, we - // call the runtime function FUnaryMinus on 0, returning the double - // -0.0. A new, distinct heap number is returned each time. - __ mov(r0, Operand(Smi::FromInt(0))); - __ push(r0); - __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + // Allocate a heap number. + __ CallRuntime(Runtime::kNumberAlloc, 0); __ mov(r4, Operand(r0)); __ bind(&heapnumber_allocated); diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc index 002e4c1368..3df7b4e08e 100644 --- a/deps/v8/src/arm/constants-arm.cc +++ b/deps/v8/src/arm/constants-arm.cc @@ -37,6 +37,26 @@ namespace arm { namespace v8i = v8::internal; +double Instr::DoubleImmedVmov() const { + // Reconstruct a double from the immediate encoded in the vmov instruction. + // + // instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh] + // double: [aBbbbbbb,bbcdefgh,00000000,00000000, + // 00000000,00000000,00000000,00000000] + // + // where B = ~b. Only the high 16 bits are affected. + uint64_t high16; + high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh. + high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx. + high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx. + high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx. + + uint64_t imm = high16 << 48; + double d; + memcpy(&d, &imm, 8); + return d; +} + // These register names are defined in a way to match the native disassembler // formatting. See for example the command "objdump -d ". diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index fa9adbd704..2ac9a41326 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -333,6 +333,9 @@ class Instr { inline bool HasH() const { return HField() == 1; } inline bool HasLink() const { return LinkField() == 1; } + // Decoding the double immediate in the vmov instruction. + double DoubleImmedVmov() const; + // Instructions are read of out a code stream. The only way to get a // reference to an instruction is to convert a pointer. There is no way // to allocate or create instances of class Instr. diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index a52417beef..37401ed28f 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -412,6 +412,12 @@ int Decoder::FormatOption(Instr* instr, const char* format) { PrintCondition(instr); return 4; } + case 'd': { // 'd: vmov double immediate. + double d = instr->DoubleImmedVmov(); + out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, + "#%g", d); + return 1; + } case 'f': { // 'f: bitfield instructions - v7 and above. uint32_t lsbit = instr->Bits(11, 7); uint32_t width = instr->Bits(20, 16) + 1; @@ -1052,7 +1058,7 @@ void Decoder::DecodeTypeVFP(Instr* instr) { if (instr->SzField() == 0x1) { Format(instr, "vmov.f64'cond 'Dd, 'Dm"); } else { - Unknown(instr); // Not used by V8. 
+ Format(instr, "vmov.f32'cond 'Sd, 'Sm"); } } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) { DecodeVCVTBetweenDoubleAndSingle(instr); @@ -1066,6 +1072,12 @@ void Decoder::DecodeTypeVFP(Instr* instr) { DecodeVCMP(instr); } else if (((instr->Opc2Field() == 0x1)) && (instr->Opc3Field() == 0x3)) { Format(instr, "vsqrt.f64'cond 'Dd, 'Dm"); + } else if (instr->Opc3Field() == 0x0) { + if (instr->SzField() == 0x1) { + Format(instr, "vmov.f64'cond 'Dd, 'd"); + } else { + Unknown(instr); // Not used by V8. + } } else { Unknown(instr); // Not used by V8. } diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 080cb83392..3fb946aa65 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -2161,12 +2161,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList* args) { __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); - // To allocate a heap number, and ensure that it is not a smi, we - // call the runtime function FUnaryMinus on 0, returning the double - // -0.0. A new, distinct heap number is returned each time. - __ mov(r0, Operand(Smi::FromInt(0))); - __ push(r0); - __ CallRuntime(Runtime::kNumberUnaryMinus, 1); + // Allocate a heap number. + __ CallRuntime(Runtime::kNumberAlloc, 0); __ mov(r4, Operand(r0)); __ bind(&heapnumber_allocated); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 2896cc96e7..f251b31f0b 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -873,88 +873,6 @@ void MacroAssembler::PopTryHandler() { } -Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg, - JSObject* holder, Register holder_reg, - Register scratch, - int save_at_depth, - Label* miss) { - // Make sure there's no overlap between scratch and the other - // registers. - ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg)); - - // Keep track of the current object in register reg. - Register reg = object_reg; - int depth = 0; - - if (save_at_depth == depth) { - str(reg, MemOperand(sp)); - } - - // Check the maps in the prototype chain. - // Traverse the prototype chain from the object and do map checks. - while (object != holder) { - depth++; - - // Only global objects and objects that do not require access - // checks are allowed in stubs. - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - - // Get the map of the current object. - ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); - cmp(scratch, Operand(Handle(object->map()))); - - // Branch on the result of the map check. - b(ne, miss); - - // Check access rights to the global object. This has to happen - // after the map check so that we know that the object is - // actually a global object. - if (object->IsJSGlobalProxy()) { - CheckAccessGlobalProxy(reg, scratch, miss); - // Restore scratch register to be the map of the object. In the - // new space case below, we load the prototype from the map in - // the scratch register. - ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); - } - - reg = holder_reg; // from now the object is in holder_reg - JSObject* prototype = JSObject::cast(object->GetPrototype()); - if (Heap::InNewSpace(prototype)) { - // The prototype is in new space; we cannot store a reference - // to it in the code. Load it from the map. - ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset)); - } else { - // The prototype is in old space; load it directly. 
- mov(reg, Operand(Handle(prototype))); - } - - if (save_at_depth == depth) { - str(reg, MemOperand(sp)); - } - - // Go to the next object in the prototype chain. - object = prototype; - } - - // Check the holder map. - ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); - cmp(scratch, Operand(Handle(object->map()))); - b(ne, miss); - - // Log the check depth. - LOG(IntEvent("check-maps-depth", depth + 1)); - - // Perform security check for access to the global object and return - // the holder register. - ASSERT(object == holder); - ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - if (object->IsJSGlobalProxy()) { - CheckAccessGlobalProxy(reg, scratch, miss); - } - return reg; -} - - void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label* miss) { diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index f1f7de7fe4..156e132698 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -316,24 +316,6 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Inline caching support - // Generates code that verifies that the maps of objects in the - // prototype chain of object hasn't changed since the code was - // generated and branches to the miss label if any map has. If - // necessary the function also generates code for security check - // in case of global object holders. The scratch and holder - // registers are always clobbered, but the object register is only - // clobbered if it the same as the holder register. The function - // returns a register containing the holder - either object_reg or - // holder_reg. - // The function can optionally (when save_at_depth != - // kInvalidProtoDepth) save the object at the given depth by moving - // it to [sp]. - Register CheckMaps(JSObject* object, Register object_reg, - JSObject* holder, Register holder_reg, - Register scratch, - int save_at_depth, - Label* miss); - // Generate code for checking access rights - used for security checks // on access to global objects across environments. The holder register // is left untouched, whereas both scratch registers are clobbered. diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index e8910f4860..c67c7aacaa 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -799,7 +799,6 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { CodeDesc code_desc; masm_->GetCode(&code_desc); Handle code = Factory::NewCode(code_desc, - NULL, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject()); PROFILE(RegExpCodeCreateEvent(*code, *source)); diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index f09ce0035f..3345e4559b 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -2281,7 +2281,7 @@ void Simulator::DecodeTypeVFP(Instr* instr) { if (instr->SzField() == 0x1) { set_d_register_from_double(vd, get_double_from_d_register(vm)); } else { - UNREACHABLE(); // Not used by V8. 
+ set_s_register_from_float(vd, get_float_from_s_register(vm)); } } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) { DecodeVCVTBetweenDoubleAndSingle(instr); @@ -2298,6 +2298,13 @@ void Simulator::DecodeTypeVFP(Instr* instr) { double dm_value = get_double_from_d_register(vm); double dd_value = sqrt(dm_value); set_d_register_from_double(vd, dd_value); + } else if (instr->Opc3Field() == 0x0) { + // vmov immediate. + if (instr->SzField() == 0x1) { + set_d_register_from_double(vd, instr->DoubleImmedVmov()); + } else { + UNREACHABLE(); // Not used by v8. + } } else { UNREACHABLE(); // Not used by V8. } diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 0e649ccd13..a0b6bdb413 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -83,6 +83,112 @@ static void ProbeTable(MacroAssembler* masm, } +// Helper function used to check that the dictionary doesn't contain +// the property. This function may return false negatives, so miss_label +// must always call a backup property check that is complete. +// This function is safe to call if the receiver has fast properties. +// Name must be a symbol and receiver must be a heap object. +static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, + Label* miss_label, + Register receiver, + String* name, + Register scratch0, + Register scratch1) { + ASSERT(name->IsSymbol()); + __ IncrementCounter(&Counters::negative_lookups, 1, scratch0, scratch1); + __ IncrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1); + + Label done; + + const int kInterceptorOrAccessCheckNeededMask = + (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); + + // Bail out if the receiver has a named interceptor or requires access checks. + Register map = scratch1; + __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); + __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); + __ b(ne, miss_label); + + // Check that receiver is a JSObject. + __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE)); + __ b(lt, miss_label); + + // Load properties array. + Register properties = scratch0; + __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + // Check that the properties array is a dictionary. + __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); + Register tmp = properties; + __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); + __ cmp(map, tmp); + __ b(ne, miss_label); + + // Restore the temporarily used register. + __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + + // Compute the capacity mask. + const int kCapacityOffset = + StringDictionary::kHeaderSize + + StringDictionary::kCapacityIndex * kPointerSize; + + // Generate an unrolled loop that performs a few probes before + // giving up. + static const int kProbes = 4; + const int kElementsStartOffset = + StringDictionary::kHeaderSize + + StringDictionary::kElementsStartIndex * kPointerSize; + + // If names of slots in range from 1 to kProbes - 1 for the hash value are + // not equal to the name and kProbes-th slot is not used (its name is the + // undefined value), it guarantees the hash table doesn't contain the + // property. It's true even if some slots represent deleted properties + // (their names are the null value). 
+ for (int i = 0; i < kProbes; i++) { + // scratch0 points to properties hash. + // Compute the masked index: (hash + i + i * i) & mask. + Register index = scratch1; + // Capacity is smi 2^n. + __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); + __ sub(index, index, Operand(1)); + __ and_(index, index, Operand( + Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); + + // Scale the index by multiplying by the entry size. + ASSERT(StringDictionary::kEntrySize == 3); + __ add(index, index, Operand(index, LSL, 1)); // index *= 3. + + Register entity_name = scratch1; + // Having undefined at this place means the name is not contained. + ASSERT_EQ(kSmiTagSize, 1); + Register tmp = properties; + __ add(tmp, properties, Operand(index, LSL, 1)); + __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); + + ASSERT(!tmp.is(entity_name)); + __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); + __ cmp(entity_name, tmp); + if (i != kProbes - 1) { + __ b(eq, &done); + + // Stop if found the property. + __ cmp(entity_name, Operand(Handle(name))); + __ b(eq, miss_label); + + // Restore the properties. + __ ldr(properties, + FieldMemOperand(receiver, JSObject::kPropertiesOffset)); + } else { + // Give up probing if still not found the undefined value. + __ b(ne, miss_label); + } + } + __ bind(&done); + __ DecrementCounter(&Counters::negative_lookups_miss, 1, scratch0, scratch1); +} + + void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, Register receiver, @@ -517,6 +623,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, Label* miss) { ASSERT(holder->HasNamedInterceptor()); ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); @@ -532,6 +639,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { receiver, scratch1, scratch2, + scratch3, holder, lookup, name, @@ -543,6 +651,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { receiver, scratch1, scratch2, + scratch3, name, holder, miss); @@ -555,6 +664,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, JSObject* interceptor_holder, LookupResult* lookup, String* name, @@ -596,7 +706,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register holder = stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, scratch1, - scratch2, name, depth1, miss); + scratch2, scratch3, name, depth1, miss); // Invoke an interceptor and if it provides a value, // branch to |regular_invoke|. @@ -612,7 +722,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { if (interceptor_holder != lookup->holder()) { stub_compiler_->CheckPrototypes(interceptor_holder, receiver, lookup->holder(), scratch1, - scratch2, name, depth2, miss); + scratch2, scratch3, name, depth2, miss); } else { // CheckPrototypes has a side effect of fetching a 'holder' // for API (object which is instanceof for the signature). It's @@ -648,12 +758,13 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, + Register scratch3, String* name, JSObject* interceptor_holder, Label* miss_label) { Register holder = stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, + scratch1, scratch2, scratch3, name, miss_label); // Call a runtime function to load the interceptor property. 
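To make the intent of GenerateDictionaryNegativeLookup easier to follow, here is a plain-C++ sketch of the decision logic the unrolled loop emits (illustrative only: the function and sentinel names are hypothetical, and the probe formula follows the comment above rather than the exact StringDictionary layout):

    #include <cstdint>
    #include <string>
    #include <vector>

    enum class Probe { kDefinitelyAbsent, kFallBackToMiss };

    // 'keys' stands in for the dictionary's key column; an empty string plays the
    // role of the undefined sentinel, and any other unrelated key proves nothing.
    Probe NegativeLookup(const std::vector<std::string>& keys,
                         uint32_t hash, const std::string& name) {
      const uint32_t mask = static_cast<uint32_t>(keys.size()) - 1;  // capacity is a power of two
      const int kProbes = 4;
      for (int i = 0; i < kProbes; i++) {
        uint32_t index = (hash + i + i * i) & mask;  // quadratic probing, as in the comment above
        const std::string& key = keys[index];
        if (key.empty()) return Probe::kDefinitelyAbsent;  // hit the undefined sentinel: not present
        if (key == name) return Probe::kFallBackToMiss;    // the property exists: take the miss path
        // A deleted or unrelated key proves nothing; keep probing.
      }
      return Probe::kFallBackToMiss;  // give up after kProbes and let the backup check decide
    }

As the comment above notes, this check can only produce false negatives (fall back to the miss label), never false positives, which is why CheckPrototypes below can use it as a fast path for prototype objects whose properties are in dictionary mode.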
@@ -738,36 +849,134 @@ Register StubCompiler::CheckPrototypes(JSObject* object, Register object_reg, JSObject* holder, Register holder_reg, - Register scratch, + Register scratch1, + Register scratch2, String* name, int save_at_depth, - Label* miss, - Register extra) { - // Check that the maps haven't changed. - Register result = - masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, - save_at_depth, miss); + Label* miss) { + // Make sure there's no overlap between holder and object registers. + ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); + ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) + && !scratch2.is(scratch1)); + + // Keep track of the current object in register reg. + Register reg = object_reg; + int depth = 0; + + if (save_at_depth == depth) { + __ str(reg, MemOperand(sp)); + } + + // Check the maps in the prototype chain. + // Traverse the prototype chain from the object and do map checks. + JSObject* current = object; + while (current != holder) { + depth++; + + // Only global objects and objects that do not require access + // checks are allowed in stubs. + ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); + + JSObject* prototype = JSObject::cast(current->GetPrototype()); + if (!current->HasFastProperties() && + !current->IsJSGlobalObject() && + !current->IsJSGlobalProxy()) { + if (!name->IsSymbol()) { + Object* lookup_result = Heap::LookupSymbol(name); + if (lookup_result->IsFailure()) { + set_failure(Failure::cast(lookup_result)); + return reg; + } else { + name = String::cast(lookup_result); + } + } + ASSERT(current->property_dictionary()->FindEntry(name) == + StringDictionary::kNotFound); + + GenerateDictionaryNegativeLookup(masm(), + miss, + reg, + name, + scratch1, + scratch2); + __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); + reg = holder_reg; // from now the object is in holder_reg + __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); + } else { + // Get the map of the current object. + __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); + __ cmp(scratch1, Operand(Handle(current->map()))); + + // Branch on the result of the map check. + __ b(ne, miss); + + // Check access rights to the global object. This has to happen + // after the map check so that we know that the object is + // actually a global object. + if (current->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(reg, scratch1, miss); + // Restore scratch register to be the map of the object. In the + // new space case below, we load the prototype from the map in + // the scratch register. + __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); + } + + reg = holder_reg; // from now the object is in holder_reg + if (Heap::InNewSpace(prototype)) { + // The prototype is in new space; we cannot store a reference + // to it in the code. Load it from the map. + __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); + } else { + // The prototype is in old space; load it directly. + __ mov(reg, Operand(Handle(prototype))); + } + } + + if (save_at_depth == depth) { + __ str(reg, MemOperand(sp)); + } + + // Go to the next object in the prototype chain. + current = prototype; + } + + // Check the holder map. + __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); + __ cmp(scratch1, Operand(Handle(current->map()))); + __ b(ne, miss); + + // Log the check depth. 
+ LOG(IntEvent("check-maps-depth", depth + 1)); + + // Perform security check for access to the global object and return + // the holder register. + ASSERT(current == holder); + ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); + if (current->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(reg, scratch1, miss); + } // If we've skipped any global objects, it's not enough to verify // that their maps haven't changed. We also need to check that the // property cell for the property is still empty. - while (object != holder) { - if (object->IsGlobalObject()) { + current = object; + while (current != holder) { + if (current->IsGlobalObject()) { Object* cell = GenerateCheckPropertyCell(masm(), - GlobalObject::cast(object), + GlobalObject::cast(current), name, - scratch, + scratch1, miss); if (cell->IsFailure()) { set_failure(Failure::cast(cell)); - return result; + return reg; } } - object = JSObject::cast(object->GetPrototype()); + current = JSObject::cast(current->GetPrototype()); } // Return the register containing the holder. - return result; + return reg; } @@ -776,6 +985,7 @@ void StubCompiler::GenerateLoadField(JSObject* object, Register receiver, Register scratch1, Register scratch2, + Register scratch3, int index, String* name, Label* miss) { @@ -785,7 +995,8 @@ void StubCompiler::GenerateLoadField(JSObject* object, // Check that the maps haven't changed. Register reg = - CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, + name, miss); GenerateFastPropertyLoad(masm(), r0, reg, holder, index); __ Ret(); } @@ -796,6 +1007,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, Register receiver, Register scratch1, Register scratch2, + Register scratch3, Object* value, String* name, Label* miss) { @@ -805,7 +1017,8 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, // Check that the maps haven't changed. Register reg = - CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + CheckPrototypes(object, receiver, holder, + scratch1, scratch2, scratch3, name, miss); // Return the constant value. __ mov(r0, Operand(Handle(value))); @@ -819,6 +1032,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, Register name_reg, Register scratch1, Register scratch2, + Register scratch3, AccessorInfo* callback, String* name, Label* miss, @@ -829,7 +1043,8 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object, // Check that the maps haven't changed. Register reg = - CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, + name, miss); // Push the arguments on the JS stack of the caller. __ push(receiver); // Receiver. @@ -854,6 +1069,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, Register name_reg, Register scratch1, Register scratch2, + Register scratch3, String* name, Label* miss) { ASSERT(interceptor_holder->HasNamedInterceptor()); @@ -881,7 +1097,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, + name, miss); ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1)); // Save necessary data before invoking an interceptor. 
@@ -930,6 +1147,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, lookup->holder(), scratch1, scratch2, + scratch3, name, miss); } @@ -975,7 +1193,8 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, // Call the runtime system to load the interceptor. // Check that the maps haven't changed. Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, name, miss); + scratch1, scratch2, scratch3, + name, miss); PushInterceptorArguments(masm(), receiver, holder_reg, name_reg, interceptor_holder); @@ -1053,7 +1272,7 @@ Object* CallStubCompiler::CompileCallField(JSObject* object, __ b(eq, &miss); // Do the right check and compute the holder register. - Register reg = CheckPrototypes(object, r0, holder, r1, r3, name, &miss); + Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss); GenerateFastPropertyLoad(masm(), r1, reg, holder, index); GenerateCallFunction(masm(), object, arguments(), &miss); @@ -1098,7 +1317,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, __ b(eq, &miss); // Check that the maps haven't changed. - CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss); + CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss); if (object->IsGlobalObject()) { __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); @@ -1149,7 +1368,7 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, __ b(eq, &miss); // Check that the maps haven't changed. - CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, name, &miss); + CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss); if (object->IsGlobalObject()) { __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); @@ -1246,7 +1465,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, } // Check that the maps haven't changed. - CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, name, + CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name, depth, &miss); // Patch the receiver on the stack with the global proxy if @@ -1270,7 +1489,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::STRING_FUNCTION_INDEX, r0); CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3, - r1, name, &miss); + r1, r4, name, &miss); } break; @@ -1290,7 +1509,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::NUMBER_FUNCTION_INDEX, r0); CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3, - r1, name, &miss); + r1, r4, name, &miss); } break; } @@ -1313,7 +1532,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateDirectLoadGlobalFunctionPrototype( masm(), Context::BOOLEAN_FUNCTION_INDEX, r0); CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3, - r1, name, &miss); + r1, r4, name, &miss); } break; } @@ -1372,6 +1591,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object, r1, r3, r4, + r0, &miss); // Move returned value, the function to call, to r1. @@ -1418,7 +1638,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, } // Check that the maps haven't changed. - CheckPrototypes(object, r0, holder, r3, r1, name, &miss); + CheckPrototypes(object, r0, holder, r3, r1, r4, name, &miss); // Get the value from the cell. 
__ mov(r3, Operand(Handle(cell))); @@ -1642,7 +1862,7 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name, __ b(eq, &miss); // Check the maps of the full prototype chain. - CheckPrototypes(object, r0, last, r3, r1, name, &miss); + CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss); // If the last object in the prototype chain is a global object, // check that the global property cell is empty. @@ -1679,7 +1899,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object, // ----------------------------------- Label miss; - GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss); + GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1700,7 +1920,7 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name, Label miss; Failure* failure = Failure::InternalError(); - bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, + bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name, &miss, &failure); if (!success) return failure; @@ -1723,7 +1943,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object, // ----------------------------------- Label miss; - GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss); + GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1751,6 +1971,7 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object, r2, r3, r1, + r4, name, &miss); __ bind(&miss); @@ -1782,7 +2003,7 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, } // Check that the map of the global has not changed. - CheckPrototypes(object, r0, holder, r3, r4, name, &miss); + CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss); // Get the value from the cell. 
__ mov(r3, Operand(Handle(cell))); @@ -1823,7 +2044,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss); + GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1847,7 +2068,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, __ b(ne, &miss); Failure* failure = Failure::InternalError(); - bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, + bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name, &miss, &failure); if (!success) return failure; @@ -1873,7 +2094,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ cmp(r0, Operand(Handle(name))); __ b(ne, &miss); - GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss); + GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1905,6 +2126,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, r0, r2, r3, + r4, name, &miss); __ bind(&miss); diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index bbd69ecaba..e1d4489d44 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -812,6 +812,9 @@ void Genesis::InitializeGlobal(Handle inner_global, initial_map->set_instance_size( initial_map->instance_size() + 5 * kPointerSize); initial_map->set_instance_descriptors(*descriptors); + initial_map->set_scavenger( + Heap::GetScavenger(initial_map->instance_type(), + initial_map->instance_size())); } { // -- J S O N diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index ad52ea18b8..3a0393efbc 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -1475,7 +1475,7 @@ void Builtins::Setup(bool create_heap_objects) { // During startup it's OK to always allocate and defer GC to later. // This simplifies things because we don't need to retry. AlwaysAllocateScope __scope__; - code = Heap::CreateCode(desc, NULL, flags, masm.CodeObject()); + code = Heap::CreateCode(desc, flags, masm.CodeObject()); if (code->IsFailure()) { v8::internal::V8::FatalProcessOutOfMemory("CreateCode"); } diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 9d5969bb46..78062b4036 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -102,8 +102,7 @@ Handle CodeStub::GetCode() { static_cast(GetCodeKind()), InLoop(), GetICState()); - Handle new_object = - Factory::NewCode(desc, NULL, flags, masm.CodeObject()); + Handle new_object = Factory::NewCode(desc, flags, masm.CodeObject()); RecordCodeGeneration(*new_object, &masm); if (has_custom_cache()) { @@ -140,8 +139,7 @@ Object* CodeStub::TryGetCode() { static_cast(GetCodeKind()), InLoop(), GetICState()); - Object* new_object = - Heap::CreateCode(desc, NULL, flags, masm.CodeObject()); + Object* new_object = Heap::CreateCode(desc, flags, masm.CodeObject()); if (new_object->IsFailure()) return new_object; code = Code::cast(new_object); RecordCodeGeneration(code, &masm); diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 8864c95a6f..84b73a4ecb 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -162,9 +162,7 @@ Handle CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm, // Allocate and install the code. 
CodeDesc desc; masm->GetCode(&desc); - ZoneScopeInfo sinfo(info->scope()); - Handle code = - Factory::NewCode(desc, &sinfo, flags, masm->CodeObject()); + Handle code = Factory::NewCode(desc, flags, masm->CodeObject()); #ifdef ENABLE_DISASSEMBLER bool print_code = Bootstrapper::IsActive() diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index ebb9743562..ec6b5ffb4a 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -40,6 +40,7 @@ #include "oprofile-agent.h" #include "rewriter.h" #include "scopes.h" +#include "scopeinfo.h" namespace v8 { namespace internal { @@ -156,7 +157,12 @@ static Handle MakeCode(Handle context, CompilationInfo* info) { #ifdef ENABLE_DEBUGGER_SUPPORT Handle MakeCodeForLiveEdit(CompilationInfo* info) { Handle context = Handle::null(); - return MakeCode(context, info); + Handle code = MakeCode(context, info); + if (!info->shared_info().is_null()) { + info->shared_info()->set_scope_info( + *ScopeInfo<>::CreateHeapObject(info->scope())); + } + return code; } #endif @@ -252,9 +258,11 @@ static Handle MakeFunctionInfo(bool is_global, // Allocate function. Handle result = - Factory::NewSharedFunctionInfo(lit->name(), - lit->materialized_literal_count(), - code); + Factory::NewSharedFunctionInfo( + lit->name(), + lit->materialized_literal_count(), + code, + ScopeInfo<>::CreateHeapObject(info.scope())); ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); Compiler::SetFunctionInfo(result, lit, true, script); @@ -275,9 +283,6 @@ static Handle MakeFunctionInfo(bool is_global, } -static StaticResource safe_string_input_buffer; - - Handle Compiler::Compile(Handle source, Handle script_name, int line_offset, @@ -306,9 +311,7 @@ Handle Compiler::Compile(Handle source, // No cache entry found. Do pre-parsing and compile the script. ScriptDataImpl* pre_data = input_pre_data; if (pre_data == NULL && source_length >= FLAG_min_preparse_length) { - Access buf(&safe_string_input_buffer); - buf->Reset(source.location()); - pre_data = PreParse(source, buf.value(), extension); + pre_data = PreParse(source, NULL, extension); } // Create a script object describing the script to be compiled. @@ -445,8 +448,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) { info->script(), code); - // Update the shared function info with the compiled code. + // Update the shared function info with the compiled code and the scope info. shared->set_code(*code); + shared->set_scope_info(*ScopeInfo<>::CreateHeapObject(info->scope())); // Set the expected number of properties for instances. SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count()); @@ -481,6 +485,8 @@ Handle Compiler::BuildFunctionInfo(FunctionLiteral* literal, bool allow_lazy = literal->AllowsLazyCompilation() && !LiveEditFunctionTracker::IsActive(); + Handle scope_info(ScopeInfo<>::EmptyHeapObject()); + // Generate code Handle code; if (FLAG_lazy && allow_lazy) { @@ -562,13 +568,15 @@ Handle Compiler::BuildFunctionInfo(FunctionLiteral* literal, literal->start_position(), script, code); + scope_info = ScopeInfo<>::CreateHeapObject(info.scope()); } // Create a shared function info object. 
Handle result = Factory::NewSharedFunctionInfo(literal->name(), literal->materialized_literal_count(), - code); + code, + scope_info); SetFunctionInfo(result, literal, false, script); // Set the expected number of properties for instances and return diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc index 19920d22ee..1eab24c28e 100644 --- a/deps/v8/src/contexts.cc +++ b/deps/v8/src/contexts.cc @@ -120,9 +120,9 @@ Handle Context::Lookup(Handle name, ContextLookupFlags flags, // we have context-local slots // check non-parameter locals in context - Handle code(context->closure()->code()); + Handle scope_info(context->closure()->shared()->scope_info()); Variable::Mode mode; - int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode); + int index = ScopeInfo<>::ContextSlotIndex(*scope_info, *name, &mode); ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS); if (index >= 0) { // slot found @@ -150,11 +150,11 @@ Handle Context::Lookup(Handle name, ContextLookupFlags flags, } // check parameter locals in context - int param_index = ScopeInfo<>::ParameterIndex(*code, *name); + int param_index = ScopeInfo<>::ParameterIndex(*scope_info, *name); if (param_index >= 0) { // slot found. int index = - ScopeInfo<>::ContextSlotIndex(*code, + ScopeInfo<>::ContextSlotIndex(*scope_info, Heap::arguments_shadow_symbol(), NULL); ASSERT(index >= 0); // arguments must exist and be in the heap context @@ -170,7 +170,7 @@ Handle Context::Lookup(Handle name, ContextLookupFlags flags, // check intermediate context (holding only the function name variable) if (follow_context_chain) { - int index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name); + int index = ScopeInfo<>::FunctionContextSlotIndex(*scope_info, *name); if (index >= 0) { // slot found if (FLAG_trace_contexts) { @@ -216,18 +216,18 @@ bool Context::GlobalIfNotShadowedByEval(Handle name) { ASSERT(context->is_function_context()); // Check non-parameter locals. - Handle code(context->closure()->code()); + Handle scope_info(context->closure()->shared()->scope_info()); Variable::Mode mode; - int index = ScopeInfo<>::ContextSlotIndex(*code, *name, &mode); + int index = ScopeInfo<>::ContextSlotIndex(*scope_info, *name, &mode); ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS); if (index >= 0) return false; // Check parameter locals. - int param_index = ScopeInfo<>::ParameterIndex(*code, *name); + int param_index = ScopeInfo<>::ParameterIndex(*scope_info, *name); if (param_index >= 0) return false; // Check context only holding the function name variable. 
- index = ScopeInfo<>::FunctionContextSlotIndex(*code, *name); + index = ScopeInfo<>::FunctionContextSlotIndex(*scope_info, *name); if (index >= 0) return false; context = Context::cast(context->closure()->context()); } diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index b8e0252aae..4922a62860 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -759,7 +759,7 @@ bool Debug::CompileDebuggerScript(int index) { if (caught_exception) { Handle message = MessageHandler::MakeMessageObject( "error_loading_debugger", NULL, Vector >::empty(), - Handle()); + Handle(), Handle()); MessageHandler::ReportMessage(NULL, message); return false; } diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index 39e881ac3d..18be639f39 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -277,6 +277,8 @@ Handle Factory::CopyMap(Handle src, copy->set_inobject_properties(inobject_properties); copy->set_unused_property_fields(inobject_properties); copy->set_instance_size(copy->instance_size() + instance_size_delta); + copy->set_scavenger(Heap::GetScavenger(copy->instance_type(), + copy->instance_size())); return copy; } @@ -541,10 +543,9 @@ Handle Factory::NewFunctionWithoutPrototype(Handle name, Handle Factory::NewCode(const CodeDesc& desc, - ZoneScopeInfo* sinfo, Code::Flags flags, Handle self_ref) { - CALL_HEAP_FUNCTION(Heap::CreateCode(desc, sinfo, flags, self_ref), Code); + CALL_HEAP_FUNCTION(Heap::CreateCode(desc, flags, self_ref), Code); } @@ -680,9 +681,13 @@ Handle Factory::NewJSArrayWithElements(Handle elements, Handle Factory::NewSharedFunctionInfo( - Handle name, int number_of_literals, Handle code) { + Handle name, + int number_of_literals, + Handle code, + Handle scope_info) { Handle shared = NewSharedFunctionInfo(name); shared->set_code(*code); + shared->set_scope_info(*scope_info); int literals_array_size = number_of_literals; // If the function contains object, regexp or array literals, // allocate extra space for a literals array prefix containing the diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 56deda5ab5..0576d74a6e 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -34,9 +34,6 @@ namespace v8 { namespace internal { -// Forward declarations. -class ZoneScopeInfo; - // Interface for handle based allocation. class Factory : public AllStatic { @@ -241,7 +238,6 @@ class Factory : public AllStatic { PretenureFlag pretenure = TENURED); static Handle NewCode(const CodeDesc& desc, - ZoneScopeInfo* sinfo, Code::Flags flags, Handle self_reference); @@ -352,7 +348,10 @@ class Factory : public AllStatic { } static Handle NewSharedFunctionInfo( - Handle name, int number_of_literals, Handle code); + Handle name, + int number_of_literals, + Handle code, + Handle scope_info); static Handle NewSharedFunctionInfo(Handle name); static Handle DictionaryAtNumberPut( diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index 67a20d3cb8..8b601b67b7 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -532,8 +532,11 @@ void JavaScriptFrame::Print(StringStream* accumulator, if (IsConstructor()) accumulator->Add("new "); accumulator->PrintFunction(function, receiver, &code); + Handle scope_info(ScopeInfo<>::EmptyHeapObject()); + if (function->IsJSFunction()) { Handle shared(JSFunction::cast(function)->shared()); + scope_info = Handle(shared->scope_info()); Object* script_obj = shared->script(); if (script_obj->IsScript()) { Handle