Upgrade V8 to 2.1.7

v0.7.4-release
Ryan Dahl
commit b35d72df76
  1. deps/v8/ChangeLog (16 changes)
  2. deps/v8/src/SConscript (4 changes)
  3. deps/v8/src/arm/assembler-arm-inl.h (31 changes)
  4. deps/v8/src/arm/assembler-arm.cc (10 changes)
  5. deps/v8/src/arm/assembler-arm.h (29 changes)
  6. deps/v8/src/arm/constants-arm.h (5 changes)
  7. deps/v8/src/arm/debug-arm.cc (10 changes)
  8. deps/v8/src/arm/macro-assembler-arm.cc (32 changes)
  9. deps/v8/src/arm/macro-assembler-arm.h (2 changes)
  10. deps/v8/src/arm/regexp-macro-assembler-arm.cc (8 changes)
  11. deps/v8/src/arm/virtual-frame-arm.h (1 change)
  12. deps/v8/src/array.js (22 changes)
  13. deps/v8/src/ast.h (17 changes)
  14. deps/v8/src/cached-powers.h (119 changes)
  15. deps/v8/src/checks.h (22 changes)
  16. deps/v8/src/circular-queue-inl.h (101 changes)
  17. deps/v8/src/circular-queue.cc (131 changes)
  18. deps/v8/src/circular-queue.h (130 changes)
  19. deps/v8/src/conversions.cc (16 changes)
  20. deps/v8/src/counters.h (2 changes)
  21. deps/v8/src/cpu-profiler-inl.h (50 changes)
  22. deps/v8/src/cpu-profiler.cc (201 changes)
  23. deps/v8/src/cpu-profiler.h (188 changes)
  24. deps/v8/src/data-flow.cc (287 changes)
  25. deps/v8/src/data-flow.h (49 changes)
  26. deps/v8/src/diy-fp.cc (58 changes)
  27. deps/v8/src/diy-fp.h (117 changes)
  28. deps/v8/src/double.h (169 changes)
  29. deps/v8/src/fast-codegen.cc (17 changes)
  30. deps/v8/src/fast-dtoa.cc (508 changes)
  31. deps/v8/src/fast-dtoa.h (55 changes)
  32. deps/v8/src/frames.cc (33 changes)
  33. deps/v8/src/globals.h (5 changes)
  34. deps/v8/src/handles.cc (24 changes)
  35. deps/v8/src/handles.h (2 changes)
  36. deps/v8/src/heap.h (40 changes)
  37. deps/v8/src/ia32/codegen-ia32.cc (138 changes)
  38. deps/v8/src/ia32/codegen-ia32.h (6 changes)
  39. deps/v8/src/ia32/fast-codegen-ia32.cc (17 changes)
  40. deps/v8/src/ia32/macro-assembler-ia32.cc (10 changes)
  41. deps/v8/src/ia32/macro-assembler-ia32.h (11 changes)
  42. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (23 changes)
  43. deps/v8/src/ia32/virtual-frame-ia32.h (3 changes)
  44. deps/v8/src/jsregexp.cc (214 changes)
  45. deps/v8/src/jsregexp.h (31 changes)
  46. deps/v8/src/liveedit.cc (32 changes)
  47. deps/v8/src/math.js (2 changes)
  48. deps/v8/src/objects-inl.h (11 changes)
  49. deps/v8/src/objects.h (3 changes)
  50. deps/v8/src/parser.cc (4 changes)
  51. deps/v8/src/platform.h (6 changes)
  52. deps/v8/src/powers-ten.h (2461 changes)
  53. deps/v8/src/profile-generator-inl.h (29 changes)
  54. deps/v8/src/profile-generator.cc (157 changes)
  55. deps/v8/src/profile-generator.h (122 changes)
  56. deps/v8/src/rewriter.cc (4 changes)
  57. deps/v8/src/runtime.cc (83 changes)
  58. deps/v8/src/runtime.h (2 changes)
  59. deps/v8/src/serialize.cc (4 changes)
  60. deps/v8/src/top.cc (11 changes)
  61. deps/v8/src/top.h (3 changes)
  62. deps/v8/src/utils.h (1 change)
  63. deps/v8/src/v8.cc (2 changes)
  64. deps/v8/src/v8threads.cc (11 changes)
  65. deps/v8/src/v8threads.h (15 changes)
  66. deps/v8/src/version.cc (2 changes)
  67. deps/v8/src/virtual-frame-inl.h (5 changes)
  68. deps/v8/src/x64/assembler-x64.cc (38 changes)
  69. deps/v8/src/x64/assembler-x64.h (7 changes)
  70. deps/v8/src/x64/codegen-x64.cc (368 changes)
  71. deps/v8/src/x64/codegen-x64.h (59 changes)
  72. deps/v8/src/x64/disasm-x64.cc (4 changes)
  73. deps/v8/src/x64/ic-x64.cc (119 changes)
  74. deps/v8/src/x64/regexp-macro-assembler-x64.cc (16 changes)
  75. deps/v8/src/x64/virtual-frame-x64.h (1 change)
  76. deps/v8/test/cctest/SConscript (6 changes)
  77. deps/v8/test/cctest/gay-shortest.cc (100048 changes)
  78. deps/v8/test/cctest/gay-shortest.h (44 changes)
  79. deps/v8/test/cctest/test-circular-queue.cc (127 changes)
  80. deps/v8/test/cctest/test-cpu-profiler.cc (202 changes)
  81. deps/v8/test/cctest/test-diy-fp.cc (67 changes)
  82. deps/v8/test/cctest/test-double.cc (204 changes)
  83. deps/v8/test/cctest/test-fast-dtoa.cc (116 changes)
  84. deps/v8/test/cctest/test-profile-generator.cc (97 changes)
  85. deps/v8/test/mjsunit/compiler/loopcount.js (37 changes)
  86. deps/v8/test/mjsunit/math-round.js (49 changes)
  87. deps/v8/test/mjsunit/regress/regress-646.js (33 changes)
  88. deps/v8/tools/generate-ten-powers.scm (286 changes)
  89. deps/v8/tools/gyp/v8.gyp (11 changes)
  90. deps/v8/tools/v8.xcodeproj/project.pbxproj (20 changes)
  91. deps/v8/tools/visual_studio/v8_base.vcproj (24 changes)
  92. deps/v8/tools/visual_studio/v8_base_arm.vcproj (24 changes)
  93. deps/v8/tools/visual_studio/v8_base_x64.vcproj (24 changes)
  94. deps/v8/tools/visual_studio/v8_cctest.vcproj (12 changes)
  95. deps/v8/tools/visual_studio/v8_cctest_arm.vcproj (12 changes)
  96. deps/v8/tools/visual_studio/v8_cctest_x64.vcproj (12 changes)

deps/v8/ChangeLog (16 changes)

@@ -1,9 +1,23 @@
2010-03-17: Version 2.1.5
2010-03-22: Version 2.1.7
Fixed issue 650.
Fixed a bug where __proto__ was sometimes enumerated (issue 646).
Performance improvements for arithmetic operations.
Performance improvements for string operations.
Print script name and line number information in stack trace.
2010-03-17: Version 2.1.6
Performance improvements for arithmetic operations.
Performance improvements for string operations.
2010-03-10: Version 2.1.4
Fixed code cache lookup for keyed IC's (issue http://crbug.com/37853).

deps/v8/src/SConscript (4 changes)

@@ -43,6 +43,7 @@ SOURCES = {
bootstrapper.cc
builtins.cc
checks.cc
circular-queue.cc
code-stubs.cc
codegen.cc
compilation-cache.cc
@@ -50,11 +51,13 @@ SOURCES = {
contexts.cc
conversions.cc
counters.cc
cpu-profiler.cc
data-flow.cc
dateparser.cc
debug-agent.cc
debug.cc
disassembler.cc
diy-fp.cc
execution.cc
factory.cc
flags.cc
@@ -63,6 +66,7 @@ SOURCES = {
full-codegen.cc
func-name-inferrer.cc
global-handles.cc
fast-dtoa.cc
handles.cc
hashmap.cc
heap-profiler.cc

deps/v8/src/arm/assembler-arm-inl.h (31 changes)

@@ -144,12 +144,21 @@ void RelocInfo::set_call_object(Object* target) {
bool RelocInfo::IsPatchedReturnSequence() {
// On ARM a "call instruction" is actually two instructions.
// mov lr, pc
// ldr pc, [pc, #XXX]
return (Assembler::instr_at(pc_) == kMovLrPc)
&& ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
== kLdrPCPattern);
Instr current_instr = Assembler::instr_at(pc_);
Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
#ifdef USE_BLX
// A patched return sequence is:
// ldr ip, [pc, #0]
// blx ip
return ((current_instr & kLdrPCMask) == kLdrPCPattern)
&& ((next_instr & kBlxRegMask) == kBlxRegPattern);
#else
// A patched return sequence is:
// mov lr, pc
// ldr pc, [pc, #-4]
return (current_instr == kMovLrPc)
&& ((next_instr & kLdrPCMask) == kLdrPCPattern);
#endif
}
@@ -225,6 +234,16 @@ Address Assembler::target_address_address_at(Address pc) {
target_pc -= kInstrSize;
instr = Memory::int32_at(target_pc);
}
#ifdef USE_BLX
// If we have a blx instruction, the instruction before it is
// what needs to be patched.
if ((instr & kBlxRegMask) == kBlxRegPattern) {
target_pc -= kInstrSize;
instr = Memory::int32_at(target_pc);
}
#endif
// Verify that the instruction to patch is a
// ldr<cond> <Rd>, [pc +/- offset_12].
ASSERT((instr & 0x0f7f0000) == 0x051f0000);

deps/v8/src/arm/assembler-arm.cc (10 changes)

@@ -240,8 +240,14 @@ static const Instr kPopRegPattern =
al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = CondMask | 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
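
For readers decoding the new constants: the mask and pattern above pick out the ARMv5 BLX-register encoding. A standalone sketch (not part of the patch; the B4..B24 bit helpers are redefined locally under the assumption they mean 1 << n, as in V8's constants) that checks the arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t B4 = 1u << 4, B8 = 1u << 8, B12 = 1u << 12,
                 B16 = 1u << 16, B20 = 1u << 20, B21 = 1u << 21,
                 B24 = 1u << 24;
  const uint32_t mask =
      15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
  const uint32_t pattern = B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
  assert(mask == 0x0FFFFFF0u);     // Everything except cond and Rm.
  assert(pattern == 0x012FFF30u);  // BLX<cond> Rm.
  const uint32_t blx_ip = 0xE12FFF3Cu;  // 'blx ip' (ip = r12, cond = AL).
  assert((blx_ip & mask) == pattern);
  return 0;
}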

deps/v8/src/arm/assembler-arm.h (29 changes)

@@ -509,7 +509,10 @@ typedef int32_t Instr;
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
class Assembler : public Malloced {
@@ -590,12 +593,34 @@ class Assembler : public Malloced {
static const int kInstrSize = sizeof(Instr);
// Distance between the instruction referring to the address of the call
// target (ldr pc, [target addr in const pool]) and the return address
// target and the return address.
#ifdef USE_BLX
// Call sequence is:
// ldr ip, [pc, #...] @ call address
// blx ip
// @ return address
static const int kCallTargetAddressOffset = 2 * kInstrSize;
#else
// Call sequence is:
// mov lr, pc
// ldr pc, [pc, #...] @ call address
// @ return address
static const int kCallTargetAddressOffset = kInstrSize;
#endif
// Distance between start of patched return sequence and the emitted address
// to jump to.
static const int kPatchReturnSequenceAddressOffset = kInstrSize;
#ifdef USE_BLX
// Return sequence is:
// ldr ip, [pc, #0] @ emitted address and start
// blx ip
static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
#else
// Return sequence is:
// mov lr, pc @ start of sequence
// ldr pc, [pc, #-4] @ emitted address
static const int kPatchReturnSequenceAddressOffset = kInstrSize;
#endif
// Difference between address of current opcode and value read from pc
// register.
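
Taken together, these constants let the patching code walk from a return address back to the instruction that must be rewritten. A small hypothetical helper, not from the patch, showing how kCallTargetAddressOffset would be applied (Address and kInstrSize are local stand-ins for the V8 types):

#include <cstdint>

typedef unsigned char* Address;
const int kInstrSize = 4;  // sizeof(Instr) on ARM.

// With USE_BLX the target-loading ldr sits two instructions before the
// return address (ldr ip, ...; blx ip; <return>); without it, only one
// (mov lr, pc; ldr pc, ...; <return>).
Address TargetLoadLocation(Address return_address, bool use_blx) {
  const int call_target_address_offset = use_blx ? 2 * kInstrSize : kInstrSize;
  return return_address - call_target_address_offset;
}

int main() {
  unsigned char code[16] = {0};
  // For a return address of code + 12, the ldr is at code + 4 with blx,
  // or at code + 8 without it.
  return (TargetLoadLocation(code + 12, true) == code + 4 &&
          TargetLoadLocation(code + 12, false) == code + 8) ? 0 : 1;
}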

deps/v8/src/arm/constants-arm.h (5 changes)

@@ -72,6 +72,11 @@
# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
// Using blx may yield better code, so use it when required or when available
#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1
#endif
namespace assembler {
namespace arm {

deps/v8/src/arm/debug-arm.cc (10 changes)

@@ -46,13 +46,23 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// add sp, sp, #4
// bx lr
// to a call to the debug break return code.
// #if USE_BLX
// ldr ip, [pc, #0]
// blx ip
// #else
// mov lr, pc
// ldr pc, [pc, #-4]
// #endif
// <debug break return code entry point address>
// bkpt 0
CodePatcher patcher(rinfo()->pc(), 4);
#ifdef USE_BLX
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
#else
patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
#endif
patcher.Emit(Debug::debug_break_return()->entry());
patcher.masm()->bkpt(0);
}

deps/v8/src/arm/macro-assembler-arm.cc (32 changes)

@@ -58,11 +58,6 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
#endif
// Using blx may yield better code, so use it when required or when available
#if defined(USE_THUMB_INTERWORK) || defined(CAN_USE_ARMV5_INSTRUCTIONS)
#define USE_BLX 1
#endif
// Using bx does not yield better code, so use it only when required
#if defined(USE_THUMB_INTERWORK)
#define USE_BX 1
@ -117,16 +112,33 @@ void MacroAssembler::Call(Register target, Condition cond) {
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
Condition cond) {
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
// blx ip
// The two instructions (ldr and blx) could be separated by a literal
// pool and the code would still work. The issue comes from the
// patching code, which expects the ldr to be just above the blx.
BlockConstPoolFor(2);
// Statement positions are expected to be recorded when the target
// address is loaded. The mov method will automatically record
// positions when pc is the target; since this is not the case here,
// we have to do it explicitly.
WriteRecordedPositions();
mov(ip, Operand(target, rmode), LeaveCC, cond);
blx(ip, cond);
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond);
// If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
// 'blx ip'; however, the code would not be shorter than the above sequence
// and the target address of the call would be referenced by the first
// instruction rather than the second one, which would make it harder to patch
// (two instructions before the return address, instead of one).
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
}

deps/v8/src/arm/macro-assembler-arm.h (2 changes)

@@ -415,7 +415,7 @@ class MacroAssembler: public Assembler {
Register object2,
Register scratch1,
Register scratch2,
Label *failure);
Label* failure);
// Checks if both objects are sequential ASCII strings and jumps to label
// if either is not.

deps/v8/src/arm/regexp-macro-assembler-arm.cc (8 changes)

@@ -648,16 +648,17 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ ldr(r0, MemOperand(frame_pointer(), kInputStart));
// Find negative length (offset of start relative to end).
__ sub(current_input_offset(), r0, end_of_input_address());
// Set r0 to address of char before start of input
// Set r0 to address of char before start of the input string
// (effectively string position -1).
__ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
__ sub(r0, current_input_offset(), Operand(char_size()));
__ sub(r0, r0, Operand(r1, LSL, (mode_ == UC16) ? 1 : 0));
// Store this value in a local variable, for use when clearing
// position registers.
__ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
__ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
__ tst(r1, Operand(r1));
__ mov(r1, Operand(1), LeaveCC, eq);
__ mov(r1, Operand(0), LeaveCC, ne);
@@ -700,12 +701,15 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// copy captures to output
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
__ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput));
__ ldr(r2, MemOperand(frame_pointer(), kStartIndex));
__ sub(r1, end_of_input_address(), r1);
// r1 is length of input in bytes.
if (mode_ == UC16) {
__ mov(r1, Operand(r1, LSR, 1));
}
// r1 is length of input in characters.
__ add(r1, r1, Operand(r2));
// r1 is length of string in characters.
ASSERT_EQ(0, num_saved_registers_ % 2);
// Always an even number of capture registers. This allows us to

deps/v8/src/arm/virtual-frame-arm.h (1 change)

@@ -365,6 +365,7 @@ class VirtualFrame : public ZoneObject {
inline void Nip(int num_dropped);
inline void SetTypeForLocalAt(int index, NumberInfo info);
inline void SetTypeForParamAt(int index, NumberInfo info);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;

deps/v8/src/array.js (22 changes)

@@ -994,11 +994,16 @@ function ArrayIndexOf(element, index) {
// If index is still negative, search the entire array.
if (index < 0) index = 0;
}
if (!IS_UNDEFINED(element)) {
for (var i = index; i < length; i++) {
if (this[i] === element) return i;
}
return -1;
}
// Lookup through the array.
for (var i = index; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (current === element) return i;
if (IS_UNDEFINED(this[i]) && i in this) {
return i;
}
}
return -1;
@@ -1018,10 +1023,15 @@ function ArrayLastIndexOf(element, index) {
else if (index >= length) index = length - 1;
}
// Lookup through the array.
if (!IS_UNDEFINED(element)) {
for (var i = index; i >= 0; i--) {
if (this[i] === element) return i;
}
return -1;
}
for (var i = index; i >= 0; i--) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (current === element) return i;
if (IS_UNDEFINED(this[i]) && i in this) {
return i;
}
}
return -1;

deps/v8/src/ast.h (17 changes)

@@ -196,10 +196,7 @@ class Expression: public AstNode {
kTestValue
};
Expression()
: bitfields_(0),
def_(NULL),
defined_vars_(NULL) {}
Expression() : bitfields_(0) {}
virtual Expression* AsExpression() { return this; }
@@ -233,15 +230,6 @@
// Static type information for this expression.
StaticType* type() { return &type_; }
// Data flow information.
DefinitionInfo* var_def() { return def_; }
void set_var_def(DefinitionInfo* def) { def_ = def; }
ZoneList<DefinitionInfo*>* defined_vars() { return defined_vars_; }
void set_defined_vars(ZoneList<DefinitionInfo*>* defined_vars) {
defined_vars_ = defined_vars;
}
// AST analysis results
// True if the expression rooted at this node can be compiled by the
@@ -284,9 +272,6 @@
uint32_t bitfields_;
StaticType type_;
DefinitionInfo* def_;
ZoneList<DefinitionInfo*>* defined_vars_;
// Using template BitField<type, start, size>.
class SideEffectFreeField : public BitField<bool, 0, 1> {};
class NoNegativeZeroField : public BitField<bool, 1, 1> {};

deps/v8/src/cached-powers.h (119 changes)

@@ -0,0 +1,119 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CACHED_POWERS_H_
#define V8_CACHED_POWERS_H_
#include "diy-fp.h"
namespace v8 {
namespace internal {
struct CachedPower {
uint64_t significand;
int16_t binary_exponent;
int16_t decimal_exponent;
};
// The following defines implement the interface between this file and the
// generated 'powers_ten.h'.
// GRISU_CACHE_NAME(1) contains all possible cached powers.
// GRISU_CACHE_NAME(i) contains GRISU_CACHE_NAME(1) where only every 'i'th
// element is kept. More formally GRISU_CACHE_NAME(i) contains the elements j*i
// with 0 <= j < k, where k is such that j*i < the size of GRISU_CACHE_NAME(1).
// The higher 'i' is, the fewer elements we use.
// Given that there are less elements, the exponent-distance between two
// elements in the cache grows. The variable GRISU_CACHE_MAX_DISTANCE(i) stores
// the maximum distance between two elements.
#define GRISU_CACHE_STRUCT CachedPower
#define GRISU_CACHE_NAME(i) kCachedPowers##i
#define GRISU_CACHE_MAX_DISTANCE(i) kCachedPowersMaxDistance##i
#define GRISU_CACHE_OFFSET kCachedPowerOffset
#define GRISU_UINT64_C V8_2PART_UINT64_C
// The following include imports the precompiled cached powers.
#include "powers-ten.h" // NOLINT
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
// We can't use a function since we reference variables depending on the 'i'.
// This way the compiler is able to see at compile time that only one
// cache-array variable is used and thus can remove all the others.
#define COMPUTE_FOR_CACHE(i) \
if (!found && (gamma - alpha + 1 >= GRISU_CACHE_MAX_DISTANCE(i))) { \
int kQ = DiyFp::kSignificandSize; \
double k = ceiling((alpha - e + kQ - 1) * kD_1_LOG2_10); \
int index = (GRISU_CACHE_OFFSET + static_cast<int>(k) - 1) / i + 1; \
cached_power = GRISU_CACHE_NAME(i)[index]; \
found = true; \
} \
static void GetCachedPower(int e, int alpha, int gamma, int* mk, DiyFp* c_mk) {
// The following if statement should be optimized by the compiler so that only
// one array is referenced and the others are not included in the object file.
bool found = false;
CachedPower cached_power;
COMPUTE_FOR_CACHE(20);
COMPUTE_FOR_CACHE(19);
COMPUTE_FOR_CACHE(18);
COMPUTE_FOR_CACHE(17);
COMPUTE_FOR_CACHE(16);
COMPUTE_FOR_CACHE(15);
COMPUTE_FOR_CACHE(14);
COMPUTE_FOR_CACHE(13);
COMPUTE_FOR_CACHE(12);
COMPUTE_FOR_CACHE(11);
COMPUTE_FOR_CACHE(10);
COMPUTE_FOR_CACHE(9);
COMPUTE_FOR_CACHE(8);
COMPUTE_FOR_CACHE(7);
COMPUTE_FOR_CACHE(6);
COMPUTE_FOR_CACHE(5);
COMPUTE_FOR_CACHE(4);
COMPUTE_FOR_CACHE(3);
COMPUTE_FOR_CACHE(2);
COMPUTE_FOR_CACHE(1);
if (!found) {
UNIMPLEMENTED();
// Silence compiler warnings.
cached_power.significand = 0;
cached_power.binary_exponent = 0;
cached_power.decimal_exponent = 0;
}
*c_mk = DiyFp(cached_power.significand, cached_power.binary_exponent);
*mk = cached_power.decimal_exponent;
ASSERT((alpha <= c_mk->e() + e) && (c_mk->e() + e <= gamma));
}
#undef GRISU_REDUCTION
#undef GRISU_CACHE_STRUCT
#undef GRISU_CACHE_NAME
#undef GRISU_CACHE_MAX_DISTANCE
#undef GRISU_CACHE_OFFSET
#undef GRISU_UINT64_C
} } // namespace v8::internal
#endif // V8_CACHED_POWERS_H_
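
The only arithmetic in GetCachedPower is the choice of the decimal exponent k; the table lookup then rounds k to the nearest cached entry. A standalone sketch of that computation, using <cmath> in place of V8's ceiling() and illustrative values for e and alpha (not taken from the patch):

#include <cmath>
#include <cstdio>

int main() {
  const int kQ = 64;  // DiyFp::kSignificandSize.
  const double kD_1_LOG2_10 = 0.30102999566398114;  // 1 / lg(10).
  const int e = -63;      // Example binary exponent of the input.
  const int alpha = -59;  // Example lower bound of the target range.
  // k is the decimal exponent of a power of ten whose DiyFp exponent,
  // added to e, lands in [alpha, gamma].
  const double k = std::ceil((alpha - e + kQ - 1) * kD_1_LOG2_10);
  std::printf("decimal exponent k = %d\n", static_cast<int>(k));
  return 0;
}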

deps/v8/src/checks.h (22 changes)

@@ -80,6 +80,7 @@ static inline void CheckEqualsHelper(const char* file, int line,
}
}
// Helper function used by the CHECK_EQ function when given int64_t
// arguments. Should not be called directly.
static inline void CheckEqualsHelper(const char* file, int line,
@@ -202,6 +203,27 @@ static inline void CheckEqualsHelper(const char* file,
}
static inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
double expected,
const char* value_source,
double value) {
// Force values to 64 bit memory to truncate 80 bit precision on IA32.
volatile double* exp = new double[1];
*exp = expected;
volatile double* val = new double[1];
*val = value;
if (*exp == *val) {
V8_Fatal(file, line,
"CHECK_NE(%s, %s) failed\n# Value: %f",
expected_source, value_source, *val);
}
delete[] exp;
delete[] val;
}
namespace v8 {
class Value;
template <class T> class Handle;
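
The volatile stores are the point of this helper: on IA32 the x87 unit keeps doubles in 80-bit registers, and forcing both operands out to 64-bit memory makes the comparison reflect the values CHECK_NE would print. A minimal standalone sketch of the same trick, using volatile locals rather than the heap arrays above:

#include <cstdio>

// Compares two doubles through 64-bit memory, as the helper above does.
static bool Equal64(double expected, double value) {
  volatile double exp_mem = expected;  // Spilled to a 64-bit slot.
  volatile double val_mem = value;
  return exp_mem == val_mem;
}

int main() {
  // Without the spill, an x87 build may keep one operand at 80-bit
  // precision and see a difference that vanishes once both are doubles.
  std::printf("%d\n", Equal64(0.5 + 0.25, 0.75));  // 1: exactly equal.
  return 0;
}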

deps/v8/src/circular-queue-inl.h (101 changes)

@@ -0,0 +1,101 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CIRCULAR_BUFFER_INL_H_
#define V8_CIRCULAR_BUFFER_INL_H_
#include "circular-queue.h"
namespace v8 {
namespace internal {
template<typename Record>
CircularQueue<Record>::CircularQueue(int desired_buffer_size_in_bytes)
: buffer_(NewArray<Record>(desired_buffer_size_in_bytes / sizeof(Record))),
buffer_end_(buffer_ + desired_buffer_size_in_bytes / sizeof(Record)),
enqueue_semaphore_(OS::CreateSemaphore((buffer_end_ - buffer_) - 1)),
enqueue_pos_(buffer_),
dequeue_pos_(buffer_) {
// To be able to distinguish between a full and an empty queue
// state, the queue must be capable of containing at least 2
// records.
ASSERT((buffer_end_ - buffer_) >= 2);
}
template<typename Record>
CircularQueue<Record>::~CircularQueue() {
DeleteArray(buffer_);
delete enqueue_semaphore_;
}
template<typename Record>
void CircularQueue<Record>::Dequeue(Record* rec) {
ASSERT(!IsEmpty());
*rec = *dequeue_pos_;
dequeue_pos_ = Next(dequeue_pos_);
// Signal that we have a spare record.
enqueue_semaphore_->Signal();
}
template<typename Record>
void CircularQueue<Record>::Enqueue(const Record& rec) {
// Wait until we have at least one spare record.
enqueue_semaphore_->Wait();
ASSERT(Next(enqueue_pos_) != dequeue_pos_);
*enqueue_pos_ = rec;
enqueue_pos_ = Next(enqueue_pos_);
}
template<typename Record>
Record* CircularQueue<Record>::Next(Record* curr) {
return ++curr != buffer_end_ ? curr : buffer_;
}
void* SamplingCircularQueue::Enqueue() {
Cell* enqueue_pos = reinterpret_cast<Cell*>(
Thread::GetThreadLocal(producer_key_));
WrapPositionIfNeeded(&enqueue_pos);
Thread::SetThreadLocal(producer_key_, enqueue_pos + record_size_);
return enqueue_pos;
}
void SamplingCircularQueue::WrapPositionIfNeeded(
SamplingCircularQueue::Cell** pos) {
if (**pos == kEnd) *pos = buffer_;
}
} } // namespace v8::internal
#endif // V8_CIRCULAR_BUFFER_INL_H_
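
CircularQueue distinguishes full from empty by never letting the enqueue position catch up with the dequeue position, which is why the semaphore is created with capacity - 1 permits. A standalone model of that invariant, with plain indices standing in for the Record pointers:

#include <cassert>

int main() {
  const int kCapacity = 4;               // Buffer holds kCapacity records.
  int enqueue = 0, dequeue = 0, used = 0;
  // Fill until only one spare slot remains (the semaphore's initial count
  // in the real queue is capacity - 1).
  while (used < kCapacity - 1) {
    enqueue = (enqueue + 1) % kCapacity;  // Next() with wrap-around.
    ++used;
    assert(enqueue != dequeue);           // "Full" never aliases "empty".
  }
  while (used > 0) {
    dequeue = (dequeue + 1) % kCapacity;
    --used;
  }
  assert(enqueue == dequeue);             // Empty: positions coincide.
  return 0;
}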

deps/v8/src/circular-queue.cc (131 changes)

@@ -0,0 +1,131 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "circular-queue-inl.h"
namespace v8 {
namespace internal {
SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
int desired_chunk_size_in_bytes,
int buffer_size_in_chunks)
: record_size_(record_size_in_bytes / sizeof(Cell)),
chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
record_size_in_bytes),
chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
buffer_size_(chunk_size_ * buffer_size_in_chunks),
// The distance ensures that producer and consumer never step on
// each other's chunks and helps eviction of produced data from
// the CPU cache (given that the chunk size is bigger than the cache).
producer_consumer_distance_(2 * chunk_size_),
buffer_(NewArray<Cell>(buffer_size_ + 1)) {
ASSERT(buffer_size_in_chunks > 2);
// Only need to keep the first cell of a chunk clean.
for (int i = 0; i < buffer_size_; i += chunk_size_) {
buffer_[i] = kClear;
}
buffer_[buffer_size_] = kEnd;
}
SamplingCircularQueue::~SamplingCircularQueue() {
DeleteArray(buffer_);
}
void SamplingCircularQueue::SetUpProducer() {
producer_key_ = Thread::CreateThreadLocalKey();
Thread::SetThreadLocal(producer_key_, buffer_);
}
void SamplingCircularQueue::TearDownProducer() {
Thread::DeleteThreadLocalKey(producer_key_);
}
void SamplingCircularQueue::SetUpConsumer() {
consumer_key_ = Thread::CreateThreadLocalKey();
ConsumerPosition* cp = new ConsumerPosition;
cp->dequeue_chunk_pos = buffer_;
cp->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
cp->dequeue_pos = NULL;
Thread::SetThreadLocal(consumer_key_, cp);
}
void SamplingCircularQueue::TearDownConsumer() {
delete reinterpret_cast<ConsumerPosition*>(
Thread::GetThreadLocal(consumer_key_));
Thread::DeleteThreadLocalKey(consumer_key_);
}
void* SamplingCircularQueue::StartDequeue() {
ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>(
Thread::GetThreadLocal(consumer_key_));
if (cp->dequeue_pos != NULL) {
return cp->dequeue_pos;
} else {
if (*cp->dequeue_chunk_poll_pos != kClear) {
cp->dequeue_pos = cp->dequeue_chunk_pos;
cp->dequeue_end_pos = cp->dequeue_pos + chunk_size_;
return cp->dequeue_pos;
} else {
return NULL;
}
}
}
void SamplingCircularQueue::FinishDequeue() {
ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>(
Thread::GetThreadLocal(consumer_key_));
cp->dequeue_pos += record_size_;
if (cp->dequeue_pos < cp->dequeue_end_pos) return;
// Move to next chunk.
cp->dequeue_pos = NULL;
*cp->dequeue_chunk_pos = kClear;
cp->dequeue_chunk_pos += chunk_size_;
WrapPositionIfNeeded(&cp->dequeue_chunk_pos);
cp->dequeue_chunk_poll_pos += chunk_size_;
WrapPositionIfNeeded(&cp->dequeue_chunk_poll_pos);
}
void SamplingCircularQueue::FlushResidualRecords() {
ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>(
Thread::GetThreadLocal(consumer_key_));
// Eliminate producer / consumer distance.
cp->dequeue_chunk_poll_pos = cp->dequeue_chunk_pos;
}
} } // namespace v8::internal

deps/v8/src/circular-queue.h (130 changes)

@@ -0,0 +1,130 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CIRCULAR_QUEUE_H_
#define V8_CIRCULAR_QUEUE_H_
namespace v8 {
namespace internal {
// Lock-based blocking circular queue for small records. Intended for
// transfer of small records between a single producer and a single
// consumer. Blocks on enqueue operation if the queue is full.
template<typename Record>
class CircularQueue {
public:
inline explicit CircularQueue(int desired_buffer_size_in_bytes);
inline ~CircularQueue();
INLINE(void Dequeue(Record* rec));
INLINE(void Enqueue(const Record& rec));
INLINE(bool IsEmpty()) { return enqueue_pos_ == dequeue_pos_; }
private:
INLINE(Record* Next(Record* curr));
Record* buffer_;
Record* const buffer_end_;
Semaphore* enqueue_semaphore_;
Record* enqueue_pos_;
Record* dequeue_pos_;
DISALLOW_COPY_AND_ASSIGN(CircularQueue);
};
// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,
// previous unread records are overwritten. The queue is designed with
// the goal of avoiding cache-line thrashing by preventing
// simultaneous reads and writes to adjacent memory locations.
//
// IMPORTANT: as the producer never checks whether chunks are clean, it
// can catch up with and overwrite a chunk that the consumer is currently
// reading, resulting in a corrupt record being read.
class SamplingCircularQueue {
public:
// Executed on the application thread.
SamplingCircularQueue(int record_size_in_bytes,
int desired_chunk_size_in_bytes,
int buffer_size_in_chunks);
~SamplingCircularQueue();
// Executed on the producer (sampler) or application thread.
void SetUpProducer();
// Enqueue returns a pointer to a memory location for storing the next
// record.
INLINE(void* Enqueue());
void TearDownProducer();
// Executed on the consumer (analyzer) thread.
void SetUpConsumer();
// StartDequeue returns a pointer to a memory location for retrieving
// the next record. After the record has been read by the consumer,
// FinishDequeue must be called. Until that moment, subsequent calls
// to StartDequeue will return the same pointer.
void* StartDequeue();
void FinishDequeue();
// Because the consumer trails the producer, the queue must be notified
// when producing has finished so that the remaining records in the
// buffer can be processed.
void FlushResidualRecords();
void TearDownConsumer();
typedef AtomicWord Cell;
// Reserved values for the first cell of a record.
static const Cell kClear = 0; // Marks clean (processed) chunks.
static const Cell kEnd = -1; // Marks the end of the buffer.
private:
struct ConsumerPosition {
Cell* dequeue_chunk_pos;
Cell* dequeue_chunk_poll_pos;
Cell* dequeue_pos;
Cell* dequeue_end_pos;
};
INLINE(void WrapPositionIfNeeded(Cell** pos));
const int record_size_;
const int chunk_size_in_bytes_;
const int chunk_size_;
const int buffer_size_;
const int producer_consumer_distance_;
Cell* buffer_;
// Store producer and consumer data in TLS to avoid modifying the
// same CPU cache line from two threads simultaneously.
Thread::LocalStorageKey consumer_key_;
Thread::LocalStorageKey producer_key_;
};
} } // namespace v8::internal
#endif // V8_CIRCULAR_QUEUE_H_
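
The lock-free design rests on the consumer polling a chunk producer_consumer_distance_ ahead of the one it reads, so reader and writer never touch neighbouring chunks. A standalone model of that poll-ahead rule (plain bools stand in for the kClear cells; this is an illustration, not the V8 code):

#include <cassert>

int main() {
  const int kChunks = 8;
  const int kDistance = 2;   // producer_consumer_distance_, in chunks.
  bool produced[kChunks] = {false};
  int dequeue_chunk = 0;
  int poll_chunk = (dequeue_chunk + kDistance) % kChunks;

  // Nothing produced yet: StartDequeue would return NULL.
  assert(!produced[poll_chunk]);

  // Producer fills chunks 0..2; the poll position is now non-clear, so
  // the consumer may safely drain chunk 0, two chunks behind the writer.
  for (int i = 0; i <= kDistance; ++i) produced[i] = true;
  assert(produced[poll_chunk]);

  // FinishDequeue on the chunk's last record clears it and advances both
  // positions, wrapping as needed.
  produced[dequeue_chunk] = false;  // *dequeue_chunk_pos = kClear.
  dequeue_chunk = (dequeue_chunk + 1) % kChunks;
  poll_chunk = (poll_chunk + 1) % kChunks;
  return 0;
}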

deps/v8/src/conversions.cc (16 changes)

@@ -31,6 +31,7 @@
#include "conversions-inl.h"
#include "factory.h"
#include "fast-dtoa.h"
#include "scanner.h"
namespace v8 {
@@ -382,8 +383,17 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
int decimal_point;
int sign;
char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
int length = StrLength(decimal_rep);
char* decimal_rep;
bool used_gay_dtoa = false;
char fast_dtoa_buffer[kFastDtoaMaximalLength + 1];
int length;
if (FastDtoa(v, fast_dtoa_buffer, &sign, &length, &decimal_point)) {
decimal_rep = fast_dtoa_buffer;
} else {
decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
used_gay_dtoa = true;
length = StrLength(decimal_rep);
}
if (sign) builder.AddCharacter('-');
@@ -418,7 +428,7 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
builder.AddFormatted("%d", exponent);
}
freedtoa(decimal_rep);
if (used_gay_dtoa) freedtoa(decimal_rep);
}
}
return builder.Finalize();
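
The shape of this change is a classic fast path with fallback: try Grisu's FastDtoa into a stack buffer, fall back to Gay's allocating dtoa only when the fast path reports failure, and remember which path produced the string so that only the slow path's result is freed. A standalone sketch of the pattern; FastConvert and SlowConvert are stand-ins invented here, not V8 functions:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

static bool FastConvert(double v, char* buf, std::size_t n) {
  // Pretend the fast path only handles integral values.
  if (v != static_cast<long long>(v)) return false;
  std::snprintf(buf, n, "%lld", static_cast<long long>(v));
  return true;
}

static char* SlowConvert(double v) {  // Always succeeds; caller must free().
  char* buf = static_cast<char*>(std::malloc(32));
  std::snprintf(buf, 32, "%.17g", v);
  return buf;
}

int main() {
  char fast_buffer[32];
  bool used_slow = false;
  char* rep = fast_buffer;
  if (!FastConvert(0.5, fast_buffer, sizeof(fast_buffer))) {
    rep = SlowConvert(0.5);
    used_slow = true;
  }
  std::printf("%s\n", rep);
  if (used_slow) std::free(rep);  // Mirrors the freedtoa() call above.
  return 0;
}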

deps/v8/src/counters.h (2 changes)

@@ -65,7 +65,7 @@ class StatsTable : public AllStatic {
// may receive a different location to store its counter.
// The return value must not be cached and re-used across
// threads, although a single thread is free to cache it.
static int *FindLocation(const char* name) {
static int* FindLocation(const char* name) {
if (!lookup_function_) return NULL;
return lookup_function_(name);
}

deps/v8/src/cpu-profiler-inl.h (50 changes)

@@ -0,0 +1,50 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CPU_PROFILER_INL_H_
#define V8_CPU_PROFILER_INL_H_
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "cpu-profiler.h"
namespace v8 {
namespace internal {
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
TickSampleEventRecord* evt =
reinterpret_cast<TickSampleEventRecord*>(ticks_buffer_.Enqueue());
evt->order = enqueue_order_; // No increment!
return &evt->sample;
}
} } // namespace v8::internal
#endif // V8_CPU_PROFILER_INL_H_

deps/v8/src/cpu-profiler.cc (201 changes)

@@ -0,0 +1,201 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "cpu-profiler-inl.h"
namespace v8 {
namespace internal {
static const int kEventsBufferSize = 256*KB;
static const int kTickSamplesBufferChunkSize = 64*KB;
static const int kTickSamplesBufferChunksCount = 16;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: generator_(generator),
running_(false),
events_buffer_(kEventsBufferSize),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
enqueue_order_(0) { }
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
String* name,
String* resource_name,
int line_number,
Address start,
unsigned size) {
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->type = CodeEventRecord::CODE_CREATION;
rec->order = ++enqueue_order_;
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
rec->size = size;
events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
const char* name,
Address start,
unsigned size) {
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->type = CodeEventRecord::CODE_CREATION;
rec->order = ++enqueue_order_;
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name);
rec->size = size;
events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
int args_count,
Address start,
unsigned size) {
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->type = CodeEventRecord::CODE_CREATION;
rec->order = ++enqueue_order_;
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, args_count);
rec->size = size;
events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::CodeMoveEvent(Address from, Address to) {
CodeEventsContainer evt_rec;
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
rec->type = CodeEventRecord::CODE_MOVE;
rec->order = ++enqueue_order_;
rec->from = from;
rec->to = to;
events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
CodeEventsContainer evt_rec;
CodeDeleteEventRecord* rec = &evt_rec.CodeDeleteEventRecord_;
rec->type = CodeEventRecord::CODE_DELETE;
rec->order = ++enqueue_order_;
rec->start = from;
events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
Address start) {
CodeEventsContainer evt_rec;
CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
rec->type = CodeEventRecord::CODE_ALIAS;
rec->order = ++enqueue_order_;
rec->alias = alias;
rec->start = start;
events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
CodeMoveEvent(from, to);
}
void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
CodeDeleteEvent(from);
}
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
if (!events_buffer_.IsEmpty()) {
CodeEventsContainer record;
events_buffer_.Dequeue(&record);
switch (record.generic.type) {
#define PROFILER_TYPE_CASE(type, clss) \
case CodeEventRecord::type: \
record.clss##_.UpdateCodeMap(generator_->code_map()); \
break;
CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)
#undef PROFILER_TYPE_CASE
default: return true; // Skip record.
}
*dequeue_order = record.generic.order;
return true;
}
return false;
}
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
while (true) {
const TickSampleEventRecord* rec =
reinterpret_cast<TickSampleEventRecord*>(ticks_buffer_.StartDequeue());
if (rec == NULL) return false;
if (rec->order == dequeue_order) {
generator_->RecordTickSample(rec->sample);
ticks_buffer_.FinishDequeue();
} else {
return true;
}
}
}
void ProfilerEventsProcessor::Run() {
ticks_buffer_.SetUpConsumer();
unsigned dequeue_order = 0;
running_ = true;
while (running_) {
// Process ticks while we have any.
if (ProcessTicks(dequeue_order)) {
// All ticks of the current dequeue_order are processed,
// proceed to the next code event.
ProcessCodeEvent(&dequeue_order);
}
YieldCPU();
}
// Process remaining tick events.
ticks_buffer_.FlushResidualRecords();
// Keep processing tick events while there are any, skipping over the
// remaining code events.
while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { }
ticks_buffer_.TearDownConsumer();
}
} } // namespace v8::internal
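
The synchronization between the two buffers is carried entirely by the order field: each tick sample records the enqueue order of the last code event before it, and Run() drains all ticks for the current order before applying the next code event, so samples are always resolved against a code map matching the state when they were taken. A standalone model of that handshake with plain std::queue, not the V8 types:

#include <cassert>
#include <queue>

int main() {
  std::queue<unsigned> code_events;  // Each element is its enqueue order.
  std::queue<unsigned> ticks;        // Each tick carries the current order.
  // Every tick's order also appears as a code event, as in the real queue.
  code_events.push(1); ticks.push(1); ticks.push(1);
  code_events.push(2); ticks.push(2);

  unsigned dequeue_order = 0;
  while (!code_events.empty() || !ticks.empty()) {
    // ProcessTicks: consume only ticks matching the current order.
    while (!ticks.empty() && ticks.front() == dequeue_order) ticks.pop();
    // ProcessCodeEvent: apply the next code event, advancing the order.
    if (!code_events.empty()) {
      dequeue_order = code_events.front();
      code_events.pop();
    }
  }
  assert(ticks.empty());
  return 0;
}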

deps/v8/src/cpu-profiler.h (188 changes)

@@ -0,0 +1,188 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_
#include "circular-queue.h"
#include "profile-generator.h"
namespace v8 {
namespace internal {
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
V(CODE_MOVE, CodeMoveEventRecord) \
V(CODE_DELETE, CodeDeleteEventRecord) \
V(CODE_ALIAS, CodeAliasEventRecord)
class CodeEventRecord {
public:
#define DECLARE_TYPE(type, ignore) type,
enum Type {
NONE = 0,
CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
NUMBER_OF_TYPES
};
#undef DECLARE_TYPE
Type type;
unsigned order;
};
class CodeCreateEventRecord : public CodeEventRecord {
public:
Address start;
CodeEntry* entry;
unsigned size;
INLINE(void UpdateCodeMap(CodeMap* code_map)) {
code_map->AddCode(start, entry, size);
}
};
class CodeMoveEventRecord : public CodeEventRecord {
public:
Address from;
Address to;
INLINE(void UpdateCodeMap(CodeMap* code_map)) {
code_map->MoveCode(from, to);
}
};
class CodeDeleteEventRecord : public CodeEventRecord {
public:
Address start;
INLINE(void UpdateCodeMap(CodeMap* code_map)) {
code_map->DeleteCode(start);
}
};
class CodeAliasEventRecord : public CodeEventRecord {
public:
Address alias;
Address start;
INLINE(void UpdateCodeMap(CodeMap* code_map)) {
code_map->AddAlias(alias, start);
}
};
class TickSampleEventRecord {
public:
// In memory, the first machine word of a TickSampleEventRecord will be the
// first entry of TickSample, that is -- a program counter field.
// TickSample is put first, because 'order' can become equal to
// SamplingCircularQueue::kClear, while program counter can't.
TickSample sample;
unsigned order;
#if defined(__GNUC__) && (__GNUC__ < 4)
// Added to avoid 'all member functions in class are private' warning.
INLINE(unsigned get_order() const) { return order; }
// Added to avoid 'class only defines private constructors and
// has no friends' warning.
friend class TickSampleEventRecordFriend;
#endif
private:
// Disable instantiation.
TickSampleEventRecord();
DISALLOW_COPY_AND_ASSIGN(TickSampleEventRecord);
};
// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
explicit ProfilerEventsProcessor(ProfileGenerator* generator);
virtual ~ProfilerEventsProcessor() { }
// Thread control.
virtual void Run();
inline void Stop() { running_ = false; }
INLINE(bool running()) { return running_; }
// Events adding methods. Called by VM threads.
void CodeCreateEvent(Logger::LogEventsAndTags tag,
String* name,
String* resource_name, int line_number,
Address start, unsigned size);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
const char* name,
Address start, unsigned size);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
int args_count,
Address start, unsigned size);
void CodeMoveEvent(Address from, Address to);
void CodeDeleteEvent(Address from);
void FunctionCreateEvent(Address alias, Address start);
void FunctionMoveEvent(Address from, Address to);
void FunctionDeleteEvent(Address from);
// Tick sampler registration. Called by sampler thread or signal handler.
inline void SetUpSamplesProducer() { ticks_buffer_.SetUpProducer(); }
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
// stack frame entries are filled.) This method returns a pointer to the
// next record of the buffer.
INLINE(TickSample* TickSampleEvent());
inline void TearDownSamplesProducer() { ticks_buffer_.TearDownProducer(); }
private:
union CodeEventsContainer {
CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
};
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
ProfileGenerator* generator_;
bool running_;
CircularQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
unsigned enqueue_order_;
};
} } // namespace v8::internal
#endif // V8_CPU_PROFILER_H_

deps/v8/src/data-flow.cc (287 changes)

@@ -770,293 +770,6 @@ void AstLabeler::VisitDeclaration(Declaration* decl) {
}
ZoneList<Expression*>* VarUseMap::Lookup(Variable* var) {
HashMap::Entry* entry = HashMap::Lookup(var, var->name()->Hash(), true);
if (entry->value == NULL) {
entry->value = new ZoneList<Expression*>(1);
}
return reinterpret_cast<ZoneList<Expression*>*>(entry->value);
}
void LivenessAnalyzer::Analyze(FunctionLiteral* fun) {
// Process the function body.
VisitStatements(fun->body());
// All variables are implicitly defined at the function start.
// Record a definition of all variables live at function entry.
for (HashMap::Entry* p = live_vars_.Start();
p != NULL;
p = live_vars_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->key);
RecordDef(var, fun);
}
}
void LivenessAnalyzer::VisitStatements(ZoneList<Statement*>* stmts) {
// Visit statements right-to-left.
for (int i = stmts->length() - 1; i >= 0; i--) {
Visit(stmts->at(i));
}
}
void LivenessAnalyzer::RecordUse(Variable* var, Expression* expr) {
ASSERT(var->is_global() || var->is_this());
ZoneList<Expression*>* uses = live_vars_.Lookup(var);
uses->Add(expr);
}
void LivenessAnalyzer::RecordDef(Variable* var, Expression* expr) {
ASSERT(var->is_global() || var->is_this());
// We do not support other expressions that can define variables.
ASSERT(expr->AsFunctionLiteral() != NULL);
// Add the variable to the list of defined variables.
if (expr->defined_vars() == NULL) {
expr->set_defined_vars(new ZoneList<DefinitionInfo*>(1));
}
DefinitionInfo* def = new DefinitionInfo();
expr->AsFunctionLiteral()->defined_vars()->Add(def);
// Compute the last use of the definition. The variable uses are
// inserted in reversed evaluation order. The first element
// in the list of live uses is the last use.
ZoneList<Expression*>* uses = live_vars_.Lookup(var);
while (uses->length() > 0) {
Expression* use_site = uses->RemoveLast();
use_site->set_var_def(def);
if (uses->length() == 0) {
def->set_last_use(use_site);
}
}
}
// Visitor functions for live variable analysis.
void LivenessAnalyzer::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
void LivenessAnalyzer::VisitExpressionStatement(
ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void LivenessAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
// Do nothing.
}
void LivenessAnalyzer::VisitIfStatement(IfStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitContinueStatement(ContinueStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitWithEnterStatement(
WithEnterStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitWithExitStatement(WithExitStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitForStatement(ForStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitForInStatement(ForInStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitTryCatchStatement(TryCatchStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitTryFinallyStatement(
TryFinallyStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitDebuggerStatement(
DebuggerStatement* stmt) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitConditional(Conditional* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitVariableProxy(VariableProxy* expr) {
Variable* var = expr->var();
ASSERT(var->is_global());
ASSERT(!var->is_this());
RecordUse(var, expr);
}
void LivenessAnalyzer::VisitLiteral(Literal* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitCatchExtensionObject(
CatchExtensionObject* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
ASSERT(prop->key()->IsPropertyName());
VariableProxy* proxy = prop->obj()->AsVariableProxy();
ASSERT(proxy != NULL && proxy->var()->is_this());
// Record use of this at the assignment node. Assignments to
// this-properties are treated like unary operations.
RecordUse(proxy->var(), expr);
// Visit right-hand side.
Visit(expr->value());
}
void LivenessAnalyzer::VisitThrow(Throw* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitProperty(Property* expr) {
ASSERT(expr->key()->IsPropertyName());
VariableProxy* proxy = expr->obj()->AsVariableProxy();
ASSERT(proxy != NULL && proxy->var()->is_this());
RecordUse(proxy->var(), expr);
}
void LivenessAnalyzer::VisitCall(Call* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitCallNew(CallNew* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitCallRuntime(CallRuntime* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitCountOperation(CountOperation* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
// Visit child nodes in reverse evaluation order.
Visit(expr->right());
Visit(expr->left());
}
void LivenessAnalyzer::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
void LivenessAnalyzer::VisitThisFunction(ThisFunction* expr) {
UNREACHABLE();
}
AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(FunctionLiteral* fun)
: fun_(fun),
av_(fun->scope()->num_parameters() + fun->scope()->num_stack_slots()) {}

deps/v8/src/data-flow.h (49 changes)

@@ -550,55 +550,6 @@ class AstLabeler: public AstVisitor {
};
class VarUseMap : public HashMap {
public:
VarUseMap() : HashMap(VarMatch) {}
ZoneList<Expression*>* Lookup(Variable* var);
private:
static bool VarMatch(void* key1, void* key2) { return key1 == key2; }
};
class DefinitionInfo : public ZoneObject {
public:
explicit DefinitionInfo() : last_use_(NULL) {}
Expression* last_use() { return last_use_; }
void set_last_use(Expression* expr) { last_use_ = expr; }
private:
Expression* last_use_;
Register location_;
};
class LivenessAnalyzer : public AstVisitor {
public:
LivenessAnalyzer() {}
void Analyze(FunctionLiteral* fun);
private:
void VisitStatements(ZoneList<Statement*>* stmts);
void RecordUse(Variable* var, Expression* expr);
void RecordDef(Variable* var, Expression* expr);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
// Map for tracking the live variables.
VarUseMap live_vars_;
DISALLOW_COPY_AND_ASSIGN(LivenessAnalyzer);
};
// Computes the set of assigned variables and annotates variable proxies
// that are trivial sub-expressions and for-loops where the loop variable
// is guaranteed to be a smi.

58
deps/v8/src/diy-fp.cc

@@ -0,0 +1,58 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "diy-fp.h"
namespace v8 {
namespace internal {
void DiyFp::Multiply(const DiyFp& other) {
// Simply "emulates" a 128 bit multiplication.
// However: the resulting number only contains 64 bits. The least
// significant 64 bits are only used for rounding the most significant 64
// bits.
const uint64_t kM32 = 0xFFFFFFFFu;
uint64_t a = f_ >> 32;
uint64_t b = f_ & kM32;
uint64_t c = other.f_ >> 32;
uint64_t d = other.f_ & kM32;
uint64_t ac = a * c;
uint64_t bc = b * c;
uint64_t ad = a * d;
uint64_t bd = b * d;
uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
// By adding 1U << 31 to tmp we round the final result.
// Halfway cases will be rounded up.
tmp += 1U << 31;
uint64_t result_f = ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
e_ += other.e_ + 64;
f_ = result_f;
}
} } // namespace v8::internal
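// A standalone sketch (not part of this diff) restating the limb
// multiplication above and checking it against a native 128-bit product.
// The main() harness and the GCC/Clang `unsigned __int128` extension are
// illustrative assumptions.
#include <cassert>
#include <cstdint>

static uint64_t MultiplyHigh64Rounded(uint64_t x, uint64_t y) {
  // Same decomposition as DiyFp::Multiply: four 32x32 partial products;
  // keep the high 64 bits, use the low half only to round them.
  const uint64_t kM32 = 0xFFFFFFFFu;
  uint64_t a = x >> 32, b = x & kM32;
  uint64_t c = y >> 32, d = y & kM32;
  uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d;
  uint64_t tmp = (bd >> 32) + (ad & kM32) + (bc & kM32);
  tmp += 1U << 31;  // Round to nearest; halfway cases round up.
  return ac + (ad >> 32) + (bc >> 32) + (tmp >> 32);
}

int main() {
  uint64_t f1 = 0x8000000000000000ULL;  // 2^63
  uint64_t f2 = 0xC000000000000000ULL;  // 3 * 2^62
  unsigned __int128 full = static_cast<unsigned __int128>(f1) * f2;
  // The low 64 bits of this product are zero, so the rounded high half
  // must match the exact one.
  assert(MultiplyHigh64Rounded(f1, f2) == static_cast<uint64_t>(full >> 64));
  return 0;
}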

117
deps/v8/src/diy-fp.h

@@ -0,0 +1,117 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DIY_FP_H_
#define V8_DIY_FP_H_
namespace v8 {
namespace internal {
// This "Do It Yourself Floating Point" class implements a floating-point number
// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
// have the most significant bit of the significand set.
// Multiplication and Subtraction do not normalize their results.
// DiyFp are not designed to contain special doubles (NaN and Infinity).
class DiyFp {
public:
static const int kSignificandSize = 64;
DiyFp() : f_(0), e_(0) {}
DiyFp(uint64_t f, int e) : f_(f), e_(e) {}
// this = this - other.
// The exponents of both numbers must be the same and the significand of this
// must be bigger than the significand of other.
// The result will not be normalized.
void Subtract(const DiyFp& other) {
ASSERT(e_ == other.e_);
ASSERT(f_ >= other.f_);
f_ -= other.f_;
}
// Returns a - b.
// The exponents of both numbers must be the same and this must be bigger
// than other. The result will not be normalized.
static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
DiyFp result = a;
result.Subtract(b);
return result;
}
// this = this * other.
void Multiply(const DiyFp& other);
// Returns a * b.
static DiyFp Times(const DiyFp& a, const DiyFp& b) {
DiyFp result = a;
result.Multiply(b);
return result;
}
void Normalize() {
ASSERT(f_ != 0);
uint64_t f = f_;
int e = e_;
// This method is mainly called for normalizing boundaries. In general
// boundaries need to be shifted by 10 bits. We thus optimize for this case.
const uint64_t k10MSBits = V8_2PART_UINT64_C(0xFFC00000, 00000000);
while ((f & k10MSBits) == 0) {
f <<= 10;
e -= 10;
}
while ((f & kUint64MSB) == 0) {
f <<= 1;
e--;
}
f_ = f;
e_ = e;
}
static DiyFp Normalize(const DiyFp& a) {
DiyFp result = a;
result.Normalize();
return result;
}
uint64_t f() const { return f_; }
int e() const { return e_; }
void set_f(uint64_t new_value) { f_ = new_value; }
void set_e(int new_value) { e_ = new_value; }
private:
static const uint64_t kUint64MSB = V8_2PART_UINT64_C(0x80000000, 00000000);
uint64_t f_;
int e_;
};
} } // namespace v8::internal
#endif // V8_DIY_FP_H_
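// A minimal sketch (not V8 code) of the Normalize() invariant: shift the
// significand left until bit 63 is set, decrementing the exponent so that
// f * 2^e never changes. The 10-bit fast path above is omitted here.
#include <cassert>
#include <cstdint>

static void NormalizeSketch(uint64_t* f, int* e) {
  assert(*f != 0);
  while ((*f & (1ULL << 63)) == 0) {
    *f <<= 1;
    (*e)--;
  }
}

int main() {
  uint64_t f = 10;  // 0b1010: most significant bit at position 3.
  int e = 0;
  NormalizeSketch(&f, &e);
  assert(f == (10ULL << 60) && e == -60);  // Still 10 * 2^0 in value.
  return 0;
}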

169
deps/v8/src/double.h

@@ -0,0 +1,169 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DOUBLE_H_
#define V8_DOUBLE_H_
#include "diy-fp.h"
namespace v8 {
namespace internal {
// We assume that doubles and uint64_t have the same endianness.
static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
// Helper functions for doubles.
class Double {
public:
static const uint64_t kSignMask = V8_2PART_UINT64_C(0x80000000, 00000000);
static const uint64_t kExponentMask = V8_2PART_UINT64_C(0x7FF00000, 00000000);
static const uint64_t kSignificandMask =
V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
static const uint64_t kHiddenBit = V8_2PART_UINT64_C(0x00100000, 00000000);
Double() : d64_(0) {}
explicit Double(double d) : d64_(double_to_uint64(d)) {}
explicit Double(uint64_t d64) : d64_(d64) {}
DiyFp AsDiyFp() const {
ASSERT(!IsSpecial());
return DiyFp(Significand(), Exponent());
}
// this->Significand() must not be 0.
DiyFp AsNormalizedDiyFp() const {
uint64_t f = Significand();
int e = Exponent();
ASSERT(f != 0);
// The current double could be a denormal.
while ((f & kHiddenBit) == 0) {
f <<= 1;
e--;
}
// Do the final shifts in one go. Don't forget the hidden bit (the '-1').
f <<= DiyFp::kSignificandSize - kSignificandSize - 1;
e -= DiyFp::kSignificandSize - kSignificandSize - 1;
return DiyFp(f, e);
}
// Returns the double's bits as a uint64.
uint64_t AsUint64() const {
return d64_;
}
int Exponent() const {
if (IsDenormal()) return kDenormalExponent;
uint64_t d64 = AsUint64();
int biased_e = static_cast<int>((d64 & kExponentMask) >> kSignificandSize);
return biased_e - kExponentBias;
}
uint64_t Significand() const {
uint64_t d64 = AsUint64();
uint64_t significand = d64 & kSignificandMask;
if (!IsDenormal()) {
return significand + kHiddenBit;
} else {
return significand;
}
}
// Returns true if the double is a denormal.
bool IsDenormal() const {
uint64_t d64 = AsUint64();
return (d64 & kExponentMask) == 0;
}
// We consider denormals not to be special.
// Hence only Infinity and NaN are special.
bool IsSpecial() const {
uint64_t d64 = AsUint64();
return (d64 & kExponentMask) == kExponentMask;
}
bool IsNan() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
((d64 & kSignificandMask) != 0);
}
bool IsInfinite() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
((d64 & kSignificandMask) == 0);
}
int Sign() const {
uint64_t d64 = AsUint64();
return (d64 & kSignMask) == 0 ? 1 : -1;
}
// Returns the two boundaries of this.
// The bigger boundary (m_plus) is normalized. The lower boundary has the same
// exponent as m_plus.
void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
DiyFp v = this->AsDiyFp();
bool significand_is_zero = (v.f() == kHiddenBit);
DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
DiyFp m_minus;
if (significand_is_zero && v.e() != kDenormalExponent) {
// The boundary is closer. Think of v = 1000e10 and v- = 9999e9.
// Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
// at a distance of 1e8.
// The only exception is for the smallest normal: the largest denormal is
// at the same distance as its successor.
// Note: denormals have the same exponent as the smallest normals.
m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
} else {
m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
}
m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
m_minus.set_e(m_plus.e());
*out_m_plus = m_plus;
*out_m_minus = m_minus;
}
double value() const { return uint64_to_double(d64_); }
private:
static const int kSignificandSize = 52; // Excludes the hidden bit.
static const int kExponentBias = 0x3FF + kSignificandSize;
static const int kDenormalExponent = -kExponentBias + 1;
uint64_t d64_;
};
} } // namespace v8::internal
#endif // V8_DOUBLE_H_
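// A worked decomposition (not part of this diff) using the same masks as
// the Double class; memcpy stands in for BitCast and assumes, as the header
// does, that doubles and uint64_t share endianness.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const uint64_t kExponentMask    = 0x7FF0000000000000ULL;
  const uint64_t kSignificandMask = 0x000FFFFFFFFFFFFFULL;
  const uint64_t kHiddenBit       = 0x0010000000000000ULL;
  const int kSignificandSize = 52;                     // Excludes hidden bit.
  const int kExponentBias = 0x3FF + kSignificandSize;  // 1075

  double d = 1.5;
  uint64_t d64;
  memcpy(&d64, &d, sizeof(d64));
  int biased_e = static_cast<int>((d64 & kExponentMask) >> kSignificandSize);
  int e = biased_e - kExponentBias;                    // 1023 - 1075 == -52
  uint64_t f = (d64 & kSignificandMask) + kHiddenBit;  // 3 << 51
  assert(e == -52 && f == (3ULL << 51));  // 1.5 == (3 * 2^51) * 2^-52
  return 0;
}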

17
deps/v8/src/fast-codegen.cc

@@ -436,9 +436,6 @@ Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
AstLabeler labeler;
labeler.Label(info);
LivenessAnalyzer analyzer;
analyzer.Analyze(info->function());
CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
@@ -598,8 +595,8 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm(), ";; Global");
if (FLAG_print_ir) {
SmartPointer<char> name = expr->name()->ToCString();
PrintF("%d: t%d = Global(%s) // last_use = %d\n", expr->num(),
expr->num(), *name, expr->var_def()->last_use()->num());
PrintF("%d: t%d = Global(%s)\n", expr->num(),
expr->num(), *name);
}
EmitGlobalVariableLoad(cell);
}
@@ -653,9 +650,8 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
SmartPointer<char> name_string = name->ToCString();
PrintF("%d: ", expr->num());
if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
PrintF("Store(this, \"%s\", t%d) // last_use(this) = %d\n", *name_string,
expr->value()->num(),
expr->var_def()->last_use()->num());
PrintF("Store(this, \"%s\", t%d)\n", *name_string,
expr->value()->num());
}
EmitThisPropertyStore(name);
@@ -678,9 +674,8 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm(), ";; Load from this");
if (FLAG_print_ir) {
SmartPointer<char> name_string = name->ToCString();
PrintF("%d: t%d = Load(this, \"%s\") // last_use(this) = %d\n",
expr->num(), expr->num(), *name_string,
expr->var_def()->last_use()->num());
PrintF("%d: t%d = Load(this, \"%s\")\n",
expr->num(), expr->num(), *name_string);
}
EmitThisPropertyLoad(name);
}

508
deps/v8/src/fast-dtoa.cc

@@ -0,0 +1,508 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "fast-dtoa.h"
#include "cached-powers.h"
#include "diy-fp.h"
#include "double.h"
namespace v8 {
namespace internal {
// The minimal and maximal target exponents define the range of w's binary
// exponent, where 'w' is the result of multiplying the input by a cached power
// of ten.
//
// A different range might be chosen on a different platform, to optimize digit
// generation, but a smaller range requires more powers of ten to be cached.
static const int minimal_target_exponent = -60;
static const int maximal_target_exponent = -32;
// Adjusts the last digit of the generated number, and screens out generated
// solutions that may be inaccurate. A solution may be inaccurate if it is
// outside the safe interval, or if we cannot prove that it is closer to the
// input than a neighboring representation of the same length.
//
// Input: * buffer containing the digits of too_high / 10^kappa
// * the buffer's length
// * distance_too_high_w == (too_high - w).f() * unit
// * unsafe_interval == (too_high - too_low).f() * unit
// * rest = (too_high - buffer * 10^kappa).f() * unit
// * ten_kappa = 10^kappa * unit
// * unit = the common multiplier
// Output: returns true if the buffer is guaranteed to contain the closest
// representable number to the input.
// Modifies the generated digits in the buffer to approach (round towards) w.
bool RoundWeed(char* buffer,
int length,
uint64_t distance_too_high_w,
uint64_t unsafe_interval,
uint64_t rest,
uint64_t ten_kappa,
uint64_t unit) {
uint64_t small_distance = distance_too_high_w - unit;
uint64_t big_distance = distance_too_high_w + unit;
// Let w_low = too_high - big_distance, and
// w_high = too_high - small_distance.
// Note: w_low < w < w_high
//
// The real w (* unit) must lie somewhere inside the interval
// ]w_low; w_high[ (often written as "(w_low; w_high)")
// Basically the buffer currently contains a number in the unsafe interval
// ]too_low; too_high[ with too_low < w < too_high
//
// too_high - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// ^v 1 unit ^ ^ ^ ^
// boundary_high --------------------- . . . .
// ^v 1 unit . . . .
// - - - - - - - - - - - - - - - - - - - + - - + - - - - - - . .
// . . ^ . .
// . big_distance . . .
// . . . . rest
// small_distance . . . .
// v . . . .
// w_high - - - - - - - - - - - - - - - - - - . . . .
// ^v 1 unit . . . .
// w ---------------------------------------- . . . .
// ^v 1 unit v . . .
// w_low - - - - - - - - - - - - - - - - - - - - - . . .
// . . v
// buffer --------------------------------------------------+-------+--------
// . .
// safe_interval .
// v .
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - .
// ^v 1 unit .
// boundary_low ------------------------- unsafe_interval
// ^v 1 unit v
// too_low - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
//
//
// Note that the value of buffer could lie anywhere inside the range too_low
// to too_high.
//
// boundary_low, boundary_high and w are approximations of the real boundaries
// and v (the input number). They are guaranteed to be precise up to one unit.
// In fact the error is guaranteed to be strictly less than one unit.
//
// Anything that lies outside the unsafe interval is guaranteed not to round
// to v when read again.
// Anything that lies inside the safe interval is guaranteed to round to v
// when read again.
// If the number inside the buffer lies inside the unsafe interval but not
// inside the safe interval then we simply do not know and bail out (returning
// false).
//
// Similarly we have to take into account the imprecision of 'w' when rounding
// the buffer. If we have two potential representations we need to make sure
// that the chosen one is closer to w_low and w_high since v can be anywhere
// between them.
//
// By generating the digits of too_high we got the largest (closest to
// too_high) buffer that is still in the unsafe interval. In the case where
// w_high < buffer < too_high we try to decrement the buffer.
// This way the buffer approaches (rounds towards) w.
// There are 3 conditions that stop the decrementation process:
// 1) the buffer is already below w_high
// 2) decrementing the buffer would make it leave the unsafe interval
// 3) decrementing the buffer would yield a number below w_high and farther
// away than the current number. In other words:
// (buffer{-1} < w_high) && w_high - buffer{-1} > buffer - w_high
// Instead of using the buffer directly we use its distance to too_high.
// Conceptually rest ~= too_high - buffer
while (rest < small_distance && // Negated condition 1
unsafe_interval - rest >= ten_kappa && // Negated condition 2
(rest + ten_kappa < small_distance || // buffer{-1} > w_high
small_distance - rest >= rest + ten_kappa - small_distance)) {
buffer[length - 1]--;
rest += ten_kappa;
}
// We have approached w+ as much as possible. We now test if approaching w-
// would require changing the buffer. If yes, then we have two possible
// representations close to w, but we cannot decide which one is closer.
if (rest < big_distance &&
unsafe_interval - rest >= ten_kappa &&
(rest + ten_kappa < big_distance ||
big_distance - rest > rest + ten_kappa - big_distance)) {
return false;
}
// Weeding test.
// The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
// Since too_low = too_high - unsafe_interval this is equivalent to
// [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
// Conceptually we have: rest ~= too_high - buffer
return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
}
static const uint32_t kTen4 = 10000;
static const uint32_t kTen5 = 100000;
static const uint32_t kTen6 = 1000000;
static const uint32_t kTen7 = 10000000;
static const uint32_t kTen8 = 100000000;
static const uint32_t kTen9 = 1000000000;
// Returns the biggest power of ten that is less than or equal to the given
// number. We furthermore receive the maximum number of bits 'number' has.
// If number_bits == 0 then 0^-1 (*power = 0, *exponent = -1) is returned.
// The number of bits must be <= 32.
// Precondition: (1 << number_bits) <= number < (1 << (number_bits + 1)).
static void BiggestPowerTen(uint32_t number,
int number_bits,
uint32_t* power,
int* exponent) {
switch (number_bits) {
case 32:
case 31:
case 30:
if (kTen9 <= number) {
*power = kTen9;
*exponent = 9;
break;
} // else fallthrough
case 29:
case 28:
case 27:
if (kTen8 <= number) {
*power = kTen8;
*exponent = 8;
break;
} // else fallthrough
case 26:
case 25:
case 24:
if (kTen7 <= number) {
*power = kTen7;
*exponent = 7;
break;
} // else fallthrough
case 23:
case 22:
case 21:
case 20:
if (kTen6 <= number) {
*power = kTen6;
*exponent = 6;
break;
} // else fallthrough
case 19:
case 18:
case 17:
if (kTen5 <= number) {
*power = kTen5;
*exponent = 5;
break;
} // else fallthrough
case 16:
case 15:
case 14:
if (kTen4 <= number) {
*power = kTen4;
*exponent = 4;
break;
} // else fallthrough
case 13:
case 12:
case 11:
case 10:
if (1000 <= number) {
*power = 1000;
*exponent = 3;
break;
} // else fallthrough
case 9:
case 8:
case 7:
if (100 <= number) {
*power = 100;
*exponent = 2;
break;
} // else fallthrough
case 6:
case 5:
case 4:
if (10 <= number) {
*power = 10;
*exponent = 1;
break;
} // else fallthrough
case 3:
case 2:
case 1:
if (1 <= number) {
*power = 1;
*exponent = 0;
break;
} // else fallthrough
case 0:
*power = 0;
*exponent = -1;
break;
default:
// Following assignments are here to silence compiler warnings.
*power = 0;
*exponent = 0;
UNREACHABLE();
}
}
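// A loop-based equivalent (not in the diff) of the switch above; the switch
// merely uses the known bit length to skip comparisons. Worked case:
// 1234567 has bit length 21 (number_bits == 20), and the case-20 arm
// resolves it to power == 10^6, exponent == 6.
#include <cassert>
#include <cstdint>

static void BiggestPowerTenSlow(uint32_t number,
                                uint32_t* power,
                                int* exponent) {
  if (number == 0) { *power = 0; *exponent = -1; return; }
  uint32_t p = 1;
  int e = 0;
  while (p <= number / 10) {  // Comparing against number / 10 avoids overflow.
    p *= 10;
    e++;
  }
  *power = p;
  *exponent = e;
}

int main() {
  uint32_t power;
  int exponent;
  BiggestPowerTenSlow(1234567, &power, &exponent);
  assert(power == 1000000 && exponent == 6);
  return 0;
}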
// Generates the digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by minimal_target_exponent and
// maximal_target_exponent.
// Hence -60 <= w.e() <= -32.
//
// Returns false if it fails, in which case the generated digits in the buffer
// should not be used.
// Preconditions:
// * low, w and high are correct up to 1 ulp (unit in the last place). That
// is, their error must be less than a unit of their last digits.
// * low.e() == w.e() == high.e()
// * low < w < high, and taking into account their error: low~ <= high~
// * minimal_target_exponent <= w.e() <= maximal_target_exponent
// Postconditions: returns false if the procedure fails.
// otherwise:
// * buffer is not null-terminated, but len contains the number of digits.
// * buffer contains the shortest possible decimal digit-sequence
// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
// correct values of low and high (without their error).
// * if more than one decimal representation gives the minimal number of
// decimal digits then the one closest to W (where W is the correct value
// of w) is chosen.
// Remark: this procedure takes into account the imprecision of its input
// numbers. If the precision is not enough to guarantee all the postconditions
// then false is returned. This happens rarely (~0.5% of cases).
//
// Say, for the sake of example, that
// w.e() == -48, and w.f() == 0x1234567890abcdef
// w's value can be computed by w.f() * 2^w.e()
// We can obtain w's integral digits by simply shifting w.f() by -w.e().
// -> w's integral part is 0x1234
// w's fractional part is therefore 0x567890abcdef.
// Printing w's integral part is easy (simply print 0x1234 in decimal).
// In order to print its fraction we repeatedly multiply the fraction by 10 and
// get each digit. For example, the first digit after the decimal point would
// be computed by (0x567890abcdef * 10) >> 48. -> 3
// The whole thing becomes slightly more complicated because we want to stop
// once we have enough digits. That is, once the digits inside the buffer
// represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's
// imprecision.
bool DigitGen(DiyFp low,
DiyFp w,
DiyFp high,
char* buffer,
int* length,
int* kappa) {
ASSERT(low.e() == w.e() && w.e() == high.e());
ASSERT(low.f() + 1 <= high.f() - 1);
ASSERT(minimal_target_exponent <= w.e() && w.e() <= maximal_target_exponent);
// low, w and high are imprecise, but by less than one ulp (unit in the last
// place).
// If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
// the new numbers are outside of the interval we want the final
// representation to lie in.
// Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
// numbers that are certain to lie in the interval. We will use this fact
// later on.
// We will now start by generating the digits within the uncertain
// interval. Later we will weed out representations that lie outside the safe
// interval and thus _might_ lie outside the correct interval.
uint64_t unit = 1;
DiyFp too_low = DiyFp(low.f() - unit, low.e());
DiyFp too_high = DiyFp(high.f() + unit, high.e());
// too_low and too_high are guaranteed to lie outside the interval we want the
// generated number in.
DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
// We now cut the input number into two parts: the integral digits and the
// fractionals. We will not write any decimal separator though, but adapt
// kappa instead.
// Reminder: we are currently computing the digits (stored inside the buffer)
// such that: too_low < buffer * 10^kappa < too_high
// We use too_high for the digit_generation and stop as soon as possible.
// If we stop early we effectively round down.
DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
// Division by one is a shift.
uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
// Modulo by one is an and.
uint64_t fractionals = too_high.f() & (one.f() - 1);
uint32_t divider;
int divider_exponent;
BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
&divider, &divider_exponent);
*kappa = divider_exponent + 1;
*length = 0;
// Loop invariant: buffer = too_high / 10^kappa (integer division)
// The invariant holds for the first iteration: kappa has been initialized
// with the divider exponent + 1. And the divider is the biggest power of ten
// that is smaller than integrals.
while (*kappa > 0) {
int digit = integrals / divider;
buffer[*length] = '0' + digit;
(*length)++;
integrals %= divider;
(*kappa)--;
// Note that kappa now equals the exponent of the divider and that the
// invariant thus holds again.
uint64_t rest =
(static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
// Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
// Reminder: unsafe_interval.e() == one.e()
if (rest < unsafe_interval.f()) {
// Rounding down (by not emitting the remaining digits) yields a number
// that lies within the unsafe interval.
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
unsafe_interval.f(), rest,
static_cast<uint64_t>(divider) << -one.e(), unit);
}
divider /= 10;
}
// The integrals have been generated. We are at the point of the decimal
// separator. In the following loop we simply multiply the remaining digits by
// 10 and divide by one. We just need to pay attention to multiply associated
// data (like the interval or 'unit'), too.
// Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
// increase its (imaginary) exponent. At the same time we decrease the
// divider's (one's) exponent and shift its significand.
// Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
// fractionals.f *= 10;
// fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
// one.f >>= 1; one.e++; // value remains unchanged.
// and we have again fractionals.e == one.e which allows us to divide
// fractionals.f() by one.f()
// We simply combine the *= 10 and the >>= 1.
while (true) {
fractionals *= 5;
unit *= 5;
unsafe_interval.set_f(unsafe_interval.f() * 5);
unsafe_interval.set_e(unsafe_interval.e() + 1); // Will be optimized out.
one.set_f(one.f() >> 1);
one.set_e(one.e() + 1);
// Integer division by one.
int digit = static_cast<int>(fractionals >> -one.e());
buffer[*length] = '0' + digit;
(*length)++;
fractionals &= one.f() - 1; // Modulo by one.
(*kappa)--;
if (fractionals < unsafe_interval.f()) {
return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
unsafe_interval.f(), fractionals, one.f(), unit);
}
}
}
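// The worked example from the comment before DigitGen (w.e() == -48,
// w.f() == 0x1234567890abcdef), codified as a standalone check; not part of
// the diff. With 'one' == 2^48 stored at the same exponent, division by one
// is a shift and modulo by one is a mask, exactly as the loops above exploit.
#include <cassert>
#include <cstdint>

int main() {
  const int e = -48;
  const uint64_t f = 0x1234567890abcdefULL;
  uint64_t one_f = static_cast<uint64_t>(1) << -e;      // 2^48
  uint32_t integrals = static_cast<uint32_t>(f >> -e);  // 0x1234
  uint64_t fractionals = f & (one_f - 1);               // 0x567890abcdef
  assert(integrals == 0x1234 && fractionals == 0x567890abcdefULL);
  // First fractional digit, as in the comment: multiply by 10, shift down.
  int digit = static_cast<int>((fractionals * 10) >> -e);
  assert(digit == 3);
  return 0;
}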
// Provides a decimal representation of v.
// Returns true if it succeeds, otherwise the result cannot be trusted.
// There will be *length digits inside the buffer (not null-terminated).
// If the function returns true then
// v == (double) (buffer * 10^decimal_exponent).
// The digits in the buffer are the shortest representation possible: no
// 0.09999999999999999 instead of 0.1. The shorter representation will be
// chosen even if the longer one would be closer to v.
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
bool grisu3(double v, char* buffer, int* length, int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
// boundary_minus and boundary_plus are the boundaries between v and its
// closest floating-point neighbors. Any number strictly between
// boundary_minus and boundary_plus will round to v when converted to a double.
// Grisu3 will never output representations that lie exactly on a boundary.
DiyFp boundary_minus, boundary_plus;
Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
ASSERT(boundary_plus.e() == w.e());
DiyFp ten_mk; // Cached power of ten: 10^-k
int mk; // -k
GetCachedPower(w.e() + DiyFp::kSignificandSize, minimal_target_exponent,
maximal_target_exponent, &mk, &ten_mk);
ASSERT(minimal_target_exponent <= w.e() + ten_mk.e() +
DiyFp::kSignificandSize &&
maximal_target_exponent >= w.e() + ten_mk.e() +
DiyFp::kSignificandSize);
// Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
// 64-bit significand, and ten_mk is thus only precise up to 64 bits.
// The DiyFp::Times procedure rounds its result, and ten_mk is approximated
// too. The variables scaled_w and scaled_boundary_minus/plus are now
// off by a small amount.
// In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
// In other words: let f = scaled_w.f() and e = scaled_w.e(), then
// (f-1) * 2^e < w*10^k < (f+1) * 2^e
DiyFp scaled_w = DiyFp::Times(w, ten_mk);
ASSERT(scaled_w.e() ==
boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
// In theory it would be possible to avoid some recomputations by computing
// the difference between w and boundary_minus/plus (a power of 2) and to
// compute scaled_boundary_minus/plus by subtracting/adding from
// scaled_w. However the code becomes much less readable and the speed
// enhancements are not terrific.
DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
// DigitGen will generate the digits of scaled_w. Therefore we have
// v == (double) (scaled_w * 10^-mk).
// Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
// integer then it will be updated. For instance if scaled_w == 1.23 then
// the buffer will be filled with "123" and the decimal_exponent will be
// decreased by 2.
int kappa;
bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
buffer, length, &kappa);
*decimal_exponent = -mk + kappa;
return result;
}
bool FastDtoa(double v, char* buffer, int* sign, int* length, int* point) {
ASSERT(v != 0);
ASSERT(!Double(v).IsSpecial());
if (v < 0) {
v = -v;
*sign = 1;
} else {
*sign = 0;
}
int decimal_exponent;
bool result = grisu3(v, buffer, length, &decimal_exponent);
*point = *length + decimal_exponent;
buffer[*length] = '\0';
return result;
}
} } // namespace v8::internal

55
deps/v8/src/fast-dtoa.h

@@ -0,0 +1,55 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FAST_DTOA_H_
#define V8_FAST_DTOA_H_
namespace v8 {
namespace internal {
// FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
// include the terminating '\0' character.
static const int kFastDtoaMaximalLength = 17;
// Provides a decimal representation of v.
// v must not be (positive or negative) zero and it must not be Infinity or NaN.
// Returns true if it succeeds, otherwise the result cannot be trusted.
// There will be *length digits inside the buffer followed by a null terminator.
// If the function returns true then
// v == (double) (buffer * 10^(point - length)).
// The digits in the buffer are the shortest representation possible: no
// 0.099999999999 instead of 0.1.
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the buffer will contain the
// one closest to v.
// The variable 'sign' will be '0' if the given number is positive, and '1'
// otherwise.
bool FastDtoa(double d, char* buffer, int* sign, int* length, int* point);
} } // namespace v8::internal
#endif // V8_FAST_DTOA_H_
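// A hedged usage sketch for the declaration above. The include path and
// namespace follow this header; the expected values are derived from the
// documented contract, not from running the code. Since grisu3 bails out on
// roughly 0.5% of inputs, callers keep a slower, always-correct fallback.
#include <cstdio>
#include "fast-dtoa.h"

int main() {
  char buffer[v8::internal::kFastDtoaMaximalLength + 1];
  int sign, length, point;
  if (v8::internal::FastDtoa(0.1, buffer, &sign, &length, &point)) {
    // For 0.1 the contract gives sign == 0, buffer == "1", length == 1,
    // point == 0: 0.1 == 1 * 10^(point - length) == 10^-1.
    printf("%s%s * 10^%d\n", sign ? "-" : "", buffer, point - length);
  } else {
    printf("grisu3 bailed out; fall back to a slower conversion\n");
  }
  return 0;
}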

33
deps/v8/src/frames.cc

@@ -306,14 +306,12 @@ void SafeStackTraceFrameIterator::Advance() {
void StackHandler::Cook(Code* code) {
ASSERT(MarkCompactCollector::IsCompacting());
ASSERT(code->contains(pc()));
set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
}
void StackHandler::Uncook(Code* code) {
ASSERT(MarkCompactCollector::HasCompacted());
set_pc(code->instruction_start() + OffsetFrom(pc()));
ASSERT(code->contains(pc()));
}
@@ -329,9 +327,6 @@ bool StackFrame::HasHandler() const {
void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
// Only cooking frames when the collector is compacting and thus moving code
// around.
ASSERT(MarkCompactCollector::IsCompacting());
ASSERT(!thread->stack_is_cooked());
for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
it.frame()->Cook();
@@ -341,9 +336,6 @@ void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
// Only uncooking frames when the collector is compacting and thus moving code
// around.
ASSERT(MarkCompactCollector::HasCompacted());
ASSERT(thread->stack_is_cooked());
for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
it.frame()->Uncook();
@@ -520,6 +512,31 @@ void JavaScriptFrame::Print(StringStream* accumulator,
Code* code = NULL;
if (IsConstructor()) accumulator->Add("new ");
accumulator->PrintFunction(function, receiver, &code);
if (function->IsJSFunction()) {
Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
Object* script_obj = shared->script();
if (script_obj->IsScript()) {
Handle<Script> script(Script::cast(script_obj));
accumulator->Add(" [");
accumulator->PrintName(script->name());
Address pc = this->pc();
if (code != NULL && code->kind() == Code::FUNCTION &&
pc >= code->instruction_start() && pc < code->relocation_start()) {
int source_pos = code->SourcePosition(pc);
int line = GetScriptLineNumberSafe(script, source_pos) + 1;
accumulator->Add(":%d", line);
} else {
int function_start_pos = shared->start_position();
int line = GetScriptLineNumberSafe(script, function_start_pos) + 1;
accumulator->Add(":~%d", line);
}
accumulator->Add("] ");
}
}
accumulator->Add("(this=%o", receiver);
// Get scope information for nicer output, if possible. If code is

5
deps/v8/src/globals.h

@@ -98,6 +98,11 @@ typedef byte* Address;
#define V8_PTR_PREFIX ""
#endif // V8_HOST_ARCH_64_BIT
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
// write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"
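// Presumably the macro exists so that 64-bit constants can be written
// without compiler-specific literal suffixes. A self-contained check (the
// macro is redefined here, and C++11 static_assert is used only for the
// illustration):
#include <cstdint>

#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))

static_assert(V8_2PART_UINT64_C(0x12345678, 90123456) == 0x1234567890123456ULL,
              "two-part literal equals the plain 64-bit literal");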

24
deps/v8/src/handles.cc

@@ -514,6 +514,30 @@ int GetScriptLineNumber(Handle<Script> script, int code_pos) {
}
int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
AssertNoAllocation no_allocation;
if (!script->line_ends()->IsUndefined()) {
return GetScriptLineNumber(script, code_pos);
}
// Slow mode: we do not have line_ends. We have to iterate through source.
if (!script->source()->IsString()) {
return -1;
}
String* source = String::cast(script->source());
int line = 0;
int len = source->length();
for (int pos = 0; pos < len; pos++) {
if (pos == code_pos) {
break;
}
if (source->Get(pos) == '\n') {
line++;
}
}
return line;
}
void CustomArguments::IterateInstance(ObjectVisitor* v) {
v->VisitPointers(values_, values_ + 4);
}

2
deps/v8/src/handles.h

@@ -267,6 +267,8 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script);
// Script line number computations.
void InitScriptLineEnds(Handle<Script> script);
int GetScriptLineNumber(Handle<Script> script, int code_position);
// The safe version does not make heap allocations but may be much slower.
int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
// Computes the enumerable keys from interceptors. Used for debug mirrors and
// by GetKeysInFixedArrayFor below.

40
deps/v8/src/heap.h

@@ -1165,26 +1165,26 @@ class Heap : public AllStatic {
class HeapStats {
public:
int *start_marker;
int *new_space_size;
int *new_space_capacity;
int *old_pointer_space_size;
int *old_pointer_space_capacity;
int *old_data_space_size;
int *old_data_space_capacity;
int *code_space_size;
int *code_space_capacity;
int *map_space_size;
int *map_space_capacity;
int *cell_space_size;
int *cell_space_capacity;
int *lo_space_size;
int *global_handle_count;
int *weak_global_handle_count;
int *pending_global_handle_count;
int *near_death_global_handle_count;
int *destroyed_global_handle_count;
int *end_marker;
int* start_marker;
int* new_space_size;
int* new_space_capacity;
int* old_pointer_space_size;
int* old_pointer_space_capacity;
int* old_data_space_size;
int* old_data_space_capacity;
int* code_space_size;
int* code_space_capacity;
int* map_space_size;
int* map_space_capacity;
int* cell_space_size;
int* cell_space_capacity;
int* lo_space_size;
int* global_handle_count;
int* weak_global_handle_count;
int* pending_global_handle_count;
int* near_death_global_handle_count;
int* destroyed_global_handle_count;
int* end_marker;
};

138
deps/v8/src/ia32/codegen-ia32.cc

@@ -832,7 +832,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
Comment cmnt(masm_, "ONLY_INTEGER_32");
if (FLAG_debug_code) {
Label ok;
__ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
__ AbortIfNotNumber(value.reg());
__ test(value.reg(), Immediate(kSmiTagMask));
__ j(zero, &ok);
__ fldz();
@@ -852,7 +852,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
Comment cmnt(masm_, "ONLY_NUMBER");
// Fast case if NumberInfo indicates only numbers.
if (FLAG_debug_code) {
__ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
__ AbortIfNotNumber(value.reg());
}
// Smi => false iff zero.
ASSERT(kSmiTag == 0);
@@ -1038,6 +1038,8 @@ void DeferredInlineBinaryOperation::Generate() {
__ jmp(&load_right);
__ bind(&left_smi);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left_);
}
__ SmiUntag(left_);
__ cvtsi2sd(xmm0, Operand(left_));
@@ -1070,6 +1072,8 @@ void DeferredInlineBinaryOperation::Generate() {
__ jmp(&do_op);
__ bind(&right_smi);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(right_);
}
__ SmiUntag(right_);
__ cvtsi2sd(xmm1, Operand(right_));
@@ -1575,7 +1579,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
overwrite_mode);
Label do_op, left_nonsmi;
// if right is a smi we make a fast case if left is either a smi
// If right is a smi we make a fast case if left is either a smi
// or a heapnumber.
if (CpuFeatures::IsSupported(SSE2) && right->number_info().IsSmi()) {
CpuFeatures::Scope use_sse2(SSE2);
@@ -1584,7 +1588,10 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
if (!left->number_info().IsSmi()) {
__ test(answer.reg(), Immediate(kSmiTagMask));
__ j(not_zero, &left_nonsmi);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
}
if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
__ SmiUntag(answer.reg());
__ jmp(&do_op);
@@ -2003,6 +2010,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (!operand->number_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
}
deferred->BindExit();
answer = *operand;
@@ -2040,6 +2049,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (!operand->number_info().IsSmi()) {
__ test(answer.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
}
deferred->BindExit();
operand->Unuse();
@@ -2073,6 +2084,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
deferred->BindExit();
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
if (shift_value > 0) {
__ sar(operand->reg(), shift_value);
__ and_(operand->reg(), ~kSmiTagMask);
@@ -2104,6 +2116,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (!operand->number_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
}
__ mov(answer.reg(), operand->reg());
__ SmiUntag(answer.reg());
@@ -2152,6 +2166,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ sar(ecx, kSmiTagSize);
if (!right.number_info().IsSmi()) {
deferred->Branch(carry);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(right.reg());
}
__ shl_cl(answer.reg());
__ cmp(answer.reg(), 0xc0000000);
@@ -2192,6 +2208,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (!operand->number_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
}
__ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case
@@ -2234,6 +2252,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (!operand->number_info().IsSmi()) {
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(operand->reg());
}
if (op == Token::BIT_AND) {
__ and_(Operand(operand->reg()), Immediate(value));
@@ -2427,9 +2447,7 @@ void CodeGenerator::Comparison(AstNode* node,
// by reconstituting them on the non-fall-through path.
if (left_side.is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(left_side.reg(), "Argument not a smi");
}
if (FLAG_debug_code) __ AbortIfNotSmi(left_side.reg());
} else {
JumpTarget is_smi;
__ test(left_side.reg(), Immediate(kSmiTagMask));
@@ -3634,6 +3652,26 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
}
void CodeGenerator::SetTypeForStackSlot(Slot* slot, NumberInfo info) {
ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
if (slot->type() == Slot::LOCAL) {
frame_->SetTypeForLocalAt(slot->index(), info);
} else {
frame_->SetTypeForParamAt(slot->index(), info);
}
if (FLAG_debug_code && info.IsSmi()) {
if (slot->type() == Slot::LOCAL) {
frame_->PushLocalAt(slot->index());
} else {
frame_->PushParameterAt(slot->index());
}
Result var = frame_->Pop();
var.ToRegister();
__ AbortIfNotSmi(var.reg());
}
}
void CodeGenerator::VisitForStatement(ForStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ ForStatement");
@@ -3727,21 +3765,14 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
CheckStack(); // TODO(1222600): ignore if body contains calls.
// If we have (a) a loop with a compile-time constant trip count
// and (b) the loop induction variable is not assigned inside the
// loop we update the number type of the induction variable to be smi.
// We know that the loop index is a smi if it is not modified in the
// loop body and it is checked against a constant limit in the loop
// condition. In this case, we reset the static type information of the
// loop index to smi before compiling the body, the update expression, and
// the bottom check of the loop condition.
if (node->is_fast_smi_loop()) {
// Set number type of the loop variable to smi.
Slot* slot = node->loop_variable()->slot();
ASSERT(slot->type() == Slot::LOCAL);
frame_->SetTypeForLocalAt(slot->index(), NumberInfo::Smi());
if (FLAG_debug_code) {
frame_->PushLocalAt(slot->index());
Result var = frame_->Pop();
var.ToRegister();
__ AbortIfNotSmi(var.reg(), "Loop variable not a smi.");
}
SetTypeForStackSlot(node->loop_variable()->slot(), NumberInfo::Smi());
}
Visit(node->body());
@@ -3763,6 +3794,13 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
}
}
// Set the type of the loop variable to smi before compiling the test
// expression if we are in a fast smi loop condition.
if (node->is_fast_smi_loop() && has_valid_frame()) {
// Set number type of the loop variable to smi.
SetTypeForStackSlot(node->loop_variable()->slot(), NumberInfo::Smi());
}
// Based on the condition analysis, compile the backward jump as
// necessary.
switch (info) {
@@ -6676,9 +6714,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
NumberInfo operand_info = operand.number_info();
operand.ToRegister();
if (operand_info.IsSmi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(operand.reg(), "Operand not a smi.");
}
if (FLAG_debug_code) __ AbortIfNotSmi(operand.reg());
frame_->Spill(operand.reg());
// Set smi tag bit. It will be reset by the not operation.
__ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask));
@@ -6867,15 +6903,18 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
old_value = allocator_->Allocate();
ASSERT(old_value.is_valid());
__ mov(old_value.reg(), new_value.reg());
// The return value for postfix operations is the
// same as the input, and has the same number info.
old_value.set_number_info(new_value.number_info());
}
// Ensure the new value is writable.
frame_->Spill(new_value.reg());
Result tmp;
if (new_value.is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(new_value.reg(), "Operand not a smi");
}
if (FLAG_debug_code) __ AbortIfNotSmi(new_value.reg());
} else {
// We don't know statically if the input is a smi.
// In order to combine the overflow and the smi tag check, we need
@@ -6931,6 +6970,13 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
}
deferred->BindExit();
// The result of ++ or -- is an Integer32 if the
// input is a smi. Otherwise it is a number.
if (new_value.is_smi()) {
new_value.set_number_info(NumberInfo::Integer32());
} else {
new_value.set_number_info(NumberInfo::Number());
}
// Postfix: store the old value in the allocated slot under the
// reference.
@@ -7832,6 +7878,8 @@ Result CodeGenerator::EmitKeyedLoad() {
if (!key.is_smi()) {
__ test(key.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
}
// Get the elements array from the receiver and check that it
@@ -7985,6 +8033,8 @@ static void CheckTwoForSminess(MacroAssembler* masm,
if (!left_info.IsSmi()) {
__ test(left, Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left);
}
} else if (!left_info.IsSmi()) {
if (!right_info.IsSmi()) {
@@ -7995,11 +8045,15 @@ static void CheckTwoForSminess(MacroAssembler* masm,
} else {
__ test(left, Immediate(kSmiTagMask));
deferred->Branch(not_zero);
if (FLAG_debug_code) __ AbortIfNotSmi(right);
}
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(left);
if (!right_info.IsSmi()) {
__ test(right, Immediate(kSmiTagMask));
deferred->Branch(not_zero);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(right);
}
}
}
@@ -8506,6 +8560,10 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
}
if (static_operands_type_.IsSmi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(left);
__ AbortIfNotSmi(right);
}
if (op_ == Token::BIT_OR) {
__ or_(right, Operand(left));
GenerateReturn(masm);
@@ -8860,12 +8918,14 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
__ AbortIfNotNumber(edx,
"GenericBinaryOpStub operand not a number.");
__ AbortIfNotNumber(eax,
"GenericBinaryOpStub operand not a number.");
__ AbortIfNotNumber(edx);
__ AbortIfNotNumber(eax);
}
if (static_operands_type_.IsSmi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(edx);
__ AbortIfNotSmi(eax);
}
FloatingPointHelper::LoadSSE2Smis(masm, ecx);
} else {
FloatingPointHelper::LoadSSE2Operands(masm);
@@ -8888,10 +8948,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
__ AbortIfNotNumber(edx,
"GenericBinaryOpStub operand not a number.");
__ AbortIfNotNumber(eax,
"GenericBinaryOpStub operand not a number.");
__ AbortIfNotNumber(edx);
__ AbortIfNotNumber(eax);
}
} else {
FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
@@ -9621,6 +9679,8 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
if (!number_info.IsSmi()) {
__ test(edx, Immediate(kSmiTagMask));
__ j(not_zero, &arg1_is_object);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(edx);
}
__ SmiUntag(edx);
__ jmp(&load_arg2);
@@ -9639,6 +9699,8 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
if (!number_info.IsSmi()) {
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &arg2_is_object);
} else {
if (FLAG_debug_code) __ AbortIfNotSmi(eax);
}
__ SmiUntag(eax);
__ mov(ecx, eax);
@@ -10526,15 +10588,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ j(negative, &done);
// Read the value from the static offsets vector buffer.
__ mov(edi, Operand(ecx, edx, times_int_size, 0));
// Perform explicit shift
ASSERT_EQ(0, kSmiTag);
__ shl(edi, kSmiTagSize);
// Add previous index (from its stack slot) if value is not negative.
Label capture_negative;
// Carry flag set by shift above.
__ j(negative, &capture_negative, not_taken);
__ add(edi, Operand(eax)); // Add previous index (adding smi to smi).
__ bind(&capture_negative);
__ SmiTag(edi);
// Store the smi value in the last match info.
__ mov(FieldOperand(ebx,
edx,

6
deps/v8/src/ia32/codegen-ia32.h

@@ -500,7 +500,7 @@ class CodeGenerator: public AstVisitor {
bool FoldConstantSmis(Token::Value op, int left, int right);
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
// smi and a likely smi. Consumes the Result operand.
Result ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
@@ -511,7 +511,7 @@ class CodeGenerator: public AstVisitor {
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
// Consumes the Results left and right.
Result LikelySmiBinaryOperation(Token::Value op,
Result* left,
Result* right,
@@ -652,6 +652,8 @@ class CodeGenerator: public AstVisitor {
void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
void SetTypeForStackSlot(Slot* slot, NumberInfo info);
#ifdef DEBUG
// True if the registers are valid for entry to a block. There should
// be no frame-external references to (non-reserved) registers.

17
deps/v8/src/ia32/fast-codegen-ia32.cc

@@ -436,9 +436,6 @@ Handle<Code> FastCodeGenerator::MakeCode(CompilationInfo* info) {
AstLabeler labeler;
labeler.Label(info);
LivenessAnalyzer analyzer;
analyzer.Analyze(info->function());
CodeGenerator::MakeCodePrologue(info);
const int kInitialBufferSize = 4 * KB;
@@ -802,8 +799,8 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm(), ";; Global");
if (FLAG_print_ir) {
SmartPointer<char> name = expr->name()->ToCString();
PrintF("%d: t%d = Global(%s) // last_use = %d\n", expr->num(),
expr->num(), *name, expr->var_def()->last_use()->num());
PrintF("%d: t%d = Global(%s)\n", expr->num(),
expr->num(), *name);
}
EmitGlobalVariableLoad(cell);
}
@@ -857,9 +854,8 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
SmartPointer<char> name_string = name->ToCString();
PrintF("%d: ", expr->num());
if (!destination().is(no_reg)) PrintF("t%d = ", expr->num());
PrintF("Store(this, \"%s\", t%d) // last_use(this) = %d\n", *name_string,
expr->value()->num(),
expr->var_def()->last_use()->num());
PrintF("Store(this, \"%s\", t%d)\n", *name_string,
expr->value()->num());
}
EmitThisPropertyStore(name);
@@ -882,9 +878,8 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm(), ";; Load from this");
if (FLAG_print_ir) {
SmartPointer<char> name_string = name->ToCString();
PrintF("%d: t%d = Load(this, \"%s\") // last_use(this) = %d\n",
expr->num(), expr->num(), *name_string,
expr->var_def()->last_use()->num());
PrintF("%d: t%d = Load(this, \"%s\")\n",
expr->num(), expr->num(), *name_string);
}
EmitThisPropertyLoad(name);
}

10
deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -351,20 +351,20 @@ void MacroAssembler::FCmp() {
}
void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
void MacroAssembler::AbortIfNotNumber(Register object) {
Label ok;
test(object, Immediate(kSmiTagMask));
j(zero, &ok);
cmp(FieldOperand(object, HeapObject::kMapOffset),
Factory::heap_number_map());
Assert(equal, msg);
Assert(equal, "Operand not a number");
bind(&ok);
}
void MacroAssembler::AbortIfNotSmi(Register object, const char* msg) {
void MacroAssembler::AbortIfNotSmi(Register object) {
test(object, Immediate(kSmiTagMask));
Assert(equal, msg);
Assert(equal, "Operand not a smi");
}
@@ -1553,7 +1553,7 @@ void MacroAssembler::Abort(const char* msg) {
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
Label *failure) {
Label* failure) {
if (!scratch.is(instance_type)) {
mov(scratch, instance_type);
}

11
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -182,17 +182,18 @@ class MacroAssembler: public Assembler {
// Smi tagging support.
void SmiTag(Register reg) {
ASSERT(kSmiTag == 0);
shl(reg, kSmiTagSize);
ASSERT(kSmiTagSize == 1);
add(reg, Operand(reg));
}
void SmiUntag(Register reg) {
sar(reg, kSmiTagSize);
}
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object, const char* msg);
void AbortIfNotNumber(Register object);
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object, const char* msg);
void AbortIfNotSmi(Register object);
// ---------------------------------------------------------------------------
// Exception handling
@ -476,7 +477,7 @@ class MacroAssembler: public Assembler {
// for both instance type and scratch.
void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
Register scratch,
Label *on_not_flat_ascii_string);
Label* on_not_flat_ascii_string);
// Checks if both objects are sequential ASCII strings, and jumps to label
// if either is not.
@ -484,7 +485,7 @@ class MacroAssembler: public Assembler {
Register object2,
Register scratch1,
Register scratch2,
Label *on_not_flat_ascii_strings);
Label* on_not_flat_ascii_strings);
private:
bool generating_stub_;

23
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -653,6 +653,8 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ j(not_zero, &exit_label_);
__ bind(&stack_ok);
// Load start index for later use.
__ mov(ebx, Operand(ebp, kStartIndex));
// Allocate space on stack for registers.
__ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
@ -662,17 +664,23 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(edi, Operand(ebp, kInputStart));
// Set up edi to be negative offset from string end.
__ sub(edi, Operand(esi));
// Set eax to address of char before start of input
// Set eax to address of char before start of the string.
// (effectively string position -1).
__ lea(eax, Operand(edi, -char_size()));
__ neg(ebx);
if (mode_ == UC16) {
__ lea(eax, Operand(edi, ebx, times_2, -char_size()));
} else {
__ lea(eax, Operand(edi, ebx, times_1, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
__ mov(Operand(ebp, kInputStartMinusOne), eax);
// Determine whether the start index is zero, that is, at the start of the
// string, and store that value in a local variable.
__ mov(ebx, Operand(ebp, kStartIndex));
__ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx).
// Register ebx still holds -stringIndex.
__ test(ebx, Operand(ebx));
__ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
__ mov(Operand(ebp, kAtStart), ecx);
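The xor/test/setcc sequence above computes the kAtStart flag without a branch: ecx is zeroed first because setcc writes only cl, ebx holds the negated start index, and setcc(zero) materializes the ZF result of the test as 0 or 1. A branch-free C sketch of the same idea, with a hypothetical name:

int ComputeAtStartFlag(int negated_start_index) {
  // 1 exactly when start_index == 0, i.e. matching begins at the
  // start of the string; 0 otherwise. Compilers typically lower this
  // ternary to the same test/setcc pattern.
  return negated_start_index == 0 ? 1 : 0;
}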
@ -721,10 +729,17 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// copy captures to output
__ mov(ebx, Operand(ebp, kRegisterOutput));
__ mov(ecx, Operand(ebp, kInputEnd));
__ mov(edx, Operand(ebp, kStartIndex));
__ sub(ecx, Operand(ebp, kInputStart));
if (mode_ == UC16) {
__ lea(ecx, Operand(ecx, edx, times_2, 0));
} else {
__ add(ecx, Operand(edx));
}
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(eax, register_location(i));
__ add(eax, Operand(ecx)); // Convert to index from start, not end.
// Convert to index from start of string, not end.
__ add(eax, Operand(ecx));
if (mode_ == UC16) {
__ sar(eax, 1); // Convert byte index to character index.
}

3
deps/v8/src/ia32/virtual-frame-ia32.h

@ -446,8 +446,9 @@ class VirtualFrame: public ZoneObject {
return true;
}
// Update the type information of a local variable frame element directly.
// Update the type information of a variable frame element directly.
inline void SetTypeForLocalAt(int index, NumberInfo info);
inline void SetTypeForParamAt(int index, NumberInfo info);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;

214
deps/v8/src/jsregexp.cc

@ -149,7 +149,7 @@ Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> atom_string = Factory::NewStringFromTwoByte(atom_pattern);
AtomCompile(re, pattern, flags, atom_string);
} else {
IrregexpPrepare(re, pattern, flags, parse_result.capture_count);
IrregexpInitialize(re, pattern, flags, parse_result.capture_count);
}
ASSERT(re->data()->IsFixedArray());
// Compilation succeeded so the data is set on the regexp
@ -341,10 +341,10 @@ Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_ascii) {
}
void RegExpImpl::IrregexpPrepare(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags,
int capture_count) {
void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags,
int capture_count) {
// Initialize compiled code entries to null.
Factory::SetRegExpIrregexpData(re,
JSRegExp::IRREGEXP,
@ -354,6 +354,94 @@ void RegExpImpl::IrregexpPrepare(Handle<JSRegExp> re,
}
int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
Handle<String> subject) {
if (!subject->IsFlat()) {
FlattenString(subject);
}
bool is_ascii = subject->IsAsciiRepresentation();
if (!EnsureCompiledIrregexp(regexp, is_ascii)) {
return -1;
}
#ifdef V8_NATIVE_REGEXP
// Native regexp only needs room to output captures. Registers are handled
// internally.
return (IrregexpNumberOfCaptures(FixedArray::cast(regexp->data())) + 1) * 2;
#else // !V8_NATIVE_REGEXP
// Byte-code regexp needs space allocated for all its registers.
return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data()));
#endif // V8_NATIVE_REGEXP
}
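The two return values above follow from how Irregexp reports matches: capture 0 is the whole match, and every capture occupies a (start, end) pair of ints, hence (captures + 1) * 2 output slots in the native case. A sketch, not commit code:

// For /a(b)(c)/ there are 2 explicit captures, so 6 output ints:
// [match_start, match_end, cap1_start, cap1_end, cap2_start, cap2_end]
int RequiredOutputRegisters(int number_of_captures) {
  return (number_of_captures + 1) * 2;
}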
RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
Vector<int> output) {
Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
ASSERT(index >= 0);
ASSERT(index <= subject->length());
ASSERT(subject->IsFlat());
#ifdef V8_NATIVE_REGEXP
ASSERT(output.length() >=
(IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
do {
bool is_ascii = subject->IsAsciiRepresentation();
Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii));
NativeRegExpMacroAssembler::Result res =
NativeRegExpMacroAssembler::Match(code,
subject,
output.start(),
output.length(),
index);
if (res != NativeRegExpMacroAssembler::RETRY) {
ASSERT(res != NativeRegExpMacroAssembler::EXCEPTION ||
Top::has_pending_exception());
STATIC_ASSERT(
static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS);
STATIC_ASSERT(
static_cast<int>(NativeRegExpMacroAssembler::FAILURE) == RE_FAILURE);
STATIC_ASSERT(static_cast<int>(NativeRegExpMacroAssembler::EXCEPTION)
== RE_EXCEPTION);
return static_cast<IrregexpResult>(res);
}
// If result is RETRY, the string has changed representation, and we
// must restart from scratch.
// In this case, we must make sure we are prepared to handle the
// potentially different subject (the string can switch between
// being internal and external, and even between being ASCII and UC16,
// but the characters are always the same).
IrregexpPrepare(regexp, subject);
} while (true);
UNREACHABLE();
return RE_EXCEPTION;
#else // ndef V8_NATIVE_REGEXP
ASSERT(output.length() >= IrregexpNumberOfRegisters(*irregexp));
bool is_ascii = subject->IsAsciiRepresentation();
// We must have done EnsureCompiledIrregexp, so we can get the number of
// registers.
int* register_vector = output.start();
int number_of_capture_registers =
(IrregexpNumberOfCaptures(*irregexp) + 1) * 2;
for (int i = number_of_capture_registers - 1; i >= 0; i--) {
register_vector[i] = -1;
}
Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii));
if (IrregexpInterpreter::Match(byte_codes,
subject,
register_vector,
index)) {
return RE_SUCCESS;
}
return RE_FAILURE;
#endif // ndef V8_NATIVE_REGEXP
}
Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
Handle<String> subject,
int previous_index,
@ -361,9 +449,6 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
// Prepare space for the return values.
int number_of_capture_registers =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
#ifndef V8_NATIVE_REGEXP
#ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) {
@ -373,101 +458,42 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
}
#endif
#endif
if (!subject->IsFlat()) {
FlattenString(subject);
}
last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead);
Handle<FixedArray> array;
// Dispatch to the correct RegExp implementation.
Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
#ifdef V8_NATIVE_REGEXP
OffsetsVector captures(number_of_capture_registers);
int* captures_vector = captures.vector();
NativeRegExpMacroAssembler::Result res;
do {
bool is_ascii = subject->IsAsciiRepresentation();
if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
return Handle<Object>::null();
}
Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii));
res = NativeRegExpMacroAssembler::Match(code,
subject,
captures_vector,
captures.length(),
previous_index);
// If result is RETRY, the string has changed representation, and we
// must restart from scratch.
} while (res == NativeRegExpMacroAssembler::RETRY);
if (res == NativeRegExpMacroAssembler::EXCEPTION) {
int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
if (required_registers < 0) {
// Compiling failed with an exception.
ASSERT(Top::has_pending_exception());
return Handle<Object>::null();
}
ASSERT(res == NativeRegExpMacroAssembler::SUCCESS
|| res == NativeRegExpMacroAssembler::FAILURE);
if (res != NativeRegExpMacroAssembler::SUCCESS) return Factory::null_value();
OffsetsVector registers(required_registers);
array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
// Capture values are relative to start_offset only.
// Convert them to be relative to start of string.
if (captures_vector[i] >= 0) {
captures_vector[i] += previous_index;
}
if (captures_vector[i + 1] >= 0) {
captures_vector[i + 1] += previous_index;
IrregexpResult res = IrregexpExecOnce(jsregexp,
subject,
previous_index,
Vector<int>(registers.vector(),
registers.length()));
if (res == RE_SUCCESS) {
int capture_register_count =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
AssertNoAllocation no_gc;
int* register_vector = registers.vector();
FixedArray* array = FixedArray::cast(last_match_info->elements());
for (int i = 0; i < capture_register_count; i += 2) {
SetCapture(array, i, register_vector[i]);
SetCapture(array, i + 1, register_vector[i + 1]);
}
SetCapture(*array, i, captures_vector[i]);
SetCapture(*array, i + 1, captures_vector[i + 1]);
SetLastCaptureCount(array, capture_register_count);
SetLastSubject(array, *subject);
SetLastInput(array, *subject);
return last_match_info;
}
#else // ! V8_NATIVE_REGEXP
bool is_ascii = subject->IsAsciiRepresentation();
if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
if (res == RE_EXCEPTION) {
ASSERT(Top::has_pending_exception());
return Handle<Object>::null();
}
// Now that we have done EnsureCompiledIrregexp we can get the number of
// registers.
int number_of_registers =
IrregexpNumberOfRegisters(FixedArray::cast(jsregexp->data()));
OffsetsVector registers(number_of_registers);
int* register_vector = registers.vector();
for (int i = number_of_capture_registers - 1; i >= 0; i--) {
register_vector[i] = -1;
}
Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii));
if (!IrregexpInterpreter::Match(byte_codes,
subject,
register_vector,
previous_index)) {
return Factory::null_value();
}
array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
SetCapture(*array, i, register_vector[i]);
SetCapture(*array, i + 1, register_vector[i + 1]);
}
#endif // V8_NATIVE_REGEXP
SetLastCaptureCount(*array, number_of_capture_registers);
SetLastSubject(*array, *subject);
SetLastInput(*array, *subject);
return last_match_info;
ASSERT(res == RE_FAILURE);
return Factory::null_value();
}

31
deps/v8/src/jsregexp.h

@ -77,10 +77,10 @@ class RegExpImpl {
Handle<JSArray> lastMatchInfo);
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpPrepare(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags,
int capture_register_count);
static void IrregexpInitialize(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags,
int capture_register_count);
static void AtomCompile(Handle<JSRegExp> re,
@ -93,6 +93,29 @@ class RegExpImpl {
int index,
Handle<JSArray> lastMatchInfo);
enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };
// Prepare a RegExp for being executed one or more times (using
// IrregexpExecOnce) on the subject.
// This ensures that the regexp is compiled for the subject, and that
// the subject is flat.
// Returns the number of integer slots required by IrregexpExecOnce
// as its "registers" argument. If the regexp cannot be compiled,
// an exception is set as pending, and this function returns a
// negative value.
static int IrregexpPrepare(Handle<JSRegExp> regexp,
Handle<String> subject);
// Execute a regular expression once on the subject, starting from
// character "index".
// If successful, returns RE_SUCCESS and sets the capture positions
// in the first registers.
// If matching fails, returns RE_FAILURE.
// If execution fails, sets a pending exception and returns RE_EXCEPTION.
static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
Vector<int32_t> registers);
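Taken together, the comments above describe a two-step protocol: prepare once per subject, then execute one or more times. A condensed sketch of a caller, mirroring the IrregexpExec rewrite in the .cc hunk (V8-internal types assumed, error paths abbreviated; not code from the commit):

static Handle<Object> ExecOnceSketch(Handle<JSRegExp> regexp,
                                     Handle<String> subject,
                                     int index,
                                     Handle<JSArray> last_match_info) {
  int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
  if (required_registers < 0) {
    return Handle<Object>::null();  // compilation failed, exception pending
  }
  OffsetsVector registers(required_registers);
  RegExpImpl::IrregexpResult res = RegExpImpl::IrregexpExecOnce(
      regexp, subject, index,
      Vector<int>(registers.vector(), registers.length()));
  if (res == RegExpImpl::RE_SUCCESS) return last_match_info;  // captures set
  if (res == RegExpImpl::RE_EXCEPTION) return Handle<Object>::null();
  return Factory::null_value();  // RE_FAILURE: no match
}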
// Execute an Irregexp bytecode pattern.
// On a successful match, the result is a JSArray containing
// captured positions. On a failure, the result is the null value.

32
deps/v8/src/liveedit.cc

@ -391,6 +391,26 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
ZoneList<RelocInfo> reloc_infos_;
};
class FrameCookingThreadVisitor : public ThreadVisitor {
public:
void VisitThread(ThreadLocalTop* top) {
StackFrame::CookFramesForThread(top);
}
};
class FrameUncookingThreadVisitor : public ThreadVisitor {
public:
void VisitThread(ThreadLocalTop* top) {
StackFrame::UncookFramesForThread(top);
}
};
static void IterateAllThreads(ThreadVisitor* visitor) {
Top::IterateThread(visitor);
ThreadManager::IterateThreads(visitor);
}
// Finds all references to original and replaces them with substitution.
static void ReplaceCodeObject(Code* original, Code* substitution) {
ASSERT(!Heap::InNewSpace(substitution));
@ -405,9 +425,15 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
// Iterate over all roots. Stack frames may have pointers into the original
// code, so temporarily replace the pointers with offset numbers
// in a prologue/epilogue.
ThreadManager::MarkCompactPrologue(true);
Heap::IterateStrongRoots(&visitor, VISIT_ALL);
ThreadManager::MarkCompactEpilogue(true);
{
FrameCookingThreadVisitor cooking_visitor;
IterateAllThreads(&cooking_visitor);
Heap::IterateStrongRoots(&visitor, VISIT_ALL);
FrameUncookingThreadVisitor uncooking_visitor;
IterateAllThreads(&uncooking_visitor);
}
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.

2
deps/v8/src/math.js

@ -171,7 +171,7 @@ function MathRandom() {
// ECMA 262 - 15.8.2.15
function MathRound(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_round(x);
return %RoundNumber(x);
}
// ECMA 262 - 15.8.2.16

11
deps/v8/src/objects-inl.h

@ -1121,6 +1121,17 @@ void HeapNumber::set_value(double value) {
}
int HeapNumber::get_exponent() {
return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
kExponentShift) - kExponentBias;
}
int HeapNumber::get_sign() {
return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)

3
deps/v8/src/objects.h

@ -1094,6 +1094,9 @@ class HeapNumber: public HeapObject {
void HeapNumberVerify();
#endif
inline int get_exponent();
inline int get_sign();
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
// IEEE doubles are two 32 bit words. The first is just mantissa, the second

4
deps/v8/src/parser.cc

@ -1585,13 +1585,15 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder {
}
void HandleThisPropertyAssignment(Scope* scope, Assignment* assignment) {
// Check that the property assigned to is a named property.
// Check that the property assigned to is a named property, which is not
// __proto__.
Property* property = assignment->target()->AsProperty();
ASSERT(property != NULL);
Literal* literal = property->key()->AsLiteral();
uint32_t dummy;
if (literal != NULL &&
literal->handle()->IsString() &&
!String::cast(*(literal->handle()))->Equals(Heap::Proto_symbol()) &&
!String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
Handle<String> key = Handle<String>::cast(literal->handle());

6
deps/v8/src/platform.h

@ -114,6 +114,10 @@ int random();
namespace v8 {
namespace internal {
// Use AtomicWord for a machine-sized pointer. It is assumed that
// reads and writes of naturally aligned values of this type are atomic.
typedef intptr_t AtomicWord;
class Semaphore;
double ceiling(double x);
@ -525,7 +529,7 @@ class TickSample {
Address function; // The last called JS function.
StateTag state; // The state of the VM.
static const int kMaxFramesCount = 100;
EmbeddedVector<Address, kMaxFramesCount> stack; // Call stack.
Address stack[kMaxFramesCount]; // Call stack.
int frames_count; // Number of captured frames.
};

2461
deps/v8/src/powers-ten.h

File diff suppressed because it is too large

29
deps/v8/src/profile-generator-inl.h

@ -34,6 +34,17 @@ namespace v8 {
namespace internal {
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
const char* name,
const char* resource_name,
int line_number)
: tag_(tag),
name_(name),
resource_name_(resource_name),
line_number_(line_number) {
}
bool CodeEntry::is_js_function() {
return tag_ == Logger::FUNCTION_TAG
|| tag_ == Logger::LAZY_COMPILE_TAG
@ -41,24 +52,6 @@ bool CodeEntry::is_js_function() {
}
StaticNameCodeEntry::StaticNameCodeEntry(Logger::LogEventsAndTags tag,
const char* name)
: CodeEntry(tag),
name_(name) {
}
ManagedNameCodeEntry::ManagedNameCodeEntry(Logger::LogEventsAndTags tag,
String* name,
const char* resource_name,
int line_number)
: CodeEntry(tag),
name_(name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach()),
resource_name_(resource_name),
line_number_(line_number) {
}
ProfileNode::ProfileNode(CodeEntry* entry)
: entry_(entry),
total_ticks_(0),

157
deps/v8/src/profile-generator.cc

@ -53,6 +53,15 @@ ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
}
void ProfileNode::GetChildren(List<ProfileNode*>* children) {
for (HashMap::Entry* p = children_.Start();
p != NULL;
p = children_.Next(p)) {
children->Add(reinterpret_cast<ProfileNode*>(p->value));
}
}
void ProfileNode::Print(int indent) {
OS::Print("%4u %4u %*c %s\n",
total_ticks_, self_ticks_,
@ -233,63 +242,143 @@ CodeEntry* CodeMap::FindEntry(Address addr) {
}
ProfileGenerator::ProfileGenerator()
: resource_names_(StringsMatch) {
CpuProfilesCollection::CpuProfilesCollection()
: function_and_resource_names_(StringsMatch) {
}
static void CodeEntriesDeleter(CodeEntry** entry_ptr) {
static void DeleteArgsCountName(char** name_ptr) {
DeleteArray(*name_ptr);
}
static void DeleteCodeEntry(CodeEntry** entry_ptr) {
delete *entry_ptr;
}
static void DeleteCpuProfile(CpuProfile** profile_ptr) {
delete *profile_ptr;
}
ProfileGenerator::~ProfileGenerator() {
for (HashMap::Entry* p = resource_names_.Start();
CpuProfilesCollection::~CpuProfilesCollection() {
profiles_.Iterate(DeleteCpuProfile);
code_entries_.Iterate(DeleteCodeEntry);
args_count_names_.Iterate(DeleteArgsCountName);
for (HashMap::Entry* p = function_and_resource_names_.Start();
p != NULL;
p = resource_names_.Next(p)) {
p = function_and_resource_names_.Next(p)) {
DeleteArray(reinterpret_cast<const char*>(p->value));
}
}
code_entries_.Iterate(CodeEntriesDeleter);
void CpuProfilesCollection::AddProfile(unsigned uid) {
profiles_.Add(new CpuProfile());
}
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
String* name,
String* resource_name,
int line_number) {
CodeEntry* entry = new CodeEntry(tag,
GetName(name),
GetName(resource_name),
line_number);
code_entries_.Add(entry);
return entry;
}
CodeEntry* ProfileGenerator::NewCodeEntry(
Logger::LogEventsAndTags tag,
String* name,
String* resource_name, int line_number) {
const char* cached_resource_name = NULL;
if (resource_name->IsString()) {
// As we copy contents of resource names, and usually they are repeated,
// we cache names by string hashcode.
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
const char* name) {
CodeEntry* entry = new CodeEntry(tag, name, "", 0);
code_entries_.Add(entry);
return entry;
}
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
int args_count) {
CodeEntry* entry = new CodeEntry(tag, GetName(args_count), "", 0);
code_entries_.Add(entry);
return entry;
}
const char* CpuProfilesCollection::GetName(String* name) {
if (name->IsString()) {
char* c_name =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach();
HashMap::Entry* cache_entry =
resource_names_.Lookup(resource_name,
StringEntryHash(resource_name),
true);
function_and_resource_names_.Lookup(c_name,
name->Hash(),
true);
if (cache_entry->value == NULL) {
// New entry added.
cache_entry->value =
resource_name->ToCString(DISALLOW_NULLS,
ROBUST_STRING_TRAVERSAL).Detach();
cache_entry->value = c_name;
} else {
DeleteArray(c_name);
}
cached_resource_name = reinterpret_cast<const char*>(cache_entry->value);
return reinterpret_cast<const char*>(cache_entry->value);
} else {
return "";
}
}
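GetName above dedupes heap copies of script and function names: the string is flattened to a C string, looked up by its JS hash, and either cached (the map takes ownership) or deleted when an equal name is already present. A rough equivalent with standard containers, for illustration only:

#include <string>
#include <unordered_map>

const char* GetNameSketch(std::unordered_map<std::string, std::string>* cache,
                          const std::string& name) {
  // emplace is a no-op when the key already exists, like the
  // Lookup-then-DeleteArray dance above; the cached copy outlives
  // the caller's string.
  auto it = cache->emplace(name, name).first;
  return it->second.c_str();
}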
CodeEntry* entry = new ManagedNameCodeEntry(tag,
name,
cached_resource_name,
line_number);
code_entries_.Add(entry);
return entry;
const char* CpuProfilesCollection::GetName(int args_count) {
ASSERT(args_count >= 0);
if (args_count_names_.length() <= args_count) {
args_count_names_.AddBlock(
NULL, args_count - args_count_names_.length() + 1);
}
if (args_count_names_[args_count] == NULL) {
const int kMaximumNameLength = 32;
char* name = NewArray<char>(kMaximumNameLength);
OS::SNPrintF(Vector<char>(name, kMaximumNameLength),
"args_count: %d", args_count);
args_count_names_[args_count] = name;
}
return args_count_names_[args_count];
}
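The args_count variant caches one formatted name per arity, growing the list on demand so repeated "args_count: N" entries share storage. A sketch with standard containers standing in for V8's List, same caveats as the previous block:

#include <cstdio>
#include <string>
#include <vector>

const char* GetArgsCountNameSketch(std::vector<std::string>* cache,
                                   int args_count) {
  if (static_cast<int>(cache->size()) <= args_count) {
    cache->resize(args_count + 1);  // like AddBlock(NULL, ...)
  }
  if ((*cache)[args_count].empty()) {
    char buf[32];  // kMaximumNameLength
    std::snprintf(buf, sizeof(buf), "args_count: %d", args_count);
    (*cache)[args_count] = buf;
  }
  return (*cache)[args_count].c_str();
}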
CodeEntry* ProfileGenerator::NewCodeEntry(
Logger::LogEventsAndTags tag,
const char* name) {
CodeEntry* entry = new StaticNameCodeEntry(tag, name);
code_entries_.Add(entry);
return entry;
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
: profiles_(profiles) {
}
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// Allocate space for stack frames + pc + function.
ScopedVector<CodeEntry*> entries(sample.frames_count + 2);
CodeEntry** entry = entries.start();
*entry++ = code_map_.FindEntry(sample.pc);
if (sample.function != NULL) {
*entry = code_map_.FindEntry(sample.function);
if (*entry != NULL && !(*entry)->is_js_function()) {
*entry = NULL;
} else {
CodeEntry* pc_entry = *entries.start();
if (pc_entry == NULL || pc_entry->is_js_function())
*entry = NULL;
}
entry++;
} else {
*entry++ = NULL;
}
for (const Address *stack_pos = sample.stack,
*stack_end = stack_pos + sample.frames_count;
stack_pos != stack_end;
++stack_pos) {
*entry++ = code_map_.FindEntry(*stack_pos);
}
profile()->AddPath(entries);
}
} } // namespace v8::internal

122
deps/v8/src/profile-generator.h

@ -36,50 +36,22 @@ namespace internal {
class CodeEntry {
public:
virtual ~CodeEntry() { }
// CodeEntry doesn't own name strings, just references them.
INLINE(CodeEntry(Logger::LogEventsAndTags tag_,
const char* name_,
const char* resource_name_,
int line_number_));
virtual const char* name() = 0;
INLINE(bool is_js_function());
protected:
INLINE(explicit CodeEntry(Logger::LogEventsAndTags tag))
: tag_(tag) { }
INLINE(const char* name()) { return name_; }
private:
Logger::LogEventsAndTags tag_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
class StaticNameCodeEntry : public CodeEntry {
public:
INLINE(StaticNameCodeEntry(Logger::LogEventsAndTags tag,
const char* name));
INLINE(virtual const char* name()) { return name_ != NULL ? name_ : ""; }
private:
const char* name_;
DISALLOW_COPY_AND_ASSIGN(StaticNameCodeEntry);
};
class ManagedNameCodeEntry : public CodeEntry {
public:
INLINE(ManagedNameCodeEntry(Logger::LogEventsAndTags tag,
String* name,
const char* resource_name, int line_number));
INLINE(virtual const char* name()) { return !name_.is_empty() ? *name_ : ""; }
private:
SmartPointer<char> name_;
const char* resource_name_;
int line_number_;
DISALLOW_COPY_AND_ASSIGN(ManagedNameCodeEntry);
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@ -92,17 +64,19 @@ class ProfileNode {
INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
INLINE(unsigned total_ticks()) { return total_ticks_; }
INLINE(unsigned self_ticks()) { return self_ticks_; }
INLINE(CodeEntry* entry() const) { return entry_; }
INLINE(unsigned total_ticks() const) { return total_ticks_; }
INLINE(unsigned self_ticks() const) { return self_ticks_; }
void GetChildren(List<ProfileNode*>* children);
void Print(int indent);
private:
INLINE(static bool CodeEntriesMatch(void* key1, void* key2)) {
return key1 == key2;
INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) {
return entry1 == entry2;
}
INLINE(static bool CodeEntryHash(CodeEntry* entry)) {
INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) {
return static_cast<int32_t>(reinterpret_cast<intptr_t>(entry));
}
@ -144,13 +118,16 @@ class ProfileTree BASE_EMBEDDED {
};
class CpuProfile BASE_EMBEDDED {
class CpuProfile {
public:
CpuProfile() { }
// Add pc -> ... -> main() call path to the profile.
void AddPath(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
INLINE(ProfileTree* top_down()) { return &top_down_; }
INLINE(ProfileTree* bottom_up()) { return &bottom_up_; }
void ShortPrint();
void Print();
@ -196,33 +173,70 @@ class CodeMap BASE_EMBEDDED {
};
class ProfileGenerator {
class CpuProfilesCollection {
public:
ProfileGenerator();
~ProfileGenerator();
CpuProfilesCollection();
~CpuProfilesCollection();
void AddProfile(unsigned uid);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
INLINE(CpuProfile* profile()) { return &profile_; }
INLINE(CodeMap* code_map()) { return &code_map_; }
INLINE(CpuProfile* profile()) { return profiles_.last(); }
private:
const char* GetName(String* name);
const char* GetName(int args_count);
INLINE(static bool StringsMatch(void* key1, void* key2)) {
return key1 == key2;
return strcmp(reinterpret_cast<char*>(key1),
reinterpret_cast<char*>(key2)) == 0;
}
INLINE(static bool StringEntryHash(String* entry)) {
return entry->Hash();
// String::Hash -> const char*
HashMap function_and_resource_names_;
// args_count -> char*
List<char*> args_count_names_;
List<CodeEntry*> code_entries_;
List<CpuProfile*> profiles_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
class ProfileGenerator {
public:
explicit ProfileGenerator(CpuProfilesCollection* profiles);
INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name,
String* resource_name,
int line_number)) {
return profiles_->NewCodeEntry(tag, name, resource_name, line_number);
}
CpuProfile profile_;
INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
const char* name)) {
return profiles_->NewCodeEntry(tag, name);
}
INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
int args_count)) {
return profiles_->NewCodeEntry(tag, args_count);
}
void RecordTickSample(const TickSample& sample);
INLINE(CodeMap* code_map()) { return &code_map_; }
private:
INLINE(CpuProfile* profile()) { return profiles_->profile(); }
CpuProfilesCollection* profiles_;
CodeMap code_map_;
typedef List<CodeEntry*> CodeEntryList;
CodeEntryList code_entries_;
// String::Hash -> const char*
HashMap resource_names_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};

4
deps/v8/src/rewriter.cc

@ -247,7 +247,9 @@ void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
}
if (FLAG_safe_int32_compiler) {
if (var->IsStackAllocated() && !var->is_arguments()) {
if (var->IsStackAllocated() &&
!var->is_arguments() &&
var->mode() != Variable::CONST) {
node->set_side_effect_free(true);
}
}

83
deps/v8/src/runtime.cc

@ -2353,6 +2353,14 @@ template <typename schar>
static inline int SingleCharIndexOf(Vector<const schar> string,
schar pattern_char,
int start_index) {
if (sizeof(schar) == 1) {
const schar* pos = reinterpret_cast<const schar*>(
memchr(string.start() + start_index,
pattern_char,
string.length() - start_index));
if (pos == NULL) return -1;
return pos - string.start();
}
for (int i = start_index, n = string.length(); i < n; i++) {
if (pattern_char == string[i]) {
return i;
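The new fast path above replaces a byte-at-a-time scan with memchr, which C libraries typically implement with word-sized or vectorized compares. A standalone sketch of the one-byte case, with plain types in place of Vector<const schar>:

#include <cstring>

int SingleCharIndexOfSketch(const char* string, int length,
                            char pattern_char, int start_index) {
  const void* pos = memchr(string + start_index, pattern_char,
                           length - start_index);
  if (pos == NULL) return -1;
  return static_cast<int>(static_cast<const char*>(pos) - string);
}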
@ -2400,7 +2408,19 @@ static int SimpleIndexOf(Vector<const schar> subject,
*complete = false;
return i;
}
if (subject[i] != pattern_first_char) continue;
if (sizeof(schar) == 1 && sizeof(pchar) == 1) {
const schar* pos = reinterpret_cast<const schar*>(
memchr(subject.start() + i,
pattern_first_char,
n - i + 1));
if (pos == NULL) {
*complete = true;
return -1;
}
i = pos - subject.start();
} else {
if (subject[i] != pattern_first_char) continue;
}
int j = 1;
do {
if (pattern[j] != subject[i+j]) {
@ -2425,7 +2445,16 @@ static int SimpleIndexOf(Vector<const schar> subject,
int idx) {
pchar pattern_first_char = pattern[0];
for (int i = idx, n = subject.length() - pattern.length(); i <= n; i++) {
if (subject[i] != pattern_first_char) continue;
if (sizeof(schar) == 1 && sizeof(pchar) == 1) {
const schar* pos = reinterpret_cast<const schar*>(
memchr(subject.start() + i,
pattern_first_char,
n - i + 1));
if (pos == NULL) return -1;
i = pos - subject.start();
} else {
if (subject[i] != pattern_first_char) continue;
}
int j = 1;
do {
if (pattern[j] != subject[i+j]) {
@ -5321,16 +5350,38 @@ static Object* Runtime_Math_pow_cfunction(Arguments args) {
}
static Object* Runtime_Math_round(Arguments args) {
static Object* Runtime_RoundNumber(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
Counters::math_round.Increment();
CONVERT_DOUBLE_CHECKED(x, args[0]);
if (signbit(x) && x >= -0.5) return Heap::minus_zero_value();
double integer = ceil(x);
if (integer - x > 0.5) { integer -= 1.0; }
return Heap::NumberFromDouble(integer);
if (!args[0]->IsHeapNumber()) {
// Must be a smi. Return the argument unchanged for all other types
// to keep the fuzz-natives test happy.
return args[0];
}
HeapNumber* number = reinterpret_cast<HeapNumber*>(args[0]);
double value = number->value();
int exponent = number->get_exponent();
int sign = number->get_sign();
// We compare with kSmiValueSize - 3 because (2^30 - 0.1) has exponent 29 and
// should be rounded to 2^30, which is not a smi.
if (!sign && exponent <= kSmiValueSize - 3) {
return Smi::FromInt(static_cast<int>(value + 0.5));
}
// If the magnitude is big enough, there is no room for a fraction part. If we
// try to add 0.5 to this number, 1.0 will be added instead.
if (exponent >= 52) {
return number;
}
if (sign && value >= -0.5) return Heap::minus_zero_value();
return Heap::NumberFromDouble(floor(value + 0.5));
}
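The new Runtime_RoundNumber classifies the double by its IEEE-754 exponent before touching the value: small non-negative numbers round directly into a smi, magnitudes with exponent 52 or above have no fractional bits left, and the [-0.5, 0) range must produce -0. A hedged sketch of that classification, where std::ilogb and std::signbit stand in for get_exponent()/get_sign() and kSmiValueSize is assumed to be 31:

#include <cmath>

double RoundNumberSketch(double value) {
  const int kSmiValueSize = 31;
  int exponent = std::ilogb(value);
  bool negative = std::signbit(value);
  if (!negative && exponent <= kSmiValueSize - 3) {
    return static_cast<double>(static_cast<int>(value + 0.5));  // smi range
  }
  if (exponent >= 52) return value;  // no fraction bits; +0.5 could add 1.0
  if (negative && value >= -0.5) return -0.0;
  return std::floor(value + 0.5);
}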
@ -5635,7 +5686,7 @@ static const char kMonthInYear[] = {
// This function works for dates from 1970 to 2099.
static inline void DateYMDFromTimeAfter1970(int date,
int &year, int &month, int &day) {
int& year, int& month, int& day) {
#ifdef DEBUG
int save_date = date; // Need this for ASSERT in the end.
#endif
@ -5651,7 +5702,7 @@ static inline void DateYMDFromTimeAfter1970(int date,
static inline void DateYMDFromTimeSlow(int date,
int &year, int &month, int &day) {
int& year, int& month, int& day) {
#ifdef DEBUG
int save_date = date; // Need this for ASSERT in the end.
#endif
@ -5680,11 +5731,11 @@ static inline void DateYMDFromTimeSlow(int date,
bool is_leap = (!yd1 || yd2) && !yd3;
ASSERT(date >= -1);
ASSERT(is_leap || date >= 0);
ASSERT(date < 365 || is_leap && date < 366);
ASSERT(is_leap == (year % 4 == 0 && (year % 100 || (year % 400 == 0))));
ASSERT(is_leap || MakeDay(year, 0, 1) + date == save_date);
ASSERT(!is_leap || MakeDay(year, 0, 1) + date + 1 == save_date);
ASSERT(is_leap || (date >= 0));
ASSERT((date < 365) || (is_leap && (date < 366)));
ASSERT(is_leap == ((year % 4 == 0) && (year % 100 || (year % 400 == 0))));
ASSERT(is_leap || ((MakeDay(year, 0, 1) + date) == save_date));
ASSERT(!is_leap || ((MakeDay(year, 0, 1) + date + 1) == save_date));
if (is_leap) {
day = kDayInYear[2*365 + 1 + date];
@ -5699,7 +5750,7 @@ static inline void DateYMDFromTimeSlow(int date,
static inline void DateYMDFromTime(int date,
int &year, int &month, int &day) {
int& year, int& month, int& day) {
if (date >= 0 && date < 32 * kDaysIn4Years) {
DateYMDFromTimeAfter1970(date, year, month, day);
} else {

2
deps/v8/src/runtime.h

@ -145,7 +145,7 @@ namespace internal {
F(Math_log, 1, 1) \
F(Math_pow, 2, 1) \
F(Math_pow_cfunction, 2, 1) \
F(Math_round, 1, 1) \
F(RoundNumber, 1, 1) \
F(Math_sin, 1, 1) \
F(Math_sqrt, 1, 1) \
F(Math_tan, 1, 1) \

4
deps/v8/src/serialize.cc

@ -477,7 +477,7 @@ int ExternalReferenceEncoder::IndexOf(Address key) const {
void ExternalReferenceEncoder::Put(Address key, int index) {
HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
entry->value = reinterpret_cast<void *>(index);
entry->value = reinterpret_cast<void*>(index);
}
@ -977,7 +977,7 @@ int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
// the startup snapshot that correspond to the elements of this cache array. On
// deserialization we therefore need to visit the cache array. This fills it up
// with pointers to deserialized objects.
void SerializerDeserializer::Iterate(ObjectVisitor *visitor) {
void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
visitor->VisitPointers(
&partial_snapshot_cache_[0],
&partial_snapshot_cache_[partial_snapshot_cache_length_]);

11
deps/v8/src/top.cc

@ -88,6 +88,17 @@ char* Top::Iterate(ObjectVisitor* v, char* thread_storage) {
}
void Top::IterateThread(ThreadVisitor* v) {
v->VisitThread(&thread_local_);
}
void Top::IterateThread(ThreadVisitor* v, char* t) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
v->VisitThread(thread);
}
void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
v->VisitPointer(&(thread->pending_exception_));
v->VisitPointer(&(thread->pending_message_obj_));

3
deps/v8/src/top.h

@ -40,6 +40,7 @@ namespace internal {
// Top has static variables used for JavaScript execution.
class SaveContext; // Forward declaration.
class ThreadVisitor; // Defined in v8threads.h
class ThreadLocalTop BASE_EMBEDDED {
public:
@ -319,6 +320,8 @@ class Top {
static void Iterate(ObjectVisitor* v);
static void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
static char* Iterate(ObjectVisitor* v, char* t);
static void IterateThread(ThreadVisitor* v);
static void IterateThread(ThreadVisitor* v, char* t);
// Returns the global object of the current context. It could be
// a builtin object, or a js global object.

1
deps/v8/src/utils.h

@ -341,7 +341,6 @@ class Vector {
// Releases the array underlying this vector. Once disposed the
// vector is empty.
void Dispose() {
if (is_empty()) return;
DeleteArray(start_);
start_ = NULL;
length_ = 0;

2
deps/v8/src/v8.cc

@ -43,7 +43,7 @@ bool V8::has_been_setup_ = false;
bool V8::has_been_disposed_ = false;
bool V8::has_fatal_error_ = false;
bool V8::Initialize(Deserializer *des) {
bool V8::Initialize(Deserializer* des) {
bool create_heap_objects = des == NULL;
if (has_been_disposed_ || has_fatal_error_) return false;
if (IsRunning()) return true;

11
deps/v8/src/v8threads.cc

@ -331,6 +331,17 @@ void ThreadManager::Iterate(ObjectVisitor* v) {
}
void ThreadManager::IterateThreads(ThreadVisitor* v) {
for (ThreadState* state = ThreadState::FirstInUse();
state != NULL;
state = state->Next()) {
char* data = state->data();
data += HandleScopeImplementer::ArchiveSpacePerThread();
Top::IterateThread(v, data);
}
}
void ThreadManager::MarkCompactPrologue(bool is_compacting) {
for (ThreadState* state = ThreadState::FirstInUse();
state != NULL;

15
deps/v8/src/v8threads.h

@ -79,6 +79,20 @@ class ThreadState {
};
// Defined in top.h
class ThreadLocalTop;
class ThreadVisitor {
public:
// ThreadLocalTop may only be available during this call.
virtual void VisitThread(ThreadLocalTop* top) = 0;
protected:
virtual ~ThreadVisitor() {}
};
class ThreadManager : public AllStatic {
public:
static void Lock();
@ -90,6 +104,7 @@ class ThreadManager : public AllStatic {
static bool IsArchived();
static void Iterate(ObjectVisitor* v);
static void IterateThreads(ThreadVisitor* v);
static void MarkCompactPrologue(bool is_compacting);
static void MarkCompactEpilogue(bool is_compacting);
static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 1
#define BUILD_NUMBER 6
#define BUILD_NUMBER 7
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

5
deps/v8/src/virtual-frame-inl.h

@ -125,6 +125,11 @@ void VirtualFrame::SetTypeForLocalAt(int index, NumberInfo info) {
}
void VirtualFrame::SetTypeForParamAt(int index, NumberInfo info) {
elements_[param0_index() + index].set_number_info(info);
}
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_INL_H_

38
deps/v8/src/x64/assembler-x64.cc

@ -1030,6 +1030,22 @@ void Assembler::imull(Register dst, Register src) {
}
void Assembler::imull(Register dst, Register src, Immediate imm) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst, src);
if (is_int8(imm.value_)) {
emit(0x6B);
emit_modrm(dst, src);
emit(imm.value_);
} else {
emit(0x69);
emit_modrm(dst, src);
emitl(imm.value_);
}
}
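The immediate form added above picks between two x86 encodings: opcode 0x6B takes a sign-extended 8-bit immediate, 0x69 a full 32-bit one. A sketch of that size-based choice emitting raw bytes (ModR/M and REX computation elided; a hypothetical helper, not the commit's Assembler):

#include <cstdint>
#include <vector>

void EmitImulImmSketch(std::vector<uint8_t>* code, uint8_t modrm, int32_t imm) {
  if (imm >= -128 && imm <= 127) {  // is_int8(imm)
    code->push_back(0x6B);          // IMUL r32, r/m32, imm8
    code->push_back(modrm);
    code->push_back(static_cast<uint8_t>(imm));
  } else {
    code->push_back(0x69);          // IMUL r32, r/m32, imm32
    code->push_back(modrm);
    for (int i = 0; i < 4; i++) {   // little-endian imm32
      code->push_back(static_cast<uint8_t>((imm >> (8 * i)) & 0xFF));
    }
  }
}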
void Assembler::incq(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1190,6 +1206,15 @@ void Assembler::lea(Register dst, const Operand& src) {
}
void Assembler::leal(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst, src);
emit(0x8D);
emit_operand(dst, src);
}
void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1219,6 +1244,7 @@ void Assembler::movb(Register dst, const Operand& src) {
emit_operand(dst, src);
}
void Assembler::movb(Register dst, Immediate imm) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1228,6 +1254,7 @@ void Assembler::movb(Register dst, Immediate imm) {
emit(imm.value_);
}
void Assembler::movb(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1236,6 +1263,7 @@ void Assembler::movb(const Operand& dst, Register src) {
emit_operand(src, dst);
}
void Assembler::movw(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1245,6 +1273,7 @@ void Assembler::movw(const Operand& dst, Register src) {
emit_operand(src, dst);
}
void Assembler::movl(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1600,6 +1629,15 @@ void Assembler::not_(const Operand& dst) {
}
void Assembler::notl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xF7);
emit_modrm(0x2, dst);
}
void Assembler::nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.

7
deps/v8/src/x64/assembler-x64.h

@ -742,14 +742,16 @@ class Assembler : public Malloced {
void imul(Register dst, Register src); // dst = dst * src.
void imul(Register dst, const Operand& src); // dst = dst * src.
void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
// Multiply 32 bit registers
void imull(Register dst, Register src); // dst = dst * src.
// Signed 32-bit multiply instructions.
void imull(Register dst, Register src); // dst = dst * src.
void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
void incq(Register dst);
void incq(const Operand& dst);
void incl(const Operand& dst);
void lea(Register dst, const Operand& src);
void leal(Register dst, const Operand& src);
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
@ -760,6 +762,7 @@ class Assembler : public Malloced {
void not_(Register dst);
void not_(const Operand& dst);
void notl(Register dst);
void or_(Register dst, Register src) {
arithmetic_op(0x0B, dst, src);

368
deps/v8/src/x64/codegen-x64.cc

@ -7182,12 +7182,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
__ Integer32ToSmi(rdi, rdi, &runtime);
// Add previous index (from its stack slot) if value is not negative.
Label capture_negative;
// Negative flag set by smi conversion above.
__ j(negative, &capture_negative);
__ SmiAdd(rdi, rdi, rax, &runtime); // Add previous index.
__ bind(&capture_negative);
// Store the smi value in the last match info.
__ movq(FieldOperand(rbx,
rdx,
@ -8408,14 +8402,15 @@ const char* GenericBinaryOpStub::GetName() {
}
OS::SNPrintF(Vector<char>(name_, len),
"GenericBinaryOpStub_%s_%s%s_%s%s_%s%s",
"GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s",
op_name,
overwrite_name,
(flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
args_in_registers_ ? "RegArgs" : "StackArgs",
args_reversed_ ? "_R" : "",
use_sse3_ ? "SSE3" : "SSE2",
operands_type_.ToString());
static_operands_type_.ToString(),
BinaryOpIC::GetName(runtime_operands_type_));
return name_;
}
@ -8565,8 +8560,8 @@ Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// 1. Move arguments into edx, eax except for DIV and MOD, which need the
// dividend in eax and edx free for the division. Use eax, ebx for those.
// 1. Move arguments into rdx, rax except for DIV and MOD, which need the
// dividend in rax and rdx free for the division. Use rax, rbx for those.
Comment load_comment(masm, "-- Load arguments");
Register left = rdx;
Register right = rax;
@ -8665,7 +8660,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
break;
}
// 4. Emit return of result in eax.
// 4. Emit return of result in rax.
GenerateReturn(masm);
// 5. For some operations emit inline code to perform floating point
@ -8726,20 +8721,35 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
if (HasSmiCodeInStub()) {
if (ShouldGenerateSmiCode()) {
GenerateSmiCode(masm, &call_runtime);
} else if (op_ != Token::MOD) {
GenerateLoadArguments(masm);
if (!HasArgsInRegisters()) {
GenerateLoadArguments(masm);
}
}
// Floating point case.
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
// rax: y
// rdx: x
if (operands_type_.IsNumber()) {
if (ShouldGenerateFPCode()) {
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
HasSmiCodeInStub()) {
// Execution reaches this point when the first non-smi argument occurs
// (and only if smi code is generated). This is the right moment to
// patch to HEAP_NUMBERS state. The transition is attempted only for
// the four basic operations. The stub stays in the DEFAULT state
// forever for all other operations (also if smi code is skipped).
GenerateTypeTransition(masm);
}
Label not_floats;
// rax: y
// rdx: x
if (static_operands_type_.IsNumber()) {
if (FLAG_debug_code) {
// Assert at runtime that inputs are only numbers.
__ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
@ -8748,118 +8758,132 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
} else {
FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
}
// Fast-case: Both operands are numbers.
// xmm4 and xmm5 are volatile XMM registers.
FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
switch (op_) {
case Token::ADD: __ addsd(xmm4, xmm5); break;
case Token::SUB: __ subsd(xmm4, xmm5); break;
case Token::MUL: __ mulsd(xmm4, xmm5); break;
case Token::DIV: __ divsd(xmm4, xmm5); break;
default: UNREACHABLE();
}
// Allocate a heap number, if needed.
Label skip_allocation;
OverwriteMode mode = mode_;
if (HasArgsReversed()) {
if (mode == OVERWRITE_RIGHT) {
mode = OVERWRITE_LEFT;
} else if (mode == OVERWRITE_LEFT) {
mode = OVERWRITE_RIGHT;
// Fast-case: Both operands are numbers.
// xmm4 and xmm5 are volatile XMM registers.
FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
switch (op_) {
case Token::ADD: __ addsd(xmm4, xmm5); break;
case Token::SUB: __ subsd(xmm4, xmm5); break;
case Token::MUL: __ mulsd(xmm4, xmm5); break;
case Token::DIV: __ divsd(xmm4, xmm5); break;
default: UNREACHABLE();
}
}
switch (mode) {
case OVERWRITE_LEFT:
__ JumpIfNotSmi(rdx, &skip_allocation);
__ AllocateHeapNumber(rbx, rcx, &call_runtime);
__ movq(rdx, rbx);
__ bind(&skip_allocation);
__ movq(rax, rdx);
break;
case OVERWRITE_RIGHT:
// If the argument in rax is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
// Allocate a heap number for the result. Keep rax and rdx intact
// for the possible runtime call.
__ AllocateHeapNumber(rbx, rcx, &call_runtime);
__ movq(rax, rbx);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
}
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
GenerateReturn(masm);
}
case Token::MOD: {
// For MOD we go directly to runtime in the non-smi case.
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
case Token::SAR:
case Token::SHL:
case Token::SHR: {
Label skip_allocation, non_smi_result;
FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
case Token::SAR: __ sarl_cl(rax); break;
case Token::SHL: __ shll_cl(rax); break;
case Token::SHR: __ shrl_cl(rax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
// Check if result is non-negative. This can only happen for a shift
// by zero, which also doesn't update the sign flag.
__ testl(rax, rax);
__ j(negative, &non_smi_result);
}
__ JumpIfNotValidSmiValue(rax, &non_smi_result);
// Tag smi result, if possible, and return.
__ Integer32ToSmi(rax, rax);
GenerateReturn(masm);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
if (op_ != Token::SHR && non_smi_result.is_linked()) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
switch (mode_) {
// Allocate a heap number, if needed.
Label skip_allocation;
OverwriteMode mode = mode_;
if (HasArgsReversed()) {
if (mode == OVERWRITE_RIGHT) {
mode = OVERWRITE_LEFT;
} else if (mode == OVERWRITE_LEFT) {
mode = OVERWRITE_RIGHT;
}
}
switch (mode) {
case OVERWRITE_LEFT:
__ JumpIfNotSmi(rdx, &skip_allocation);
__ AllocateHeapNumber(rbx, rcx, &call_runtime);
__ movq(rdx, rbx);
__ bind(&skip_allocation);
__ movq(rax, rdx);
break;
case OVERWRITE_RIGHT:
// If the operand was an object, we skip the
// If the argument in rax is already an object, we skip the
// allocation of a heap number.
__ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(rax, rcx, &call_runtime);
// Allocate a heap number for the result. Keep rax and rdx intact
// for the possible runtime call.
__ AllocateHeapNumber(rbx, rcx, &call_runtime);
__ movq(rax, rbx);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
__ movq(Operand(rsp, 1 * kPointerSize), rbx);
__ fild_s(Operand(rsp, 1 * kPointerSize));
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
GenerateReturn(masm);
__ bind(&not_floats);
if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
!HasSmiCodeInStub()) {
// Execution reaches this point when the first non-number argument
// occurs (and only if smi code is skipped from the stub, otherwise
// the patching has already been done earlier in this case branch).
// A perfect moment to try patching to STRINGS for ADD operation.
if (op_ == Token::ADD) {
GenerateTypeTransition(masm);
}
}
break;
}
case Token::MOD: {
// For MOD we go directly to runtime in the non-smi case.
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR:
case Token::SAR:
case Token::SHL:
case Token::SHR: {
Label skip_allocation, non_smi_result;
FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
case Token::SAR: __ sarl_cl(rax); break;
case Token::SHL: __ shll_cl(rax); break;
case Token::SHR: __ shrl_cl(rax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
// Check if result is non-negative. This can only happen for a shift
// by zero, which also doesn't update the sign flag.
__ testl(rax, rax);
__ j(negative, &non_smi_result);
}
__ JumpIfNotValidSmiValue(rax, &non_smi_result);
// Tag smi result, if possible, and return.
__ Integer32ToSmi(rax, rax);
GenerateReturn(masm);
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
// All ops except SHR return a signed int32 that we load in
// a HeapNumber.
if (op_ != Token::SHR && non_smi_result.is_linked()) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
__ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
// If the operand was an object, we skip the
// allocation of a heap number.
__ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
1 * kPointerSize : 2 * kPointerSize));
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
__ AllocateHeapNumber(rax, rcx, &call_runtime);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
__ movq(Operand(rsp, 1 * kPointerSize), rbx);
__ fild_s(Operand(rsp, 1 * kPointerSize));
__ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
GenerateReturn(masm);
}
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
}
break;
}
break;
default: UNREACHABLE(); break;
}
default: UNREACHABLE(); break;
}
// If all else fails, use the runtime system to get the correct
@ -8868,15 +8892,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ bind(&call_runtime);
if (HasArgsInRegisters()) {
__ pop(rcx);
if (HasArgsReversed()) {
__ push(rax);
__ push(rdx);
} else {
__ push(rdx);
__ push(rax);
}
__ push(rcx);
GenerateRegisterArgsPush(masm);
}
switch (op_) {
@ -8894,8 +8910,14 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// Test for string arguments before calling runtime.
Label not_strings, both_strings, not_string1, string1, string1_smi2;
// If this stub has already generated FP-specific code then the arguments
// are already in rdx, rax.
if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
GenerateLoadArguments(masm);
}
Condition is_smi;
Result answer;
is_smi = masm->CheckSmi(lhs);
__ j(is_smi, &not_string1);
__ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
@ -8974,15 +8996,22 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default:
UNREACHABLE();
}
// TODO(kaznacheev) Remove this (along with clearing) if it does not harm
// performance.
// Generate an unreachable reference to the DEFAULT stub so that it can be
// found at the end of this stub when clearing ICs at GC.
if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
__ TailCallStub(&uninit);
}
}
void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
// If arguments are not passed in registers read them from the stack.
if (!HasArgsInRegisters()) {
__ movq(rax, Operand(rsp, 1 * kPointerSize));
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
}
ASSERT(!HasArgsInRegisters());
__ movq(rax, Operand(rsp, 1 * kPointerSize));
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
}
@ -8997,8 +9026,81 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
}
void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
ASSERT(HasArgsInRegisters());
__ pop(rcx);
if (HasArgsReversed()) {
__ push(rax);
__ push(rdx);
} else {
__ push(rdx);
__ push(rax);
}
__ push(rcx);
}
void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
// Keep a copy of operands on the stack and make sure they are also in
// rdx, rax.
if (HasArgsInRegisters()) {
GenerateRegisterArgsPush(masm);
} else {
GenerateLoadArguments(masm);
}
// Internal frame is necessary to handle exceptions properly.
__ EnterInternalFrame();
// Push arguments on stack if the stub expects them there.
if (!HasArgsInRegisters()) {
__ push(rdx);
__ push(rax);
}
// Call the stub proper to get the result in rax.
__ call(&get_result);
__ LeaveInternalFrame();
// Left and right arguments are already on stack.
__ pop(rcx);
// Push the operation result. The tail call to BinaryOp_Patch will
// return it to the original caller.
__ push(rax);
// Push this stub's key.
__ movq(rax, Immediate(MinorKey()));
__ Integer32ToSmi(rax, rax);
__ push(rax);
// Although the operation and the type info are encoded into the key,
// the encoding is opaque, so push them too.
__ movq(rax, Immediate(op_));
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ movq(rax, Immediate(runtime_operands_type_));
__ Integer32ToSmi(rax, rax);
__ push(rax);
__ push(rcx);
// Perform patching to an appropriate fast case and return the result.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
6,
1);
// The entry point for the result calculation is assumed to be immediately
// after this sequence.
__ bind(&get_result);
}
Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
return Handle<Code>::null();
GenericBinaryOpStub stub(key, type_info);
return stub.GetCode();
}

59
deps/v8/src/x64/codegen-x64.h

@ -28,6 +28,8 @@
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
#include "ic-inl.h"
namespace v8 {
namespace internal {
@ -671,12 +673,26 @@ class GenericBinaryOpStub: public CodeStub {
flags_(flags),
args_in_registers_(false),
args_reversed_(false),
name_(NULL),
operands_type_(operands_type) {
static_operands_type_(operands_type),
runtime_operands_type_(BinaryOpIC::DEFAULT),
name_(NULL) {
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
flags_(FlagBits::decode(key)),
args_in_registers_(ArgsInRegistersBits::decode(key)),
args_reversed_(ArgsReversedBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
static_operands_type_(NumberInfo::ExpandedRepresentation(
StaticTypeInfoBits::decode(key))),
runtime_operands_type_(type_info),
name_(NULL) {
}
// Generate code to call the stub with the supplied arguments. This will add
// code at the call site to prepare arguments either in registers or on the
// stack together with the actual call.
@ -696,8 +712,14 @@ class GenericBinaryOpStub: public CodeStub {
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
// Number type information of operands, determined by code generator.
NumberInfo static_operands_type_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo runtime_operands_type_;
char* name_;
NumberInfo operands_type_;
const char* GetName();
@ -711,35 +733,40 @@ class GenericBinaryOpStub: public CodeStub {
static_cast<int>(flags_),
static_cast<int>(args_in_registers_),
static_cast<int>(args_reversed_),
operands_type_.ToString());
static_operands_type_.ToString());
}
#endif
// Minor key encoding in 16 bits NNNFRASOOOOOOOMM.
// Minor key encoding in 18 bits TTNNNFRASOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class SSE3Bits: public BitField<bool, 9, 1> {};
class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
class ArgsReversedBits: public BitField<bool, 11, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
class NumberInfoBits: public BitField<int, 13, 3> {};
class StaticTypeInfoBits: public BitField<int, 13, 3> {};
class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
// Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FlagBits::encode(flags_)
| SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
| ArgsReversedBits::encode(args_reversed_)
| NumberInfoBits::encode(operands_type_.ThreeBitRepresentation());
| StaticTypeInfoBits::encode(
static_operands_type_.ThreeBitRepresentation())
| RuntimeTypeInfoBits::encode(runtime_operands_type_);
}
void Generate(MacroAssembler* masm);
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
bool ArgsInRegistersSupported() {
return (op_ == Token::ADD) || (op_ == Token::SUB)
@ -754,6 +781,22 @@ class GenericBinaryOpStub: public CodeStub {
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
bool HasArgsInRegisters() { return args_in_registers_; }
bool HasArgsReversed() { return args_reversed_; }
bool ShouldGenerateSmiCode() {
return HasSmiCodeInStub() &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS;
}
bool ShouldGenerateFPCode() {
return runtime_operands_type_ != BinaryOpIC::STRINGS;
}
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(runtime_operands_type_);
}
};
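The MinorKey above packs eight stub parameters into the 18-bit key described by the TTNNNFRASOOOOOOOMM comment. As a hedged illustration of the pattern (the BitField below is a simplified stand-in for V8's template, not the exact implementation), the encode/decode round trip works like this:

#include <cassert>
#include <cstdint>

// Simplified stand-in for V8's BitField template: each field occupies
// `size` bits starting at bit `shift` of an integer key.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    assert((static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0);
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// Field layout mirroring the "TTNNNFRASOOOOOOOMM" comment (18 bits).
typedef BitField<int, 0, 2> ModeBits;               // MM
typedef BitField<int, 2, 7> OpBits;                 // OOOOOOO
typedef BitField<bool, 9, 1> SSE3Bits;              // S
typedef BitField<bool, 10, 1> ArgsInRegistersBits;  // A
typedef BitField<bool, 11, 1> ArgsReversedBits;     // R
typedef BitField<int, 12, 1> FlagBits;              // F
typedef BitField<int, 13, 3> StaticTypeInfoBits;    // NNN
typedef BitField<int, 16, 2> RuntimeTypeInfoBits;   // TT

int main() {
  uint32_t key = OpBits::encode(42)
               | SSE3Bits::encode(true)
               | RuntimeTypeInfoBits::encode(3);
  assert(OpBits::decode(key) == 42);
  assert(SSE3Bits::decode(key) == true);
  assert(RuntimeTypeInfoBits::decode(key) == 3);
  return 0;
}

Because every field has a disjoint mask, the stub key is a stable, unique identity for a (mode, op, flags, type info) combination, which is what lets GetBinaryOpStub reconstruct a stub from the key alone.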

4
deps/v8/src/x64/disasm-x64.cc

@ -1273,7 +1273,9 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*(data + 1), &mod, &regop, &rm);
int32_t imm = *data == 0x6B ? *(data + 2)
: *reinterpret_cast<int32_t*>(data + 2);
AppendToBuffer("imul %s,%s,0x%x", NameOfCPURegister(regop),
AppendToBuffer("imul%c %s,%s,0x%x",
operand_size_code(),
NameOfCPURegister(regop),
NameOfCPURegister(rm), imm);
data += 2 + (*data == 0x6B ? 1 : 4);
break;

119
deps/v8/src/x64/ic-x64.cc

@ -151,6 +151,108 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
Label* miss,
Register elements,
Register key,
Register r0,
Register r1,
Register r2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver and is unchanged.
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
// Holds the result on exit if the load succeeded.
//
// r1 - used to hold the capacity mask of the dictionary
//
// r2 - used for the index into the dictionary.
Label done;
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
__ movl(r1, r0);
__ notl(r0);
__ shll(r1, Immediate(15));
__ addl(r0, r1);
// hash = hash ^ (hash >> 12);
__ movl(r1, r0);
__ shrl(r1, Immediate(12));
__ xorl(r0, r1);
// hash = hash + (hash << 2);
__ leal(r0, Operand(r0, r0, times_4, 0));
// hash = hash ^ (hash >> 4);
__ movl(r1, r0);
__ shrl(r1, Immediate(4));
__ xorl(r0, r1);
// hash = hash * 2057;
__ imull(r0, r0, Immediate(2057));
// hash = hash ^ (hash >> 16);
__ movl(r1, r0);
__ shrl(r1, Immediate(16));
__ xorl(r0, r1);
// Compute capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
__ movq(r1, FieldOperand(elements, kCapacityOffset));
__ SmiToInteger32(r1, r1);
__ decl(r1);
const int kElementsStartOffset =
NumberDictionary::kHeaderSize +
NumberDictionary::kElementsStartIndex * kPointerSize;
// Generate an unrolled loop that performs a few probes before giving up.
const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Use r2 for index calculations and keep the hash intact in r0.
__ movq(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
__ addl(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
}
__ and_(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(NumberDictionary::kEntrySize == 3);
__ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
__ cmpq(key, FieldOperand(elements,
r2,
times_pointer_size,
kElementsStartOffset));
if (i != (kProbes - 1)) {
__ j(equal, &done);
} else {
__ j(not_equal, miss);
}
}
__ bind(&done);
// Check that the value is a normal property.
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
__ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Smi::FromInt(PropertyDetails::TypeField::mask()));
__ j(not_zero, miss);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
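The assembly above is a hand-scheduled version of two pieces of scalar logic: the integer hash it must, per its own comment, keep in sync with ComputeIntegerHash in utils.h, and an unrolled probe loop. A C++ sketch of both follows; the triangular probe-offset formula is an assumption standing in for NumberDictionary::GetProbeOffset(i), not code quoted from objects.h:

#include <stdint.h>

// The integer hash mirrored instruction by instruction above.
static uint32_t ComputeIntegerHash(uint32_t key) {
  uint32_t hash = key;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}

// Scalar sketch of the unrolled probe loop. The entry layout
// (key, value, details; kEntrySize == 3) follows the offsets used
// above; the (i + i*i)/2 probe offset is assumed for illustration.
struct Entry { uint32_t key; uint32_t value; uint32_t details; };

static int FindEntry(const Entry* entries, uint32_t capacity, uint32_t key) {
  const uint32_t mask = capacity - 1;  // capacity is a power of two
  const uint32_t hash = ComputeIntegerHash(key);
  for (uint32_t i = 0; i < 4; i++) {   // kProbes == 4, then give up
    uint32_t index = (hash + (i + i * i) / 2) & mask;
    if (entries[index].key == key) return static_cast<int>(index);
  }
  return -1;  // miss
}

Note that the probe offset is zero for i == 0, which is why the generated code skips the addl on the first iteration.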
// Helper function used to check that a value is either not an object
// or, if it is an object, that it has been loaded.
static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
@ -271,6 +373,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -----------------------------------
Label slow, check_string, index_int, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
@ -294,6 +397,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
// Save key in rbx in case we want it for the number dictionary
// case.
__ movq(rbx, rax);
__ SmiToInteger32(rax, rax);
// Get the elements array of the object.
__ bind(&index_int);
@ -321,7 +427,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_pixel_array);
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
__ j(not_equal, &slow);
__ j(not_equal, &check_number_dictionary);
__ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
@ -329,6 +435,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ Integer32ToSmi(rax, rax);
__ ret(0);
__ bind(&check_number_dictionary);
// Check whether the elements object is a number dictionary.
// rax: untagged index
// rbx: key
// rcx: elements
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
GenerateNumberDictionaryLoad(masm, &slow, rcx, rbx, rax, rdx, rdi);
__ ret(0);
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);

16
deps/v8/src/x64/regexp-macro-assembler-x64.cc

@ -711,9 +711,15 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
__ subq(rdi, rsi);
// Set rax to address of char before start of input
// Set rax to address of char before start of the string
// (effectively string position -1).
__ lea(rax, Operand(rdi, -char_size()));
__ movq(rbx, Operand(rbp, kStartIndex));
__ neg(rbx);
if (mode_ == UC16) {
__ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
__ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
__ movq(Operand(rbp, kInputStartMinusOne), rax);
@ -770,9 +776,15 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
__ movq(rdx, Operand(rbp, kStartIndex));
__ movq(rbx, Operand(rbp, kRegisterOutput));
__ movq(rcx, Operand(rbp, kInputEnd));
__ subq(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
__ lea(rcx, Operand(rcx, rdx, times_2, 0));
} else {
__ addq(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
__ movq(rax, register_location(i));
__ addq(rax, rcx); // Convert to index from start, not end.
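Both hunks fold the start index into the address arithmetic. As a rough sketch of the math (assuming, per the first hunk's comment, that capture registers hold negative byte offsets from the input end, and that code after the truncated hunk presumably divides by the character size):

#include <assert.h>

// Sketch of the capture-index conversion above. char_size is 1 for
// ASCII and 2 for UC16, matching mode_ in the generated code.
static int RegisterToCharIndex(int reg_value,
                               int input_length_in_bytes,
                               int start_index,
                               int char_size) {
  // rcx above: (kInputEnd - kInputStart) + start_index * char_size.
  int base = input_length_in_bytes + start_index * char_size;
  // rax: register value plus base is a byte offset from the start...
  int byte_offset = reg_value + base;
  // ...which dividing by the character size turns into a char index.
  assert(byte_offset % char_size == 0);
  return byte_offset / char_size;
}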

1
deps/v8/src/x64/virtual-frame-x64.h

@ -417,6 +417,7 @@ class VirtualFrame : public ZoneObject {
inline void Nip(int num_dropped);
inline void SetTypeForLocalAt(int index, NumberInfo info);
inline void SetTypeForParamAt(int index, NumberInfo info);
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;

6
deps/v8/test/cctest/SConscript

@ -34,15 +34,21 @@ Import('context object_files')
SOURCES = {
'all': [
'gay-shortest.cc',
'test-accessors.cc',
'test-alloc.cc',
'test-api.cc',
'test-ast.cc',
'test-circular-queue.cc',
'test-compiler.cc',
'test-conversions.cc',
'test-cpu-profiler.cc',
'test-dataflow.cc',
'test-debug.cc',
'test-decls.cc',
'test-diy-fp.cc',
'test-double.cc',
'test-fast-dtoa.cc',
'test-flags.cc',
'test-func-name-inference.cc',
'test-hashmap.cc',

100048
deps/v8/test/cctest/gay-shortest.cc

File diff suppressed because it is too large

44
deps/v8/test/cctest/gay-shortest.h

@ -0,0 +1,44 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef GAY_SHORTEST_H_
#define GAY_SHORTEST_H_
namespace v8 {
namespace internal {
struct GayShortest {
double v;
const char* representation;
int decimal_point;
};
Vector<const GayShortest> PrecomputedShortestRepresentations();
} } // namespace v8::internal
#endif // GAY_SHORTEST_H_

127
deps/v8/test/cctest/test-circular-queue.cc

@ -0,0 +1,127 @@
// Copyright 2010 the V8 project authors. All rights reserved.
//
// Tests of circular queues.
#include "v8.h"
#include "circular-queue-inl.h"
#include "cctest.h"
namespace i = v8::internal;
using i::CircularQueue;
using i::SamplingCircularQueue;
TEST(SingleRecordCircularQueue) {
typedef int Record;
CircularQueue<Record> cq(sizeof(Record) * 2);
CHECK(cq.IsEmpty());
cq.Enqueue(1);
CHECK(!cq.IsEmpty());
Record rec = 0;
cq.Dequeue(&rec);
CHECK_EQ(1, rec);
CHECK(cq.IsEmpty());
}
TEST(MultipleRecordsCircularQueue) {
typedef int Record;
const int kQueueSize = 10;
CircularQueue<Record> cq(sizeof(Record) * (kQueueSize + 1));
CHECK(cq.IsEmpty());
cq.Enqueue(1);
CHECK(!cq.IsEmpty());
for (int i = 2; i <= 5; ++i) {
cq.Enqueue(i);
CHECK(!cq.IsEmpty());
}
Record rec = 0;
for (int i = 1; i <= 4; ++i) {
CHECK(!cq.IsEmpty());
cq.Dequeue(&rec);
CHECK_EQ(i, rec);
}
for (int i = 6; i <= 12; ++i) {
cq.Enqueue(i);
CHECK(!cq.IsEmpty());
}
for (int i = 5; i <= 12; ++i) {
CHECK(!cq.IsEmpty());
cq.Dequeue(&rec);
CHECK_EQ(i, rec);
}
CHECK(cq.IsEmpty());
}
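Note that the queue is constructed with room for kQueueSize + 1 records. A plausible reading (an assumption, not taken from circular-queue.h) is the classic ring-buffer trick of keeping one slot unused so that head == tail unambiguously means empty:

#include <assert.h>

// Minimal ring buffer illustrating the "+ 1" in the constructor call
// above. This is a sketch, not V8's CircularQueue implementation.
template <typename Record, int kCapacity>
class RingBuffer {
 public:
  RingBuffer() : head_(0), tail_(0) {}
  bool IsEmpty() const { return head_ == tail_; }
  void Enqueue(const Record& rec) {
    buffer_[tail_] = rec;
    tail_ = (tail_ + 1) % (kCapacity + 1);  // one slot stays unused
    assert(tail_ != head_);  // the caller must not overflow the queue
  }
  void Dequeue(Record* rec) {
    assert(!IsEmpty());
    *rec = buffer_[head_];
    head_ = (head_ + 1) % (kCapacity + 1);
  }
 private:
  Record buffer_[kCapacity + 1];
  int head_;
  int tail_;
};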
TEST(SamplingCircularQueue) {
typedef SamplingCircularQueue::Cell Record;
const int kRecordsPerChunk = 4;
SamplingCircularQueue scq(sizeof(Record),
kRecordsPerChunk * sizeof(Record),
3);
scq.SetUpProducer();
scq.SetUpConsumer();
// Check that we are using non-reserved values.
CHECK_NE(SamplingCircularQueue::kClear, 1);
CHECK_NE(SamplingCircularQueue::kEnd, 1);
// Fill up the first chunk.
CHECK_EQ(NULL, scq.StartDequeue());
for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.Enqueue());
CHECK_NE(NULL, rec);
*rec = i;
CHECK_EQ(NULL, scq.StartDequeue());
}
// Fill up the second chunk. Consumption must still be unavailable.
CHECK_EQ(NULL, scq.StartDequeue());
for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.Enqueue());
CHECK_NE(NULL, rec);
*rec = i;
CHECK_EQ(NULL, scq.StartDequeue());
}
Record* rec = reinterpret_cast<Record*>(scq.Enqueue());
CHECK_NE(NULL, rec);
*rec = 20;
// Now that we have started filling up the third chunk, consumption
// must become possible.
CHECK_NE(NULL, scq.StartDequeue());
// Consume the first chunk.
for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
CHECK_NE(NULL, rec);
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
scq.FinishDequeue();
CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
}
// Now consumption must not be possible, as the consumer polls
// the first chunk for emptiness.
CHECK_EQ(NULL, scq.StartDequeue());
scq.FlushResidualRecords();
// From now on, the consumer no longer polls ahead of the current chunk,
// so it is possible to consume the second chunk.
CHECK_NE(NULL, scq.StartDequeue());
// Consume the second chunk.
for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) {
Record* rec = reinterpret_cast<Record*>(scq.StartDequeue());
CHECK_NE(NULL, rec);
CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec));
CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
scq.FinishDequeue();
CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue()));
}
// Consumption must still be possible as the first cell of the
// last chunk is not clean.
CHECK_NE(NULL, scq.StartDequeue());
scq.TearDownConsumer();
scq.TearDownProducer();
}

202
deps/v8/test/cctest/test-cpu-profiler.cc

@ -0,0 +1,202 @@
// Copyright 2010 the V8 project authors. All rights reserved.
//
// Tests of the profile generator and utilities.
#include "v8.h"
#include "cpu-profiler-inl.h"
#include "cctest.h"
namespace i = v8::internal;
using i::CodeEntry;
using i::CpuProfilesCollection;
using i::ProfileGenerator;
using i::ProfileNode;
using i::ProfilerEventsProcessor;
TEST(StartStop) {
CpuProfilesCollection profiles;
ProfileGenerator generator(&profiles);
ProfilerEventsProcessor processor(&generator);
processor.Start();
while (!processor.running()) {
i::Thread::YieldCPU();
}
processor.Stop();
processor.Join();
}
static v8::Persistent<v8::Context> env;
static void InitializeVM() {
if (env.IsEmpty()) env = v8::Context::New();
v8::HandleScope scope;
env->Enter();
}
static inline i::Address ToAddress(int n) {
return reinterpret_cast<i::Address>(n);
}
static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
i::Address frame1,
i::Address frame2 = NULL,
i::Address frame3 = NULL) {
i::TickSample* sample = proc->TickSampleEvent();
sample->pc = frame1;
sample->function = frame1;
sample->frames_count = 0;
if (frame2 != NULL) {
sample->stack[0] = frame2;
sample->frames_count = 1;
}
if (frame3 != NULL) {
sample->stack[1] = frame3;
sample->frames_count = 2;
}
}
TEST(CodeEvents) {
InitializeVM();
CpuProfilesCollection profiles;
profiles.AddProfile(0);
ProfileGenerator generator(&profiles);
ProfilerEventsProcessor processor(&generator);
processor.Start();
processor.SetUpSamplesProducer();
while (!processor.running()) {
i::Thread::YieldCPU();
}
// Enqueue code creation events.
i::HandleScope scope;
const char* aaa_str = "aaa";
i::Handle<i::String> aaa_name = i::Factory::NewStringFromAscii(
i::Vector<const char>(aaa_str, strlen(aaa_str)));
processor.CodeCreateEvent(i::Logger::FUNCTION_TAG,
*aaa_name,
i::Heap::empty_string(),
0,
ToAddress(0x1000),
0x100);
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
"bbb",
ToAddress(0x1200),
0x80);
processor.CodeCreateEvent(i::Logger::STUB_TAG, 5, ToAddress(0x1300), 0x10);
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
"ddd",
ToAddress(0x1400),
0x80);
processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500));
processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10);
processor.CodeDeleteEvent(ToAddress(0x1600));
processor.FunctionCreateEvent(ToAddress(0x1700), ToAddress(0x1000));
// Enqueue a tick event to enable code events processing.
EnqueueTickSampleEvent(&processor, ToAddress(0x1000));
processor.Stop();
processor.Join();
// Check the state of profile generator.
CodeEntry* entry1 = generator.code_map()->FindEntry(ToAddress(0x1000));
CHECK_NE(NULL, entry1);
CHECK_EQ(aaa_str, entry1->name());
CodeEntry* entry2 = generator.code_map()->FindEntry(ToAddress(0x1200));
CHECK_NE(NULL, entry2);
CHECK_EQ("bbb", entry2->name());
CodeEntry* entry3 = generator.code_map()->FindEntry(ToAddress(0x1300));
CHECK_NE(NULL, entry3);
CHECK_EQ("args_count: 5", entry3->name());
CHECK_EQ(NULL, generator.code_map()->FindEntry(ToAddress(0x1400)));
CodeEntry* entry4 = generator.code_map()->FindEntry(ToAddress(0x1500));
CHECK_NE(NULL, entry4);
CHECK_EQ("ddd", entry4->name());
CHECK_EQ(NULL, generator.code_map()->FindEntry(ToAddress(0x1600)));
CodeEntry* entry5 = generator.code_map()->FindEntry(ToAddress(0x1700));
CHECK_NE(NULL, entry5);
CHECK_EQ(aaa_str, entry5->name());
processor.TearDownSamplesProducer();
}
template<typename T>
static int CompareProfileNodes(const T* p1, const T* p2) {
return strcmp((*p1)->entry()->name(), (*p2)->entry()->name());
}
TEST(TickEvents) {
CpuProfilesCollection profiles;
profiles.AddProfile(0);
ProfileGenerator generator(&profiles);
ProfilerEventsProcessor processor(&generator);
processor.Start();
processor.SetUpSamplesProducer();
while (!processor.running()) {
i::Thread::YieldCPU();
}
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
"bbb",
ToAddress(0x1200),
0x80);
processor.CodeCreateEvent(i::Logger::STUB_TAG, 5, ToAddress(0x1300), 0x10);
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
"ddd",
ToAddress(0x1400),
0x80);
EnqueueTickSampleEvent(&processor, ToAddress(0x1210));
EnqueueTickSampleEvent(&processor, ToAddress(0x1305), ToAddress(0x1220));
EnqueueTickSampleEvent(&processor,
ToAddress(0x1404),
ToAddress(0x1305),
ToAddress(0x1230));
processor.Stop();
processor.Join();
// Check call trees.
i::List<ProfileNode*> top_down_root_children;
profiles.profile()->top_down()->root()->GetChildren(&top_down_root_children);
CHECK_EQ(1, top_down_root_children.length());
CHECK_EQ("bbb", top_down_root_children.last()->entry()->name());
i::List<ProfileNode*> top_down_bbb_children;
top_down_root_children.last()->GetChildren(&top_down_bbb_children);
CHECK_EQ(1, top_down_bbb_children.length());
CHECK_EQ("args_count: 5", top_down_bbb_children.last()->entry()->name());
i::List<ProfileNode*> top_down_stub_children;
top_down_bbb_children.last()->GetChildren(&top_down_stub_children);
CHECK_EQ(1, top_down_stub_children.length());
CHECK_EQ("ddd", top_down_stub_children.last()->entry()->name());
i::List<ProfileNode*> top_down_ddd_children;
top_down_stub_children.last()->GetChildren(&top_down_ddd_children);
CHECK_EQ(0, top_down_ddd_children.length());
i::List<ProfileNode*> bottom_up_root_children;
profiles.profile()->bottom_up()->root()->GetChildren(
&bottom_up_root_children);
CHECK_EQ(3, bottom_up_root_children.length());
bottom_up_root_children.Sort(&CompareProfileNodes);
CHECK_EQ("args_count: 5", bottom_up_root_children[0]->entry()->name());
CHECK_EQ("bbb", bottom_up_root_children[1]->entry()->name());
CHECK_EQ("ddd", bottom_up_root_children[2]->entry()->name());
i::List<ProfileNode*> bottom_up_stub_children;
bottom_up_root_children[0]->GetChildren(&bottom_up_stub_children);
CHECK_EQ(1, bottom_up_stub_children.length());
CHECK_EQ("bbb", bottom_up_stub_children.last()->entry()->name());
i::List<ProfileNode*> bottom_up_bbb_children;
bottom_up_root_children[1]->GetChildren(&bottom_up_bbb_children);
CHECK_EQ(0, bottom_up_bbb_children.length());
i::List<ProfileNode*> bottom_up_ddd_children;
bottom_up_root_children[2]->GetChildren(&bottom_up_ddd_children);
CHECK_EQ(1, bottom_up_ddd_children.length());
CHECK_EQ("args_count: 5", bottom_up_ddd_children.last()->entry()->name());
i::List<ProfileNode*> bottom_up_ddd_stub_children;
bottom_up_ddd_children.last()->GetChildren(&bottom_up_ddd_stub_children);
CHECK_EQ(1, bottom_up_ddd_stub_children.length());
CHECK_EQ("bbb", bottom_up_ddd_stub_children.last()->entry()->name());
processor.TearDownSamplesProducer();
}

67
deps/v8/test/cctest/test-diy-fp.cc

@ -0,0 +1,67 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "diy-fp.h"
using namespace v8::internal;
TEST(Subtract) {
DiyFp diy_fp1 = DiyFp(3, 0);
DiyFp diy_fp2 = DiyFp(1, 0);
DiyFp diff = DiyFp::Minus(diy_fp1, diy_fp2);
CHECK(2 == diff.f()); // NOLINT
CHECK_EQ(0, diff.e());
diy_fp1.Subtract(diy_fp2);
CHECK(2 == diy_fp1.f()); // NOLINT
CHECK_EQ(0, diy_fp1.e());
}
TEST(Multiply) {
DiyFp diy_fp1 = DiyFp(3, 0);
DiyFp diy_fp2 = DiyFp(2, 0);
DiyFp product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(0 == product.f()); // NOLINT
CHECK_EQ(64, product.e());
diy_fp1.Multiply(diy_fp2);
CHECK(0 == diy_fp1.f()); // NOLINT
CHECK_EQ(64, diy_fp1.e());
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x80000000, 00000000), 11);
diy_fp2 = DiyFp(2, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(1 == product.f()); // NOLINT
CHECK_EQ(11 + 13 + 64, product.e());
// Test rounding.
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x80000000, 00000001), 11);
diy_fp2 = DiyFp(1, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(1 == product.f()); // NOLINT
CHECK_EQ(11 + 13 + 64, product.e());
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x7fffffff, ffffffff), 11);
diy_fp2 = DiyFp(1, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(0 == product.f()); // NOLINT
CHECK_EQ(11 + 13 + 64, product.e());
// Halfway cases are allowed to round either way, so we don't check them.
// Big numbers.
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF), 11);
diy_fp2 = DiyFp(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF), 13);
// 128bit result: 0xfffffffffffffffe0000000000000001
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFe) == product.f());
CHECK_EQ(11 + 13 + 64, product.e());
}
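The expectations above pin down the rounding of a 64x64-bit multiplication that keeps only the upper 64 bits of the 128-bit product. A self-contained sketch with that observable behavior (it matches every check in this test, though not necessarily DiyFp::Times line for line):

#include <stdint.h>

// 64x64-bit multiply keeping the upper 64 bits of the 128-bit product,
// rounding the discarded lower half to nearest (ties round up; the
// test deliberately leaves halfway behavior unchecked). The "+ 64" in
// the expected exponents accounts for the 64 dropped low bits.
static uint64_t MultiplyHigh(uint64_t a, uint64_t b) {
  const uint64_t kM32 = 0xFFFFFFFFu;
  uint64_t a_hi = a >> 32, a_lo = a & kM32;
  uint64_t b_hi = b >> 32, b_lo = b & kM32;
  uint64_t hh = a_hi * b_hi;  // contributes to bits 64..127
  uint64_t hl = a_hi * b_lo;  // contributes to bits 32..95
  uint64_t lh = a_lo * b_hi;  // contributes to bits 32..95
  uint64_t ll = a_lo * b_lo;  // contributes to bits 0..63
  uint64_t mid = (ll >> 32) + (hl & kM32) + (lh & kM32);
  mid += 1u << 31;  // round to nearest
  return hh + (hl >> 32) + (lh >> 32) + (mid >> 32);
}

For example, MultiplyHigh(0x8000000000000001, 1) yields 1, matching the "test rounding" case above.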

204
deps/v8/test/cctest/test-double.cc

@ -0,0 +1,204 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "diy-fp.h"
#include "double.h"
using namespace v8::internal;
TEST(Uint64Conversions) {
// Start by checking the byte-order.
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
CHECK_EQ(3512700564088504e-318, Double(ordered).value());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK_EQ(5e-324, Double(min_double64).value());
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
CHECK_EQ(1.7976931348623157e308, Double(max_double64).value());
}
TEST(AsDiyFp) {
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
DiyFp diy_fp = Double(ordered).AsDiyFp();
CHECK_EQ(0x12 - 0x3FF - 52, diy_fp.e());
// The 52 mantissa bits, plus the implicit 1 in bit 52 as a UINT64.
CHECK(V8_2PART_UINT64_C(0x00134567, 89ABCDEF) == diy_fp.f()); // NOLINT
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
diy_fp = Double(min_double64).AsDiyFp();
CHECK_EQ(-0x3FF - 52 + 1, diy_fp.e());
// This is a denormal, so there is no hidden bit.
CHECK(1 == diy_fp.f()); // NOLINT
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsDiyFp();
CHECK_EQ(0x7FE - 0x3FF - 52, diy_fp.e());
CHECK(V8_2PART_UINT64_C(0x001fffff, ffffffff) == diy_fp.f()); // NOLINT
}
TEST(AsNormalizedDiyFp) {
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
DiyFp diy_fp = Double(ordered).AsNormalizedDiyFp();
CHECK_EQ(0x12 - 0x3FF - 52 - 11, diy_fp.e());
CHECK((V8_2PART_UINT64_C(0x00134567, 89ABCDEF) << 11) ==
diy_fp.f()); // NOLINT
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
diy_fp = Double(min_double64).AsNormalizedDiyFp();
CHECK_EQ(-0x3FF - 52 + 1 - 63, diy_fp.e());
// This is a denormal, so there is no hidden bit.
CHECK(V8_2PART_UINT64_C(0x80000000, 00000000) == diy_fp.f()); // NOLINT
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsNormalizedDiyFp();
CHECK_EQ(0x7FE - 0x3FF - 52 - 11, diy_fp.e());
CHECK((V8_2PART_UINT64_C(0x001fffff, ffffffff) << 11) ==
diy_fp.f()); // NOLINT
}
TEST(IsDenormal) {
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK(Double(min_double64).IsDenormal());
uint64_t bits = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
CHECK(Double(bits).IsDenormal());
bits = V8_2PART_UINT64_C(0x00100000, 00000000);
CHECK(!Double(bits).IsDenormal());
}
TEST(IsSpecial) {
CHECK(Double(V8_INFINITY).IsSpecial());
CHECK(Double(-V8_INFINITY).IsSpecial());
CHECK(Double(OS::nan_value()).IsSpecial());
uint64_t bits = V8_2PART_UINT64_C(0xFFF12345, 00000000);
CHECK(Double(bits).IsSpecial());
// Denormals are not special:
CHECK(!Double(5e-324).IsSpecial());
CHECK(!Double(-5e-324).IsSpecial());
// And some random numbers:
CHECK(!Double(0.0).IsSpecial());
CHECK(!Double(-0.0).IsSpecial());
CHECK(!Double(1.0).IsSpecial());
CHECK(!Double(-1.0).IsSpecial());
CHECK(!Double(1000000.0).IsSpecial());
CHECK(!Double(-1000000.0).IsSpecial());
CHECK(!Double(1e23).IsSpecial());
CHECK(!Double(-1e23).IsSpecial());
CHECK(!Double(1.7976931348623157e308).IsSpecial());
CHECK(!Double(-1.7976931348623157e308).IsSpecial());
}
TEST(IsInfinite) {
CHECK(Double(V8_INFINITY).IsInfinite());
CHECK(Double(-V8_INFINITY).IsInfinite());
CHECK(!Double(OS::nan_value()).IsInfinite());
CHECK(!Double(0.0).IsInfinite());
CHECK(!Double(-0.0).IsInfinite());
CHECK(!Double(1.0).IsInfinite());
CHECK(!Double(-1.0).IsInfinite());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK(!Double(min_double64).IsInfinite());
}
TEST(IsNan) {
CHECK(Double(OS::nan_value()).IsNan());
uint64_t other_nan = V8_2PART_UINT64_C(0xFFFFFFFF, 00000001);
CHECK(Double(other_nan).IsNan());
CHECK(!Double(V8_INFINITY).IsNan());
CHECK(!Double(-V8_INFINITY).IsNan());
CHECK(!Double(0.0).IsNan());
CHECK(!Double(-0.0).IsNan());
CHECK(!Double(1.0).IsNan());
CHECK(!Double(-1.0).IsNan());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK(!Double(min_double64).IsNan());
}
TEST(Sign) {
CHECK_EQ(1, Double(1.0).Sign());
CHECK_EQ(1, Double(V8_INFINITY).Sign());
CHECK_EQ(-1, Double(-V8_INFINITY).Sign());
CHECK_EQ(1, Double(0.0).Sign());
CHECK_EQ(-1, Double(-0.0).Sign());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK_EQ(1, Double(min_double64).Sign());
}
TEST(NormalizedBoundaries) {
DiyFp boundary_plus;
DiyFp boundary_minus;
DiyFp diy_fp = Double(1.5).AsNormalizedDiyFp();
Double(1.5).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// 1.5 does not have a significand of the form 2^p (for some p).
// Therefore its boundaries are at the same distance.
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 10) == diy_fp.f() - boundary_minus.f()); // NOLINT
diy_fp = Double(1.0).AsNormalizedDiyFp();
Double(1.0).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// 1.0 does have a significand of the form 2^p (for some p).
// Therefore its lower boundary is twice as close as the upper boundary.
CHECK_GT(boundary_plus.f() - diy_fp.f(), diy_fp.f() - boundary_minus.f());
CHECK((1 << 9) == diy_fp.f() - boundary_minus.f()); // NOLINT
CHECK((1 << 10) == boundary_plus.f() - diy_fp.f()); // NOLINT
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
diy_fp = Double(min_double64).AsNormalizedDiyFp();
Double(min_double64).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// min-value does not have a significand of the form 2^p (for some p).
// Therefore its boundaries are at the same distance.
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
// Denormals have their boundaries much closer.
CHECK((static_cast<uint64_t>(1) << 62) ==
diy_fp.f() - boundary_minus.f()); // NOLINT
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
diy_fp = Double(smallest_normal64).AsNormalizedDiyFp();
Double(smallest_normal64).NormalizedBoundaries(&boundary_minus,
&boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// Even though the significand is of the form 2^p (for some p), its boundaries
// are at the same distance. (This is the only exception).
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 10) == diy_fp.f() - boundary_minus.f()); // NOLINT
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
diy_fp = Double(largest_denormal64).AsNormalizedDiyFp();
Double(largest_denormal64).NormalizedBoundaries(&boundary_minus,
&boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 11) == diy_fp.f() - boundary_minus.f()); // NOLINT
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsNormalizedDiyFp();
Double(max_double64).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// max-value does not have a significand of the form 2^p (for some p).
// Therefore its boundaries are at the same distance.
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 10) == diy_fp.f() - boundary_minus.f()); // NOLINT
}

116
deps/v8/test/cctest/test-fast-dtoa.cc

@ -0,0 +1,116 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "diy-fp.h"
#include "double.h"
#include "fast-dtoa.h"
#include "gay-shortest.h"
using namespace v8::internal;
static const int kBufferSize = 100;
TEST(FastDtoaVariousDoubles) {
char buffer[kBufferSize];
int sign;
int length;
int point;
int status;
double min_double = 5e-324;
status = FastDtoa(min_double, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("5", buffer);
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
status = FastDtoa(max_double, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("17976931348623157", buffer);
CHECK_EQ(309, point);
status = FastDtoa(4294967272.0, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("4294967272", buffer);
CHECK_EQ(10, point);
status = FastDtoa(4.1855804968213567e298, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("4185580496821357", buffer);
CHECK_EQ(299, point);
status = FastDtoa(5.5626846462680035e-309, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("5562684646268003", buffer);
CHECK_EQ(-308, point);
status = FastDtoa(2147483648.0, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("2147483648", buffer);
CHECK_EQ(10, point);
status = FastDtoa(3.5844466002796428e+298, buffer, &sign, &length, &point);
if (status) { // Not all FastDtoa variants manage to compute this number.
CHECK_EQ("35844466002796428", buffer);
CHECK_EQ(0, sign);
CHECK_EQ(299, point);
}
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
status = FastDtoa(v, buffer, &sign, &length, &point);
if (status) {
CHECK_EQ(0, sign);
CHECK_EQ("22250738585072014", buffer);
CHECK_EQ(-307, point);
}
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
v = Double(largest_denormal64).value();
status = FastDtoa(v, buffer, &sign, &length, &point);
if (status) {
CHECK_EQ(0, sign);
CHECK_EQ("2225073858507201", buffer);
CHECK_EQ(-307, point);
}
}
TEST(FastDtoaGayShortest) {
char buffer[kBufferSize];
bool status;
int sign;
int length;
int point;
int succeeded = 0;
int total = 0;
bool needed_max_length = false;
Vector<const GayShortest> precomputed = PrecomputedShortestRepresentations();
for (int i = 0; i < precomputed.length(); ++i) {
const GayShortest current_test = precomputed[i];
total++;
double v = current_test.v;
status = FastDtoa(v, buffer, &sign, &length, &point);
CHECK_GE(kFastDtoaMaximalLength, length);
if (!status) continue;
if (length == kFastDtoaMaximalLength) needed_max_length = true;
succeeded++;
CHECK_EQ(0, sign); // All precomputed numbers are positive.
CHECK_EQ(current_test.decimal_point, point);
CHECK_EQ(current_test.representation, buffer);
}
CHECK_GT(succeeded*1.0/total, 0.99);
CHECK(needed_max_length);
}
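FastDtoa reports a digit string plus a decimal-point position rather than formatted text. A small illustrative helper (not part of the API under test) shows how a (digits, point) pair maps to the scientific notation in the expectations above, e.g. ("5", -323) for 5e-324:

#include <stdio.h>
#include <string.h>

// Illustrative only: turn FastDtoa's (digits, decimal_point) output
// into scientific notation. ("5", -323) -> "5e-324";
// ("17976931348623157", 309) -> "1.7976931348623157e308".
static void PrintShortest(const char* digits, int point) {
  int exponent = point - 1;  // one digit goes before the decimal dot
  if (strlen(digits) == 1) {
    printf("%se%d\n", digits, exponent);
  } else {
    printf("%c.%se%d\n", digits[0], digits + 1, exponent);
  }
}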

97
deps/v8/test/cctest/test-profile-generator.cc

@ -10,25 +10,27 @@ namespace i = v8::internal;
using i::CodeEntry;
using i::CodeMap;
using i::CpuProfilesCollection;
using i::ProfileNode;
using i::ProfileTree;
using i::StaticNameCodeEntry;
using i::ProfileGenerator;
using i::TickSample;
using i::Vector;
TEST(ProfileNodeFindOrAddChild) {
ProfileNode node(NULL);
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa", "", 0);
ProfileNode* childNode1 = node.FindOrAddChild(&entry1);
CHECK_NE(NULL, childNode1);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb", "", 0);
ProfileNode* childNode2 = node.FindOrAddChild(&entry2);
CHECK_NE(NULL, childNode2);
CHECK_NE(childNode1, childNode2);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
CHECK_EQ(childNode2, node.FindOrAddChild(&entry2));
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc", "", 0);
ProfileNode* childNode3 = node.FindOrAddChild(&entry3);
CHECK_NE(NULL, childNode3);
CHECK_NE(childNode1, childNode3);
@ -69,9 +71,9 @@ class ProfileTreeTestHelper {
} // namespace
TEST(ProfileTreeAddPathFromStart) {
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa", "", 0);
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb", "", 0);
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc", "", 0);
ProfileTree tree;
ProfileTreeTestHelper helper(&tree);
CHECK_EQ(NULL, helper.Walk(&entry1));
@ -136,9 +138,9 @@ TEST(ProfileTreeAddPathFromStart) {
TEST(ProfileTreeAddPathFromEnd) {
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa", "", 0);
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb", "", 0);
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc", "", 0);
ProfileTree tree;
ProfileTreeTestHelper helper(&tree);
CHECK_EQ(NULL, helper.Walk(&entry1));
@ -216,8 +218,8 @@ TEST(ProfileTreeCalculateTotalTicks) {
CHECK_EQ(1, empty_tree.root()->total_ticks());
CHECK_EQ(1, empty_tree.root()->self_ticks());
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa", "", 0);
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb", "", 0);
CodeEntry* e1_path[] = {&entry1};
Vector<CodeEntry*> e1_path_vec(
e1_path, sizeof(e1_path) / sizeof(e1_path[0]));
@ -255,7 +257,7 @@ TEST(ProfileTreeCalculateTotalTicks) {
CodeEntry* e2_path[] = {&entry2};
Vector<CodeEntry*> e2_path_vec(
e2_path, sizeof(e2_path) / sizeof(e2_path[0]));
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc", "", 0);
CodeEntry* e3_path[] = {&entry3};
Vector<CodeEntry*> e3_path_vec(
e3_path, sizeof(e3_path) / sizeof(e3_path[0]));
@ -316,10 +318,10 @@ static inline i::Address ToAddress(int n) {
TEST(CodeMapAddCode) {
CodeMap code_map;
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
StaticNameCodeEntry entry4(i::Logger::FUNCTION_TAG, "ddd");
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa", "", 0);
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb", "", 0);
CodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc", "", 0);
CodeEntry entry4(i::Logger::FUNCTION_TAG, "ddd", "", 0);
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
code_map.AddCode(ToAddress(0x1900), &entry3, 0x50);
@ -346,8 +348,8 @@ TEST(CodeMapAddCode) {
TEST(CodeMapMoveAndDeleteCode) {
CodeMap code_map;
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa", "", 0);
CodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb", "", 0);
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
@ -360,3 +362,60 @@ TEST(CodeMapMoveAndDeleteCode) {
CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1700)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1800)));
}
TEST(RecordTickSample) {
CpuProfilesCollection profiles;
profiles.AddProfile(0);
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = generator.NewCodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = generator.NewCodeEntry(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry* entry3 = generator.NewCodeEntry(i::Logger::FUNCTION_TAG, "ccc");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
generator.code_map()->AddCode(ToAddress(0x1700), entry2, 0x100);
generator.code_map()->AddCode(ToAddress(0x1900), entry3, 0x50);
ProfileTreeTestHelper top_down_test_helper(profiles.profile()->top_down());
CHECK_EQ(NULL, top_down_test_helper.Walk(entry1));
CHECK_EQ(NULL, top_down_test_helper.Walk(entry2));
CHECK_EQ(NULL, top_down_test_helper.Walk(entry3));
// We are building the following call tree:
// -> aaa - sample1
// aaa -> bbb -> ccc - sample2
// -> ccc -> aaa - sample3
TickSample sample1;
sample1.pc = ToAddress(0x1600);
sample1.function = ToAddress(0x1500);
sample1.stack[0] = ToAddress(0x1510);
sample1.frames_count = 1;
generator.RecordTickSample(sample1);
TickSample sample2;
sample2.pc = ToAddress(0x1925);
sample2.function = ToAddress(0x1900);
sample2.stack[0] = ToAddress(0x1780);
sample2.stack[1] = ToAddress(0x10000); // non-existent.
sample2.stack[2] = ToAddress(0x1620);
sample2.frames_count = 3;
generator.RecordTickSample(sample2);
TickSample sample3;
sample3.pc = ToAddress(0x1510);
sample3.function = ToAddress(0x1500);
sample3.stack[0] = ToAddress(0x1910);
sample3.stack[1] = ToAddress(0x1610);
sample3.frames_count = 2;
generator.RecordTickSample(sample3);
ProfileNode* node1 = top_down_test_helper.Walk(entry1);
CHECK_NE(NULL, node1);
CHECK_EQ(entry1, node1->entry());
ProfileNode* node2 = top_down_test_helper.Walk(entry1, entry1);
CHECK_NE(NULL, node2);
CHECK_EQ(entry1, node2->entry());
ProfileNode* node3 = top_down_test_helper.Walk(entry1, entry2, entry3);
CHECK_NE(NULL, node3);
CHECK_EQ(entry3, node3->entry());
ProfileNode* node4 = top_down_test_helper.Walk(entry1, entry3, entry1);
CHECK_NE(NULL, node4);
CHECK_EQ(entry1, node4->entry());
}

37
deps/v8/test/mjsunit/compiler/loopcount.js

@ -53,3 +53,40 @@ function f5() {
return i;
}
assertEquals(-0x40000001, f5());
function f6() { var x = 0x3fffffff; x++; return x+1; }
assertEquals(0x40000001, f6());
function f7() {
var i;
for (i = 0x3ffffffd; i <= 0x3ffffffe; i++) {}
i++; i = i + 1;
return i;
}
assertEquals(0x40000001, f7());
function f8() {
var i;
for (i = 0x3ffffffd; i <= 0x3fffffff; i++) {}
i++; i++;
return i;
}
assertEquals(0x40000002, f8());
function f9() {
var i;
for (i = 0; i < 42; i++) {
return 42;
}
}
assertEquals(42, f9());
function f10(x) {
for (x = 0; x < 4; x++) {}
}
f10(42);
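The constant 0x3fffffff in these tests is the largest 32-bit smi (small integer): smis are 31-bit signed values stored shifted left by one tag bit, so loop counters crossing 2^30 - 1 force a transition to heap numbers. A sketch of the tagging scheme as assumed here (the authoritative encoding lives in objects.h):

#include <assert.h>
#include <stdint.h>

// Assumed 32-bit smi encoding: a 31-bit signed value shifted left by
// one, with tag bit 0 == 0 marking a smi.
static const int32_t kSmiMaxValue = 0x3fffffff;   // 2^30 - 1
static const int32_t kSmiMinValue = -0x40000000;  // -2^30

static bool IsValidSmi(int32_t value) {
  return value >= kSmiMinValue && value <= kSmiMaxValue;
}
static int32_t SmiTag(int32_t value) {
  assert(IsValidSmi(value));
  return value * 2;  // equivalent to << 1
}
static int32_t SmiUntag(int32_t tagged) {
  return tagged / 2;  // exact: tagged smis are always even
}

f5 returning -0x40000001 and f6 returning 0x40000001 sit just outside this range, which is exactly what the loop-count code paths must handle.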

49
deps/v8/test/mjsunit/math-round.js

@ -50,3 +50,52 @@ assertEquals(-9007199254740990, Math.round(-9007199254740990));
assertEquals(-9007199254740991, Math.round(-9007199254740991));
assertEquals(Number.MAX_VALUE, Math.round(Number.MAX_VALUE));
assertEquals(-Number.MAX_VALUE, Math.round(-Number.MAX_VALUE));
assertEquals(536870911, Math.round(536870910.5));
assertEquals(536870911, Math.round(536870911));
assertEquals(536870911, Math.round(536870911.4));
assertEquals(536870912, Math.round(536870911.5));
assertEquals(536870912, Math.round(536870912));
assertEquals(536870912, Math.round(536870912.4));
assertEquals(536870913, Math.round(536870912.5));
assertEquals(536870913, Math.round(536870913));
assertEquals(536870913, Math.round(536870913.4));
assertEquals(1073741823, Math.round(1073741822.5));
assertEquals(1073741823, Math.round(1073741823));
assertEquals(1073741823, Math.round(1073741823.4));
assertEquals(1073741824, Math.round(1073741823.5));
assertEquals(1073741824, Math.round(1073741824));
assertEquals(1073741824, Math.round(1073741824.4));
assertEquals(1073741825, Math.round(1073741824.5));
assertEquals(2147483647, Math.round(2147483646.5));
assertEquals(2147483647, Math.round(2147483647));
assertEquals(2147483647, Math.round(2147483647.4));
assertEquals(2147483648, Math.round(2147483647.5));
assertEquals(2147483648, Math.round(2147483648));
assertEquals(2147483648, Math.round(2147483648.4));
assertEquals(2147483649, Math.round(2147483648.5));
// Tests based on WebKit LayoutTests
assertEquals(0, Math.round(0.4));
assertEquals(-0, Math.round(-0.4));
assertEquals(-0, Math.round(-0.5));
assertEquals(1, Math.round(0.6));
assertEquals(-1, Math.round(-0.6));
assertEquals(2, Math.round(1.5));
assertEquals(2, Math.round(1.6));
assertEquals(-2, Math.round(-1.6));
assertEquals(8640000000000000, Math.round(8640000000000000));
assertEquals(8640000000000001, Math.round(8640000000000001));
assertEquals(8640000000000002, Math.round(8640000000000002));
assertEquals(9007199254740990, Math.round(9007199254740990));
assertEquals(9007199254740991, Math.round(9007199254740991));
assertEquals(1.7976931348623157e+308, Math.round(1.7976931348623157e+308));
assertEquals(-8640000000000000, Math.round(-8640000000000000));
assertEquals(-8640000000000001, Math.round(-8640000000000001));
assertEquals(-8640000000000002, Math.round(-8640000000000002));
assertEquals(-9007199254740990, Math.round(-9007199254740990));
assertEquals(-9007199254740991, Math.round(-9007199254740991));
assertEquals(-1.7976931348623157e+308, Math.round(-1.7976931348623157e+308));
assertEquals(Infinity, Math.round(Infinity));
assertEquals(-Infinity, Math.round(-Infinity));

33
deps/v8/test/mjsunit/regress/regress-646.js

@ -0,0 +1,33 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Regression test for http://code.google.com/p/v8/issues/detail?id=646.
function f() { this.__proto__ = 42 }
var count = 0;
for (var x in new f()) count++;
assertEquals(0, count);

286
deps/v8/tools/generate-ten-powers.scm

@ -0,0 +1,286 @@
;; Copyright 2010 the V8 project authors. All rights reserved.
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are
;; met:
;;
;; * Redistributions of source code must retain the above copyright
;; notice, this list of conditions and the following disclaimer.
;; * Redistributions in binary form must reproduce the above
;; copyright notice, this list of conditions and the following
;; disclaimer in the documentation and/or other materials provided
;; with the distribution.
;; * Neither the name of Google Inc. nor the names of its
;; contributors may be used to endorse or promote products derived
;; from this software without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;; This is a Scheme script for the Bigloo compiler. Bigloo must be compiled with
;; support for bignums. The compilation of the script can be done as follows:
;; bigloo -static-bigloo -o generate-ten-powers generate-ten-powers.scm
;;
;; Generate approximations of 10^k.
(module gen-ten-powers
(static (class Cached-Fast
v::bignum
e::bint
exact?::bool))
(main my-main))
;;----------------bignum shifts -----------------------------------------------
(define (bit-lshbx::bignum x::bignum by::bint)
(if (<fx by 0)
#z0
(*bx x (exptbx #z2 (fixnum->bignum by)))))
(define (bit-rshbx::bignum x::bignum by::bint)
(if (<fx by 0)
#z0
(/bx x (exptbx #z2 (fixnum->bignum by)))))
;;----------------the actual power generation -------------------------------
;; e should be seen as an estimate. It might be too small.
(define (round-n-cut n e nb-bits)
(define max-container (- (bit-lshbx #z1 nb-bits) 1))
(define (round n)
(case *round*
((down) n)
((up)
(+bx n
;; with the -1 it will only round up if the cut-off part is
;; non-zero.
(-bx (bit-lshbx #z1
(-fx (+fx e nb-bits) 1))
#z1)))
((round)
(+bx n
(bit-lshbx #z1
(-fx (+fx e nb-bits) 2))))))
(let* ((shift (-fx (+fx e nb-bits) 1))
(cut (bit-rshbx (round n) shift))
(exact? (=bx n (bit-lshbx cut shift))))
(if (<=bx cut max-container)
(values cut e exact?)
(round-n-cut n (+fx e 1) nb-bits))))
(define (rounded-/bx x y)
(case *round*
((down) (/bx x y))
((up) (+bx (/bx x y) #z1))
((round) (let ((tmp (/bx (*bx #z2 x) y)))
(if (zerobx? (remainderbx tmp #z2))
(/bx tmp #z2)
(+bx (/bx tmp #z2) #z1))))))
(define (generate-powers from to mantissa-size)
(let* ((nb-bits mantissa-size)
(offset (- from))
(nb-elements (+ (- from) to 1))
(vec (make-vector nb-elements))
(max-container (- (bit-lshbx #z1 nb-bits) 1)))
;; the negative ones. 10^-1, 10^-2, etc.
;; We already know that we can't be exact, so exact? will always be #f.
;; Basically we will have a ten^i that we multiply by 10 at each iteration.
;; We want to create the mantissa of 1/ten^i. However the mantissa must be
;; normalized (start with a 1). -> we have to shift the number.
;; We shift by multiplying with two^e. -> We encode two^e*(1/ten^i) ==
;; two^e/ten^i.
(let loop ((i 1)
(ten^i #z10)
(two^e #z1)
(e 0))
(unless (< (- i) from)
(if (>bx (/bx (*bx #z2 two^e) ten^i) max-container)
;; another shift would make the number too big. We are
;; hence normalized now.
(begin
(vector-set! vec (-fx offset i)
(instantiate::Cached-Fast
(v (rounded-/bx two^e ten^i))
(e (negfx e))
(exact? #f)))
(loop (+fx i 1) (*bx ten^i #z10) two^e e))
(loop i ten^i (bit-lshbx two^e 1) (+fx e 1)))))
;; the positive ones 10^0, 10^1, etc.
;; start with 1.0. mantissa: 10...0 (1 followed by nb-bits-1 bits)
;; -> e = -(nb-bits-1)
;; exact? is true when the container can still hold the complete 10^i
(let loop ((i 0)
(n (bit-lshbx #z1 (-fx nb-bits 1)))
(e (-fx 1 nb-bits)))
(when (<= i to)
(receive (cut e exact?)
(round-n-cut n e nb-bits)
(vector-set! vec (+fx i offset)
(instantiate::Cached-Fast
(v cut)
(e e)
(exact? exact?)))
(loop (+fx i 1) (*bx n #z10) e))))
vec))
(define (print-c powers from to struct-type
cache-name max-distance-name offset-name macro64)
(define (display-power power k)
(with-access::Cached-Fast power (v e exact?)
(let ((tmp-p (open-output-string)))
;; really hackish way of getting the digits
(display (format "~x" v) tmp-p)
(let ((str (close-output-port tmp-p)))
(printf " {~a(0x~a, ~a), ~a, ~a},\n"
macro64
(substring str 0 8)
(substring str 8 16)
e
k)))))
(define (print-powers-reduced n)
(print "static const " struct-type " " cache-name
"(" n ")"
"[] = {")
(let loop ((i 0)
(nb-elements 0)
(last-e 0)
(max-distance 0))
(cond
((>= i (vector-length powers))
(print " };")
(print "static const int " max-distance-name "(" n ") = "
max-distance ";")
(print "// nb elements (" n "): " nb-elements))
(else
(let* ((power (vector-ref powers i))
(e (Cached-Fast-e power)))
(display-power power (+ i from))
(loop (+ i n)
(+ nb-elements 1)
e
(cond
((=fx i 0) max-distance)
((> (- e last-e) max-distance) (- e last-e))
(else max-distance))))))))
(print "// Copyright 2010 the V8 project authors. All rights reserved.")
(print "// ------------ GENERATED FILE ----------------")
(print "// command used:")
(print "// "
(apply string-append (map (lambda (str)
(string-append " " str))
*main-args*))
" // NOLINT")
(print)
(print
"// This file is intended to be included inside another .h or .cc files\n"
"// with the following defines set:\n"
"// GRISU_CACHE_STRUCT: should expand to the name of a struct that will\n"
"// hold the cached powers of ten. Each entry will hold a 64-bit\n"
"// significand, a 16-bit signed binary exponent, and a 16-bit\n"
"// signed decimal exponent. Each entry will be constructed as follows:\n"
"// { significand, binary_exponent, decimal_exponent }.\n"
"// GRISU_CACHE_NAME(i): generates the name for the different caches.\n"
"// The parameter i will be a number in the range 1-20. A cache will\n"
"// hold every i'th element of a full cache. GRISU_CACHE_NAME(1) will\n"
"// thus hold all elements. The higher i the fewer elements it has.\n"
"// Ideally the user should only reference one cache and let the\n"
"// compiler remove the unused ones.\n"
"// GRISU_CACHE_MAX_DISTANCE(i): generates the name for the maximum\n"
"// binary exponent distance between all elements of a given cache.\n"
"// GRISU_CACHE_OFFSET: is used as variable name for the decimal\n"
"// exponent offset. It is equal to -cache[0].decimal_exponent.\n"
"// GRISU_UINT64_C: used to construct 64-bit values in a platform\n"
"// independent way. In order to encode 0x123456789ABCDEF0 the macro\n"
"// will be invoked as follows: GRISU_UINT64_C(0x12345678,9ABCDEF0).\n")
(print)
(print-powers-reduced 1)
(print-powers-reduced 2)
(print-powers-reduced 3)
(print-powers-reduced 4)
(print-powers-reduced 5)
(print-powers-reduced 6)
(print-powers-reduced 7)
(print-powers-reduced 8)
(print-powers-reduced 9)
(print-powers-reduced 10)
(print-powers-reduced 11)
(print-powers-reduced 12)
(print-powers-reduced 13)
(print-powers-reduced 14)
(print-powers-reduced 15)
(print-powers-reduced 16)
(print-powers-reduced 17)
(print-powers-reduced 18)
(print-powers-reduced 19)
(print-powers-reduced 20)
(print "static const int GRISU_CACHE_OFFSET = " (- from) ";"))
;;----------------main --------------------------------------------------------
(define *main-args* #f)
(define *mantissa-size* #f)
(define *dest* #f)
(define *round* #f)
(define *from* #f)
(define *to* #f)
(define (my-main args)
(set! *main-args* args)
(args-parse (cdr args)
(section "Help")
(("?") (args-parse-usage #f))
((("-h" "--help") (help "?, -h, --help" "This help message"))
(args-parse-usage #f))
(section "Misc")
(("-o" ?file (help "The output file"))
(set! *dest* file))
(("--mantissa-size" ?size (help "Container-size in bits"))
(set! *mantissa-size* (string->number size)))
(("--round" ?direction (help "Round bignums (down, round or up)"))
(set! *round* (string->symbol direction)))
(("--from" ?from (help "start at 10^from"))
(set! *from* (string->number from)))
(("--to" ?to (help "go up to 10^to"))
(set! *to* (string->number to)))
(else
(print "Illegal argument `" else "'. Usage:")
(args-parse-usage #f)))
(when (not *from*)
(error "generate-ten-powers"
"Missing from"
#f))
(when (not *to*)
(error "generate-ten-powers"
"Missing to"
#f))
(when (not *mantissa-size*)
(error "generate-ten-powers"
"Missing mantissa size"
#f))
(when (not (memv *round* '(up down round)))
(error "generate-ten-powers"
"Missing round-method"
*round*))
(let ((dividers (generate-powers *from* *to* *mantissa-size*))
(p (if (not *dest*)
(current-output-port)
(open-output-file *dest*))))
(unwind-protect
(with-output-to-port p
(lambda ()
(print-c dividers *from* *to*
"GRISU_CACHE_STRUCT" "GRISU_CACHE_NAME"
"GRISU_CACHE_MAX_DISTANCE" "GRISU_CACHE_OFFSET"
"GRISU_UINT64_C"
)))
(if *dest*
(close-output-port p)))))
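The generated header is configured entirely by its includer through the GRISU_* macros documented in the print-c block above. A hypothetical consumer (every name below is illustrative) would define them roughly like this; note the token paste needed because the script emits the low 32 bits without a 0x prefix:

#include <stdint.h>

// Hypothetical consumer of the generated table; the macro contract is
// the one documented in the generated header, the names are made up.
struct CachedPower {
  uint64_t significand;
  int16_t binary_exponent;
  int16_t decimal_exponent;
};

#define GRISU_CACHE_STRUCT CachedPower
#define GRISU_CACHE_NAME(i) kCachedPowers##i
#define GRISU_CACHE_MAX_DISTANCE(i) kMaxDistance##i
#define GRISU_CACHE_OFFSET kCachedPowersOffset
// The low half arrives without a 0x prefix, hence the token paste.
#define GRISU_UINT64_C(high, low) \
  ((static_cast<uint64_t>(high) << 32) | static_cast<uint64_t>(0x##low))
// #include "powers-ten.h"  // would pull in the generated arrays

A generated row such as {GRISU_UINT64_C(0x12345678, 9ABCDEF0), -11, 3} then expands to a plain aggregate initializer for CachedPower.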

11
deps/v8/tools/gyp/v8.gyp

@ -229,10 +229,12 @@
'../../src/builtins.cc',
'../../src/builtins.h',
'../../src/bytecodes-irregexp.h',
'../../src/cached-powers.h',
'../../src/char-predicates-inl.h',
'../../src/char-predicates.h',
'../../src/checks.cc',
'../../src/checks.h',
'../../src/circular-queue.cc',
'../../src/code-stubs.cc',
'../../src/code-stubs.h',
'../../src/code.h',
@ -251,6 +253,9 @@
'../../src/counters.cc',
'../../src/counters.h',
'../../src/cpu.h',
'../../src/cpu-profiler-inl.h',
'../../src/cpu-profiler.cc',
'../../src/cpu-profiler.h',
'../../src/data-flow.cc',
'../../src/data-flow.h',
'../../src/dateparser.cc',
@ -264,11 +269,16 @@
'../../src/disassembler.cc',
'../../src/disassembler.h',
'../../src/dtoa-config.c',
'../../src/diy-fp.cc',
'../../src/diy-fp.h',
'../../src/double.h',
'../../src/execution.cc',
'../../src/execution.h',
'../../src/factory.cc',
'../../src/factory.h',
'../../src/fast-codegen.h',
'../../src/fast-dtoa.cc',
'../../src/fast-dtoa.h',
'../../src/flag-definitions.h',
'../../src/flags.cc',
'../../src/flags.h',
@ -330,6 +340,7 @@
'../../src/parser.cc',
'../../src/parser.h',
'../../src/platform.h',
'../../src/powers-ten.h',
'../../src/prettyprinter.cc',
'../../src/prettyprinter.h',
'../../src/property.cc',

20
deps/v8/tools/v8.xcodeproj/project.pbxproj

@ -210,6 +210,10 @@
89FB0E3A0F8E533F00B04B3C /* d8-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89FB0E360F8E531900B04B3C /* d8-posix.cc */; };
9F11D9A0105AF0A300EBE5B2 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
9F11D9A1105AF0A300EBE5B2 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
9F2B3711114FF62D007CDAF4 /* circular-queue.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B370F114FF62D007CDAF4 /* circular-queue.cc */; };
9F2B3712114FF62D007CDAF4 /* circular-queue.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B370F114FF62D007CDAF4 /* circular-queue.cc */; };
9F2B37261152CEA0007CDAF4 /* cpu-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B37241152CEA0007CDAF4 /* cpu-profiler.cc */; };
9F2B37271152CEA0007CDAF4 /* cpu-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F2B37241152CEA0007CDAF4 /* cpu-profiler.cc */; };
9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
9F73E3B1114E61A100F84A5A /* profile-generator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F73E3AF114E61A100F84A5A /* profile-generator.cc */; };
@@ -552,6 +556,12 @@
89FB0E370F8E531900B04B3C /* d8-windows.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-windows.cc"; path = "../src/d8-windows.cc"; sourceTree = "<group>"; };
9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "heap-profiler.cc"; sourceTree = "<group>"; };
9F11D99F105AF0A300EBE5B2 /* heap-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "heap-profiler.h"; sourceTree = "<group>"; };
9F2B370E114FF62D007CDAF4 /* circular-queue-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "circular-queue-inl.h"; sourceTree = "<group>"; };
9F2B370F114FF62D007CDAF4 /* circular-queue.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "circular-queue.cc"; sourceTree = "<group>"; };
9F2B3710114FF62D007CDAF4 /* circular-queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "circular-queue.h"; sourceTree = "<group>"; };
9F2B37231152CEA0007CDAF4 /* cpu-profiler-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "cpu-profiler-inl.h"; sourceTree = "<group>"; };
9F2B37241152CEA0007CDAF4 /* cpu-profiler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "cpu-profiler.cc"; sourceTree = "<group>"; };
9F2B37251152CEA0007CDAF4 /* cpu-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "cpu-profiler.h"; sourceTree = "<group>"; };
9F4B7B870FCC877A00DC4117 /* log-utils.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "log-utils.cc"; sourceTree = "<group>"; };
9F4B7B880FCC877A00DC4117 /* log-utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-utils.h"; sourceTree = "<group>"; };
9F73E3AE114E61A100F84A5A /* profile-generator-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "profile-generator-inl.h"; sourceTree = "<group>"; };
@@ -681,6 +691,9 @@
897FF10E0E719B8F00D62E90 /* char-predicates.h */,
897FF10F0E719B8F00D62E90 /* checks.cc */,
897FF1100E719B8F00D62E90 /* checks.h */,
9F2B370E114FF62D007CDAF4 /* circular-queue-inl.h */,
9F2B370F114FF62D007CDAF4 /* circular-queue.cc */,
9F2B3710114FF62D007CDAF4 /* circular-queue.h */,
897FF1110E719B8F00D62E90 /* code-stubs.cc */,
897FF1120E719B8F00D62E90 /* code-stubs.h */,
897FF1130E719B8F00D62E90 /* code.h */,
@@ -709,6 +722,9 @@
897FF1230E719B8F00D62E90 /* cpu-arm.cc */,
897FF1240E719B8F00D62E90 /* cpu-ia32.cc */,
897FF1250E719B8F00D62E90 /* cpu.h */,
9F2B37231152CEA0007CDAF4 /* cpu-profiler-inl.h */,
9F2B37241152CEA0007CDAF4 /* cpu-profiler.cc */,
9F2B37251152CEA0007CDAF4 /* cpu-profiler.h */,
893A722A0F7B4A3200303DD2 /* dateparser-inl.h */,
897FF1260E719B8F00D62E90 /* dateparser.cc */,
897FF1270E719B8F00D62E90 /* dateparser.h */,
@@ -1248,6 +1264,8 @@
9FBE03DE10BD409900F8BFBA /* fast-codegen.cc in Sources */,
9FBE03E210BD40EA00F8BFBA /* fast-codegen-ia32.cc in Sources */,
9F73E3B2114E61A100F84A5A /* profile-generator.cc in Sources */,
9F2B3712114FF62D007CDAF4 /* circular-queue.cc in Sources */,
9F2B37271152CEA0007CDAF4 /* cpu-profiler.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -1358,6 +1376,8 @@
9FBE03DF10BD409900F8BFBA /* fast-codegen.cc in Sources */,
9FBE03E510BD412600F8BFBA /* fast-codegen-arm.cc in Sources */,
9F73E3B1114E61A100F84A5A /* profile-generator.cc in Sources */,
9F2B3711114FF62D007CDAF4 /* circular-queue.cc in Sources */,
9F2B37261152CEA0007CDAF4 /* cpu-profiler.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};

24
deps/v8/tools/visual_studio/v8_base.vcproj

@@ -252,6 +252,18 @@
RelativePath="..\..\src\checks.h"
>
</File>
<File
RelativePath="..\..\src\circular-queue-inl.h"
>
</File>
<File
RelativePath="..\..\src\circular-queue.cc"
>
</File>
<File
RelativePath="..\..\src\circular-queue.h"
>
</File>
<File
RelativePath="..\..\src\code-stubs.cc"
>
@@ -336,6 +348,18 @@
RelativePath="..\..\src\cpu.h"
>
</File>
<File
RelativePath="..\..\src\cpu-profiler.cc"
>
</File>
<File
RelativePath="..\..\src\cpu-profiler.h"
>
</File>
<File
RelativePath="..\..\src\cpu-profiler-inl.h"
>
</File>
<File
RelativePath="..\..\src\data-flow.cc"
>

24
deps/v8/tools/visual_studio/v8_base_arm.vcproj

@@ -252,6 +252,18 @@
RelativePath="..\..\src\checks.h"
>
</File>
<File
RelativePath="..\..\src\circular-queue-inl.h"
>
</File>
<File
RelativePath="..\..\src\circular-queue.cc"
>
</File>
<File
RelativePath="..\..\src\circular-queue.h"
>
</File>
<File
RelativePath="..\..\src\code-stubs.cc"
>
@@ -344,6 +356,18 @@
RelativePath="..\..\src\cpu.h"
>
</File>
<File
RelativePath="..\..\src\cpu-profiler.cc"
>
</File>
<File
RelativePath="..\..\src\cpu-profiler.h"
>
</File>
<File
RelativePath="..\..\src\cpu-profiler-inl.h"
>
</File>
<File
RelativePath="..\..\src\data-flow.cc"
>

24
deps/v8/tools/visual_studio/v8_base_x64.vcproj

@@ -252,6 +252,18 @@
RelativePath="..\..\src\checks.h"
>
</File>
<File
RelativePath="..\..\src\circular-queue-inl.h"
>
</File>
<File
RelativePath="..\..\src\circular-queue.cc"
>
</File>
<File
RelativePath="..\..\src\circular-queue.h"
>
</File>
<File
RelativePath="..\..\src\code-stubs.cc"
>
@@ -336,6 +348,18 @@
RelativePath="..\..\src\cpu.h"
>
</File>
<File
RelativePath="..\..\src\cpu-profiler.cc"
>
</File>
<File
RelativePath="..\..\src\cpu-profiler.h"
>
</File>
<File
RelativePath="..\..\src\cpu-profiler-inl.h"
>
</File>
<File
RelativePath="..\..\src\data-flow.cc"
>

12
deps/v8/tools/visual_studio/v8_cctest.vcproj

@@ -155,6 +155,10 @@
RelativePath="..\..\test\cctest\test-ast.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-circular-queue.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-compiler.cc"
>
@@ -163,6 +167,10 @@
RelativePath="..\..\test\cctest\test-conversions.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-cpu-profiler.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-debug.cc"
>
@@ -219,6 +227,10 @@
RelativePath="..\..\test\cctest\test-platform-win32.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-profile-generator.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-serialize.cc"
>

12
deps/v8/tools/visual_studio/v8_cctest_arm.vcproj

@@ -155,6 +155,10 @@
RelativePath="..\..\test\cctest\test-ast.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-circular-queue.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-compiler.cc"
>
@@ -163,6 +167,10 @@
RelativePath="..\..\test\cctest\test-conversions.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-cpu-profiler.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-debug.cc"
>
@@ -211,6 +219,10 @@
RelativePath="..\..\test\cctest\test-platform-win32.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-profile-generator.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-serialize.cc"
>

12
deps/v8/tools/visual_studio/v8_cctest_x64.vcproj

@@ -155,6 +155,10 @@
RelativePath="..\..\test\cctest\test-ast.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-circular-queue.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-compiler.cc"
>
@@ -163,6 +167,10 @@
RelativePath="..\..\test\cctest\test-conversions.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-cpu-profiler.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-debug.cc"
>
@@ -215,6 +223,10 @@
RelativePath="..\..\test\cctest\test-platform-win32.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-profile-generator.cc"
>
</File>
<File
RelativePath="..\..\test\cctest\test-serialize.cc"
>
