Browse Source

Merge branch 'master' into net2

v0.7.4-release
Ryan Dahl 15 years ago
parent
commit
b865f9e9c8
  1. 6
      deps/v8/ChangeLog
  2. 2
      deps/v8/src/SConscript
  3. 10
      deps/v8/src/api.cc
  4. 95
      deps/v8/src/arm/codegen-arm.cc
  5. 33
      deps/v8/src/arm/codegen-arm.h
  6. 2
      deps/v8/src/array.js
  7. 6
      deps/v8/src/ast.cc
  8. 27
      deps/v8/src/ast.h
  9. 11
      deps/v8/src/bootstrapper.cc
  10. 62
      deps/v8/src/builtins.cc
  11. 1
      deps/v8/src/builtins.h
  12. 119
      deps/v8/src/cached_powers.h
  13. 22
      deps/v8/src/checks.h
  14. 6
      deps/v8/src/compiler.cc
  15. 3
      deps/v8/src/compiler.h
  16. 11
      deps/v8/src/conversions-inl.h
  17. 16
      deps/v8/src/conversions.cc
  18. 205
      deps/v8/src/data-flow.cc
  19. 6
      deps/v8/src/data-flow.h
  20. 39
      deps/v8/src/date-delay.js
  21. 10
      deps/v8/src/debug.cc
  22. 136
      deps/v8/src/diy_fp.h
  23. 169
      deps/v8/src/double.h
  24. 5
      deps/v8/src/factory.cc
  25. 2
      deps/v8/src/factory.h
  26. 2
      deps/v8/src/flag-definitions.h
  27. 15
      deps/v8/src/frame-element.h
  28. 9
      deps/v8/src/globals.h
  29. 494
      deps/v8/src/grisu3.cc
  30. 2
      deps/v8/src/handles.cc
  31. 22
      deps/v8/src/heap-inl.h
  32. 52
      deps/v8/src/heap.cc
  33. 17
      deps/v8/src/heap.h
  34. 6
      deps/v8/src/ia32/assembler-ia32-inl.h
  35. 4
      deps/v8/src/ia32/assembler-ia32.h
  36. 494
      deps/v8/src/ia32/codegen-ia32.cc
  37. 43
      deps/v8/src/ia32/codegen-ia32.h
  38. 54
      deps/v8/src/ia32/register-allocator-ia32.cc
  39. 48
      deps/v8/src/ia32/stub-cache-ia32.cc
  40. 101
      deps/v8/src/ia32/virtual-frame-ia32.cc
  41. 29
      deps/v8/src/ia32/virtual-frame-ia32.h
  42. 5
      deps/v8/src/liveedit-delay.js
  43. 244
      deps/v8/src/liveedit.cc
  44. 2461
      deps/v8/src/powers_ten.h
  45. 88
      deps/v8/src/profile-generator-inl.h
  46. 295
      deps/v8/src/profile-generator.cc
  47. 233
      deps/v8/src/profile-generator.h
  48. 93
      deps/v8/src/regexp-delay.js
  49. 17
      deps/v8/src/register-allocator.h
  50. 48
      deps/v8/src/rewriter.cc
  51. 320
      deps/v8/src/runtime.cc
  52. 4
      deps/v8/src/runtime.h
  53. 60
      deps/v8/src/splay-tree-inl.h
  54. 12
      deps/v8/src/splay-tree.h
  55. 99
      deps/v8/src/string.js
  56. 2
      deps/v8/src/version.cc
  57. 3
      deps/v8/src/virtual-frame.cc
  58. 132
      deps/v8/src/x64/codegen-x64.cc
  59. 33
      deps/v8/src/x64/codegen-x64.h
  60. 2
      deps/v8/src/x64/register-allocator-x64.cc
  61. 5
      deps/v8/test/cctest/SConscript
  62. 100048
      deps/v8/test/cctest/gay_shortest.cc
  63. 8
      deps/v8/test/cctest/test-assembler-ia32.cc
  64. 10
      deps/v8/test/cctest/test-compiler.cc
  65. 6
      deps/v8/test/cctest/test-disasm-ia32.cc
  66. 67
      deps/v8/test/cctest/test-diy_fp.cc
  67. 204
      deps/v8/test/cctest/test-double.cc
  68. 116
      deps/v8/test/cctest/test-grisu3.cc
  69. 362
      deps/v8/test/cctest/test-profile-generator.cc
  70. 37
      deps/v8/test/mjsunit/array-push.js
  71. 56
      deps/v8/test/mjsunit/compiler/loopcount.js
  72. 83
      deps/v8/test/mjsunit/debug-liveedit-patch-positions-replace.js
  73. 93
      deps/v8/test/mjsunit/debug-liveedit-patch-positions.js
  74. 26
      deps/v8/test/mjsunit/regexp-cache-replace.js
  75. 286
      deps/v8/tools/generate-ten-powers.scm
  76. 9
      deps/v8/tools/gyp/v8.gyp
  77. 10
      deps/v8/tools/v8.xcodeproj/project.pbxproj
  78. 12
      deps/v8/tools/visual_studio/v8_base.vcproj
  79. 12
      deps/v8/tools/visual_studio/v8_base_arm.vcproj
  80. 12
      deps/v8/tools/visual_studio/v8_base_x64.vcproj
  81. 2
      doc/api.txt
  82. 26
      lib/dns.js
  83. 16
      src/node.cc
  84. 10
      src/node_dns.cc
  85. 4
      src/node_stat_watcher.cc
  86. 49
      test/disabled/test-dns.js
  87. 24
      test/simple/test-eval-cx.js

6
deps/v8/ChangeLog

@ -1,3 +1,9 @@
2010-03-17: Version 2.1.5
Performance improvements for arithmetic operations.
Performance improvements for string operations.
2010-03-10: Version 2.1.4
Fixed code cache lookup for keyed IC's (issue http://crbug.com/37853).

2
deps/v8/src/SConscript

@ -63,7 +63,6 @@ SOURCES = {
full-codegen.cc
func-name-inferrer.cc
global-handles.cc
grisu3.cc
handles.cc
hashmap.cc
heap-profiler.cc
@ -80,6 +79,7 @@ SOURCES = {
objects.cc
oprofile-agent.cc
parser.cc
profile-generator.cc
property.cc
regexp-macro-assembler-irregexp.cc
regexp-macro-assembler.cc

10
deps/v8/src/api.cc

@ -1137,8 +1137,14 @@ Local<Script> Script::New(v8::Handle<String> source,
pre_data_impl = NULL;
}
i::Handle<i::JSFunction> boilerplate =
i::Compiler::Compile(str, name_obj, line_offset, column_offset, NULL,
pre_data_impl, Utils::OpenHandle(*script_data));
i::Compiler::Compile(str,
name_obj,
line_offset,
column_offset,
NULL,
pre_data_impl,
Utils::OpenHandle(*script_data),
i::NOT_NATIVES_CODE);
has_pending_exception = boilerplate.is_null();
EXCEPTION_BAILOUT_CHECK(Local<Script>());
return Local<Script>(ToApi<Script>(boilerplate));

95
deps/v8/src/arm/codegen-arm.cc

@ -3681,7 +3681,8 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
// Load the argument on the stack and jump to the runtime.
Load(args->at(0));
frame_->CallRuntime(Runtime::kNumberToString, 1);
NumberToStringStub stub;
frame_->CallStub(&stub, 1);
frame_->EmitPush(r0);
}
@ -5280,6 +5281,79 @@ static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
}
void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
bool object_is_smi,
Label* not_found) {
// Currently only lookup for smis. Check for smi if object is not known to be
// a smi.
if (!object_is_smi) {
ASSERT(kSmiTag == 0);
__ tst(object, Operand(kSmiTagMask));
__ b(ne, not_found);
}
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
Register mask = scratch1;
Register scratch = scratch2;
// Load the number string cache.
__ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
// Divide length by two (length is not a smi).
__ mov(mask, Operand(mask, ASR, 1));
__ sub(mask, mask, Operand(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value.
__ and_(scratch, mask, Operand(object, ASR, 1));
// Calculate address of entry in string cache: each entry consists
// of two pointer sized fields.
__ add(scratch,
number_string_cache,
Operand(scratch, LSL, kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
Register object1 = scratch1;
__ ldr(object1, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ cmp(object, object1);
__ b(ne, not_found);
// Get the result from the cache.
__ ldr(result,
FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
__ IncrementCounter(&Counters::number_to_string_native,
1,
scratch1,
scratch2);
}
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
__ ldr(r1, MemOperand(sp, 0));
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, false, &runtime);
__ add(sp, sp, Operand(1 * kPointerSize));
__ Ret();
__ bind(&runtime);
// Handle number to string in the runtime system if not found in the cache.
__ TailCallRuntime(Runtime::kNumberToString, 1, 1);
}
// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
@ -5503,7 +5577,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// sp[0] : second argument
// sp[4] : first argument
Label not_strings, not_string1, string1;
Label not_strings, not_string1, string1, string1_smi2;
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &not_string1);
__ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
@ -5511,13 +5585,24 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// First argument is a string, test second.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &string1);
__ b(eq, &string1_smi2);
__ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &string1);
// First and second argument are strings.
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
__ TailCallStub(&stub);
StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
__ TailCallStub(&string_add_stub);
__ bind(&string1_smi2);
// First argument is a string, second is a smi. Try to lookup the number
// string for the smi in the number string cache.
NumberToStringStub::GenerateLookupNumberStringCache(
masm, r0, r2, r4, r5, true, &string1);
// Replace second argument on stack and tailcall string add stub to make
// the result.
__ str(r2, MemOperand(sp, 0));
__ TailCallStub(&string_add_stub);
// Only first argument is a string.
__ bind(&string1);

33
deps/v8/src/arm/codegen-arm.h

@ -660,6 +660,39 @@ class StringCompareStub: public CodeStub {
};
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache the code jumps to
// the label not_found with only the content of register object unchanged.
static void GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
bool object_is_smi,
Label* not_found);
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "NumberToStringStub"; }
#ifdef DEBUG
void Print() {
PrintF("NumberToStringStub\n");
}
#endif
};
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_

2
deps/v8/src/array.js

@ -1123,7 +1123,7 @@ function SetupArray() {
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush, 1),
"concat", getFunction("concat", ArrayConcat),
"concat", getFunction("concat", ArrayConcat, 1),
"reverse", getFunction("reverse", ArrayReverse),
"shift", getFunction("shift", ArrayShift),
"unshift", getFunction("unshift", ArrayUnshift, 1),

6
deps/v8/src/ast.cc

@ -78,14 +78,16 @@ VariableProxy::VariableProxy(Handle<String> name,
var_(NULL),
is_this_(is_this),
inside_with_(inside_with),
is_trivial_(false) {
is_trivial_(false),
reaching_definitions_(NULL) {
// names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol());
}
VariableProxy::VariableProxy(bool is_this)
: is_this_(is_this) {
: is_this_(is_this),
reaching_definitions_(NULL) {
}

27
deps/v8/src/ast.h

@ -103,6 +103,7 @@ namespace internal {
class TargetCollector;
class MaterializedLiteral;
class DefinitionInfo;
class BitVector;
#define DEF_FORWARD_DECLARATION(type) class type;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
@ -251,6 +252,14 @@ class Expression: public AstNode {
bitfields_ |= SideEffectFreeField::encode(is_side_effect_free);
}
// Will the use of this expression treat -0 the same as 0 in all cases?
// If so, we can return 0 instead of -0 if we want to, to optimize code.
bool no_negative_zero() { return NoNegativeZeroField::decode(bitfields_); }
void set_no_negative_zero(bool no_negative_zero) {
bitfields_ &= ~NoNegativeZeroField::mask();
bitfields_ |= NoNegativeZeroField::encode(no_negative_zero);
}
// Will ToInt32 (ECMA 262-3 9.5) or ToUint32 (ECMA 262-3 9.6)
// be applied to the value of this expression?
// If so, we may be able to optimize the calculation of the value.
@ -260,8 +269,18 @@ class Expression: public AstNode {
bitfields_ |= ToInt32Field::encode(to_int32);
}
// How many bitwise logical or shift operators are used in this expression?
int num_bit_ops() { return NumBitOpsField::decode(bitfields_); }
void set_num_bit_ops(int num_bit_ops) {
bitfields_ &= ~NumBitOpsField::mask();
num_bit_ops = Min(num_bit_ops, kMaxNumBitOps);
bitfields_ |= NumBitOpsField::encode(num_bit_ops);
}
private:
static const int kMaxNumBitOps = (1 << 5) - 1;
uint32_t bitfields_;
StaticType type_;
@ -270,7 +289,9 @@ class Expression: public AstNode {
// Using template BitField<type, start, size>.
class SideEffectFreeField : public BitField<bool, 0, 1> {};
class ToInt32Field : public BitField<bool, 1, 1> {};
class NoNegativeZeroField : public BitField<bool, 1, 1> {};
class ToInt32Field : public BitField<bool, 2, 1> {};
class NumBitOpsField : public BitField<int, 3, 5> {};
};
@ -1032,6 +1053,9 @@ class VariableProxy: public Expression {
bool is_trivial() { return is_trivial_; }
void set_is_trivial(bool b) { is_trivial_ = b; }
BitVector* reaching_definitions() { return reaching_definitions_; }
void set_reaching_definitions(BitVector* rd) { reaching_definitions_ = rd; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@ -1041,6 +1065,7 @@ class VariableProxy: public Expression {
bool is_this_;
bool inside_with_;
bool is_trivial_;
BitVector* reaching_definitions_;
VariableProxy(Handle<String> name, bool is_this, bool inside_with);
explicit VariableProxy(bool is_this);

11
deps/v8/src/bootstrapper.cc

@ -816,8 +816,15 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
ASSERT(source->IsAsciiRepresentation());
Handle<String> script_name = Factory::NewStringFromUtf8(name);
boilerplate =
Compiler::Compile(source, script_name, 0, 0, extension, NULL,
Handle<String>::null());
Compiler::Compile(
source,
script_name,
0,
0,
extension,
NULL,
Handle<String>::null(),
use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
if (boilerplate.is_null()) return false;
cache->Add(name, boilerplate);
}

62
deps/v8/src/builtins.cc

@ -727,6 +727,68 @@ BUILTIN(ArraySplice) {
}
BUILTIN(ArrayConcat) {
if (!ArrayPrototypeHasNoElements()) {
return CallJsBuiltin("ArrayConcat", args);
}
// Iterate through all the arguments performing checks
// and calculating total length.
int n_arguments = args.length();
int result_len = 0;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()) {
return CallJsBuiltin("ArrayConcat", args);
}
int len = Smi::cast(JSArray::cast(arg)->length())->value();
// We shouldn't overflow when adding another len.
const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
USE(kHalfOfMaxInt);
result_len += len;
ASSERT(result_len >= 0);
if (result_len > FixedArray::kMaxLength) {
return CallJsBuiltin("ArrayConcat", args);
}
}
if (result_len == 0) {
return AllocateEmptyJSArray();
}
// Allocate result.
Object* result = AllocateJSArray();
if (result->IsFailure()) return result;
JSArray* result_array = JSArray::cast(result);
result = Heap::AllocateUninitializedFixedArray(result_len);
if (result->IsFailure()) return result;
FixedArray* result_elms = FixedArray::cast(result);
// Copy data.
AssertNoAllocation no_gc;
int start_pos = 0;
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
FixedArray* elms = FixedArray::cast(array->elements());
int len = Smi::cast(array->length())->value();
CopyElements(&no_gc, result_elms, start_pos, elms, 0, len);
start_pos += len;
}
ASSERT(start_pos == result_len);
// Set the length and elements.
result_array->set_length(Smi::FromInt(result_len));
result_array->set_elements(result_elms);
return result_array;
}
// -----------------------------------------------------------------------------
//

1
deps/v8/src/builtins.h

@ -52,6 +52,7 @@ enum BuiltinExtraArguments {
V(ArrayUnshift, NO_EXTRA_ARGUMENTS) \
V(ArraySlice, NO_EXTRA_ARGUMENTS) \
V(ArraySplice, NO_EXTRA_ARGUMENTS) \
V(ArrayConcat, NO_EXTRA_ARGUMENTS) \
\
V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \

119
deps/v8/src/cached_powers.h

@ -1,119 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CACHED_POWERS_H_
#define V8_CACHED_POWERS_H_
#include "diy_fp.h"
namespace v8 {
namespace internal {
struct CachedPower {
uint64_t significand;
int16_t binary_exponent;
int16_t decimal_exponent;
};
// The following defines implement the interface between this file and the
// generated 'powers_ten.h'.
// GRISU_CACHE_NAME(1) contains all possible cached powers.
// GRISU_CACHE_NAME(i) contains GRISU_CACHE_NAME(1) where only every 'i'th
// element is kept. More formally GRISU_CACHE_NAME(i) contains the elements j*i
// with 0 <= j < k with k such that j*k < the size of GRISU_CACHE_NAME(1).
// The higher 'i' is the fewer elements we use.
// Given that there are less elements, the exponent-distance between two
// elements in the cache grows. The variable GRISU_CACHE_MAX_DISTANCE(i) stores
// the maximum distance between two elements.
#define GRISU_CACHE_STRUCT CachedPower
#define GRISU_CACHE_NAME(i) kCachedPowers##i
#define GRISU_CACHE_MAX_DISTANCE(i) kCachedPowersMaxDistance##i
#define GRISU_CACHE_OFFSET kCachedPowerOffset
#define GRISU_UINT64_C V8_2PART_UINT64_C
// The following include imports the precompiled cached powers.
#include "powers_ten.h" // NOLINT
static const double kD_1_LOG2_10 = 0.30102999566398114; // 1 / lg(10)
// We can't use a function since we reference variables depending on the 'i'.
// This way the compiler is able to see at compile time that only one
// cache-array variable is used and thus can remove all the others.
#define COMPUTE_FOR_CACHE(i) \
if (!found && (gamma - alpha + 1 >= GRISU_CACHE_MAX_DISTANCE(i))) { \
int kQ = DiyFp::kSignificandSize; \
double k = ceiling((alpha - e + kQ - 1) * kD_1_LOG2_10); \
int index = (GRISU_CACHE_OFFSET + static_cast<int>(k) - 1) / i + 1; \
cached_power = GRISU_CACHE_NAME(i)[index]; \
found = true; \
} \
static void GetCachedPower(int e, int alpha, int gamma, int* mk, DiyFp* c_mk) {
// The following if statement should be optimized by the compiler so that only
// one array is referenced and the others are not included in the object file.
bool found = false;
CachedPower cached_power;
COMPUTE_FOR_CACHE(20);
COMPUTE_FOR_CACHE(19);
COMPUTE_FOR_CACHE(18);
COMPUTE_FOR_CACHE(17);
COMPUTE_FOR_CACHE(16);
COMPUTE_FOR_CACHE(15);
COMPUTE_FOR_CACHE(14);
COMPUTE_FOR_CACHE(13);
COMPUTE_FOR_CACHE(12);
COMPUTE_FOR_CACHE(11);
COMPUTE_FOR_CACHE(10);
COMPUTE_FOR_CACHE(9);
COMPUTE_FOR_CACHE(8);
COMPUTE_FOR_CACHE(7);
COMPUTE_FOR_CACHE(6);
COMPUTE_FOR_CACHE(5);
COMPUTE_FOR_CACHE(4);
COMPUTE_FOR_CACHE(3);
COMPUTE_FOR_CACHE(2);
COMPUTE_FOR_CACHE(1);
if (!found) {
UNIMPLEMENTED();
// Silence compiler warnings.
cached_power.significand = 0;
cached_power.binary_exponent = 0;
cached_power.decimal_exponent = 0;
}
*c_mk = DiyFp(cached_power.significand, cached_power.binary_exponent);
*mk = cached_power.decimal_exponent;
ASSERT((alpha <= c_mk->e() + e) && (c_mk->e() + e <= gamma));
}
#undef GRISU_REDUCTION
#undef GRISU_CACHE_STRUCT
#undef GRISU_CACHE_NAME
#undef GRISU_CACHE_MAX_DISTANCE
#undef GRISU_CACHE_OFFSET
#undef GRISU_UINT64_C
} } // namespace v8::internal
#endif // V8_CACHED_POWERS_H_

22
deps/v8/src/checks.h

@ -80,7 +80,6 @@ static inline void CheckEqualsHelper(const char* file, int line,
}
}
// Helper function used by the CHECK_EQ function when given int64_t
// arguments. Should not be called directly.
static inline void CheckEqualsHelper(const char* file, int line,
@ -203,27 +202,6 @@ static inline void CheckEqualsHelper(const char* file,
}
static inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
double expected,
const char* value_source,
double value) {
// Force values to 64 bit memory to truncate 80 bit precision on IA32.
volatile double* exp = new double[1];
*exp = expected;
volatile double* val = new double[1];
*val = value;
if (*exp == *val) {
V8_Fatal(file, line,
"CHECK_NE(%s, %s) failed\n# Value: %f",
expected_source, value_source, *val);
}
delete[] exp;
delete[] val;
}
namespace v8 {
class Value;
template <class T> class Handle;

6
deps/v8/src/compiler.cc

@ -278,7 +278,8 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
int line_offset, int column_offset,
v8::Extension* extension,
ScriptDataImpl* input_pre_data,
Handle<Object> script_data) {
Handle<Object> script_data,
NativesFlag natives) {
int source_length = source->length();
Counters::total_load_size.Increment(source_length);
Counters::total_compile_size.Increment(source_length);
@ -306,6 +307,9 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
// Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source);
if (natives == NATIVES_CODE) {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
}
if (!script_name.is_null()) {
script->set_name(*script_name);
script->set_line_offset(Smi::FromInt(line_offset));

3
deps/v8/src/compiler.h

@ -237,7 +237,8 @@ class Compiler : public AllStatic {
int line_offset, int column_offset,
v8::Extension* extension,
ScriptDataImpl* pre_data,
Handle<Object> script_data);
Handle<Object> script_data,
NativesFlag is_natives_code);
// Compile a String source within a context for Eval.
static Handle<JSFunction> CompileEval(Handle<String> source,

11
deps/v8/src/conversions-inl.h

@ -60,7 +60,8 @@ static inline int FastD2I(double x) {
// The fast double-to-unsigned-int conversion routine does not guarantee
// rounding towards zero.
// rounding towards zero, or any reasonable value if the argument is larger
// than what fits in an unsigned 32-bit integer.
static inline unsigned int FastD2UI(double x) {
// There is no unsigned version of lrint, so there is no fast path
// in this function as there is in FastD2I. Using lrint doesn't work
@ -77,7 +78,13 @@ static inline unsigned int FastD2UI(double x) {
if (x < k2Pow52) {
x += k2Pow52;
uint32_t result;
memcpy(&result, &x, sizeof(result)); // Copy low 32 bits.
#ifdef BIG_ENDIAN_FLOATING_POINT
Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
#else
Address mantissa_ptr = reinterpret_cast<Address>(&x);
#endif
// Copy least significant 32 bits of mantissa.
memcpy(&result, mantissa_ptr, sizeof(result));
return negative ? ~result + 1 : result;
}
// Large number (outside uint32 range), Infinity or NaN.

16
deps/v8/src/conversions.cc

@ -31,7 +31,6 @@
#include "conversions-inl.h"
#include "factory.h"
#include "grisu3.h"
#include "scanner.h"
namespace v8 {
@ -383,17 +382,8 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
int decimal_point;
int sign;
char* decimal_rep;
bool used_dtoa = false;
char grisu_buffer[kGrisu3MaximalLength + 1];
int length;
if (grisu3(v, grisu_buffer, &sign, &length, &decimal_point)) {
decimal_rep = grisu_buffer;
} else {
decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
used_dtoa = true;
length = StrLength(decimal_rep);
}
char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
int length = StrLength(decimal_rep);
if (sign) builder.AddCharacter('-');
@ -428,7 +418,7 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
builder.AddFormatted("%d", exponent);
}
if (used_dtoa) freedtoa(decimal_rep);
freedtoa(decimal_rep);
}
}
return builder.Finalize();

205
deps/v8/src/data-flow.cc

@ -34,6 +34,22 @@ namespace v8 {
namespace internal {
#ifdef DEBUG
void BitVector::Print() {
bool first = true;
PrintF("{");
for (int i = 0; i < length(); i++) {
if (Contains(i)) {
if (!first) PrintF(",");
first = false;
PrintF("%d");
}
}
PrintF("}");
}
#endif
void FlowGraph::AppendInstruction(AstNode* instruction) {
// Add a (non-null) AstNode to the end of the graph fragment.
ASSERT(instruction != NULL);
@ -357,6 +373,7 @@ void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
// not both.
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
if (expr->is_compound()) Visit(expr->target());
Visit(expr->value());
if (var->IsStackAllocated()) {
expr->set_num(definitions_.length());
@ -1100,6 +1117,12 @@ Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
if (init_value < term_value && update->op() != Token::INC) return NULL;
if (init_value > term_value && update->op() != Token::DEC) return NULL;
// Check that the update operation cannot overflow the smi range. This can
// occur in the two cases where the loop bound is equal to the largest or
// smallest smi.
if (update->op() == Token::INC && term_value == Smi::kMaxValue) return NULL;
if (update->op() == Token::DEC && term_value == Smi::kMinValue) return NULL;
// Found a smi loop variable.
return loop_var;
}
@ -1333,7 +1356,7 @@ void AssignedVariablesAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
result.Union(av_);
av_.Clear();
}
av_.CopyFrom(result);
av_ = result;
}
@ -1345,7 +1368,7 @@ void AssignedVariablesAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
result.Union(av_);
av_.Clear();
}
av_.CopyFrom(result);
av_ = result;
}
@ -1405,7 +1428,7 @@ void AssignedVariablesAnalyzer::VisitCall(Call* expr) {
Visit(expr->arguments()->at(i));
result.Union(av_);
}
av_.CopyFrom(result);
av_ = result;
}
@ -1418,7 +1441,7 @@ void AssignedVariablesAnalyzer::VisitCallNew(CallNew* expr) {
Visit(expr->arguments()->at(i));
result.Union(av_);
}
av_.CopyFrom(result);
av_ = result;
}
@ -1430,7 +1453,7 @@ void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
Visit(expr->arguments()->at(i));
result.Union(av_);
}
av_.CopyFrom(result);
av_ = result;
}
@ -1626,6 +1649,9 @@ void TextInstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
Variable* var = expr->AsVariable();
if (var != NULL) {
PrintF("%s", *var->name()->ToCString());
if (var->IsStackAllocated() && expr->reaching_definitions() != NULL) {
expr->reaching_definitions()->Print();
}
} else {
ASSERT(expr->AsProperty() != NULL);
VisitProperty(expr->AsProperty());
@ -1663,31 +1689,51 @@ void TextInstructionPrinter::VisitAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
if (var == NULL && prop == NULL) {
// Throw reference error.
Visit(expr->target());
return;
}
// Print the left-hand side.
if (var != NULL) {
PrintF("%s %s @%d",
*var->name()->ToCString(),
Token::String(expr->op()),
expr->value()->num());
PrintF("%s", *var->name()->ToCString());
} else if (prop != NULL) {
PrintF("@%d", prop->obj()->num());
if (prop->key()->IsPropertyName()) {
PrintF("@%d.", prop->obj()->num());
PrintF(".");
ASSERT(prop->key()->AsLiteral() != NULL);
prop->key()->AsLiteral()->handle()->Print();
PrintF(" %s @%d",
Token::String(expr->op()),
expr->value()->num());
} else {
PrintF("@%d[@%d] %s @%d",
prop->obj()->num(),
prop->key()->num(),
Token::String(expr->op()),
expr->value()->num());
PrintF("[@%d]", prop->key()->num());
}
}
// Print the operation.
if (expr->is_compound()) {
PrintF(" = ");
// Print the left-hand side again when compound.
if (var != NULL) {
PrintF("@%d", expr->target()->num());
} else {
// Throw reference error.
Visit(expr->target());
PrintF("@%d", prop->obj()->num());
if (prop->key()->IsPropertyName()) {
PrintF(".");
ASSERT(prop->key()->AsLiteral() != NULL);
prop->key()->AsLiteral()->handle()->Print();
} else {
PrintF("[@%d]", prop->key()->num());
}
}
// Print the corresponding binary operator.
PrintF(" %s ", Token::String(expr->binary_op()));
} else {
PrintF(" %s ", Token::String(expr->op()));
}
// Print the right-hand side.
PrintF("@%d", expr->value()->num());
if (expr->num() != AstNode::kNoNumber) {
PrintF(" ;; D%d", expr->num());
}
@ -1798,38 +1844,17 @@ void Node::PrintReachingDefinitions() {
if (rd_.rd_in() != NULL) {
ASSERT(rd_.kill() != NULL && rd_.gen() != NULL);
PrintF("RD_in = {");
bool first = true;
for (int i = 0; i < rd_.rd_in()->length(); i++) {
if (rd_.rd_in()->Contains(i)) {
if (!first) PrintF(",");
PrintF("%d");
first = false;
}
}
PrintF("}\n");
PrintF("RD_in = ");
rd_.rd_in()->Print();
PrintF("\n");
PrintF("RD_kill = {");
first = true;
for (int i = 0; i < rd_.kill()->length(); i++) {
if (rd_.kill()->Contains(i)) {
if (!first) PrintF(",");
PrintF("%d");
first = false;
}
}
PrintF("}\n");
PrintF("RD_kill = ");
rd_.kill()->Print();
PrintF("\n");
PrintF("RD_gen = {");
first = true;
for (int i = 0; i < rd_.gen()->length(); i++) {
if (rd_.gen()->Contains(i)) {
if (!first) PrintF(",");
PrintF("%d");
first = false;
}
}
PrintF("}\n");
PrintF("RD_gen = ");
rd_.gen()->Print();
PrintF("\n");
}
}
@ -1961,7 +1986,7 @@ void ExitNode::ComputeRDOut(BitVector* result) {
void BlockNode::ComputeRDOut(BitVector* result) {
// All definitions reaching this block ...
result->CopyFrom(*rd_.rd_in());
*result = *rd_.rd_in();
// ... except those killed by the block ...
result->Subtract(*rd_.kill());
// ... but including those generated by the block.
@ -1971,13 +1996,13 @@ void BlockNode::ComputeRDOut(BitVector* result) {
void BranchNode::ComputeRDOut(BitVector* result) {
// Branch nodes don't kill or generate definitions.
result->CopyFrom(*rd_.rd_in());
*result = *rd_.rd_in();
}
void JoinNode::ComputeRDOut(BitVector* result) {
// Join nodes don't kill or generate definitions.
result->CopyFrom(*rd_.rd_in());
*result = *rd_.rd_in();
}
@ -2008,7 +2033,7 @@ void BlockNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
if (rd_.rd_in()->Equals(new_rd_in)) return;
// Update RD_in.
rd_.rd_in()->CopyFrom(new_rd_in);
*rd_.rd_in() = new_rd_in;
// Add the successor to the worklist if not already present.
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
@ -2024,7 +2049,7 @@ void BranchNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
if (rd_.rd_in()->Equals(new_rd_in)) return;
// Update RD_in.
rd_.rd_in()->CopyFrom(new_rd_in);
*rd_.rd_in() = new_rd_in;
// Add the successors to the worklist if not already present.
if (!successor0_->IsMarkedWith(mark)) {
successor0_->MarkWith(mark);
@ -2051,7 +2076,7 @@ void JoinNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
if (rd_.rd_in()->Equals(new_rd_in)) return;
// Update RD_in.
rd_.rd_in()->CopyFrom(new_rd_in);
*rd_.rd_in() = new_rd_in;
// Add the successor to the worklist if not already present.
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
@ -2060,6 +2085,66 @@ void JoinNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
}
void Node::PropagateReachingDefinitions(List<BitVector*>* variables) {
// Nothing to do.
}
// Walks the block's instructions in order, attaching to each stack-allocated
// variable reference the set of definitions that reach it, and updating the
// running reaching-definitions set whenever an instruction defines a variable.
void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) {
  // Propagate RD_in from the start of the block to all the variable
  // references.
  int variable_count = variables->length();
  // Running set of definitions (of any variable) reaching the current
  // instruction. Starts as the block's RD_in and is mutated below.
  BitVector rd = *rd_.rd_in();
  for (int i = 0, len = instructions_.length(); i < len; i++) {
    Expression* expr = instructions_[i]->AsExpression();
    if (expr == NULL) continue;

    // Look for a variable reference to record its reaching definitions.
    VariableProxy* proxy = expr->AsVariableProxy();
    if (proxy == NULL) {
      // Not a VariableProxy? Maybe it's a count operation.
      CountOperation* count_operation = expr->AsCountOperation();
      if (count_operation != NULL) {
        proxy = count_operation->expression()->AsVariableProxy();
      }
    }
    if (proxy == NULL) {
      // OK, Maybe it's a compound assignment.
      Assignment* assignment = expr->AsAssignment();
      if (assignment != NULL && assignment->is_compound()) {
        proxy = assignment->target()->AsVariableProxy();
      }
    }

    // 'this' and non-stack variables are not tracked by this analysis.
    if (proxy != NULL &&
        proxy->var()->IsStackAllocated() &&
        !proxy->var()->is_this()) {
      // All definitions for this variable.
      BitVector* definitions =
          variables->at(ReachingDefinitions::IndexFor(proxy->var(),
                                                      variable_count));
      BitVector* reaching_definitions = new BitVector(*definitions);
      // Intersected with all definitions (of any variable) reaching this
      // instruction.
      reaching_definitions->Intersect(rd);
      proxy->set_reaching_definitions(reaching_definitions);
    }

    // It may instead (or also) be a definition. If so update the running
    // value of reaching definitions for the block.
    Variable* var = expr->AssignedVar();
    if (var == NULL || !var->IsStackAllocated()) continue;

    // All definitions of this variable are killed.
    BitVector* def_set =
        variables->at(ReachingDefinitions::IndexFor(var, variable_count));
    rd.Subtract(*def_set);

    // This definition is generated.
    rd.Add(expr->num());
  }
}
void ReachingDefinitions::Compute() {
ASSERT(!definitions_->is_empty());
@ -2088,7 +2173,7 @@ void ReachingDefinitions::Compute() {
PrintF("Def[%s] = {%d", *var->name()->ToCString(), j);
first = false;
} else {
PrintF(", %d", j);
PrintF(",%d", j);
}
}
}
@ -2117,6 +2202,12 @@ void ReachingDefinitions::Compute() {
node->MarkWith(!mark);
node->UpdateRDIn(&worklist, mark);
}
// Step 4: Based on RD_in for block nodes, propagate reaching definitions
// to all variable uses in the block.
for (int i = 0; i < node_count; i++) {
postorder_->at(i)->PropagateReachingDefinitions(&variables_);
}
}

6
deps/v8/src/data-flow.h

@ -129,6 +129,10 @@ class BitVector: public ZoneObject {
int length() const { return length_; }
#ifdef DEBUG
void Print();
#endif
private:
int length_;
int data_length_;
@ -235,6 +239,7 @@ class Node: public ZoneObject {
bool mark);
virtual void ComputeRDOut(BitVector* result) = 0;
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0;
virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
#ifdef DEBUG
void AssignNodeNumber();
@ -324,6 +329,7 @@ class BlockNode: public Node {
bool mark);
void ComputeRDOut(BitVector* result);
void UpdateRDIn(WorkList<Node>* worklist, bool mark);
void PropagateReachingDefinitions(List<BitVector*>* variables);
#ifdef DEBUG
void PrintText();

39
deps/v8/src/date-delay.js

@ -260,45 +260,12 @@ function TimeInYear(year) {
}
var four_year_cycle_table = CalculateDateTable();
// Builds the 1461-entry (4 * 365 + 1 days) lookup table for one four-year
// cycle. Each entry packs year-in-cycle, month and day-of-month into a single
// integer via the kYearShift/kMonthShift bit fields. Indices 0-365 cover the
// cycle's leap year (February receives a 29th entry); the three common years
// start at offsets 366, 731 and 1096 with year bits 1, 2 and 3.
function CalculateDateTable() {
  var month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
  var four_year_cycle_table = new $Array(1461);

  var cumulative = 0;
  var position = 0;       // Day index within a common (365-day) year.
  var leap_position = 0;  // Day index within the leap (366-day) year.
  for (var month = 0; month < 12; month++) {
    var month_bits = month << kMonthShift;
    var length = month_lengths[month];
    for (var day = 1; day <= length; day++) {
      // Leap-year entries carry year bits 0; the other three years add
      // 1, 2 or 3 in the year field.
      four_year_cycle_table[leap_position] =
          month_bits + day;
      four_year_cycle_table[366 + position] =
          (1 << kYearShift) + month_bits + day;
      four_year_cycle_table[731 + position] =
          (2 << kYearShift) + month_bits + day;
      four_year_cycle_table[1096 + position] =
          (3 << kYearShift) + month_bits + day;
      leap_position++;
      position++;
    }
    if (month == 1) {
      // Insert February 29th into the leap year only.
      four_year_cycle_table[leap_position++] = month_bits + 29;
    }
  }
  return four_year_cycle_table;
}
var ymd_from_time_cache = [$NaN, $NaN, $NaN];
var ymd_from_time_cached_time = $NaN;
function YearFromTime(t) {
if (t !== ymd_from_time_cached_time) {
// Limits according to ECMA 262 15.9.1.1
// Limits according to ECMA 262 15.9.1.1
if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
return $NaN;
}
@ -312,7 +279,7 @@ function YearFromTime(t) {
function MonthFromTime(t) {
if (t !== ymd_from_time_cached_time) {
// Limits according to ECMA 262 15.9.1.1
// Limits according to ECMA 262 15.9.1.1
if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
return $NaN;
}
@ -325,7 +292,7 @@ function MonthFromTime(t) {
function DateFromTime(t) {
if (t !== ymd_from_time_cached_time) {
// Limits according to ECMA 262 15.9.1.1
// Limits according to ECMA 262 15.9.1.1
if (!$isFinite(t) || t < -8640000000000000 || t > 8640000000000000) {
return $NaN;
}

10
deps/v8/src/debug.cc

@ -686,8 +686,14 @@ bool Debug::CompileDebuggerScript(int index) {
bool allow_natives_syntax = FLAG_allow_natives_syntax;
FLAG_allow_natives_syntax = true;
Handle<JSFunction> boilerplate;
boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
Handle<String>::null());
boilerplate = Compiler::Compile(source_code,
script_name,
0,
0,
NULL,
NULL,
Handle<String>::null(),
NATIVES_CODE);
FLAG_allow_natives_syntax = allow_natives_syntax;
// Silently ignore stack overflows during compilation.

136
deps/v8/src/diy_fp.h

@ -1,136 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DIY_FP_H_
#define V8_DIY_FP_H_
namespace v8 {
namespace internal {
// This "Do It Yourself Floating Point" class implements a floating-point number
// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
// have the most significant bit of the significand set.
// Multiplication and Subtraction do not normalize their results.
// DiyFp are not designed to contain special doubles (NaN and Infinity).
// "Do It Yourself Floating Point": a floating-point value represented as a
// uint64 significand and an int exponent. A normalized DiyFp has the most
// significant significand bit set. Subtract and Multiply do not normalize
// their results, and DiyFp cannot represent special doubles (NaN, Infinity).
class DiyFp {
 public:
  static const int kSignificandSize = 64;

  DiyFp() : f_(0), e_(0) {}
  DiyFp(uint64_t f, int e) : f_(f), e_(e) {}

  // this = this - other.
  // Preconditions: both exponents are equal and this->f_ >= other.f_.
  // The difference is not normalized.
  void Subtract(const DiyFp& other) {
    ASSERT(e_ == other.e_);
    ASSERT(f_ >= other.f_);
    f_ -= other.f_;
  }

  // Returns a - b. Same preconditions as Subtract; the result is not
  // normalized.
  static DiyFp Minus(const DiyFp& a, const DiyFp& b) {
    DiyFp difference = a;
    difference.Subtract(b);
    return difference;
  }

  // this = this * other, keeping only the most significant 64 bits of the
  // 128-bit product, rounded to nearest.
  void Multiply(const DiyFp& other) {
    // Split both significands into 32-bit halves and emulate the 128-bit
    // multiplication. The low 64 bits of the product are used only to round
    // the high 64 bits.
    const uint64_t kM32 = 0xFFFFFFFFu;
    uint64_t this_hi = f_ >> 32;
    uint64_t this_lo = f_ & kM32;
    uint64_t other_hi = other.f_ >> 32;
    uint64_t other_lo = other.f_ & kM32;
    uint64_t hh = this_hi * other_hi;
    uint64_t hl = this_hi * other_lo;
    uint64_t lh = this_lo * other_hi;
    uint64_t ll = this_lo * other_lo;
    uint64_t middle = (ll >> 32) + (hl & kM32) + (lh & kM32);
    middle += 1U << 31;  // Round to nearest.
    f_ = hh + (hl >> 32) + (lh >> 32) + (middle >> 32);
    e_ += other.e_ + 64;
  }

  // Returns a * b.
  static DiyFp Times(const DiyFp& a, const DiyFp& b) {
    DiyFp product = a;
    product.Multiply(b);
    return product;
  }

  // Shifts the significand left until its top bit is set, adjusting the
  // exponent so the value is unchanged. The significand must be non-zero.
  void Normalize() {
    ASSERT(f_ != 0);
    uint64_t significand = f_;
    int exponent = e_;
    // This method is mainly called for normalizing boundaries, which in
    // general need a 10-bit shift, so move in 10-bit steps first.
    const uint64_t k10MSBits = V8_2PART_UINT64_C(0xFFC00000, 00000000);
    while ((significand & k10MSBits) == 0) {
      significand <<= 10;
      exponent -= 10;
    }
    while ((significand & kUint64MSB) == 0) {
      significand <<= 1;
      exponent--;
    }
    f_ = significand;
    e_ = exponent;
  }

  // Returns a normalized copy of a.
  static DiyFp Normalize(const DiyFp& a) {
    DiyFp normalized = a;
    normalized.Normalize();
    return normalized;
  }

  uint64_t f() const { return f_; }
  int e() const { return e_; }

  void set_f(uint64_t new_value) { f_ = new_value; }
  void set_e(int new_value) { e_ = new_value; }

 private:
  static const uint64_t kUint64MSB = V8_2PART_UINT64_C(0x80000000, 00000000);

  uint64_t f_;
  int e_;
};
} } // namespace v8::internal
#endif // V8_DIY_FP_H_

169
deps/v8/src/double.h

@ -1,169 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DOUBLE_H_
#define V8_DOUBLE_H_
#include "diy_fp.h"
namespace v8 {
namespace internal {
// We assume that doubles and uint64_t have the same endianness.
// Reinterprets a double's bit pattern as a uint64 (no numeric conversion).
static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
// Reinterprets a uint64 bit pattern as a double (no numeric conversion).
static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
// Helper functions for doubles.
// Helper wrapper exposing the IEEE binary64 bit layout of a double:
// sign, biased exponent and 52-bit significand, plus conversions to DiyFp.
class Double {
 public:
  static const uint64_t kSignMask = V8_2PART_UINT64_C(0x80000000, 00000000);
  static const uint64_t kExponentMask = V8_2PART_UINT64_C(0x7FF00000, 00000000);
  static const uint64_t kSignificandMask =
      V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
  static const uint64_t kHiddenBit = V8_2PART_UINT64_C(0x00100000, 00000000);

  Double() : d64_(0) {}
  explicit Double(double d) : d64_(double_to_uint64(d)) {}
  explicit Double(uint64_t d64) : d64_(d64) {}

  // Returns this double as a (non-normalized) DiyFp.
  // Must not be called on Infinity or NaN.
  DiyFp AsDiyFp() const {
    ASSERT(!IsSpecial());
    return DiyFp(Significand(), Exponent());
  }

  // Returns this double as a normalized DiyFp (top significand bit set).
  // this->Significand() must not be 0.
  DiyFp AsNormalizedDiyFp() const {
    uint64_t f = Significand();
    int e = Exponent();
    ASSERT(f != 0);
    // The current double could be a denormal.
    while ((f & kHiddenBit) == 0) {
      f <<= 1;
      e--;
    }
    // Do the final shifts in one go. Don't forget the hidden bit (the '-1').
    f <<= DiyFp::kSignificandSize - kSignificandSize - 1;
    e -= DiyFp::kSignificandSize - kSignificandSize - 1;
    return DiyFp(f, e);
  }

  // Returns the double's bit as uint64.
  uint64_t AsUint64() const {
    return d64_;
  }

  // Returns the unbiased exponent; all denormals report kDenormalExponent.
  int Exponent() const {
    if (IsDenormal()) return kDenormalExponent;

    uint64_t d64 = AsUint64();
    int biased_e = static_cast<int>((d64 & kExponentMask) >> kSignificandSize);
    return biased_e - kExponentBias;
  }

  // Returns the 52 stored significand bits, with the hidden bit added back
  // for normal numbers.
  uint64_t Significand() const {
    uint64_t d64 = AsUint64();
    uint64_t significand = d64 & kSignificandMask;
    if (!IsDenormal()) {
      return significand + kHiddenBit;
    } else {
      return significand;
    }
  }

  // Returns true if the double is a denormal.
  bool IsDenormal() const {
    uint64_t d64 = AsUint64();
    return (d64 & kExponentMask) == 0;
  }

  // We consider denormals not to be special.
  // Hence only Infinity and NaN are special.
  bool IsSpecial() const {
    uint64_t d64 = AsUint64();
    return (d64 & kExponentMask) == kExponentMask;
  }

  // NaN: all exponent bits set and a non-zero significand.
  bool IsNan() const {
    uint64_t d64 = AsUint64();
    return ((d64 & kExponentMask) == kExponentMask) &&
        ((d64 & kSignificandMask) != 0);
  }

  // Infinity: all exponent bits set and a zero significand.
  bool IsInfinite() const {
    uint64_t d64 = AsUint64();
    return ((d64 & kExponentMask) == kExponentMask) &&
        ((d64 & kSignificandMask) == 0);
  }

  // Returns 1 for non-negative doubles (including +0), -1 otherwise.
  int Sign() const {
    uint64_t d64 = AsUint64();
    return (d64 & kSignMask) == 0? 1: -1;
  }

  // Returns the two boundaries of this.
  // The bigger boundary (m_plus) is normalized. The lower boundary has the same
  // exponent as m_plus.
  void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
    DiyFp v = this->AsDiyFp();
    bool significand_is_zero = (v.f() == kHiddenBit);
    DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
    DiyFp m_minus;
    if (significand_is_zero && v.e() != kDenormalExponent) {
      // The boundary is closer. Think of v = 1000e10 and v- = 9999e9.
      // Then the boundary (== (v - v-)/2) is not just at a distance of 1e9 but
      // at a distance of 1e8.
      // The only exception is for the smallest normal: the largest denormal is
      // at the same distance as its successor.
      // Note: denormals have the same exponent as the smallest normals.
      m_minus = DiyFp((v.f() << 2) - 1, v.e() - 2);
    } else {
      m_minus = DiyFp((v.f() << 1) - 1, v.e() - 1);
    }
    m_minus.set_f(m_minus.f() << (m_minus.e() - m_plus.e()));
    m_minus.set_e(m_plus.e());
    *out_m_plus = m_plus;
    *out_m_minus = m_minus;
  }

  double value() const { return uint64_to_double(d64_); }

 private:
  static const int kSignificandSize = 52;  // Excludes the hidden bit.
  static const int kExponentBias = 0x3FF + kSignificandSize;
  static const int kDenormalExponent = -kExponentBias + 1;

  uint64_t d64_;
};
} } // namespace v8::internal
#endif // V8_DOUBLE_H_

5
deps/v8/src/factory.cc

@ -560,6 +560,11 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
}
Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
CALL_HEAP_FUNCTION(Heap::CopyCode(*code, reloc_info), Code);
}
static inline Object* DoCopyInsert(DescriptorArray* array,
String* key,
Object* value,

2
deps/v8/src/factory.h

@ -230,6 +230,8 @@ class Factory : public AllStatic {
static Handle<Code> CopyCode(Handle<Code> code);
static Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
static Handle<Object> ToObject(Handle<Object> object);
static Handle<Object> ToObject(Handle<Object> object,
Handle<Context> global_context);

2
deps/v8/src/flag-definitions.h

@ -153,7 +153,7 @@ DEFINE_bool(always_fast_compiler, false,
"try to use the speculative optimizing backend for all code")
DEFINE_bool(trace_bailout, false,
"print reasons for falling back to using the classic V8 backend")
DEFINE_bool(safe_int32_compiler, false,
DEFINE_bool(safe_int32_compiler, true,
"enable optimized side-effect-free int32 expressions.")
DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations")

15
deps/v8/src/frame-element.h

@ -145,6 +145,16 @@ class FrameElement BASE_EMBEDDED {
void set_copied() { value_ = value_ | CopiedField::encode(true); }
void clear_copied() { value_ = value_ & ~CopiedField::mask(); }
// An untagged int32 FrameElement represents a signed int32
// on the stack. These are only allowed in a side-effect-free
// int32 calculation, and if a non-int32 input shows up or an overflow
// occurs, we bail out and drop all the int32 values.
void set_untagged_int32(bool value) {
value_ &= ~UntaggedInt32Field::mask();
value_ |= UntaggedInt32Field::encode(value);
}
bool is_untagged_int32() const { return UntaggedInt32Field::decode(value_); }
Register reg() const {
ASSERT(is_register());
uint32_t reg = DataField::decode(value_);
@ -255,8 +265,9 @@ class FrameElement BASE_EMBEDDED {
class TypeField: public BitField<Type, 0, 3> {};
class CopiedField: public BitField<bool, 3, 1> {};
class SyncedField: public BitField<bool, 4, 1> {};
class NumberInfoField: public BitField<int, 5, 4> {};
class DataField: public BitField<uint32_t, 9, 32 - 9> {};
class UntaggedInt32Field: public BitField<bool, 5, 1> {};
class NumberInfoField: public BitField<int, 6, 4> {};
class DataField: public BitField<uint32_t, 10, 32 - 10> {};
friend class VirtualFrame;
};

9
deps/v8/src/globals.h

@ -98,11 +98,6 @@ typedef byte* Address;
#define V8_PTR_PREFIX ""
#endif // V8_HOST_ARCH_64_BIT
// The following macro works on both 32 and 64-bit platforms.
// Usage: instead of writing 0x1234567890123456
// write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
#define V8PRIxPTR V8_PTR_PREFIX "x"
#define V8PRIdPTR V8_PTR_PREFIX "d"
@ -322,6 +317,10 @@ enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG };
// Flag indicating whether code is built into the VM (one of the natives files).
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of

494
deps/v8/src/grisu3.cc

@ -1,494 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "grisu3.h"
#include "cached_powers.h"
#include "diy_fp.h"
#include "double.h"
namespace v8 {
namespace internal {
template <int alpha = -60, int gamma = -32>
class Grisu3 {
 public:
  // Provides a decimal representation of v.
  // Returns true if it succeeds, otherwise the result can not be trusted.
  // There will be *length digits inside the buffer (not null-terminated).
  // If the function returns true then
  //        v == (double) (buffer * 10^decimal_exponent).
  // The digits in the buffer are the shortest representation possible: no
  // 0.099999999999 instead of 0.1.
  // The last digit will be closest to the actual v. That is, even if several
  // digits might correctly yield 'v' when read again, the closest will be
  // computed.
  static bool grisu3(double v,
                     char* buffer, int* length, int* decimal_exponent);

 private:
  // Rounds the buffer according to the rest.
  // If there is too much imprecision to round then false is returned.
  // Similarly false is returned when the buffer is not within Delta.
  static bool RoundWeed(char* buffer, int len, uint64_t wp_W, uint64_t Delta,
                        uint64_t rest, uint64_t ten_kappa, uint64_t ulp);
  // Dispatches to a specialized digit-generation routine. The chosen
  // routine depends on w.e (which in turn depends on alpha and gamma).
  // Currently there is only one digit-generation routine, but it would be easy
  // to add others.
  static bool DigitGen(DiyFp low, DiyFp w, DiyFp high,
                       char* buffer, int* len, int* kappa);
  // Generates w's digits. The result is the shortest in the interval low-high.
  // All DiyFp are assumed to be imprecise and this function takes this
  // imprecision into account. If the function cannot compute the best
  // representation (due to the imprecision) then false is returned.
  static bool DigitGen_m60_m32(DiyFp low, DiyFp w, DiyFp high,
                               char* buffer, int* length, int* kappa);
};
template<int alpha, int gamma>
bool Grisu3<alpha, gamma>::grisu3(double v,
                                  char* buffer,
                                  int* length,
                                  int* decimal_exponent) {
  DiyFp w = Double(v).AsNormalizedDiyFp();
  // boundary_minus and boundary_plus are the boundaries between v and its
  // neighbors. Any number strictly between boundary_minus and boundary_plus
  // will round to v when read as double.
  // Grisu3 will never output representations that lie exactly on a boundary.
  DiyFp boundary_minus, boundary_plus;
  Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
  ASSERT(boundary_plus.e() == w.e());
  DiyFp ten_mk;  // Cached power of ten: 10^-k
  int mk;        // -k
  GetCachedPower(w.e() + DiyFp::kSignificandSize, alpha, gamma, &mk, &ten_mk);
  ASSERT(alpha <= w.e() + ten_mk.e() + DiyFp::kSignificandSize &&
         gamma >= w.e() + ten_mk.e() + DiyFp::kSignificandSize);
  // Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
  // 64 bit significand and ten_mk is thus only precise up to 64 bits.
  // The DiyFp::Times procedure rounds its result, and ten_mk is approximated
  // too. The variable scaled_w (as well as scaled_boundary_minus/plus) are now
  // off by a small amount.
  // In fact: scaled_w - w*10^k < 1ulp (unit in the last place) of scaled_w.
  // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
  //           (f-1) * 2^e < w*10^k < (f+1) * 2^e
  DiyFp scaled_w = DiyFp::Times(w, ten_mk);
  ASSERT(scaled_w.e() ==
         boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
  // In theory it would be possible to avoid some recomputations by computing
  // the difference between w and boundary_minus/plus (a power of 2) and to
  // compute scaled_boundary_minus/plus by subtracting/adding from
  // scaled_w. However the code becomes much less readable and the speed
  // enhancements are not terrific.
  DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
  DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
  // DigitGen will generate the digits of scaled_w. Therefore we have
  //                 v == (double) (scaled_w * 10^-mk).
  // Set decimal_exponent == -mk and pass it to DigitGen. If scaled_w is not an
  // integer then it will be updated. For instance if scaled_w == 1.23 then
  // the buffer will be filled with "123" and the decimal_exponent will be
  // decreased by 2.
  int kappa;
  bool result = DigitGen(scaled_boundary_minus, scaled_w, scaled_boundary_plus,
                         buffer, length, &kappa);
  *decimal_exponent = -mk + kappa;
  return result;
}
// Generates the digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by alpha and gamma. Typically alpha >= -63
// and gamma <= 3.
// Returns false if it fails, in which case the generated digits in the buffer
// should not be used.
// Preconditions:
// * low, w and high are correct up to 1 ulp (unit in the last place). That
// is, their error must be less than a unit of their last digits.
// * low.e() == w.e() == high.e()
// * low < w < high, and taking into account their error: low~ <= high~
// * alpha <= w.e() <= gamma
// Postconditions: returns false if procedure fails.
// otherwise:
// * buffer is not null-terminated, but len contains the number of digits.
// * buffer contains the shortest possible decimal digit-sequence
// such that LOW < buffer * 10^kappa < HIGH, where LOW and HIGH are the
// correct values of low and high (without their error).
// * if more than one decimal representation gives the minimal number of
// decimal digits then the one closest to W (where W is the correct value
// of w) is chosen.
// Remark: this procedure takes into account the imprecision of its input
// numbers. If the precision is not enough to guarantee all the postconditions
// then false is returned. This usually happens rarely (~0.5%).
// Dispatches digit generation to the routine specialized for w's exponent
// range. See the preceding comment block for the full contract.
template<int alpha, int gamma>
bool Grisu3<alpha, gamma>::DigitGen(DiyFp low,
                                    DiyFp w,
                                    DiyFp high,
                                    char* buffer,
                                    int* len,
                                    int* kappa) {
  ASSERT(low.e() == w.e() && w.e() == high.e());
  ASSERT(low.f() + 1 <= high.f() - 1);
  ASSERT(alpha <= w.e() && w.e() <= gamma);
  // The comparisons on alpha and gamma are compile-time constants, so the
  // dynamic test on w.e() is only needed when the template range merely
  // overlaps the [-60, -32] region.
  bool handled_by_m60_m32 =
      (alpha >= -60 && gamma <= -32) ||     // -60 <= w.e() <= -32
      (alpha <= -32 && gamma >= -60 &&      // Alpha/gamma overlaps -60/-32.
       -60 <= w.e() && w.e() <= -32);
  if (!handled_by_m60_m32) {
    // A simple adaption of the special case -60/-32 would allow greater ranges
    // of alpha/gamma and thus reduce the number of precomputed cached powers
    // of ten.
    UNIMPLEMENTED();
    return false;
  }
  return DigitGen_m60_m32(low, w, high, buffer, len, kappa);
}
static const uint32_t kTen4 = 10000;
static const uint32_t kTen5 = 100000;
static const uint32_t kTen6 = 1000000;
static const uint32_t kTen7 = 10000000;
static const uint32_t kTen8 = 100000000;
static const uint32_t kTen9 = 1000000000;

// Computes the biggest power of ten that is <= the given number, where
// 'number_bits' is the maximum number of bits 'number' has (0 <= number_bits
// <= 32). If number_bits == 0 then *power = 0 and *exponent = -1.
static void BiggestPowerTen(uint32_t number,
                            int number_bits,
                            uint32_t* power,
                            int* exponent) {
  // kPowers[i] == 10^i. kMinBits[i] is the smallest bit-width at which 10^i
  // is representable, so powers too large for 'number_bits' are never tested.
  static const uint32_t kPowers[] =
      {1, 10, 100, 1000, kTen4, kTen5, kTen6, kTen7, kTen8, kTen9};
  static const int kMinBits[] = {1, 4, 7, 10, 14, 17, 20, 24, 27, 30};
  if (number_bits < 0 || number_bits > 32) {
    // The assignments are here to silence compiler warnings about
    // uninitialized outputs.
    *power = 0;
    *exponent = 0;
    UNREACHABLE();
    return;
  }
  for (int i = 9; i >= 0; i--) {
    if (number_bits >= kMinBits[i] && kPowers[i] <= number) {
      *power = kPowers[i];
      *exponent = i;
      return;
    }
  }
  // number == 0 (or number_bits == 0): no positive power of ten fits.
  *power = 0;
  *exponent = -1;
}
// Same comments as for DigitGen but with additional precondition:
// -60 <= w.e() <= -32
//
// Say, for the sake of example, that
// w.e() == -48, and w.f() == 0x1234567890abcdef
// w's value can be computed by w.f() * 2^w.e()
// We can obtain w's integral digits by simply shifting w.f() by -w.e().
// -> w's integral part is 0x1234
// w's fractional part is therefore 0x567890abcdef.
// Printing w's integral part is easy (simply print 0x1234 in decimal).
// In order to print its fraction we repeatedly multiply the fraction by 10 and
// get each digit. For example, the first digit after the decimal point would
// be computed by (0x567890abcdef * 10) >> 48. -> 3
// The whole thing becomes slightly more complicated because we want to stop
// once we have enough digits. That is, once the digits inside the buffer
// represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's
// imprecision.
template<int alpha, int gamma>
bool Grisu3<alpha, gamma>::DigitGen_m60_m32(DiyFp low,
                                            DiyFp w,
                                            DiyFp high,
                                            char* buffer,
                                            int* length,
                                            int* kappa) {
  // low, w and high are imprecise, but by less than one ulp (unit in the last
  // place).
  // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
  // the new numbers are outside of the interval we want the final
  // representation to lie in.
  // Inversely adding (resp. removing) 1 ulp from low (resp. high) would yield
  // numbers that are certain to lie in the interval. We will use this fact
  // later on.
  // We will now start by generating the digits within the uncertain
  // interval. Later we will weed out representations that lie outside the safe
  // interval and thus _might_ lie outside the correct interval.
  uint64_t unit = 1;  // One ulp of the inputs; rescaled along with them below.
  DiyFp too_low = DiyFp(low.f() - unit, low.e());
  DiyFp too_high = DiyFp(high.f() + unit, high.e());
  // too_low and too_high are guaranteed to lie outside the interval we want the
  // generated number in.
  DiyFp unsafe_interval = DiyFp::Minus(too_high, too_low);
  // We now cut the input number into two parts: the integral digits and the
  // fractionals. We will not write any decimal separator though, but adapt
  // kappa instead.
  // Reminder: we are currently computing the digits (stored inside the buffer)
  // such that:   too_low < buffer * 10^kappa < too_high
  // We use too_high for the digit_generation and stop as soon as possible.
  // If we stop early we effectively round down.
  DiyFp one = DiyFp(static_cast<uint64_t>(1) << -w.e(), w.e());
  // Division by one is a shift.
  uint32_t integrals = static_cast<uint32_t>(too_high.f() >> -one.e());
  // Modulo by one is an and.
  uint64_t fractionals = too_high.f() & (one.f() - 1);
  uint32_t divider;
  int divider_exponent;
  BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
                  &divider, &divider_exponent);
  *kappa = divider_exponent + 1;
  *length = 0;
  // First loop: emit the integral digits, most significant first.
  // Loop invariant: buffer = too_high / 10^kappa  (integer division)
  // The invariant holds for the first iteration: kappa has been initialized
  // with the divider exponent + 1. And the divider is the biggest power of ten
  // that is smaller than integrals.
  while (*kappa > 0) {
    int digit = integrals / divider;
    buffer[*length] = '0' + digit;
    (*length)++;
    integrals %= divider;
    (*kappa)--;
    // Note that kappa now equals the exponent of the divider and that the
    // invariant thus holds again.
    uint64_t rest =
        (static_cast<uint64_t>(integrals) << -one.e()) + fractionals;
    // Invariant: too_high = buffer * 10^kappa + DiyFp(rest, one.e())
    // Reminder: unsafe_interval.e() == one.e()
    if (rest < unsafe_interval.f()) {
      // Rounding down (by not emitting the remaining digits) yields a number
      // that lies within the unsafe interval.
      return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f(),
                       unsafe_interval.f(), rest,
                       static_cast<uint64_t>(divider) << -one.e(), unit);
    }
    divider /= 10;
  }
  // The integrals have been generated. We are at the point of the decimal
  // separator. In the following loop we simply multiply the remaining digits by
  // 10 and divide by one. We just need to pay attention to multiply associated
  // data (like the interval or 'unit'), too.
  // Instead of multiplying by 10 we multiply by 5 (cheaper operation) and
  // increase its (imaginary) exponent. At the same time we decrease the
  // divider's (one's) exponent and shift its significand.
  // Basically, if fractionals was a DiyFp (with fractionals.e == one.e):
  //      fractionals.f *= 10;
  //      fractionals.f >>= 1; fractionals.e++; // value remains unchanged.
  //      one.f >>= 1; one.e++;                 // value remains unchanged.
  // and we have again fractionals.e == one.e which allows us to divide
  //      fractionals.f() by one.f()
  // We simply combine the *= 10 and the >>= 1.
  while (true) {
    fractionals *= 5;
    unit *= 5;
    unsafe_interval.set_f(unsafe_interval.f() * 5);
    unsafe_interval.set_e(unsafe_interval.e() + 1);  // Will be optimized out.
    one.set_f(one.f() >> 1);
    one.set_e(one.e() + 1);
    // Integer division by one.
    int digit = static_cast<int>(fractionals >> -one.e());
    buffer[*length] = '0' + digit;
    (*length)++;
    fractionals &= one.f() - 1;  // Modulo by one.
    (*kappa)--;
    if (fractionals < unsafe_interval.f()) {
      return RoundWeed(buffer, *length, DiyFp::Minus(too_high, w).f() * unit,
                       unsafe_interval.f(), fractionals, one.f(), unit);
    }
  }
}
// Rounds the given generated digits in the buffer and weeds out generated
// digits that are not in the safe interval, or where we cannot find a rounded
// representation.
// Input: * buffer containing the digits of too_high / 10^kappa
// * the buffer's length
// * distance_too_high_w == (too_high - w).f() * unit
// * unsafe_interval == (too_high - too_low).f() * unit
// * rest = (too_high - buffer * 10^kappa).f() * unit
// * ten_kappa = 10^kappa * unit
// * unit = the common multiplier
// Output: returns true on success.
// Modifies the generated digits in the buffer to approach (round towards) w.
template<int alpha, int gamma>
bool Grisu3<alpha, gamma>::RoundWeed(char* buffer,
int length,
uint64_t distance_too_high_w,
uint64_t unsafe_interval,
uint64_t rest,
uint64_t ten_kappa,
uint64_t unit) {
// distance_too_high_w is only known up to +/- unit, so the true (scaled)
// distance from too_high to w lies somewhere in
// ]small_distance; big_distance[.
uint64_t small_distance = distance_too_high_w - unit;
uint64_t big_distance = distance_too_high_w + unit;
// Let w- = too_high - big_distance, and
// w+ = too_high - small_distance.
// Note: w- < w < w+
//
// The real w (* unit) must lie somewhere inside the interval
// ]w-; w+[ (often written as "(w-; w+)")
// Basically the buffer currently contains a number in the unsafe interval
// ]too_low; too_high[ with too_low < w < too_high
//
// By generating the digits of too_high we got the biggest last digit.
// In the case that w+ < buffer < too_high we try to decrement the buffer.
// This way the buffer approaches (rounds towards) w.
// There are 3 conditions that stop the decrementation process:
// 1) the buffer is already below w+
// 2) decrementing the buffer would make it leave the unsafe interval
// 3) decrementing the buffer would yield a number below w+ and farther away
// than the current number. In other words:
// (buffer{-1} < w+) && w+ - buffer{-1} > buffer - w+
// Instead of using the buffer directly we use its distance to too_high.
// Conceptually rest ~= too_high - buffer
// All quantities here are unsigned; each comparison is written so that no
// subtraction can wrap around (e.g. "unsafe_interval - rest" is only taken
// after establishing rest <= unsafe_interval via the loop conditions).
while (rest < small_distance && // Negated condition 1: buffer >= w+
unsafe_interval - rest >= ten_kappa && // Negated condition 2
(rest + ten_kappa < small_distance || // buffer{-1} > w+
small_distance - rest >= rest + ten_kappa - small_distance)) {
// Decrementing the last digit moves the buffer one 10^kappa step
// closer to w; track the move via rest instead of re-deriving it.
buffer[length - 1]--;
rest += ten_kappa;
}
// We have approached w+ as much as possible. We now test if approaching w-
// would require changing the buffer. If yes, then we have two possible
// representations close to w, but we cannot decide which one is closer.
if (rest < big_distance &&
unsafe_interval - rest >= ten_kappa &&
(rest + ten_kappa < big_distance ||
big_distance - rest > rest + ten_kappa - big_distance)) {
return false;
}
// Weeding test.
// The safe interval is [too_low + 2 ulp; too_high - 2 ulp]
// Since too_low = too_high - unsafe_interval this is equivalent to
// [too_high - unsafe_interval + 4 ulp; too_high - 2 ulp]
// Conceptually we have: rest ~= too_high - buffer
return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
}
// Produces the shortest decimal representation of v.
// On success the digits are written to 'buffer' (NUL-terminated), '*sign' is
// 1 for negative input and 0 otherwise, '*length' is the digit count, and
// '*point' is the position of the decimal point relative to the first digit.
// Returns false when the fast path cannot guarantee a correct result.
bool grisu3(double v, char* buffer, int* sign, int* length, int* point) {
  ASSERT(v != 0);
  ASSERT(!Double(v).IsSpecial());
  // Record the sign, then continue with the absolute value.
  *sign = (v < 0) ? 1 : 0;
  if (*sign) v = -v;
  int decimal_exponent;
  const bool result =
      Grisu3<-60, -32>::grisu3(v, buffer, length, &decimal_exponent);
  *point = *length + decimal_exponent;
  buffer[*length] = '\0';
  return result;
}
} } // namespace v8::internal

2
deps/v8/src/handles.cc

@ -780,7 +780,7 @@ void LoadLazy(Handle<JSObject> obj, bool* pending_exception) {
bool allow_natives_syntax = FLAG_allow_natives_syntax;
FLAG_allow_natives_syntax = true;
boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL,
Handle<String>::null());
Handle<String>::null(), NATIVES_CODE);
FLAG_allow_natives_syntax = allow_natives_syntax;
// If the compilation failed (possibly due to stack overflows), we
// should never enter the result in the natives cache. Instead we

22
deps/v8/src/heap-inl.h

@ -133,7 +133,8 @@ Object* Heap::AllocateRawMap() {
#ifdef DEBUG
if (!result->IsFailure()) {
// Maps have their own alignment.
CHECK((OffsetFrom(result) & kMapAlignmentMask) == kHeapObjectTag);
CHECK((reinterpret_cast<intptr_t>(result) & kMapAlignmentMask) ==
static_cast<intptr_t>(kHeapObjectTag));
}
#endif
return result;
@ -273,6 +274,25 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}
Object* Heap::PrepareForCompare(String* str) {
// Always flatten small strings and force flattening of long strings
// after we have accumulated a certain amount of strings we failed to flatten.
static const int kMaxAlwaysFlattenLength = 32;
static const int kFlattenLongThreshold = 16*KB;
const int length = str->length();
Object* obj = str->TryFlatten();
if (length <= kMaxAlwaysFlattenLength ||
unflattended_strings_length_ >= kFlattenLongThreshold) {
return obj;
}
if (obj->IsFailure()) {
unflattended_strings_length_ += length;
}
return str;
}
int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
ASSERT(HasBeenSetup());
int amount = amount_of_external_allocated_memory_ + change_in_bytes;

52
deps/v8/src/heap.cc

@ -114,6 +114,8 @@ Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;
int Heap::unflattended_strings_length_ = 0;
int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;
@ -302,6 +304,7 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
TranscendentalCache::Clear();
gc_count_++;
unflattended_strings_length_ = 0;
#ifdef DEBUG
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
allow_allocation(false);
@ -2257,6 +2260,55 @@ Object* Heap::CopyCode(Code* code) {
}
Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
int new_body_size = RoundUp(code->instruction_size() + reloc_info.length(),
kObjectAlignment);
int sinfo_size = code->sinfo_size();
int new_obj_size = Code::SizeFor(new_body_size, sinfo_size);
Address old_addr = code->address();
int relocation_offset = code->relocation_start() - old_addr;
Object* result;
if (new_obj_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawCode(new_obj_size);
} else {
result = code_space_->AllocateRaw(new_obj_size);
}
if (result->IsFailure()) return result;
// Copy code object.
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
// Copy header and instructions.
memcpy(new_addr, old_addr, relocation_offset);
// Copy patched rinfo.
memcpy(new_addr + relocation_offset,
reloc_info.start(),
reloc_info.length());
Code* new_code = Code::cast(result);
new_code->set_relocation_size(reloc_info.length());
// Copy sinfo.
memcpy(new_code->sinfo_start(), code->sinfo_start(), code->sinfo_size());
// Relocate the copy.
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
new_code->Relocate(new_addr - old_addr);
#ifdef DEBUG
code->Verify();
#endif
return new_code;
}
Object* Heap::Allocate(Map* map, AllocationSpace space) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);

17
deps/v8/src/heap.h

@ -612,6 +612,11 @@ class Heap : public AllStatic {
Handle<Object> self_reference);
static Object* CopyCode(Code* code);
// Copy the code and scope info part of the code object, but insert
// the provided data as the relocation information.
static Object* CopyCode(Code* code, Vector<byte> reloc_info);
// Finds the symbol for string in the symbol table.
// If not found, a new symbol is added to the table and returned.
// Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
@ -629,6 +634,15 @@ class Heap : public AllStatic {
// NULL is returned if string is in new space or not flattened.
static Map* SymbolMapForString(String* str);
// Tries to flatten a string before compare operation.
//
// Returns a failure in case it was decided that flattening was
// necessary and failed. Note that if flattening is not necessary, the
// string might stay non-flat even when no failure is returned.
//
// Please note this function does not perform a garbage collection.
static inline Object* PrepareForCompare(String* str);
// Converts the given boolean condition to JavaScript boolean value.
static Object* ToBoolean(bool condition) {
return condition ? true_value() : false_value();
@ -950,6 +964,9 @@ class Heap : public AllStatic {
static int mc_count_; // how many mark-compact collections happened
static int gc_count_; // how many gc happened
// Total length of the strings we failed to flatten since the last GC.
static int unflattended_strings_length_;
#define ROOT_ACCESSOR(type, name, camel_name) \
static inline void set_##name(type* value) { \
roots_[k##camel_name##RootIndex] = value; \

6
deps/v8/src/ia32/assembler-ia32-inl.h

@ -314,6 +314,12 @@ Operand::Operand(Register reg) {
}
Operand::Operand(XMMRegister xmm_reg) {
Register reg = { xmm_reg.code() };
set_modrm(3, reg);
}
Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
// [disp/r]
set_modrm(0, ebp);

4
deps/v8/src/ia32/assembler-ia32.h

@ -241,6 +241,9 @@ class Operand BASE_EMBEDDED {
// reg
INLINE(explicit Operand(Register reg));
// XMM reg
INLINE(explicit Operand(XMMRegister xmm_reg));
// [disp/r]
INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
// disp only must always be relocated
@ -709,6 +712,7 @@ class Assembler : public Malloced {
void fistp_s(const Operand& adr);
void fistp_d(const Operand& adr);
// The fisttp instructions require SSE3.
void fisttp_s(const Operand& adr);
void fisttp_d(const Operand& adr);

494
deps/v8/src/ia32/codegen-ia32.cc

@ -112,6 +112,8 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm)
allocator_(NULL),
state_(NULL),
loop_nesting_(0),
in_safe_int32_mode_(false),
safe_int32_mode_enabled_(true),
function_return_is_shadowed_(false),
in_spilled_code_(false) {
}
@ -437,14 +439,14 @@ Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
// frame. If the expression is boolean-valued it may be compiled (or
// partially compiled) into control flow to the control destination.
// If force_control is true, control flow is forced.
void CodeGenerator::LoadCondition(Expression* x,
void CodeGenerator::LoadCondition(Expression* expr,
ControlDestination* dest,
bool force_control) {
ASSERT(!in_spilled_code());
int original_height = frame_->height();
{ CodeGenState new_state(this, dest);
Visit(x);
Visit(expr);
// If we hit a stack overflow, we may not have actually visited
// the expression. In that case, we ensure that we have a
@ -481,13 +483,106 @@ void CodeGenerator::LoadAndSpill(Expression* expression) {
}
void CodeGenerator::LoadInSafeInt32Mode(Expression* expr,
BreakTarget* unsafe_bailout) {
set_unsafe_bailout(unsafe_bailout);
set_in_safe_int32_mode(true);
Load(expr);
Result value = frame_->Pop();
ASSERT(frame_->HasNoUntaggedInt32Elements());
ConvertInt32ResultToNumber(&value);
set_in_safe_int32_mode(false);
set_unsafe_bailout(NULL);
frame_->Push(&value);
}
void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) {
set_safe_int32_mode_enabled(false);
Load(expr);
set_safe_int32_mode_enabled(true);
}
void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
ASSERT(value->is_untagged_int32());
if (value->is_register()) {
Register val = value->reg();
JumpTarget done;
__ add(val, Operand(val));
done.Branch(no_overflow, value);
__ sar(val, 1);
// If there was an overflow, bits 30 and 31 of the original number disagree.
__ xor_(val, 0x80000000u);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ cvtsi2sd(xmm0, Operand(val));
} else {
// Move val to ST[0] in the FPU
// Push and pop are safe with respect to the virtual frame because
// all synced elements are below the actual stack pointer.
__ push(val);
__ fild_s(Operand(esp, 0));
__ pop(val);
}
Result scratch = allocator_->Allocate();
ASSERT(scratch.is_register());
Label allocation_failed;
__ AllocateHeapNumber(val, scratch.reg(),
no_reg, &allocation_failed);
VirtualFrame* clone = new VirtualFrame(frame_);
scratch.Unuse();
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
}
done.Jump(value);
// Establish the virtual frame, cloned from where AllocateHeapNumber
// jumped to allocation_failed.
RegisterFile empty_regs;
SetFrame(clone, &empty_regs);
__ bind(&allocation_failed);
unsafe_bailout_->Jump();
done.Bind(value);
} else {
ASSERT(value->is_constant());
}
value->set_untagged_int32(false);
}
void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
ASSERT(!in_spilled_code());
// If the expression should be a side-effect-free 32-bit int computation,
// compile that SafeInt32 path, and a bailout path.
if (!in_safe_int32_mode() &&
safe_int32_mode_enabled() &&
expr->side_effect_free() &&
expr->num_bit_ops() > 2 &&
CpuFeatures::IsSupported(SSE2)) {
BreakTarget unsafe_bailout;
JumpTarget done;
unsafe_bailout.set_expected_height(frame_->height());
LoadInSafeInt32Mode(expr, &unsafe_bailout);
done.Jump();
if (unsafe_bailout.is_linked()) {
unsafe_bailout.Bind();
LoadWithSafeInt32ModeDisabled(expr);
}
done.Bind();
} else {
JumpTarget true_target;
JumpTarget false_target;
ControlDestination dest(&true_target, &false_target, true);
LoadCondition(expr, &dest, false);
@ -538,7 +633,7 @@ void CodeGenerator::Load(Expression* expr) {
loaded.Bind();
}
}
}
ASSERT(has_valid_frame());
ASSERT(frame_->height() == original_height + 1);
}
@ -1102,7 +1197,8 @@ static NumberInfo CalculateNumberInfo(NumberInfo operands_type,
void CodeGenerator::GenericBinaryOperation(Token::Value op,
StaticType* type,
OverwriteMode overwrite_mode) {
OverwriteMode overwrite_mode,
bool no_negative_zero) {
Comment cmnt(masm_, "[ BinaryOperation");
Comment cmnt_token(masm_, Token::String(op));
@ -1170,10 +1266,12 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
answer = stub.GenerateCall(masm_, frame_, &left, &right);
} else if (right_is_smi_constant) {
answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
type, false, overwrite_mode);
type, false, overwrite_mode,
no_negative_zero);
} else if (left_is_smi_constant) {
answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
type, true, overwrite_mode);
type, true, overwrite_mode,
no_negative_zero);
} else {
// Set the flags based on the operation, type and loop nesting level.
// Bit operations always assume they likely operate on Smis. Still only
@ -1184,7 +1282,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
(Token::IsBitOp(op) ||
operands_type.IsInteger32() ||
type->IsLikelySmi())) {
answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
answer = LikelySmiBinaryOperation(op, &left, &right,
overwrite_mode, no_negative_zero);
} else {
GenericBinaryOpStub stub(op,
overwrite_mode,
@ -1291,7 +1390,8 @@ static void CheckTwoForSminess(MacroAssembler* masm,
Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
Result* left,
Result* right,
OverwriteMode overwrite_mode) {
OverwriteMode overwrite_mode,
bool no_negative_zero) {
Result answer;
// Special handling of div and mod because they use fixed registers.
if (op == Token::DIV || op == Token::MOD) {
@ -1395,13 +1495,16 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// Check for negative zero result. If result is zero, and divisor
// is negative, return a floating point negative zero. The
// virtual frame is unchanged in this block, so local control flow
// can use a Label rather than a JumpTarget.
// can use a Label rather than a JumpTarget. If the context of this
// expression will treat -0 like 0, do not do this test.
if (!no_negative_zero) {
Label non_zero_result;
__ test(left->reg(), Operand(left->reg()));
__ j(not_zero, &non_zero_result);
__ test(right->reg(), Operand(right->reg()));
deferred->Branch(negative);
__ bind(&non_zero_result);
}
// Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by
// idiv instruction.
@ -1423,12 +1526,14 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// the dividend is negative, return a floating point negative
// zero. The frame is unchanged in this block, so local control
// flow can use a Label rather than a JumpTarget.
if (!no_negative_zero) {
Label non_zero_result;
__ test(edx, Operand(edx));
__ j(not_zero, &non_zero_result, taken);
__ test(left->reg(), Operand(left->reg()));
deferred->Branch(negative);
__ bind(&non_zero_result);
}
deferred->BindExit();
left->Unuse();
right->Unuse();
@ -1468,12 +1573,43 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
left->number_info(),
right->number_info(),
overwrite_mode);
Label do_op, left_nonsmi;
// if right is a smi we make a fast case if left is either a smi
// or a heapnumber.
if (CpuFeatures::IsSupported(SSE2) && right->number_info().IsSmi()) {
CpuFeatures::Scope use_sse2(SSE2);
__ mov(answer.reg(), left->reg());
// Fast case - both are actually smis.
if (!left->number_info().IsSmi()) {
__ test(answer.reg(), Immediate(kSmiTagMask));
__ j(not_zero, &left_nonsmi);
}
__ SmiUntag(answer.reg());
__ jmp(&do_op);
__ bind(&left_nonsmi);
// Branch if not a heapnumber.
__ cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
Factory::heap_number_map());
deferred->Branch(not_equal);
// Load integer value into answer register using truncation.
__ cvttsd2si(answer.reg(),
FieldOperand(answer.reg(), HeapNumber::kValueOffset));
// Branch if we do not fit in a smi.
__ cmp(answer.reg(), 0xc0000000);
deferred->Branch(negative);
} else {
CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
left->number_info(), right->number_info(), deferred);
// Untag both operands.
__ mov(answer.reg(), left->reg());
__ SmiUntag(answer.reg());
}
__ bind(&do_op);
__ SmiUntag(ecx);
// Perform the operation.
switch (op) {
@ -1571,6 +1707,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// argument is negative, go to slow case. The frame is unchanged
// in this block, so local control flow can use a Label rather
// than a JumpTarget.
if (!no_negative_zero) {
Label non_zero_result;
__ test(answer.reg(), Operand(answer.reg()));
__ j(not_zero, &non_zero_result, taken);
@ -1579,6 +1716,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->Branch(negative);
__ xor_(answer.reg(), Operand(answer.reg())); // Positive 0 is correct.
__ bind(&non_zero_result);
}
break;
}
@ -1817,7 +1955,8 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Handle<Object> value,
StaticType* type,
bool reversed,
OverwriteMode overwrite_mode) {
OverwriteMode overwrite_mode,
bool no_negative_zero) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a constant smi.
@ -1828,10 +1967,10 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result unsafe_operand(value);
if (reversed) {
return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
overwrite_mode);
overwrite_mode, no_negative_zero);
} else {
return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
overwrite_mode);
overwrite_mode, no_negative_zero);
}
}
@ -1911,7 +2050,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (reversed) {
Result constant_operand(value);
answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
overwrite_mode);
overwrite_mode, no_negative_zero);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@ -1947,7 +2086,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
if (reversed) {
Result constant_operand(value);
answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
overwrite_mode);
overwrite_mode, no_negative_zero);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@ -2140,10 +2279,10 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result constant_operand(value);
if (reversed) {
answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
overwrite_mode);
overwrite_mode, no_negative_zero);
} else {
answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
overwrite_mode);
overwrite_mode, no_negative_zero);
}
}
break;
@ -2180,10 +2319,10 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result constant_operand(value);
if (reversed) {
answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
overwrite_mode);
overwrite_mode, no_negative_zero);
} else {
answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
overwrite_mode);
overwrite_mode, no_negative_zero);
}
break;
}
@ -4270,7 +4409,7 @@ Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
Comment cmnt(masm_, "[ FunctionLiteral");
ASSERT(!in_safe_int32_mode());
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate =
Compiler::BuildBoilerplate(node, script(), this);
@ -4283,6 +4422,7 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
void CodeGenerator::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
Result result = InstantiateBoilerplate(node->boilerplate());
frame()->Push(&result);
@ -4291,6 +4431,7 @@ void CodeGenerator::VisitFunctionBoilerplateLiteral(
void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
ASSERT(!in_safe_int32_mode());
JumpTarget then;
JumpTarget else_;
JumpTarget exit;
@ -4461,6 +4602,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
Slot* slot,
TypeofState typeof_state,
JumpTarget* slow) {
ASSERT(!in_safe_int32_mode());
// Check that no extension objects have been created by calls to
// eval from the current scope to the global scope.
Register context = esi;
@ -4629,10 +4771,20 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
}
void CodeGenerator::VisitSlot(Slot* node) {
void CodeGenerator::VisitSlot(Slot* slot) {
Comment cmnt(masm_, "[ Slot");
Result result = LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
if (in_safe_int32_mode()) {
if ((slot->type() == Slot::LOCAL && !slot->is_arguments())) {
frame()->UntaggedPushLocalAt(slot->index());
} else if (slot->type() == Slot::PARAMETER) {
frame()->UntaggedPushParameterAt(slot->index());
} else {
UNREACHABLE();
}
} else {
Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
frame()->Push(&result);
}
}
@ -4644,6 +4796,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
Visit(expr);
} else {
ASSERT(var->is_global());
ASSERT(!in_safe_int32_mode());
Reference ref(this, node);
ref.GetValue();
}
@ -4652,7 +4805,11 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
void CodeGenerator::VisitLiteral(Literal* node) {
Comment cmnt(masm_, "[ Literal");
if (in_safe_int32_mode()) {
frame_->PushUntaggedElement(node->handle());
} else {
frame_->Push(node->handle());
}
}
@ -4726,6 +4883,7 @@ void DeferredRegExpLiteral::Generate() {
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ RegExp Literal");
// Retrieve the literals array and check the allocated entry. Begin
@ -4762,6 +4920,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ ObjectLiteral");
// Load a writable copy of the function of this activation in a
@ -4846,6 +5005,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ ArrayLiteral");
// Load a writable copy of the function of this activation in a
@ -4917,6 +5077,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
ASSERT(!in_safe_int32_mode());
ASSERT(!in_spilled_code());
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
@ -4950,7 +5111,8 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) {
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericBinaryOperation(node->binary_op(),
node->type(),
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
node->no_negative_zero());
} else {
Load(node->value());
}
@ -5027,7 +5189,8 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericBinaryOperation(node->binary_op(),
node->type(),
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
node->no_negative_zero());
} else {
Load(node->value());
}
@ -5106,7 +5269,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
GenericBinaryOperation(node->binary_op(),
node->type(),
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
node->no_negative_zero());
} else {
Load(node->value());
}
@ -5133,6 +5297,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
void CodeGenerator::VisitAssignment(Assignment* node) {
ASSERT(!in_safe_int32_mode());
#ifdef DEBUG
int original_height = frame()->height();
#endif
@ -5168,6 +5333,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
void CodeGenerator::VisitThrow(Throw* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ Throw");
Load(node->exception());
Result result = frame_->CallRuntime(Runtime::kThrow, 1);
@ -5176,6 +5342,7 @@ void CodeGenerator::VisitThrow(Throw* node) {
void CodeGenerator::VisitProperty(Property* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ Property");
Reference property(this, node);
property.GetValue();
@ -5183,6 +5350,7 @@ void CodeGenerator::VisitProperty(Property* node) {
void CodeGenerator::VisitCall(Call* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ Call");
Expression* function = node->expression();
@ -5398,6 +5566,7 @@ void CodeGenerator::VisitCall(Call* node) {
void CodeGenerator::VisitCallNew(CallNew* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@ -6325,6 +6494,7 @@ void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
ASSERT(!in_safe_int32_mode());
if (CheckForInlineRuntimeCall(node)) {
return;
}
@ -6450,6 +6620,40 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->SetElementAt(0, Factory::undefined_value());
}
} else {
if (in_safe_int32_mode()) {
Visit(node->expression());
Result value = frame_->Pop();
ASSERT(value.is_untagged_int32());
// Registers containing an int32 value are not multiply used.
ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
value.ToRegister();
switch (op) {
case Token::SUB: {
__ neg(value.reg());
if (node->no_negative_zero()) {
// -MIN_INT is MIN_INT with the overflow flag set.
unsafe_bailout_->Branch(overflow);
} else {
// MIN_INT and 0 both have bad negations. They both have 31 zeros.
__ test(value.reg(), Immediate(0x7FFFFFFF));
unsafe_bailout_->Branch(zero);
}
break;
}
case Token::BIT_NOT: {
__ not_(value.reg());
break;
}
case Token::ADD: {
// Unary plus has no effect on int32 values.
break;
}
default:
UNREACHABLE();
break;
}
frame_->Push(&value);
} else {
Load(node->expression());
bool overwrite =
@ -6460,16 +6664,29 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
GenericUnaryOpStub stub(Token::SUB, overwrite);
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
answer.set_number_info(NumberInfo::Number());
frame_->Push(&answer);
break;
}
case Token::BIT_NOT: {
// Smi check.
JumpTarget smi_label;
JumpTarget continue_label;
Result operand = frame_->Pop();
NumberInfo operand_info = operand.number_info();
operand.ToRegister();
if (operand_info.IsSmi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(operand.reg(), "Operand not a smi.");
}
frame_->Spill(operand.reg());
// Set smi tag bit. It will be reset by the not operation.
__ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask));
__ not_(operand.reg());
Result answer = operand;
answer.set_number_info(NumberInfo::Smi());
frame_->Push(&answer);
} else {
__ test(operand.reg(), Immediate(kSmiTagMask));
smi_label.Branch(zero, &operand, taken);
@ -6480,18 +6697,25 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
// Set smi tag bit. It will be reset by the not operation.
__ lea(answer.reg(), Operand(answer.reg(), kSmiTagMask));
__ not_(answer.reg());
__ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
continue_label.Bind(&answer);
if (operand_info.IsInteger32()) {
answer.set_number_info(NumberInfo::Integer32());
} else {
answer.set_number_info(NumberInfo::Number());
}
frame_->Push(&answer);
}
break;
}
case Token::ADD: {
// Smi check.
JumpTarget continue_label;
Result operand = frame_->Pop();
NumberInfo operand_info = operand.number_info();
operand.ToRegister();
__ test(operand.reg(), Immediate(kSmiTagMask));
continue_label.Branch(zero, &operand, taken);
@ -6501,16 +6725,23 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
CALL_FUNCTION, 1);
continue_label.Bind(&answer);
if (operand_info.IsSmi()) {
answer.set_number_info(NumberInfo::Smi());
} else if (operand_info.IsInteger32()) {
answer.set_number_info(NumberInfo::Integer32());
} else {
answer.set_number_info(NumberInfo::Number());
}
frame_->Push(&answer);
break;
}
default:
// NOT, DELETE, TYPEOF, and VOID are handled outside the
// switch.
UNREACHABLE();
}
}
}
}
@ -6601,6 +6832,7 @@ void DeferredPostfixCountOperation::Generate() {
void CodeGenerator::VisitCountOperation(CountOperation* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
@ -6639,19 +6871,25 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// Ensure the new value is writable.
frame_->Spill(new_value.reg());
Result tmp;
if (new_value.is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(new_value.reg(), "Operand not a smi");
}
} else {
// We don't know statically if the input is a smi.
// In order to combine the overflow and the smi tag check, we need
// to be able to allocate a byte register. We attempt to do so
// without spilling. If we fail, we will generate separate overflow
// and smi tag checks.
//
// We allocate and clear the temporary byte register before
// performing the count operation since clearing the register using
// xor will clear the overflow flag.
Result tmp = allocator_->AllocateByteRegisterWithoutSpilling();
// We allocate and clear a temporary byte register before performing
// the count operation since clearing the register using xor will clear
// the overflow flag.
tmp = allocator_->AllocateByteRegisterWithoutSpilling();
if (tmp.is_valid()) {
__ Set(tmp.reg(), Immediate(0));
}
}
if (is_increment) {
__ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
@ -6659,12 +6897,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
__ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
}
if (new_value.is_smi()) {
if (FLAG_debug_code) {
__ AbortIfNotSmi(new_value.reg(), "Argument not a smi");
}
if (tmp.is_valid()) tmp.Unuse();
} else {
DeferredCode* deferred = NULL;
if (is_postfix) {
deferred = new DeferredPostfixCountOperation(new_value.reg(),
@ -6675,12 +6907,16 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
is_increment);
}
if (new_value.is_smi()) {
// In case we have a smi as input just check for overflow.
deferred->Branch(overflow);
} else {
// If the count operation didn't overflow and the result is a valid
// smi, we're done. Otherwise, we jump to the deferred slow-case
// code.
if (tmp.is_valid()) {
// We combine the overflow and the smi tag check if we could
// successfully allocate a temporary byte register.
if (tmp.is_valid()) {
__ setcc(overflow, tmp.reg());
__ or_(Operand(tmp.reg()), new_value.reg());
__ test(tmp.reg(), Immediate(kSmiTagMask));
@ -6692,8 +6928,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
__ test(new_value.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
}
deferred->BindExit();
}
deferred->BindExit();
// Postfix: store the old value in the allocated slot under the
// reference.
@ -6709,6 +6946,166 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
}
// Emits code for a binary operation whose two operands are untagged int32
// values on top of the virtual frame. The untagged int32 result is pushed
// back onto the frame. Only called in safe-int32 mode; whenever the result
// cannot be represented as an int32 (overflow, uint32 with the top bit set,
// negative/zero divisor, non-integer quotient), control jumps to
// unsafe_bailout_ so the expression is recompiled on the generic path.
void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
Token::Value op = node->op();
Comment cmnt(masm_, "[ Int32BinaryOperation");
ASSERT(in_safe_int32_mode());
ASSERT(safe_int32_mode_enabled());
ASSERT(FLAG_safe_int32_compiler);
if (op == Token::COMMA) {
// Discard left value.
frame_->Nip(1);
return;
}
Result right = frame_->Pop();
Result left = frame_->Pop();
ASSERT(right.is_untagged_int32());
ASSERT(left.is_untagged_int32());
// Registers containing an int32 value are not multiply used.
ASSERT(!left.is_register() || !frame_->is_used(left.reg()));
ASSERT(!right.is_register() || !frame_->is_used(right.reg()));
switch (op) {
case Token::COMMA:
case Token::OR:
case Token::AND:
// COMMA was handled above; OR/AND are short-circuiting and are
// compiled by VisitBinaryOperation, never by this function.
UNREACHABLE();
break;
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
// Bitwise ops are destructive on the left register; the result of an
// int32 bitwise op is always a valid int32, so no bailout is needed.
left.ToRegister();
right.ToRegister();
if (op == Token::BIT_OR) {
__ or_(left.reg(), Operand(right.reg()));
} else if (op == Token::BIT_XOR) {
__ xor_(left.reg(), Operand(right.reg()));
} else {
ASSERT(op == Token::BIT_AND);
__ and_(left.reg(), Operand(right.reg()));
}
frame_->Push(&left);
right.Unuse();
break;
case Token::SAR:
case Token::SHL:
case Token::SHR: {
// SHR may produce a uint32 that does not fit in an int32; when that can
// happen we must test the sign bit of the result (see below).
bool test_shr_overflow = false;
left.ToRegister();
if (right.is_constant()) {
// Constant shift amount: only the low 5 bits are significant, matching
// JavaScript's shift semantics.
ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
int shift_amount = NumberToInt32(*right.handle()) & 0x1F;
if (op == Token::SAR) {
__ sar(left.reg(), shift_amount);
} else if (op == Token::SHL) {
__ shl(left.reg(), shift_amount);
} else {
ASSERT(op == Token::SHR);
__ shr(left.reg(), shift_amount);
if (shift_amount == 0) test_shr_overflow = true;
}
} else {
// Move right to ecx
// (variable shift amounts must be in cl on ia32).
if (left.is_register() && left.reg().is(ecx)) {
right.ToRegister();
__ xchg(left.reg(), right.reg());
left = right; // Left is unused here, copy of right unused by Push.
} else {
right.ToRegister(ecx);
left.ToRegister();
}
if (op == Token::SAR) {
__ sar_cl(left.reg());
} else if (op == Token::SHL) {
__ shl_cl(left.reg());
} else {
ASSERT(op == Token::SHR);
__ shr_cl(left.reg());
test_shr_overflow = true;
}
}
{
Register left_reg = left.reg();
frame_->Push(&left);
right.Unuse();
if (test_shr_overflow && !node->to_int32()) {
// Uint32 results with top bit set are not Int32 values.
// If they will be forced to Int32, skip the test.
// Test is needed because shr with shift amount 0 does not set flags.
__ test(left_reg, Operand(left_reg));
unsafe_bailout_->Branch(sign);
}
}
break;
}
case Token::ADD:
case Token::SUB:
case Token::MUL:
left.ToRegister();
right.ToRegister();
if (op == Token::ADD) {
__ add(left.reg(), Operand(right.reg()));
} else if (op == Token::SUB) {
__ sub(left.reg(), Operand(right.reg()));
} else {
ASSERT(op == Token::MUL);
// We have statically verified that a negative zero can be ignored.
__ imul(left.reg(), Operand(right.reg()));
}
right.Unuse();
frame_->Push(&left);
if (!node->to_int32()) {
// If ToInt32 is called on the result of ADD, SUB, or MUL, we don't
// care about overflows.
unsafe_bailout_->Branch(overflow);
}
break;
case Token::DIV:
case Token::MOD: {
// idiv requires the dividend in edx:eax and leaves quotient in eax,
// remainder in edx; first move the operands out of those registers.
if (right.is_register() && (right.reg().is(eax) || right.reg().is(edx))) {
if (left.is_register() && left.reg().is(edi)) {
right.ToRegister(ebx);
} else {
right.ToRegister(edi);
}
}
left.ToRegister(eax);
Result edx_reg = allocator_->Allocate(edx);
right.ToRegister();
// The results are unused here because BreakTarget::Branch cannot handle
// live results.
Register right_reg = right.reg();
left.Unuse();
right.Unuse();
edx_reg.Unuse();
__ cmp(right_reg, 0);
// Ensure divisor is positive: no chance of non-int32 or -0 result.
unsafe_bailout_->Branch(less_equal);
__ cdq(); // Sign-extend eax into edx:eax
__ idiv(right_reg);
if (op == Token::MOD) {
Result edx_result(edx, NumberInfo::Integer32());
edx_result.set_untagged_int32(true);
frame_->Push(&edx_result);
} else {
ASSERT(op == Token::DIV);
// A JavaScript division only stays on the int32 path if it is exact;
// a non-zero remainder means a fractional result, so bail out.
__ test(edx, Operand(edx));
unsafe_bailout_->Branch(not_equal);
Result eax_result(eax, NumberInfo::Integer32());
eax_result.set_untagged_int32(true);
frame_->Push(&eax_result);
}
break;
}
default:
UNREACHABLE();
break;
}
}
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@ -6723,6 +7120,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// is necessary because we assume that if we get control flow on the
// last path out of an expression we got it on all paths.
if (op == Token::AND) {
ASSERT(!in_safe_int32_mode());
JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true);
LoadCondition(node->left(), &dest, false);
@ -6786,6 +7184,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
} else if (op == Token::OR) {
ASSERT(!in_safe_int32_mode());
JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false);
LoadCondition(node->left(), &dest, false);
@ -6847,6 +7246,10 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
exit.Bind();
}
} else if (in_safe_int32_mode()) {
Visit(node->left());
Visit(node->right());
Int32BinaryOperation(node);
} else {
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
@ -6868,17 +7271,20 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Load(node->left());
Load(node->right());
}
GenericBinaryOperation(node->op(), node->type(), overwrite_mode);
GenericBinaryOperation(node->op(), node->type(),
overwrite_mode, node->no_negative_zero());
}
}
// Pushes the currently executing JSFunction onto the virtual frame.
// A function reference is never a side-effect-free int32 expression, so this
// must not be reached while compiling in safe-int32 mode.
void CodeGenerator::VisitThisFunction(ThisFunction* node) {
ASSERT(!in_safe_int32_mode());
frame_->PushFunction();
}
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
ASSERT(!in_safe_int32_mode());
Comment cmnt(masm_, "[ CompareOperation");
bool left_already_loaded = false;
@ -8847,9 +9253,9 @@ void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ bind(&get_result);
}
// Returns (creating and compiling on first use) the generic binary-op stub
// for the given minor key and recorded type feedback. The HandleScope covers
// any handles allocated while generating the stub code.
Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
GenericBinaryOpStub stub(key, type_info);
HandleScope scope;
return stub.GetCode();
}

43
deps/v8/src/ia32/codegen-ia32.h

@ -357,6 +357,23 @@ class CodeGenerator: public AstVisitor {
// State
ControlDestination* destination() const { return state_->destination(); }
// Control of side-effect-free int32 expression compilation.
bool in_safe_int32_mode() { return in_safe_int32_mode_; }
void set_in_safe_int32_mode(bool value) { in_safe_int32_mode_ = value; }
bool safe_int32_mode_enabled() {
return FLAG_safe_int32_compiler && safe_int32_mode_enabled_;
}
void set_safe_int32_mode_enabled(bool value) {
safe_int32_mode_enabled_ = value;
}
void set_unsafe_bailout(BreakTarget* unsafe_bailout) {
unsafe_bailout_ = unsafe_bailout;
}
// Take the Result that is an untagged int32, and convert it to a tagged
// Smi or HeapNumber. Remove the untagged_int32 flag from the result.
void ConvertInt32ResultToNumber(Result* value);
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
void IncrementLoopNesting() { loop_nesting_++; }
@ -413,7 +430,7 @@ class CodeGenerator: public AstVisitor {
return ContextOperand(esi, Context::GLOBAL_INDEX);
}
void LoadCondition(Expression* x,
void LoadCondition(Expression* expr,
ControlDestination* destination,
bool force_control);
void Load(Expression* expr);
@ -425,6 +442,11 @@ class CodeGenerator: public AstVisitor {
// temporarily while the code generator is being transformed.
void LoadAndSpill(Expression* expression);
// Evaluate an expression and place its value on top of the frame,
// using, or not using, the side-effect-free expression compiler.
void LoadInSafeInt32Mode(Expression* expr, BreakTarget* unsafe_bailout);
void LoadWithSafeInt32ModeDisabled(Expression* expr);
// Read a value from a slot and leave it on top of the expression stack.
Result LoadFromSlot(Slot* slot, TypeofState typeof_state);
Result LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
@ -469,7 +491,8 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(
Token::Value op,
StaticType* type,
OverwriteMode overwrite_mode);
OverwriteMode overwrite_mode,
bool no_negative_zero);
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
@ -483,7 +506,8 @@ class CodeGenerator: public AstVisitor {
Handle<Object> constant_operand,
StaticType* type,
bool reversed,
OverwriteMode overwrite_mode);
OverwriteMode overwrite_mode,
bool no_negative_zero);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
@ -491,7 +515,14 @@ class CodeGenerator: public AstVisitor {
Result LikelySmiBinaryOperation(Token::Value op,
Result* left,
Result* right,
OverwriteMode overwrite_mode);
OverwriteMode overwrite_mode,
bool no_negative_zero);
// Emit code to perform a binary operation on two untagged int32 values.
// The values are on top of the frame, and the result is pushed on the frame.
void Int32BinaryOperation(BinaryOperation* node);
void Comparison(AstNode* node,
Condition cc,
@ -639,10 +670,14 @@ class CodeGenerator: public AstVisitor {
RegisterAllocator* allocator_;
CodeGenState* state_;
int loop_nesting_;
bool in_safe_int32_mode_;
bool safe_int32_mode_enabled_;
// Jump targets.
// The target of the return from the function.
BreakTarget function_return_;
// The target of the bailout from a side-effect-free int32 subexpression.
BreakTarget* unsafe_bailout_;
// True if the function return is shadowed (ie, jumping to the target
// function_return_ does not jump to the true function return, but rather

54
deps/v8/src/ia32/register-allocator-ia32.cc

@ -42,7 +42,33 @@ void Result::ToRegister() {
if (is_constant()) {
Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
ASSERT(fresh.is_valid());
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
if (is_untagged_int32()) {
fresh.set_untagged_int32(true);
if (handle()->IsSmi()) {
CodeGeneratorScope::Current()->masm()->Set(
fresh.reg(),
Immediate(Smi::cast(*handle())->value()));
} else if (handle()->IsHeapNumber()) {
double double_value = HeapNumber::cast(*handle())->value();
int32_t value = DoubleToInt32(double_value);
if (double_value == 0 && signbit(double_value)) {
// Negative zero must not be converted to an int32 unless
// the context allows it.
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
} else if (double_value == value) {
CodeGeneratorScope::Current()->masm()->Set(
fresh.reg(), Immediate(value));
} else {
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
}
} else {
// Constant is not a number. This was not predicted by AST analysis.
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
}
} else if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
} else {
CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
@ -65,6 +91,29 @@ void Result::ToRegister(Register target) {
CodeGeneratorScope::Current()->masm()->mov(fresh.reg(), reg());
} else {
ASSERT(is_constant());
if (is_untagged_int32()) {
if (handle()->IsSmi()) {
CodeGeneratorScope::Current()->masm()->Set(
fresh.reg(),
Immediate(Smi::cast(*handle())->value()));
} else {
ASSERT(handle()->IsHeapNumber());
double double_value = HeapNumber::cast(*handle())->value();
int32_t value = DoubleToInt32(double_value);
if (double_value == 0 && signbit(double_value)) {
// Negative zero must not be converted to an int32 unless
// the context allows it.
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
} else if (double_value == value) {
CodeGeneratorScope::Current()->masm()->Set(
fresh.reg(), Immediate(value));
} else {
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(equal);
CodeGeneratorScope::Current()->unsafe_bailout_->Branch(not_equal);
}
}
} else {
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
} else {
@ -72,6 +121,9 @@ void Result::ToRegister(Register target) {
Immediate(handle()));
}
}
}
fresh.set_number_info(number_info());
fresh.set_untagged_int32(is_untagged_int32());
*this = fresh;
} else if (is_register() && reg().is(target)) {
ASSERT(CodeGeneratorScope::Current()->has_valid_frame());

48
deps/v8/src/ia32/stub-cache-ia32.cc

@ -1251,7 +1251,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
Label call_builtin, exit, with_rset_update;
Label call_builtin, exit, with_rset_update,
attempt_to_grow_elements, finish_push;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@ -1263,9 +1264,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiTag(ecx);
// Check if we could survive without allocation, go to builtin otherwise.
// Check if we could survive without allocation.
__ cmp(eax, Operand(ecx));
__ j(greater, &call_builtin);
__ j(greater, &attempt_to_grow_elements);
// Save new length.
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
@ -1277,6 +1278,8 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
__ bind(&finish_push);
// Check if value is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &with_rset_update);
@ -1292,6 +1295,45 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ CallStub(&stub);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address();
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
const int kAllocationDelta = 4;
// Load top.
__ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
// Check if it's the end of elements.
__ lea(edx, FieldOperand(ebx,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmp(edx, Operand(ecx));
__ j(not_equal, &call_builtin);
__ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
__ j(greater, &call_builtin);
// We fit and could grow elements.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
Immediate(Factory::undefined_value()));
}
// Restore receiver to edx as finish sequence assumes it's here.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
__ add(FieldOperand(ebx, FixedArray::kLengthOffset),
Immediate(kAllocationDelta));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
__ jmp(&finish_push);
__ bind(&call_builtin);
}

101
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -775,6 +775,89 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
}
// Pushes a copy of the frame slot at 'index' on top of the frame as an
// untagged int32 value, emitting conversion code as needed. If the slot's
// value cannot be represented as an int32 (non-number, non-integral double,
// NaN, or negative zero), control jumps to the code generator's
// unsafe_bailout_ target instead.
void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
ASSERT(index >= 0);
ASSERT(index <= element_count());
FrameElement original = elements_[index];
// Resolve copies to their backing element before loading.
if (original.is_copy()) {
original = elements_[original.index()];
index = original.index();
}
switch (original.type()) {
case FrameElement::MEMORY:
case FrameElement::REGISTER: {
Label done;
// Emit code to load the original element's data into a register.
// Push that register as a FrameElement on top of the frame.
Result fresh = cgen()->allocator()->Allocate();
ASSERT(fresh.is_valid());
Register fresh_reg = fresh.reg();
FrameElement new_element =
FrameElement::RegisterElement(fresh_reg,
FrameElement::NOT_SYNCED,
original.number_info());
new_element.set_untagged_int32(true);
Use(fresh_reg, element_count());
fresh.Unuse(); // BreakTarget does not handle a live Result well.
elements_.Add(new_element);
if (original.is_register()) {
__ mov(fresh_reg, original.reg());
} else {
ASSERT(original.is_memory());
__ mov(fresh_reg, Operand(ebp, fp_relative(index)));
}
// Now convert the value to int32, or bail out.
if (original.number_info().IsSmi()) {
// Statically known smi: untagging is all that is needed.
__ SmiUntag(fresh_reg);
// Pushing the element is completely done.
} else {
__ test(fresh_reg, Immediate(kSmiTagMask));
Label not_smi;
__ j(not_zero, &not_smi);
__ SmiUntag(fresh_reg);
__ jmp(&done);
__ bind(&not_smi);
if (!original.number_info().IsNumber()) {
// Not statically known to be a number: check the map, bail out on
// anything that is not a heap number.
__ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset),
Factory::heap_number_map());
cgen()->unsafe_bailout_->Branch(not_equal);
}
if (!CpuFeatures::IsSupported(SSE2)) {
// Safe-int32 compilation is only enabled with SSE2 available, so
// this path cannot be reached.
UNREACHABLE();
} else {
CpuFeatures::Scope use_sse2(SSE2);
// Convert the double to int32 and back; if the round trip changes
// the value the double was not an exact int32.
__ movdbl(xmm0, FieldOperand(fresh_reg, HeapNumber::kValueOffset));
__ cvttsd2si(fresh_reg, Operand(xmm0));
__ cvtsi2sd(xmm1, Operand(fresh_reg));
__ ucomisd(xmm0, xmm1);
cgen()->unsafe_bailout_->Branch(not_equal);
cgen()->unsafe_bailout_->Branch(parity_even); // NaN.
// Test for negative zero.
__ test(fresh_reg, Operand(fresh_reg));
__ j(not_zero, &done);
// Zero result: inspect the double's sign bit to reject -0.0.
__ movmskpd(fresh_reg, xmm0);
__ and_(fresh_reg, 0x1);
cgen()->unsafe_bailout_->Branch(not_equal);
}
__ bind(&done);
}
break;
}
case FrameElement::CONSTANT:
// Constants are pushed as-is and flagged; conversion happens lazily in
// Result::ToRegister.
elements_.Add(CopyElementAt(index));
elements_[element_count() - 1].set_untagged_int32(true);
break;
case FrameElement::COPY:
case FrameElement::INVALID:
// Copies were resolved above; invalid elements never appear here.
UNREACHABLE();
break;
}
}
void VirtualFrame::PushTryHandler(HandlerType type) {
ASSERT(cgen()->HasValidEntryRegisters());
// Grow the expression stack by handler size less one (the return
@ -1060,6 +1143,7 @@ Result VirtualFrame::Pop() {
FrameElement element = elements_.RemoveLast();
int index = element_count();
ASSERT(element.is_valid());
ASSERT(element.is_untagged_int32() == cgen()->in_safe_int32_mode());
// Get number type information of the result.
NumberInfo info;
@ -1077,6 +1161,7 @@ Result VirtualFrame::Pop() {
ASSERT(temp.is_valid());
__ pop(temp.reg());
temp.set_number_info(info);
temp.set_untagged_int32(element.is_untagged_int32());
return temp;
}
@ -1089,6 +1174,7 @@ Result VirtualFrame::Pop() {
if (element.is_register()) {
Unuse(element.reg());
} else if (element.is_copy()) {
ASSERT(!element.is_untagged_int32());
ASSERT(element.index() < index);
index = element.index();
element = elements_[index];
@ -1100,6 +1186,7 @@ Result VirtualFrame::Pop() {
// Memory elements could only be the backing store of a copy.
// Allocate the original to a register.
ASSERT(index <= stack_pointer_);
ASSERT(!element.is_untagged_int32());
Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
Use(temp.reg(), index);
@ -1113,10 +1200,14 @@ Result VirtualFrame::Pop() {
__ mov(temp.reg(), Operand(ebp, fp_relative(index)));
return Result(temp.reg(), info);
} else if (element.is_register()) {
return Result(element.reg(), info);
Result return_value(element.reg(), info);
return_value.set_untagged_int32(element.is_untagged_int32());
return return_value;
} else {
ASSERT(element.is_constant());
return Result(element.handle());
Result return_value(element.handle());
return_value.set_untagged_int32(element.is_untagged_int32());
return return_value;
}
}
@ -1161,6 +1252,12 @@ void VirtualFrame::EmitPush(Immediate immediate, NumberInfo info) {
}
// Pushes a constant on the frame and marks the new top element as holding an
// untagged int32. The actual conversion of the constant to an int32 is
// deferred until the element is materialized in a register.
void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
elements_[element_count() - 1].set_untagged_int32(true);
}
void VirtualFrame::Push(Expression* expr) {
ASSERT(expr->IsTrivial());

29
deps/v8/src/ia32/virtual-frame-ia32.h

@ -242,6 +242,11 @@ class VirtualFrame: public ZoneObject {
PushFrameSlotAt(local0_index() + index);
}
// Push a copy of the value of a local frame slot on top of the frame,
// as an untagged int32 value (bails out if it is not an int32).
void UntaggedPushLocalAt(int index) {
UntaggedPushFrameSlotAt(local0_index() + index);
}
// Push the value of a local frame slot on top of the frame and invalidate
// the local slot. The slot should be written to before trying to read
// from it again.
@ -282,6 +287,11 @@ class VirtualFrame: public ZoneObject {
PushFrameSlotAt(param0_index() + index);
}
// Push a copy of the value of a parameter frame slot on top of the frame,
// as an untagged int32 value (bails out if it is not an int32).
void UntaggedPushParameterAt(int index) {
UntaggedPushFrameSlotAt(param0_index() + index);
}
// Push the value of a paramter frame slot on top of the frame and
// invalidate the parameter slot. The slot should be written to before
// trying to read from it again.
@ -399,6 +409,8 @@ class VirtualFrame: public ZoneObject {
inline void Push(Handle<Object> value);
inline void Push(Smi* value);
void PushUntaggedElement(Handle<Object> value);
// Pushing a result invalidates it (its contents become owned by the
// frame).
void Push(Result* result) {
@ -410,6 +422,10 @@ class VirtualFrame: public ZoneObject {
ASSERT(result->is_constant());
Push(result->handle());
}
if (cgen()->in_safe_int32_mode()) {
ASSERT(result->is_untagged_int32());
elements_[element_count() - 1].set_untagged_int32(true);
}
result->Unuse();
}
@ -422,6 +438,14 @@ class VirtualFrame: public ZoneObject {
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
inline void Nip(int num_dropped);
// Check that the frame has no elements containing untagged int32 elements.
// Used in assertions to verify all untagged values were converted or dropped
// before leaving safe-int32 mode.
bool HasNoUntaggedInt32Elements() {
for (int i = 0; i < element_count(); ++i) {
if (elements_[i].is_untagged_int32()) return false;
}
return true;
}
// Update the type information of a local variable frame element directly.
inline void SetTypeForLocalAt(int index, NumberInfo info);
@ -533,6 +557,11 @@ class VirtualFrame: public ZoneObject {
// the frame.
inline void PushFrameSlotAt(int index);
// Push a copy of a frame slot (typically a local or parameter) on top of
// the frame, at an untagged int32 value. Bails out if the value is not
// an int32.
void UntaggedPushFrameSlotAt(int index);
// Push a the value of a frame slot (typically a local or parameter) on
// top of the frame and invalidate the slot.
void TakeFrameSlotAt(int index);

5
deps/v8/src/liveedit-delay.js

@ -424,3 +424,8 @@ Debug.LiveEditChangeScript.Failure = function(message) {
Debug.LiveEditChangeScript.Failure.prototype.toString = function() {
return "LiveEdit Failure: " + this.message;
}
// A testing entry.
// Maps a source position within 'func' to the corresponding code position
// via the %GetFunctionCodePositionFromSource runtime call. NOTE(review):
// the %-call syntax only works in V8-internal scripts (or with natives
// syntax allowed), which this delay script presumably is — confirm.
Debug.LiveEditChangeScript.GetPcFromSourcePos = function(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
}

244
deps/v8/src/liveedit.cc

@ -346,6 +346,80 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
}
// Visitor that collects all references to a particular code object,
// including "CODE_TARGET" references in other code objects.
// It works in context of ZoneScope.
class ReferenceCollectorVisitor : public ObjectVisitor {
public:
explicit ReferenceCollectorVisitor(Code* original)
: original_(original), rvalues_(10), reloc_infos_(10) {
}
// Records the address of every visited pointer slot that currently holds
// the original code object, so it can be overwritten later in Replace().
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if (*p == original_) {
rvalues_.Add(p);
}
}
}
// Records relocation entries whose code target is the original code object.
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
if (Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
reloc_infos_.Add(*rinfo);
}
}
// Debug break targets are treated the same as ordinary code targets.
virtual void VisitDebugTarget(RelocInfo* rinfo) {
VisitCodeTarget(rinfo);
}
// Post-visiting method that iterates over all collected references and
// modifies them.
void Replace(Code* substitution) {
// Overwrite the direct pointer slots.
for (int i = 0; i < rvalues_.length(); i++) {
*(rvalues_[i]) = substitution;
}
// Redirect recorded code targets to the substitution's entry point.
for (int i = 0; i < reloc_infos_.length(); i++) {
reloc_infos_[i].set_target_address(substitution->instruction_start());
}
}
private:
Code* original_; // The code object being replaced (not owned).
ZoneList<Object**> rvalues_; // Pointer slots that reference original_.
ZoneList<RelocInfo> reloc_infos_; // Reloc entries targeting original_.
};
// Finds all references to original and replaces them with substitution.
// Scans both the strong roots (including thread stacks) and the whole heap,
// then patches every collected reference in place. Runs with allocation
// disabled so no object moves while raw pointers are being recorded.
// NOTE(review): substitution is asserted to be outside new space —
// presumably so the patched references need no write-barrier/scavenger
// bookkeeping; confirm.
static void ReplaceCodeObject(Code* original, Code* substitution) {
ASSERT(!Heap::InNewSpace(substitution));
AssertNoAllocation no_allocations_please;
// A zone scope for ReferenceCollectorVisitor.
ZoneScope scope(DELETE_ON_EXIT);
ReferenceCollectorVisitor visitor(original);
// Iterate over all roots. Stack frames may have pointer into original code,
// so temporary replace the pointers with offset numbers
// in prologue/epilogue.
ThreadManager::MarkCompactPrologue(true);
Heap::IterateStrongRoots(&visitor, VISIT_ALL);
ThreadManager::MarkCompactEpilogue(true);
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.
HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
obj->Iterate(&visitor);
}
visitor.Replace(substitution);
}
void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
HandleScope scope;
@ -355,8 +429,9 @@ void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
shared_info->set_code(*(compile_info_wrapper.GetFunctionCode()),
UPDATE_WRITE_BARRIER);
ReplaceCodeObject(shared_info->code(),
*(compile_info_wrapper.GetFunctionCode()));
shared_info->set_start_position(compile_info_wrapper.GetStartPosition());
shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
// update breakpoints, original code, constructor stub
@ -389,22 +464,137 @@ static int TranslatePosition(int original_position,
for (int i = 0; i < array_len; i += 3) {
int chunk_start =
Smi::cast(position_change_array->GetElement(i))->value();
int chunk_end =
Smi::cast(position_change_array->GetElement(i + 1))->value();
int chunk_changed_end =
Smi::cast(position_change_array->GetElement(i + 2))->value();
position_diff = chunk_changed_end - chunk_end;
if (original_position < chunk_start) {
break;
}
int chunk_end =
Smi::cast(position_change_array->GetElement(i + 1))->value();
// Position mustn't be inside a chunk.
ASSERT(original_position >= chunk_end);
int chunk_changed_end =
Smi::cast(position_change_array->GetElement(i + 2))->value();
position_diff = chunk_changed_end - chunk_end;
}
return original_position + position_diff;
}
// Auto-growing buffer for writing relocation info code section. This buffer
// is a simplified version of buffer from Assembler. Unlike Assembler, this
// class is platform-independent and it works without dealing with instructions.
// As specified by RelocInfo format, the buffer is filled in reversed order:
// from upper to lower addresses.
// It uses NewArray/DeleteArray for memory management.
class RelocInfoBuffer {
 public:
  // Allocates the backing store with a trailing safety gap and positions the
  // writer at the upper end of the buffer, since RelocInfo data is written
  // backwards. 'pc' is the code address the relocation entries refer to.
  RelocInfoBuffer(int buffer_initial_capacity, byte* pc) {
    buffer_size_ = buffer_initial_capacity + kBufferGap;
    buffer_ = NewArray<byte>(buffer_size_);
    reloc_info_writer_.Reposition(buffer_ + buffer_size_, pc);
  }
  ~RelocInfoBuffer() {
    DeleteArray(buffer_);
  }

  // As specified by RelocInfo format, the buffer is filled in reversed order:
  // from upper to lower addresses. Grows the buffer first if fewer than
  // kBufferGap bytes remain.
  void Write(const RelocInfo* rinfo) {
    if (buffer_ + kBufferGap >= reloc_info_writer_.pos()) {
      Grow();
    }
    reloc_info_writer_.Write(rinfo);
  }

  // Returns the bytes written so far: from the writer's current position up
  // to the end of the buffer.
  Vector<byte> GetResult() {
    // Return the bytes from pos up to end of buffer.
    return Vector<byte>(reloc_info_writer_.pos(),
                        buffer_ + buffer_size_ - reloc_info_writer_.pos());
  }

 private:
  // At least doubles the buffer, moving the already-written tail to the
  // upper end of the new storage (the data grows downwards from there).
  void Grow() {
    // Compute new buffer size.
    int new_buffer_size;
    if (buffer_size_ < 2 * KB) {
      new_buffer_size = 4 * KB;
    } else {
      new_buffer_size = 2 * buffer_size_;
    }
    // Some internal data structures overflow for very large buffers,
    // they must ensure that kMaximalBufferSize is not too large.
    if (new_buffer_size > kMaximalBufferSize) {
      V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
    }

    // Setup new buffer.
    byte* new_buffer = NewArray<byte>(new_buffer_size);

    // Copy the data; cast is safe because buffer sizes are bounded by
    // kMaximalBufferSize, which fits in an int.
    int currently_used_size =
        static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
    memmove(new_buffer + new_buffer_size - currently_used_size,
            reloc_info_writer_.pos(), currently_used_size);

    reloc_info_writer_.Reposition(
        new_buffer + new_buffer_size - currently_used_size,
        reloc_info_writer_.last_pc());

    DeleteArray(buffer_);
    buffer_ = new_buffer;
    buffer_size_ = new_buffer_size;
  }

  RelocInfoWriter reloc_info_writer_;
  byte* buffer_;        // Owned; released in the destructor or on Grow().
  int buffer_size_;     // Total capacity of buffer_, including kBufferGap.

  // Minimum free space kept below the writer before another Write().
  static const int kBufferGap = 8;
  // Upper bound on growth; exceeding it is a fatal OOM.
  static const int kMaximalBufferSize = 512 * MB;
};
// Patch positions in code (changes relocation info section) and possibly
// returns new instance of code.
// Rewrites every position-mode relocation entry through TranslatePosition.
// If the rewritten relocation section has the same size, it is patched into
// the existing code object in place; otherwise a fresh code object with the
// new relocation section is created and returned.
static Handle<Code> PatchPositionsInCode(Handle<Code> code,
Handle<JSArray> position_change_array) {
RelocInfoBuffer buffer_writer(code->relocation_size(),
code->instruction_start());
{
// No allocation while raw RelocInfo pointers into 'code' are live.
AssertNoAllocation no_allocations_please;
for (RelocIterator it(*code); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
if (RelocInfo::IsPosition(rinfo->rmode())) {
int position = static_cast<int>(rinfo->data());
int new_position = TranslatePosition(position,
position_change_array);
if (position != new_position) {
// Write a copy carrying the translated position.
RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position);
buffer_writer.Write(&info_copy);
continue;
}
}
// Unchanged or non-position entries are copied verbatim.
buffer_writer.Write(it.rinfo());
}
}
Vector<byte> buffer = buffer_writer.GetResult();
if (buffer.length() == code->relocation_size()) {
// Simply patch relocation area of code.
memcpy(code->relocation_start(), buffer.start(), buffer.length());
return code;
} else {
// Relocation info section now has different size. We cannot simply
// rewrite it inside code object. Instead we have to create a new
// code object.
Handle<Code> result(Factory::CopyCode(code, buffer));
return result;
}
}
void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
Handle<JSArray> position_change_array) {
SharedInfoWrapper shared_info_wrapper(shared_info_array);
@ -415,7 +605,45 @@ void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
info->set_end_position(TranslatePosition(info->end_position(),
position_change_array));
// Also patch rinfos (both in working code and original code), breakpoints.
info->set_function_token_position(
TranslatePosition(info->function_token_position(),
position_change_array));
// Patch relocation info section of the code.
Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
position_change_array);
if (*patched_code != info->code()) {
// Replace all references to the code across the heap. In particular,
// some stubs may refer to this code and this code may be being executed
// on stack (it is safe to substitute the code object on stack, because
// we only change the structure of rinfo and leave instructions untouched).
ReplaceCodeObject(info->code(), *patched_code);
}
if (info->debug_info()->IsDebugInfo()) {
Handle<DebugInfo> debug_info(DebugInfo::cast(info->debug_info()));
Handle<Code> patched_orig_code =
PatchPositionsInCode(Handle<Code>(debug_info->original_code()),
position_change_array);
if (*patched_orig_code != debug_info->original_code()) {
// Do not use expensive ReplaceCodeObject for original_code, because we
// do not expect any other references except this one.
debug_info->set_original_code(*patched_orig_code);
}
Handle<FixedArray> break_point_infos(debug_info->break_points());
for (int i = 0; i < break_point_infos->length(); i++) {
if (!break_point_infos->get(i)->IsBreakPointInfo()) {
continue;
}
Handle<BreakPointInfo> info(
BreakPointInfo::cast(break_point_infos->get(i)));
int new_position = TranslatePosition(info->source_position()->value(),
position_change_array);
info->set_source_position(Smi::FromInt(new_position));
}
}
// TODO(635): Also patch breakpoint objects in JS.
}

2461
deps/v8/src/powers_ten.h

File diff suppressed because it is too large

88
deps/v8/src/profile-generator-inl.h

@ -0,0 +1,88 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_PROFILE_GENERATOR_INL_H_
#define V8_PROFILE_GENERATOR_INL_H_
#include "profile-generator.h"
namespace v8 {
namespace internal {
// A code entry represents a JavaScript function iff it was logged under
// one of the three function-related tags.
bool CodeEntry::is_js_function() {
  switch (tag_) {
    case Logger::FUNCTION_TAG:
    case Logger::LAZY_COMPILE_TAG:
    case Logger::SCRIPT_TAG:
      return true;
    default:
      return false;
  }
}
// Wraps a statically allocated C string as an entry name; the string is
// stored by pointer, not copied (see name_ in the class declaration).
StaticNameCodeEntry::StaticNameCodeEntry(Logger::LogEventsAndTags tag,
                                         const char* name)
    : CodeEntry(tag),
      name_(name) {
}
// Copies the function name out of the V8 heap into a C string owned by this
// entry (Detach() transfers ownership to the SmartPointer member).
// |resource_name| is borrowed — ProfileGenerator caches and owns it.
ManagedNameCodeEntry::ManagedNameCodeEntry(Logger::LogEventsAndTags tag,
                                           String* name,
                                           const char* resource_name,
                                           int line_number)
    : CodeEntry(tag),
      name_(name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach()),
      resource_name_(resource_name),
      line_number_(line_number) {
}
// Nodes start with zero self/total ticks; the children map matches keys
// by CodeEntry pointer identity (CodeEntriesMatch).
ProfileNode::ProfileNode(CodeEntry* entry)
    : entry_(entry),
      total_ticks_(0),
      self_ticks_(0),
      children_(CodeEntriesMatch) {
}
// Registers a code object starting at |addr| and spanning |size| bytes.
// Inserting at an existing address overwrites the previous value.
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}
// Updates the map after a code object has been relocated by the GC.
void CodeMap::MoveCode(Address from, Address to) {
  tree_.Move(from, to);
}
// Removes the entry registered at |addr|, if any.
void CodeMap::DeleteCode(Address addr) {
  tree_.Remove(addr);
}
} } // namespace v8::internal
#endif // V8_PROFILE_GENERATOR_INL_H_

295
deps/v8/src/profile-generator.cc

@ -0,0 +1,295 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "profile-generator-inl.h"
namespace v8 {
namespace internal {
// Looks up the child node for |entry| without inserting anything
// (insert == false). Returns NULL when no such child exists.
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  HashMap::Entry* found =
      children_.Lookup(entry, CodeEntryHash(entry), false);
  if (found == NULL) return NULL;
  return reinterpret_cast<ProfileNode*>(found->value);
}
// Returns the child node for |entry|, creating it on first use. Lookup
// with insert == true hands back a slot whose value is NULL when the key
// was not present before.
ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  HashMap::Entry* slot =
      children_.Lookup(entry, CodeEntryHash(entry), true);
  if (slot->value == NULL) {
    // First time this entry appears under this node.
    slot->value = new ProfileNode(entry);
  }
  return reinterpret_cast<ProfileNode*>(slot->value);
}
// Prints "<total ticks> <self ticks> <name>" for this node, then
// recursively prints every child indented two extra columns.
void ProfileNode::Print(int indent) {
  OS::Print("%4u %4u %*c %s\n",
            total_ticks_, self_ticks_,
            indent, ' ',
            entry_ != NULL ? entry_->name() : "");
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}
namespace {

// Traversal callback that deletes each node after all of its children have
// been deleted; used by ~ProfileTree to tear down the whole tree.
class DeleteNodesCallback {
 public:
  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};

}  // namespace
// Deletes every node including root_ via a post-order traversal, so
// children are always freed before their parent.
ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseBreadthFirstPostOrder(&cb);
}
void ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
ProfileNode* node = root_;
for (CodeEntry** entry = path.start() + path.length() - 1;
entry != path.start() - 1;
--entry) {
if (*entry != NULL) {
node = node->FindOrAddChild(*entry);
}
}
node->IncrementSelfTicks();
}
void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
ProfileNode* node = root_;
for (CodeEntry** entry = path.start();
entry != path.start() + path.length();
++entry) {
if (*entry != NULL) {
node = node->FindOrAddChild(*entry);
}
}
node->IncrementSelfTicks();
}
namespace {

// Stack frame for the iterative tree traversal: a node together with the
// hash-map cursor pointing at its next unvisited child (NULL when all
// children have been visited).
struct Position {
  Position(ProfileNode* a_node, HashMap::Entry* a_p)
      : node(a_node), p(a_p) { }
  INLINE(ProfileNode* current_child()) {
    return reinterpret_cast<ProfileNode*>(p->value);
  }
  ProfileNode* node;
  HashMap::Entry* p;
};

}  // namespace
// NOTE(review): despite the name, this is an iterative *depth-first*
// post-order traversal — it always descends into the current child before
// moving to siblings, using an explicit stack instead of recursion.
// |callback| receives AfterAllChildrenTraversed(node) once a node's whole
// subtree is done, and AfterChildTraversed(parent, child) when a child
// subtree completes (not called for root_, which has no parent frame).
template <typename Callback>
void ProfileTree::TraverseBreadthFirstPostOrder(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_, root_->children_.Start()));
  do {
    Position& current = stack.last();
    if (current.p != NULL) {
      // There is still an unvisited child: descend into it.
      stack.Add(Position(current.current_child(),
                         current.current_child()->children_.Start()));
    } else {
      // All children of current.node have been processed.
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        // Advance the parent's cursor past the finished child.
        parent.p = parent.node->children_.Next(parent.p);
        // Remove child from the stack.
        stack.RemoveLast();
      }
    }
  } while (stack.length() > 1 || stack.last().p != NULL);
}
namespace {

// Traversal callback that accumulates tick counts bottom-up: each node's
// total becomes its self ticks plus the totals of all of its children.
class CalculateTotalTicksCallback {
 public:
  void AfterAllChildrenTraversed(ProfileNode* node) {
    node->IncreaseTotalTicks(node->self_ticks());
  }

  void AfterChildTraversed(ProfileNode* parent, ProfileNode* child) {
    parent->IncreaseTotalTicks(child->total_ticks());
  }
};

}  // namespace
// Computes total_ticks for every node bottom-up using the non-recursive
// post-order traversal above. (The helper's name says "breadth-first",
// but the traversal it implements is depth-first post-order.)
void ProfileTree::CalculateTotalTicks() {
  CalculateTotalTicksCallback cb;
  TraverseBreadthFirstPostOrder(&cb);
}
// Prints only the root's aggregate counters: total ticks, then self ticks.
void ProfileTree::ShortPrint() {
  OS::Print("root: %u %u\n", root_->total_ticks(), root_->self_ticks());
}
// Records one sampled call path (pc -> ... -> main) in both views: the
// top-down tree consumes the path from its end (outermost frame first),
// the bottom-up tree from its start (innermost frame first).
void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
  top_down_.AddPathFromEnd(path);
  bottom_up_.AddPathFromStart(path);
}
// Finalizes tick totals in both trees after all samples have been added.
void CpuProfile::CalculateTotalTicks() {
  top_down_.CalculateTotalTicks();
  bottom_up_.CalculateTotalTicks();
}
// Prints root-level counters of both trees on labelled lines.
void CpuProfile::ShortPrint() {
  OS::Print("top down ");
  top_down_.ShortPrint();
  OS::Print("bottom up ");
  bottom_up_.ShortPrint();
}
// Dumps both full trees with section headers.
void CpuProfile::Print() {
  OS::Print("[Top down]:\n");
  top_down_.Print();
  OS::Print("[Bottom up]:\n");
  bottom_up_.Print();
}
// Out-of-line definitions of the sentinel key/value that the SplayTree
// configuration requires.
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
    CodeMap::CodeEntryInfo(NULL, 0);
// Registers |alias| as an additional start address mapping to the same
// entry info as |addr|. Does nothing when |addr| is not in the map.
void CodeMap::AddAlias(Address alias, Address addr) {
  CodeTree::Locator source;
  if (!tree_.Find(addr, &source)) return;
  // Copy the value before inserting, since the insert reshapes the tree.
  const CodeEntryInfo info = source.value();
  CodeTree::Locator destination;
  tree_.Insert(alias, &destination);
  destination.set_value(info);
}
// Resolves an arbitrary address (e.g. a sampled pc) to the code entry
// whose [start, start + size) range contains it, or NULL if none does.
CodeEntry* CodeMap::FindEntry(Address addr) {
  CodeTree::Locator locator;
  if (!tree_.FindGreatestLessThan(addr, &locator)) return NULL;
  // locator.key() <= addr; check addr also falls before the entry's end.
  const CodeEntryInfo& info = locator.value();
  return (addr < locator.key() + info.size) ? info.entry : NULL;
}
// The resource-name cache matches keys by pointer identity (StringsMatch).
ProfileGenerator::ProfileGenerator()
    : resource_names_(StringsMatch) {
}
// List::Iterate helper: frees one code entry.
static void CodeEntriesDeleter(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}
// Frees the cached resource-name C strings (allocated by ToCString/Detach
// in NewCodeEntry) and then all code entries created by the factories.
ProfileGenerator::~ProfileGenerator() {
  for (HashMap::Entry* p = resource_names_.Start();
       p != NULL;
       p = resource_names_.Next(p)) {
    DeleteArray(reinterpret_cast<const char*>(p->value));
  }

  code_entries_.Iterate(CodeEntriesDeleter);
}
// Creates a code entry for dynamically named (JS) code. The function name
// is copied into the entry; resource (script) names are copied once and
// cached by hash, since many functions typically share one script.
CodeEntry* ProfileGenerator::NewCodeEntry(
    Logger::LogEventsAndTags tag,
    String* name,
    String* resource_name, int line_number) {
  const char* cached_resource_name = NULL;
  // Guard against non-string resource names (IsString check on the heap
  // object) before touching the cache.
  if (resource_name->IsString()) {
    // As we copy contents of resource names, and usually they are repeated,
    // we cache names by string hashcode.
    HashMap::Entry* cache_entry =
        resource_names_.Lookup(resource_name,
                               StringEntryHash(resource_name),
                               true);
    if (cache_entry->value == NULL) {
      // New entry added: materialize the C string; freed in ~ProfileGenerator.
      cache_entry->value =
          resource_name->ToCString(DISALLOW_NULLS,
                                   ROBUST_STRING_TRAVERSAL).Detach();
    }
    cached_resource_name = reinterpret_cast<const char*>(cache_entry->value);
  }

  CodeEntry* entry = new ManagedNameCodeEntry(tag,
                                              name,
                                              cached_resource_name,
                                              line_number);
  // Retain the entry so the destructor can delete it.
  code_entries_.Add(entry);
  return entry;
}
// Creates a code entry for statically named code (builtins, stubs, ...).
// The generator keeps ownership and deletes the entry in its destructor.
CodeEntry* ProfileGenerator::NewCodeEntry(
    Logger::LogEventsAndTags tag,
    const char* name) {
  StaticNameCodeEntry* entry = new StaticNameCodeEntry(tag, name);
  code_entries_.Add(entry);
  return entry;
}
} } // namespace v8::internal

233
deps/v8/src/profile-generator.h

@ -0,0 +1,233 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_PROFILE_GENERATOR_H_
#define V8_PROFILE_GENERATOR_H_
#include "hashmap.h"
namespace v8 {
namespace internal {
// Abstract base class for profiler code entries; concrete subclasses
// differ only in how the entry's name is stored.
class CodeEntry {
 public:
  virtual ~CodeEntry() { }
  // Name to show in profile output; implementations return "" rather
  // than NULL for missing names.
  virtual const char* name() = 0;
  // True when tag_ is one of the JS-function-related log tags.
  INLINE(bool is_js_function());

 protected:
  INLINE(explicit CodeEntry(Logger::LogEventsAndTags tag))
      : tag_(tag) { }

 private:
  Logger::LogEventsAndTags tag_;

  DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
// Code entry whose name is a statically allocated C string. The string is
// stored by pointer and never freed here.
class StaticNameCodeEntry : public CodeEntry {
 public:
  INLINE(StaticNameCodeEntry(Logger::LogEventsAndTags tag,
                             const char* name));

  // Falls back to "" when constructed with a NULL name.
  INLINE(virtual const char* name()) { return name_ != NULL ? name_ : ""; }

 private:
  const char* name_;  // Not owned.

  DISALLOW_COPY_AND_ASSIGN(StaticNameCodeEntry);
};
// Code entry whose name is copied out of a heap String and owned via a
// SmartPointer; the resource (script) name is borrowed from the
// ProfileGenerator's cache.
class ManagedNameCodeEntry : public CodeEntry {
 public:
  INLINE(ManagedNameCodeEntry(Logger::LogEventsAndTags tag,
                              String* name,
                              const char* resource_name, int line_number));

  INLINE(virtual const char* name()) { return !name_.is_empty() ? *name_ : ""; }

 private:
  SmartPointer<char> name_;      // Owned copy of the function name.
  const char* resource_name_;    // Not owned; cached by ProfileGenerator.
  int line_number_;

  DISALLOW_COPY_AND_ASSIGN(ManagedNameCodeEntry);
};
class ProfileNode {
public:
INLINE(explicit ProfileNode(CodeEntry* entry));
ProfileNode* FindChild(CodeEntry* entry);
ProfileNode* FindOrAddChild(CodeEntry* entry);
INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
INLINE(unsigned total_ticks()) { return total_ticks_; }
INLINE(unsigned self_ticks()) { return self_ticks_; }
void Print(int indent);
private:
INLINE(static bool CodeEntriesMatch(void* key1, void* key2)) {
return key1 == key2;
}
INLINE(static bool CodeEntryHash(CodeEntry* entry)) {
return static_cast<int32_t>(reinterpret_cast<intptr_t>(entry));
}
CodeEntry* entry_;
unsigned total_ticks_;
unsigned self_ticks_;
// CodeEntry* -> ProfileNode*
HashMap children_;
friend class ProfileTree;
DISALLOW_COPY_AND_ASSIGN(ProfileNode);
};
// A tree of ProfileNodes. Owns its nodes: the destructor deletes the whole
// tree. Sampled paths of CodeEntry* can be merged in from either end.
class ProfileTree BASE_EMBEDDED {
 public:
  ProfileTree() : root_(new ProfileNode(NULL)) { }
  ~ProfileTree();

  // Merges a path walking it from last element to first.
  void AddPathFromEnd(const Vector<CodeEntry*>& path);
  // Merges a path walking it from first element to last.
  void AddPathFromStart(const Vector<CodeEntry*>& path);
  // Fills in total_ticks for all nodes (bottom-up accumulation).
  void CalculateTotalTicks();

  ProfileNode* root() { return root_; }

  void ShortPrint();
  void Print() {
    root_->Print(0);
  }

 private:
  template <typename Callback>
  void TraverseBreadthFirstPostOrder(Callback* callback);

  ProfileNode* root_;

  DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
// A CPU profile: the same samples maintained as two trees, one rooted at
// the outermost frames (top-down) and one at the sampled pcs (bottom-up).
class CpuProfile BASE_EMBEDDED {
 public:
  CpuProfile() { }
  // Add pc -> ... -> main() call path to the profile.
  void AddPath(const Vector<CodeEntry*>& path);
  void CalculateTotalTicks();

  void ShortPrint();
  void Print();

 private:
  ProfileTree top_down_;
  ProfileTree bottom_up_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfile);
};
// Maps code-object start addresses to CodeEntry* (plus size) in a splay
// tree, so an arbitrary pc can be resolved to the entry containing it.
class CodeMap BASE_EMBEDDED {
 public:
  CodeMap() { }
  INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
  INLINE(void MoveCode(Address from, Address to));
  INLINE(void DeleteCode(Address addr));
  // Registers |alias| as an extra start address for the entry at |addr|.
  void AddAlias(Address alias, Address addr);
  // Returns the entry whose range covers |addr|, or NULL.
  CodeEntry* FindEntry(Address addr);

 private:
  // Per-code-object value: the entry and the object's size in bytes.
  struct CodeEntryInfo {
    CodeEntryInfo(CodeEntry* an_entry, unsigned a_size)
        : entry(an_entry), size(a_size) { }
    CodeEntry* entry;
    unsigned size;
  };

  struct CodeTreeConfig {
    typedef Address Key;
    typedef CodeEntryInfo Value;
    static const Key kNoKey;
    static const Value kNoValue;
    static int Compare(const Key& a, const Key& b) {
      return a < b ? -1 : (a > b ? 1 : 0);
    }
  };
  typedef SplayTree<CodeTreeConfig> CodeTree;

  CodeTree tree_;

  DISALLOW_COPY_AND_ASSIGN(CodeMap);
};
class ProfileGenerator {
public:
ProfileGenerator();
~ProfileGenerator();
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
INLINE(CpuProfile* profile()) { return &profile_; }
INLINE(CodeMap* code_map()) { return &code_map_; }
private:
INLINE(static bool StringsMatch(void* key1, void* key2)) {
return key1 == key2;
}
INLINE(static bool StringEntryHash(String* entry)) {
return entry->Hash();
}
CpuProfile profile_;
CodeMap code_map_;
typedef List<CodeEntry*> CodeEntryList;
CodeEntryList code_entries_;
// String::Hash -> const char*
HashMap resource_names_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
} } // namespace v8::internal
#endif // V8_PROFILE_GENERATOR_H_

93
deps/v8/src/regexp-delay.js

@ -95,16 +95,7 @@ function DoConstructRegExp(object, pattern, flags, isConstructorCall) {
%IgnoreAttributesAndSetProperty(object, 'ignoreCase', ignoreCase);
%IgnoreAttributesAndSetProperty(object, 'multiline', multiline);
%IgnoreAttributesAndSetProperty(object, 'lastIndex', 0);
// Clear the regexp result cache.
cachedRegexp = 0;
cachedSubject = 0;
cachedLastIndex = 0;
cachedAnswer = 0;
// These are from string.js.
cachedReplaceSubject = 0;
cachedReplaceRegexp = 0;
cachedReplaceReplacement = 0;
cachedReplaceAnswer = 0;
regExpCache.type = 'none';
}
// Call internal function to compile the pattern.
@ -150,10 +141,17 @@ function DoRegExpExec(regexp, string, index) {
}
var cachedRegexp;
var cachedSubject;
var cachedLastIndex;
var cachedAnswer;
function RegExpCache() {
this.type = 'none';
this.regExp = 0;
this.subject = 0;
this.replaceString = 0;
this.lastIndex = 0;
this.answer = 0;
}
var regExpCache = new RegExpCache();
function CloneRegexpAnswer(array) {
@ -169,10 +167,18 @@ function CloneRegexpAnswer(array) {
function RegExpExec(string) {
if (%_ObjectEquals(cachedLastIndex, this.lastIndex) &&
%_ObjectEquals(cachedRegexp, this) &&
%_ObjectEquals(cachedSubject, string)) {
var last = cachedAnswer;
if (!IS_REGEXP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['RegExp.prototype.exec', this]);
}
var cache = regExpCache;
if (%_ObjectEquals(cache.type, 'exec') &&
%_ObjectEquals(cache.lastIndex, this.lastIndex) &&
%_ObjectEquals(cache.regExp, this) &&
%_ObjectEquals(cache.subject, string)) {
var last = cache.answer;
if (last == null) {
return last;
} else {
@ -180,10 +186,6 @@ function RegExpExec(string) {
}
}
if (!IS_REGEXP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['RegExp.prototype.exec', this]);
}
if (%_ArgumentsLength() == 0) {
var regExpInput = LAST_INPUT(lastMatchInfo);
if (IS_UNDEFINED(regExpInput)) {
@ -212,10 +214,11 @@ function RegExpExec(string) {
if (matchIndices == null) {
if (this.global) this.lastIndex = 0;
cachedLastIndex = lastIndex;
cachedRegexp = this;
cachedSubject = s;
cachedAnswer = matchIndices; // Null.
cache.lastIndex = lastIndex;
cache.regExp = this;
cache.subject = s;
cache.answer = matchIndices; // Null.
cache.type = 'exec';
return matchIndices; // No match.
}
@ -246,10 +249,11 @@ function RegExpExec(string) {
this.lastIndex = lastMatchInfo[CAPTURE1];
return result;
} else {
cachedRegexp = this;
cachedSubject = s;
cachedLastIndex = lastIndex;
cachedAnswer = result;
cache.regExp = this;
cache.subject = s;
cache.lastIndex = lastIndex;
cache.answer = result;
cache.type = 'exec';
return CloneRegexpAnswer(result);
}
}
@ -271,13 +275,35 @@ function RegExpTest(string) {
}
string = regExpInput;
}
var s = ToString(string);
var length = s.length;
var s;
if (IS_STRING(string)) {
s = string;
} else {
s = ToString(string);
}
var lastIndex = this.lastIndex;
var cache = regExpCache;
if (%_ObjectEquals(cache.type, 'test') &&
%_ObjectEquals(cache.regExp, this) &&
%_ObjectEquals(cache.subject, string) &&
%_ObjectEquals(cache.lastIndex, lastIndex)) {
return cache.answer;
}
var length = s.length;
var i = this.global ? TO_INTEGER(lastIndex) : 0;
cache.type = 'test';
cache.regExp = this;
cache.subject = s;
cache.lastIndex = i;
if (i < 0 || i > s.length) {
this.lastIndex = 0;
cache.answer = false;
return false;
}
@ -287,10 +313,12 @@ function RegExpTest(string) {
if (matchIndices == null) {
if (this.global) this.lastIndex = 0;
cache.answer = false;
return false;
}
if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1];
cache.answer = true;
return true;
}
@ -409,6 +437,7 @@ function SetupRegExp() {
return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
}
function RegExpSetInput(string) {
regExpCache.type = 'none';
LAST_INPUT(lastMatchInfo) = ToString(string);
};

17
deps/v8/src/register-allocator.h

@ -71,6 +71,7 @@ class Result BASE_EMBEDDED {
explicit Result(Handle<Object> value) {
value_ = TypeField::encode(CONSTANT)
| NumberInfoField::encode(NumberInfo::Uninitialized().ToInt())
| IsUntaggedInt32Field::encode(false)
| DataField::encode(ConstantList()->length());
ConstantList()->Add(value);
}
@ -112,6 +113,19 @@ class Result BASE_EMBEDDED {
bool is_register() const { return type() == REGISTER; }
bool is_constant() const { return type() == CONSTANT; }
// An untagged int32 Result contains a signed int32 in a register
// or as a constant. These are only allowed in a side-effect-free
// int32 calculation, and if a non-int32 input shows up or an overflow
// occurs, we bail out and drop all the int32 values. Constants are
// not converted to int32 until they are loaded into a register.
bool is_untagged_int32() const {
return IsUntaggedInt32Field::decode(value_);
}
void set_untagged_int32(bool value) {
value_ &= ~IsUntaggedInt32Field::mask();
value_ |= IsUntaggedInt32Field::encode(value);
}
Register reg() const {
ASSERT(is_register());
uint32_t reg = DataField::decode(value_);
@ -140,7 +154,8 @@ class Result BASE_EMBEDDED {
class TypeField: public BitField<Type, 0, 2> {};
class NumberInfoField : public BitField<int, 2, 4> {};
class DataField: public BitField<uint32_t, 6, 32 - 6> {};
class IsUntaggedInt32Field : public BitField<bool, 6, 1> {};
class DataField: public BitField<uint32_t, 7, 32 - 7> {};
inline void CopyTo(Result* destination) const;

48
deps/v8/src/rewriter.cc

@ -220,6 +220,7 @@ void AstOptimizer::VisitFunctionBoilerplateLiteral(
void AstOptimizer::VisitConditional(Conditional* node) {
node->condition()->set_no_negative_zero(true);
Visit(node->condition());
Visit(node->then_expression());
Visit(node->else_expression());
@ -265,7 +266,14 @@ void AstOptimizer::VisitLiteral(Literal* node) {
func_name_inferrer_.PushName(lit_str);
}
} else if (literal->IsHeapNumber()) {
if (node->to_int32()) {
// Any HeapNumber has an int32 value if it is the input to a bit op.
node->set_side_effect_free(true);
} else {
double double_value = HeapNumber::cast(*literal)->value();
int32_t int32_value = DoubleToInt32(double_value);
node->set_side_effect_free(double_value == int32_value);
}
}
}
@ -319,6 +327,8 @@ void AstOptimizer::VisitAssignment(Assignment* node) {
node->type()->SetAsLikelySmiIfUnknown();
node->target()->type()->SetAsLikelySmiIfUnknown();
node->value()->type()->SetAsLikelySmiIfUnknown();
node->value()->set_to_int32(true);
node->value()->set_no_negative_zero(true);
break;
case Token::ASSIGN_ADD:
case Token::ASSIGN_SUB:
@ -393,6 +403,7 @@ void AstOptimizer::VisitThrow(Throw* node) {
void AstOptimizer::VisitProperty(Property* node) {
node->key()->set_no_negative_zero(true);
Visit(node->obj());
Visit(node->key());
}
@ -422,6 +433,11 @@ void AstOptimizer::VisitCallRuntime(CallRuntime* node) {
void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
if (node->op() == Token::ADD || node->op() == Token::SUB) {
node->expression()->set_no_negative_zero(node->no_negative_zero());
} else {
node->expression()->set_no_negative_zero(true);
}
Visit(node->expression());
if (FLAG_safe_int32_compiler) {
switch (node->op()) {
@ -430,9 +446,9 @@ void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
// Fall through.
case Token::ADD:
case Token::SUB:
case Token::NOT:
node->set_side_effect_free(node->expression()->side_effect_free());
break;
case Token::NOT:
case Token::DELETE:
case Token::TYPEOF:
case Token::VOID:
@ -449,6 +465,9 @@ void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
void AstOptimizer::VisitCountOperation(CountOperation* node) {
// Count operations assume that they work on Smis.
node->expression()->set_no_negative_zero(node->is_prefix() ?
true :
node->no_negative_zero());
node->type()->SetAsLikelySmiIfUnknown();
node->expression()->type()->SetAsLikelySmiIfUnknown();
Visit(node->expression());
@ -461,7 +480,12 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
switch (node->op()) {
case Token::COMMA:
case Token::OR:
node->left()->set_no_negative_zero(true);
node->right()->set_no_negative_zero(node->no_negative_zero());
break;
case Token::AND:
node->left()->set_no_negative_zero(node->no_negative_zero());
node->right()->set_no_negative_zero(node->no_negative_zero());
break;
case Token::BIT_OR:
case Token::BIT_XOR:
@ -474,6 +498,8 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
node->right()->type()->SetAsLikelySmiIfUnknown();
node->left()->set_to_int32(true);
node->right()->set_to_int32(true);
node->left()->set_no_negative_zero(true);
node->right()->set_no_negative_zero(true);
break;
case Token::ADD:
case Token::SUB:
@ -484,6 +510,13 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
node->left()->type()->SetAsLikelySmiIfUnknown();
node->right()->type()->SetAsLikelySmiIfUnknown();
}
node->left()->set_no_negative_zero(node->no_negative_zero());
node->right()->set_no_negative_zero(node->no_negative_zero());
if (node->op() == Token::DIV) {
node->right()->set_no_negative_zero(false);
} else if (node->op() == Token::MOD) {
node->right()->set_no_negative_zero(true);
}
break;
default:
UNREACHABLE();
@ -528,6 +561,9 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
case Token::SHL:
case Token::SAR:
case Token::SHR:
// Add one to the number of bit operations in this expression.
node->set_num_bit_ops(1);
// Fall through.
case Token::ADD:
case Token::SUB:
case Token::MUL:
@ -535,6 +571,12 @@ void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
case Token::MOD:
node->set_side_effect_free(node->left()->side_effect_free() &&
node->right()->side_effect_free());
node->set_num_bit_ops(node->num_bit_ops() +
node->left()->num_bit_ops() +
node->right()->num_bit_ops());
if (!node->no_negative_zero() && node->op() == Token::MUL) {
node->set_side_effect_free(false);
}
break;
default:
UNREACHABLE();
@ -551,6 +593,10 @@ void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
node->right()->type()->SetAsLikelySmiIfUnknown();
}
node->left()->set_no_negative_zero(true);
// Only [[HasInstance]] has the right argument passed unchanged to it.
node->right()->set_no_negative_zero(true);
Visit(node->left());
Visit(node->right());

320
deps/v8/src/runtime.cc

@ -1297,6 +1297,7 @@ static Object* Runtime_SpecialArrayFunctions(Arguments args) {
InstallBuiltin(holder, "unshift", Builtins::ArrayUnshift);
InstallBuiltin(holder, "slice", Builtins::ArraySlice);
InstallBuiltin(holder, "splice", Builtins::ArraySplice);
InstallBuiltin(holder, "concat", Builtins::ArrayConcat);
return *holder;
}
@ -2146,10 +2147,23 @@ class BMGoodSuffixBuffers {
static int bad_char_occurrence[kBMAlphabetSize];
static BMGoodSuffixBuffers bmgs_buffers;
// State of the string match tables.
// SIMPLE: No usable content in the buffers.
// BOYER_MOORE_HORSPOOL: The bad_char_occurences table has been populated.
// BOYER_MOORE: The bmgs_buffers tables have also been populated.
// Whenever starting with a new needle, one should call InitializeStringSearch
// to determine which search strategy to use, and in the case of a long-needle
// strategy, the call also initializes the algorithm to SIMPLE.
enum StringSearchAlgorithm { SIMPLE_SEARCH, BOYER_MOORE_HORSPOOL, BOYER_MOORE };
static StringSearchAlgorithm algorithm;
// Compute the bad-char table for Boyer-Moore in the static buffer.
template <typename pchar>
static void BoyerMoorePopulateBadCharTable(Vector<const pchar> pattern,
int start) {
static void BoyerMoorePopulateBadCharTable(Vector<const pchar> pattern) {
// Only preprocess at most kBMMaxShift last characters of pattern.
int start = pattern.length() < kBMMaxShift ? 0
: pattern.length() - kBMMaxShift;
// Run forwards to populate bad_char_table, so that *last* instance
// of character equivalence class is the one registered.
// Notice: Doesn't include the last character.
@ -2169,10 +2183,11 @@ static void BoyerMoorePopulateBadCharTable(Vector<const pchar> pattern,
}
}
template <typename pchar>
static void BoyerMoorePopulateGoodSuffixTable(Vector<const pchar> pattern,
int start) {
static void BoyerMoorePopulateGoodSuffixTable(Vector<const pchar> pattern) {
int m = pattern.length();
int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
int len = m - start;
// Compute Good Suffix tables.
bmgs_buffers.init(m);
@ -2219,6 +2234,7 @@ static void BoyerMoorePopulateGoodSuffixTable(Vector<const pchar> pattern,
}
}
template <typename schar, typename pchar>
static inline int CharOccurrence(int char_code) {
if (sizeof(schar) == 1) {
@ -2233,6 +2249,7 @@ static inline int CharOccurrence(int char_code) {
return bad_char_occurrence[char_code % kBMAlphabetSize];
}
// Restricted simplified Boyer-Moore string matching.
// Uses only the bad-shift table of Boyer-Moore and only uses it
// for the character compared to the last character of the needle.
@ -2241,14 +2258,13 @@ static int BoyerMooreHorspool(Vector<const schar> subject,
Vector<const pchar> pattern,
int start_index,
bool* complete) {
ASSERT(algorithm <= BOYER_MOORE_HORSPOOL);
int n = subject.length();
int m = pattern.length();
// Only preprocess at most kBMMaxShift last characters of pattern.
int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
BoyerMoorePopulateBadCharTable(pattern, start);
int badness = -m;
int badness = -m; // How bad we are doing without a good-suffix table.
// How bad we are doing without a good-suffix table.
int idx; // No matches found prior to this index.
pchar last_char = pattern[m - 1];
int last_char_shift = m - 1 - CharOccurrence<schar, pchar>(last_char);
@ -2293,13 +2309,12 @@ template <typename schar, typename pchar>
static int BoyerMooreIndexOf(Vector<const schar> subject,
Vector<const pchar> pattern,
int idx) {
ASSERT(algorithm <= BOYER_MOORE);
int n = subject.length();
int m = pattern.length();
// Only preprocess at most kBMMaxShift last characters of pattern.
int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
// Build the Good Suffix table and continue searching.
BoyerMoorePopulateGoodSuffixTable(pattern, start);
pchar last_char = pattern[m - 1];
// Continue search from i.
while (idx <= n - m) {
@ -2335,7 +2350,7 @@ static int BoyerMooreIndexOf(Vector<const schar> subject,
template <typename schar>
static int SingleCharIndexOf(Vector<const schar> string,
static inline int SingleCharIndexOf(Vector<const schar> string,
schar pattern_char,
int start_index) {
for (int i = start_index, n = string.length(); i < n; i++) {
@ -2375,10 +2390,10 @@ static int SimpleIndexOf(Vector<const schar> subject,
// done enough work we decide it's probably worth switching to a better
// algorithm.
int badness = -10 - (pattern.length() << 2);
// We know our pattern is at least 2 characters, we cache the first so
// the common case of the first character not matching is faster.
pchar pattern_first_char = pattern[0];
for (int i = idx, n = subject.length() - pattern.length(); i <= n; i++) {
badness++;
if (badness > 0) {
@ -2426,39 +2441,84 @@ static int SimpleIndexOf(Vector<const schar> subject,
}
// Dispatch to different algorithms.
template <typename schar, typename pchar>
static int StringMatchStrategy(Vector<const schar> sub,
Vector<const pchar> pat,
int start_index) {
ASSERT(pat.length() > 1);
// Strategy for searching for a string in another string.
enum StringSearchStrategy { SEARCH_FAIL, SEARCH_SHORT, SEARCH_LONG };
template <typename pchar>
static inline StringSearchStrategy InitializeStringSearch(
Vector<const pchar> pat, bool ascii_subject) {
ASSERT(pat.length() > 1);
// We have an ASCII haystack and a non-ASCII needle. Check if there
// really is a non-ASCII character in the needle and bail out if there
// is.
if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
if (ascii_subject && sizeof(pchar) > 1) {
for (int i = 0; i < pat.length(); i++) {
uc16 c = pat[i];
if (c > String::kMaxAsciiCharCode) {
return -1;
return SEARCH_FAIL;
}
}
}
if (pat.length() < kBMMinPatternLength) {
// We don't believe fancy searching can ever be more efficient.
// The max shift of Boyer-Moore on a pattern of this length does
// not compensate for the overhead.
return SimpleIndexOf(sub, pat, start_index);
return SEARCH_SHORT;
}
algorithm = SIMPLE_SEARCH;
return SEARCH_LONG;
}
// Dispatch long needle searches to different algorithms.
template <typename schar, typename pchar>
static int ComplexIndexOf(Vector<const schar> sub,
Vector<const pchar> pat,
int start_index) {
ASSERT(pat.length() >= kBMMinPatternLength);
// Try algorithms in order of increasing setup cost and expected performance.
bool complete;
int idx = SimpleIndexOf(sub, pat, start_index, &complete);
int idx = start_index;
switch (algorithm) {
case SIMPLE_SEARCH:
idx = SimpleIndexOf(sub, pat, idx, &complete);
if (complete) return idx;
BoyerMoorePopulateBadCharTable(pat);
algorithm = BOYER_MOORE_HORSPOOL;
// FALLTHROUGH.
case BOYER_MOORE_HORSPOOL:
idx = BoyerMooreHorspool(sub, pat, idx, &complete);
if (complete) return idx;
// Build the Good Suffix table and continue searching.
BoyerMoorePopulateGoodSuffixTable(pat);
algorithm = BOYER_MOORE;
// FALLTHROUGH.
case BOYER_MOORE:
return BoyerMooreIndexOf(sub, pat, idx);
}
UNREACHABLE();
return -1;
}
// Dispatch to different search strategies for a single search.
// If searching multiple times on the same needle, the search
// strategy should only be computed once and then dispatch to different
// loops.
template <typename schar, typename pchar>
static int StringSearch(Vector<const schar> sub,
Vector<const pchar> pat,
int start_index) {
bool ascii_subject = (sizeof(schar) == 1);
StringSearchStrategy strategy = InitializeStringSearch(pat, ascii_subject);
switch (strategy) {
case SEARCH_FAIL: return -1;
case SEARCH_SHORT: return SimpleIndexOf(sub, pat, start_index);
case SEARCH_LONG: return ComplexIndexOf(sub, pat, start_index);
}
UNREACHABLE();
return -1;
}
// Perform string match of pattern on subject, starting at start index.
// Caller must ensure that 0 <= start_index <= sub->length(),
// and should check that pat->length() + start_index <= sub->length()
@ -2477,6 +2537,7 @@ int Runtime::StringMatch(Handle<String> sub,
if (!sub->IsFlat()) {
FlattenString(sub);
}
// Searching for one specific character is common. For one
// character patterns linear search is necessary, so any smart
// algorithm is unnecessary overhead.
@ -2510,15 +2571,15 @@ int Runtime::StringMatch(Handle<String> sub,
if (pat->IsAsciiRepresentation()) {
Vector<const char> pat_vector = pat->ToAsciiVector();
if (sub->IsAsciiRepresentation()) {
return StringMatchStrategy(sub->ToAsciiVector(), pat_vector, start_index);
return StringSearch(sub->ToAsciiVector(), pat_vector, start_index);
}
return StringMatchStrategy(sub->ToUC16Vector(), pat_vector, start_index);
return StringSearch(sub->ToUC16Vector(), pat_vector, start_index);
}
Vector<const uc16> pat_vector = pat->ToUC16Vector();
if (sub->IsAsciiRepresentation()) {
return StringMatchStrategy(sub->ToAsciiVector(), pat_vector, start_index);
return StringSearch(sub->ToAsciiVector(), pat_vector, start_index);
}
return StringMatchStrategy(sub->ToUC16Vector(), pat_vector, start_index);
return StringSearch(sub->ToUC16Vector(), pat_vector, start_index);
}
@ -4273,6 +4334,169 @@ static Object* Runtime_StringTrim(Arguments args) {
}
template <typename schar, typename pchar>
void FindStringIndices(Vector<const schar> subject,
Vector<const pchar> pattern,
ZoneList<int>* indices,
unsigned int limit) {
ASSERT(limit > 0);
// Collect indices of pattern in subject, and the end-of-string index.
// Stop after finding at most limit values.
StringSearchStrategy strategy =
InitializeStringSearch(pattern, sizeof(schar) == 1);
switch (strategy) {
case SEARCH_FAIL: return;
case SEARCH_SHORT: {
int pattern_length = pattern.length();
int index = 0;
while (limit > 0) {
index = SimpleIndexOf(subject, pattern, index);
if (index < 0) return;
indices->Add(index);
index += pattern_length;
limit--;
}
return;
}
case SEARCH_LONG: {
int pattern_length = pattern.length();
int index = 0;
while (limit > 0) {
index = ComplexIndexOf(subject, pattern, index);
if (index < 0) return;
indices->Add(index);
index += pattern_length;
limit--;
}
return;
}
default:
UNREACHABLE();
return;
}
}
template <typename schar>
inline void FindCharIndices(Vector<const schar> subject,
const schar pattern_char,
ZoneList<int>* indices,
unsigned int limit) {
// Collect indices of pattern_char in subject, and the end-of-string index.
// Stop after finding at most limit values.
int index = 0;
while (limit > 0) {
index = SingleCharIndexOf(subject, pattern_char, index);
if (index < 0) return;
indices->Add(index);
index++;
limit--;
}
}
static Object* Runtime_StringSplit(Arguments args) {
ASSERT(args.length() == 3);
HandleScope handle_scope;
CONVERT_ARG_CHECKED(String, subject, 0);
CONVERT_ARG_CHECKED(String, pattern, 1);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
int subject_length = subject->length();
int pattern_length = pattern->length();
RUNTIME_ASSERT(pattern_length > 0);
// The limit can be very large (0xffffffffu), but since the pattern
// isn't empty, we can never create more parts than ~half the length
// of the subject.
if (!subject->IsFlat()) FlattenString(subject);
static const int kMaxInitialListCapacity = 16;
ZoneScope scope(DELETE_ON_EXIT);
// Find (up to limit) indices of separator and end-of-string in subject
int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
ZoneList<int> indices(initial_capacity);
if (pattern_length == 1) {
// Special case, go directly to fast single-character split.
AssertNoAllocation nogc;
uc16 pattern_char = pattern->Get(0);
if (subject->IsTwoByteRepresentation()) {
FindCharIndices(subject->ToUC16Vector(), pattern_char,
&indices,
limit);
} else if (pattern_char <= String::kMaxAsciiCharCode) {
FindCharIndices(subject->ToAsciiVector(),
static_cast<char>(pattern_char),
&indices,
limit);
}
} else {
if (!pattern->IsFlat()) FlattenString(pattern);
AssertNoAllocation nogc;
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (pattern->IsAsciiRepresentation()) {
FindStringIndices(subject_vector,
pattern->ToAsciiVector(),
&indices,
limit);
} else {
FindStringIndices(subject_vector,
pattern->ToUC16Vector(),
&indices,
limit);
}
} else {
Vector<const uc16> subject_vector = subject->ToUC16Vector();
if (pattern->IsAsciiRepresentation()) {
FindStringIndices(subject_vector,
pattern->ToAsciiVector(),
&indices,
limit);
} else {
FindStringIndices(subject_vector,
pattern->ToUC16Vector(),
&indices,
limit);
}
}
}
if (static_cast<uint32_t>(indices.length()) < limit) {
indices.Add(subject_length);
}
// The list indices now contains the end of each part to create.
// Create JSArray of substrings separated by separator.
int part_count = indices.length();
Handle<JSArray> result = Factory::NewJSArray(part_count);
result->set_length(Smi::FromInt(part_count));
ASSERT(result->HasFastElements());
if (part_count == 1 && indices.at(0) == subject_length) {
FixedArray::cast(result->elements())->set(0, *subject);
return *result;
}
Handle<FixedArray> elements(FixedArray::cast(result->elements()));
int part_start = 0;
for (int i = 0; i < part_count; i++) {
HandleScope local_loop_handle;
int part_end = indices.at(i);
Handle<String> substring =
Factory::NewSubString(subject, part_start, part_end);
elements->set(i, *substring);
part_start = part_end + pattern_length;
}
return *result;
}
// Copies ascii characters to the given fixed array looking up
// one-char strings in the cache. Gives up on the first char that is
// not in the cache and fills the remainder with smi zeros. Returns
@ -4849,7 +5073,7 @@ static Object* FlatStringCompare(String* x, String* y) {
Vector<const char> x_chars = x->ToAsciiVector();
if (y->IsAsciiRepresentation()) {
Vector<const char> y_chars = y->ToAsciiVector();
r = memcmp(x_chars.start(), y_chars.start(), prefix_length);
r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
} else {
Vector<const uc16> y_chars = y->ToUC16Vector();
r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
@ -4897,8 +5121,10 @@ static Object* Runtime_StringCompare(Arguments args) {
if (d < 0) return Smi::FromInt(LESS);
else if (d > 0) return Smi::FromInt(GREATER);
x->TryFlatten();
y->TryFlatten();
Object* obj = Heap::PrepareForCompare(x);
if (obj->IsFailure()) return obj;
obj = Heap::PrepareForCompare(y);
if (obj->IsFailure()) return obj;
return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
: StringInputBufferCompare(x, y);
@ -8839,6 +9065,36 @@ static Object* Runtime_LiveEditCheckStackActivations(Arguments args) {
}
// A testing entry. Returns statement position which is the closest to
// source_position.
static Object* Runtime_GetFunctionCodePositionFromSource(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;
CONVERT_ARG_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
Handle<Code> code(function->code());
RelocIterator it(*code, 1 << RelocInfo::STATEMENT_POSITION);
int closest_pc = 0;
int distance = kMaxInt;
while (!it.done()) {
int statement_position = static_cast<int>(it.rinfo()->data());
// Check if this break point is closer that what was previously found.
if (source_position <= statement_position &&
statement_position - source_position < distance) {
closest_pc = it.rinfo()->pc() - code->instruction_start();
distance = statement_position - source_position;
// Check whether we can't get any closer.
if (distance == 0) break;
}
it.next();
}
return Smi::FromInt(closest_pc);
}
#endif // ENABLE_DEBUGGER_SUPPORT
#ifdef ENABLE_LOGGING_AND_PROFILING

4
deps/v8/src/runtime.h

@ -93,6 +93,7 @@ namespace internal {
F(StringParseFloat, 1, 1) \
F(StringToLowerCase, 1, 1) \
F(StringToUpperCase, 1, 1) \
F(StringSplit, 3, 1) \
F(CharFromCode, 1, 1) \
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
@ -332,7 +333,8 @@ namespace internal {
F(LiveEditReplaceFunctionCode, 2, 1) \
F(LiveEditRelinkFunctionToScript, 2, 1) \
F(LiveEditPatchFunctionPositions, 2, 1) \
F(LiveEditCheckStackActivations, 1, 1)
F(LiveEditCheckStackActivations, 1, 1) \
F(GetFunctionCodePositionFromSource, 2, 1)
#else
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
#endif

60
deps/v8/src/splay-tree-inl.h

@ -58,6 +58,15 @@ bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
}
// Insert the new node.
Node* node = new Node(key, Config::kNoValue);
InsertInternal(cmp, node);
}
locator->bind(root_);
return true;
}
template<typename Config, class Allocator>
void SplayTree<Config, Allocator>::InsertInternal(int cmp, Node* node) {
if (cmp > 0) {
node->left_ = root_;
node->right_ = root_->right_;
@ -68,18 +77,21 @@ bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
root_->left_ = NULL;
}
root_ = node;
}
locator->bind(root_);
return true;
}
template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
bool SplayTree<Config, Allocator>::FindInternal(const Key& key) {
if (is_empty())
return false;
Splay(key);
if (Config::Compare(key, root_->key_) == 0) {
return Config::Compare(key, root_->key_) == 0;
}
template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Find(const Key& key, Locator* locator) {
if (FindInternal(key)) {
locator->bind(root_);
return true;
} else {
@ -161,15 +173,38 @@ bool SplayTree<Config, Allocator>::FindLeast(Locator* locator) {
template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Remove(const Key& key) {
// Bail if the tree is empty
if (is_empty())
bool SplayTree<Config, Allocator>::Move(const Key& old_key,
const Key& new_key) {
if (!FindInternal(old_key))
return false;
// Splay on the key to move the node with the given key to the top.
Splay(key);
// Bail if the key is not in the tree
if (Config::Compare(key, root_->key_) != 0)
Node* node_to_move = root_;
RemoveRootNode(old_key);
Splay(new_key);
int cmp = Config::Compare(new_key, root_->key_);
if (cmp == 0) {
// A node with the target key already exists.
delete node_to_move;
return false;
}
node_to_move->key_ = new_key;
InsertInternal(cmp, node_to_move);
return true;
}
template<typename Config, class Allocator>
bool SplayTree<Config, Allocator>::Remove(const Key& key) {
if (!FindInternal(key))
return false;
Node* node_to_remove = root_;
RemoveRootNode(key);
delete node_to_remove;
return true;
}
template<typename Config, class Allocator>
void SplayTree<Config, Allocator>::RemoveRootNode(const Key& key) {
if (root_->left_ == NULL) {
// No left child, so the new tree is just the right child.
root_ = root_->right_;
@ -184,7 +219,6 @@ bool SplayTree<Config, Allocator>::Remove(const Key& key) {
// root.
root_->right_ = right;
}
return true;
}

12
deps/v8/src/splay-tree.h

@ -88,6 +88,9 @@ class SplayTree {
// Find the mapping with the least key in this tree.
bool FindLeast(Locator* locator);
// Move the node from one key to another.
bool Move(const Key& old_key, const Key& new_key);
// Remove the node with the given key from the tree.
bool Remove(const Key& key);
@ -151,6 +154,15 @@ class SplayTree {
void ResetRoot() { root_ = NULL; }
private:
// Search for a node with a given key. If found, root_ points
// to the node.
bool FindInternal(const Key& key);
// Inserts a node assuming that root_ is already set up.
void InsertInternal(int cmp, Node* node);
// Removes root_ node.
void RemoveRootNode(const Key& key);
template<class Callback>
class NodeToPairAdaptor BASE_EMBEDDED {

99
deps/v8/src/string.js

@ -168,9 +168,32 @@ function StringMatch(regexp) {
var subject = TO_STRING_INLINE(this);
if (!regexp.global) return regexp.exec(subject);
var cache = regExpCache;
if (%_ObjectEquals(cache.type, 'match') &&
%_ObjectEquals(cache.regExp, regexp) &&
%_ObjectEquals(cache.subject, subject)) {
var last = cache.answer;
if (last == null) {
return last;
} else {
return CloneRegexpAnswer(last);
}
}
%_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
// lastMatchInfo is defined in regexp-delay.js.
return %StringMatch(subject, regexp, lastMatchInfo);
var result = %StringMatch(subject, regexp, lastMatchInfo);
cache.type = 'match';
cache.regExp = regexp;
cache.subject = subject;
cache.answer = result;
if (result == null) {
return result;
} else {
return CloneRegexpAnswer(result);
}
}
@ -206,6 +229,7 @@ function StringReplace(search, replace) {
if (IS_REGEXP(search)) {
%_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
if (IS_FUNCTION(replace)) {
regExpCache.type = 'none';
return StringReplaceRegExpWithFunction(subject, search, replace);
} else {
return StringReplaceRegExp(subject, search, replace);
@ -239,27 +263,25 @@ function StringReplace(search, replace) {
}
var cachedReplaceSubject;
var cachedReplaceRegexp;
var cachedReplaceReplacement;
var cachedReplaceAnswer;
// Helper function for regular expressions in String.prototype.replace.
function StringReplaceRegExp(subject, regexp, replace) {
if (%_ObjectEquals(replace, cachedReplaceReplacement) &&
%_ObjectEquals(subject, cachedReplaceSubject) &&
%_ObjectEquals(regexp, cachedReplaceRegexp)) {
return cachedReplaceAnswer;
var cache = regExpCache;
if (%_ObjectEquals(cache.regExp, regexp) &&
%_ObjectEquals(cache.type, 'replace') &&
%_ObjectEquals(cache.replaceString, replace) &&
%_ObjectEquals(cache.subject, subject)) {
return cache.answer;
}
replace = TO_STRING_INLINE(replace);
var answer = %StringReplaceRegExpWithString(subject,
regexp,
replace,
lastMatchInfo);
cachedReplaceSubject = subject;
cachedReplaceRegexp = regexp;
cachedReplaceReplacement = replace;
cachedReplaceAnswer = answer;
cache.subject = subject;
cache.regExp = regexp;
cache.replaceString = replace;
cache.answer = answer;
cache.type = 'replace';
return answer;
}
@ -557,7 +579,7 @@ function StringSplit(separator, limit) {
// ECMA-262 says that if separator is undefined, the result should
// be an array of size 1 containing the entire string. SpiderMonkey
// and KJS have this behaviour only when no separator is given. If
// and KJS have this behavior only when no separator is given. If
// undefined is explicitly given, they convert it to a string and
// use that. We do as SpiderMonkey and KJS.
if (%_ArgumentsLength() === 0) {
@ -572,26 +594,31 @@ function StringSplit(separator, limit) {
// If the separator string is empty then return the elements in the subject.
if (separator_length === 0) return %StringToArray(subject);
var result = [];
var start_index = 0;
var index;
while (true) {
if (start_index + separator_length > length ||
(index = %StringIndexOf(subject, separator, start_index)) === -1) {
result.push(SubString(subject, start_index, length));
break;
}
if (result.push(SubString(subject, start_index, index)) === limit) break;
start_index = index + separator_length;
}
var result = %StringSplit(subject, separator, limit);
return result;
}
var cache = regExpCache;
if (%_ObjectEquals(cache.type, 'split') &&
%_ObjectEquals(cache.regExp, separator) &&
%_ObjectEquals(cache.subject, subject)) {
return CloneRegexpAnswer(cache.answer);
}
cache.type = 'split';
cache.regExp = separator;
cache.subject = subject;
%_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
if (length === 0) {
if (splitMatch(separator, subject, 0, 0) != null) return [];
if (splitMatch(separator, subject, 0, 0) != null) {
cache.answer = [];
return [];
}
cache.answer = [subject];
return [subject];
}
@ -603,14 +630,16 @@ function StringSplit(separator, limit) {
if (startIndex === length) {
result[result.length] = subject.slice(currentIndex, length);
return result;
cache.answer = result;
return CloneRegexpAnswer(result);
}
var matchInfo = splitMatch(separator, subject, currentIndex, startIndex);
if (IS_NULL(matchInfo)) {
result[result.length] = subject.slice(currentIndex, length);
return result;
cache.answer = result;
return CloneRegexpAnswer(result);
}
var endIndex = matchInfo[CAPTURE1];
@ -622,7 +651,10 @@ function StringSplit(separator, limit) {
}
result[result.length] = SubString(subject, currentIndex, matchInfo[CAPTURE0]);
if (result.length === limit) return result;
if (result.length === limit) {
cache.answer = result;
return CloneRegexpAnswer(result);
}
var num_captures = NUMBER_OF_CAPTURES(matchInfo);
for (var i = 2; i < num_captures; i += 2) {
@ -633,7 +665,10 @@ function StringSplit(separator, limit) {
} else {
result[result.length] = void 0;
}
if (result.length === limit) return result;
if (result.length === limit) {
cache.answer = result;
return CloneRegexpAnswer(result);
}
}
startIndex = currentIndex = endIndex;

2
deps/v8/src/version.cc

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 1
#define BUILD_NUMBER 4
#define BUILD_NUMBER 6
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

3
deps/v8/src/virtual-frame.cc

@ -163,6 +163,9 @@ void VirtualFrame::SpillElementAt(int index) {
if (elements_[index].is_copied()) {
new_element.set_copied();
}
if (elements_[index].is_untagged_int32()) {
new_element.set_untagged_int32(true);
}
if (elements_[index].is_register()) {
Unuse(elements_[index].reg());
}

132
deps/v8/src/x64/codegen-x64.cc

@ -4053,8 +4053,9 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
// Load the argument on the stack and jump to the runtime.
Load(args->at(0));
Result answer = frame_->CallRuntime(Runtime::kNumberToString, 1);
frame_->Push(&answer);
NumberToStringStub stub;
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
@ -7207,6 +7208,77 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
bool object_is_smi,
Label* not_found) {
// Currently only lookup for smis. Check for smi if object is not known to be
// a smi.
if (!object_is_smi) {
__ JumpIfNotSmi(object, not_found);
}
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
Register mask = scratch1;
Register scratch = scratch2;
// Load the number string cache.
__ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shrl(mask, Immediate(1)); // Divide length by two (length is not a smi).
__ subl(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value.
__ movq(scratch, object);
__ SmiToInteger32(scratch, scratch);
__ andl(scratch, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
// So we have to premultiply entry index before lookup
__ shl(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
__ cmpq(object,
FieldOperand(number_string_cache,
scratch,
times_1,
FixedArray::kHeaderSize));
__ j(not_equal, not_found);
// Get the result from the cache.
__ movq(result,
FieldOperand(number_string_cache,
scratch,
times_1,
FixedArray::kHeaderSize + kPointerSize));
__ IncrementCounter(&Counters::number_to_string_native, 1);
}
void NumberToStringStub::Generate(MacroAssembler* masm) {
Label runtime;
__ movq(rbx, Operand(rsp, kPointerSize));
// Generate code to lookup number in the number string cache.
GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
__ ret(1 * kPointerSize);
__ bind(&runtime);
// Handle number to string in the runtime system if not found in the cache.
__ TailCallRuntime(Runtime::kNumberToString, 1, 1);
}
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
@ -8794,6 +8866,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// result. If arguments was passed in registers now place them on the
// stack in the correct order below the return address.
__ bind(&call_runtime);
if (HasArgsInRegisters()) {
__ pop(rcx);
if (HasArgsReversed()) {
@ -8805,48 +8878,63 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
__ push(rcx);
}
switch (op_) {
case Token::ADD: {
// Registers containing left and right operands respectively.
Register lhs, rhs;
if (HasArgsReversed()) {
lhs = rax;
rhs = rdx;
} else {
lhs = rdx;
rhs = rax;
}
// Test for string arguments before calling runtime.
Label not_strings, both_strings, not_string1, string1;
Label not_strings, both_strings, not_string1, string1, string1_smi2;
Condition is_smi;
Result answer;
is_smi = masm->CheckSmi(rdx);
is_smi = masm->CheckSmi(lhs);
__ j(is_smi, &not_string1);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rdx);
__ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
__ j(above_equal, &not_string1);
// First argument is a a string, test second.
is_smi = masm->CheckSmi(rax);
__ j(is_smi, &string1);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
is_smi = masm->CheckSmi(rhs);
__ j(is_smi, &string1_smi2);
__ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string1);
// First and second argument are strings.
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
__ TailCallStub(&stub);
StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
__ TailCallStub(&string_add_stub);
__ bind(&string1_smi2);
// First argument is a string, second is a smi. Try to lookup the number
// string for the smi in the number string cache.
NumberToStringStub::GenerateLookupNumberStringCache(
masm, rhs, rbx, rcx, r8, true, &string1);
// Replace second argument on stack and tailcall string add stub to make
// the result.
__ movq(Operand(rsp, 1 * kPointerSize), rbx);
__ TailCallStub(&string_add_stub);
// Only first argument is a string.
__ bind(&string1);
__ InvokeBuiltin(
HasArgsReversed() ?
Builtins::STRING_ADD_RIGHT :
Builtins::STRING_ADD_LEFT,
JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(&not_string1);
is_smi = masm->CheckSmi(rax);
is_smi = masm->CheckSmi(rhs);
__ j(is_smi, &not_strings);
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rax);
__ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
__ j(above_equal, &not_strings);
// Only second argument is a string.
__ InvokeBuiltin(
HasArgsReversed() ?
Builtins::STRING_ADD_LEFT :
Builtins::STRING_ADD_RIGHT,
JUMP_FUNCTION);
__ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
__ bind(&not_strings);
// Neither argument is a string.

33
deps/v8/src/x64/codegen-x64.h

@ -865,6 +865,39 @@ class StringCompareStub: public CodeStub {
};
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache the code jumps to
// the label not_found with only the content of register object unchanged.
static void GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
bool object_is_smi,
Label* not_found);
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "NumberToStringStub"; }
#ifdef DEBUG
void Print() {
PrintF("NumberToStringStub\n");
}
#endif
};
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_

2
deps/v8/src/x64/register-allocator-x64.cc

@ -44,6 +44,7 @@ void Result::ToRegister() {
ASSERT(fresh.is_valid());
CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
// This result becomes a copy of the fresh one.
fresh.set_number_info(number_info());
*this = fresh;
}
ASSERT(is_register());
@ -61,6 +62,7 @@ void Result::ToRegister(Register target) {
ASSERT(is_constant());
CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle());
}
fresh.set_number_info(number_info());
*this = fresh;
} else if (is_register() && reg().is(target)) {
ASSERT(CodeGeneratorScope::Current()->has_valid_frame());

5
deps/v8/test/cctest/SConscript

@ -34,7 +34,6 @@ Import('context object_files')
SOURCES = {
'all': [
'gay_shortest.cc',
'test-accessors.cc',
'test-alloc.cc',
'test-api.cc',
@ -44,11 +43,8 @@ SOURCES = {
'test-dataflow.cc',
'test-debug.cc',
'test-decls.cc',
'test-diy_fp.cc',
'test-double.cc',
'test-flags.cc',
'test-func-name-inference.cc',
'test-grisu3.cc',
'test-hashmap.cc',
'test-heap.cc',
'test-heap-profiler.cc',
@ -58,6 +54,7 @@ SOURCES = {
'test-log-utils.cc',
'test-mark-compact.cc',
'test-parsing.cc',
'test-profile-generator.cc',
'test-regexp.cc',
'test-serialize.cc',
'test-sockets.cc',

100048
deps/v8/test/cctest/gay_shortest.cc

File diff suppressed because it is too large

8
deps/v8/test/cctest/test-assembler-ia32.cc

@ -167,6 +167,8 @@ TEST(AssemblerIa322) {
typedef int (*F3)(float x);
TEST(AssemblerIa323) {
if (!CpuFeatures::IsSupported(SSE2)) return;
InitializeVM();
v8::HandleScope scope;
@ -201,6 +203,8 @@ TEST(AssemblerIa323) {
typedef int (*F4)(double x);
TEST(AssemblerIa324) {
if (!CpuFeatures::IsSupported(SSE2)) return;
InitializeVM();
v8::HandleScope scope;
@ -258,6 +262,8 @@ TEST(AssemblerIa325) {
typedef double (*F5)(double x, double y);
TEST(AssemblerIa326) {
if (!CpuFeatures::IsSupported(SSE2)) return;
InitializeVM();
v8::HandleScope scope;
CHECK(CpuFeatures::IsSupported(SSE2));
@ -303,6 +309,8 @@ TEST(AssemblerIa326) {
typedef double (*F6)(int x);
TEST(AssemblerIa328) {
if (!CpuFeatures::IsSupported(SSE2)) return;
InitializeVM();
v8::HandleScope scope;
CHECK(CpuFeatures::IsSupported(SSE2));

10
deps/v8/test/cctest/test-compiler.cc

@ -115,8 +115,14 @@ static void SetGlobalProperty(const char* name, Object* value) {
static Handle<JSFunction> Compile(const char* source) {
Handle<String> source_code(Factory::NewStringFromUtf8(CStrVector(source)));
Handle<JSFunction> boilerplate =
Compiler::Compile(source_code, Handle<String>(), 0, 0, NULL, NULL,
Handle<String>::null());
Compiler::Compile(source_code,
Handle<String>(),
0,
0,
NULL,
NULL,
Handle<String>::null(),
NOT_NATIVES_CODE);
return Factory::NewFunctionFromBoilerplate(boilerplate,
Top::global_context());
}

6
deps/v8/test/cctest/test-disasm-ia32.cc

@ -361,7 +361,7 @@ TEST(DisasmIa320) {
__ fwait();
__ nop();
{
CHECK(CpuFeatures::IsSupported(SSE2));
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
__ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
@ -379,10 +379,11 @@ TEST(DisasmIa320) {
__ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
__ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
}
}
// cmov.
{
CHECK(CpuFeatures::IsSupported(CMOV));
if (CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope use_cmov(CMOV);
__ cmov(overflow, eax, Operand(eax, 0));
__ cmov(no_overflow, eax, Operand(eax, 1));
@ -401,6 +402,7 @@ TEST(DisasmIa320) {
__ cmov(less_equal, eax, Operand(edx, 2));
__ cmov(greater, eax, Operand(edx, 3));
}
}
__ ret(0);

67
deps/v8/test/cctest/test-diy_fp.cc

@ -1,67 +0,0 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "diy_fp.h"
using namespace v8::internal;
TEST(Subtract) {
DiyFp diy_fp1 = DiyFp(3, 0);
DiyFp diy_fp2 = DiyFp(1, 0);
DiyFp diff = DiyFp::Minus(diy_fp1, diy_fp2);
CHECK(2 == diff.f()); // NOLINT
CHECK_EQ(0, diff.e());
diy_fp1.Subtract(diy_fp2);
CHECK(2 == diy_fp1.f()); // NOLINT
CHECK_EQ(0, diy_fp1.e());
}
TEST(Multiply) {
DiyFp diy_fp1 = DiyFp(3, 0);
DiyFp diy_fp2 = DiyFp(2, 0);
DiyFp product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(0 == product.f()); // NOLINT
CHECK_EQ(64, product.e());
diy_fp1.Multiply(diy_fp2);
CHECK(0 == diy_fp1.f()); // NOLINT
CHECK_EQ(64, diy_fp1.e());
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x80000000, 00000000), 11);
diy_fp2 = DiyFp(2, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(1 == product.f()); // NOLINT
CHECK_EQ(11 + 13 + 64, product.e());
// Test rounding.
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x80000000, 00000001), 11);
diy_fp2 = DiyFp(1, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(1 == product.f()); // NOLINT
CHECK_EQ(11 + 13 + 64, product.e());
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0x7fffffff, ffffffff), 11);
diy_fp2 = DiyFp(1, 13);
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(0 == product.f()); // NOLINT
CHECK_EQ(11 + 13 + 64, product.e());
// Halfway cases are allowed to round either way. So don't check for it.
// Big numbers.
diy_fp1 = DiyFp(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF), 11);
diy_fp2 = DiyFp(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF), 13);
// 128bit result: 0xfffffffffffffffe0000000000000001
product = DiyFp::Times(diy_fp1, diy_fp2);
CHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFe) == product.f());
CHECK_EQ(11 + 13 + 64, product.e());
}

204
deps/v8/test/cctest/test-double.cc

@ -1,204 +0,0 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "diy_fp.h"
#include "double.h"
using namespace v8::internal;
TEST(Uint64Conversions) {
// Start by checking the byte-order.
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
CHECK_EQ(3512700564088504e-318, Double(ordered).value());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK_EQ(5e-324, Double(min_double64).value());
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
CHECK_EQ(1.7976931348623157e308, Double(max_double64).value());
}
TEST(AsDiyFp) {
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
DiyFp diy_fp = Double(ordered).AsDiyFp();
CHECK_EQ(0x12 - 0x3FF - 52, diy_fp.e());
// The 52 mantissa bits, plus the implicit 1 in bit 52 as a UINT64.
CHECK(V8_2PART_UINT64_C(0x00134567, 89ABCDEF) == diy_fp.f()); // NOLINT
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
diy_fp = Double(min_double64).AsDiyFp();
CHECK_EQ(-0x3FF - 52 + 1, diy_fp.e());
// This is a denormal; so no hidden bit.
CHECK(1 == diy_fp.f()); // NOLINT
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsDiyFp();
CHECK_EQ(0x7FE - 0x3FF - 52, diy_fp.e());
CHECK(V8_2PART_UINT64_C(0x001fffff, ffffffff) == diy_fp.f()); // NOLINT
}
TEST(AsNormalizedDiyFp) {
uint64_t ordered = V8_2PART_UINT64_C(0x01234567, 89ABCDEF);
DiyFp diy_fp = Double(ordered).AsNormalizedDiyFp();
CHECK_EQ(0x12 - 0x3FF - 52 - 11, diy_fp.e());
CHECK((V8_2PART_UINT64_C(0x00134567, 89ABCDEF) << 11) ==
diy_fp.f()); // NOLINT
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
diy_fp = Double(min_double64).AsNormalizedDiyFp();
CHECK_EQ(-0x3FF - 52 + 1 - 63, diy_fp.e());
// This is a denormal; so no hidden bit.
CHECK(V8_2PART_UINT64_C(0x80000000, 00000000) == diy_fp.f()); // NOLINT
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsNormalizedDiyFp();
CHECK_EQ(0x7FE - 0x3FF - 52 - 11, diy_fp.e());
CHECK((V8_2PART_UINT64_C(0x001fffff, ffffffff) << 11) ==
diy_fp.f()); // NOLINT
}
TEST(IsDenormal) {
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK(Double(min_double64).IsDenormal());
uint64_t bits = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
CHECK(Double(bits).IsDenormal());
bits = V8_2PART_UINT64_C(0x00100000, 00000000);
CHECK(!Double(bits).IsDenormal());
}
TEST(IsSpecial) {
CHECK(Double(V8_INFINITY).IsSpecial());
CHECK(Double(-V8_INFINITY).IsSpecial());
CHECK(Double(OS::nan_value()).IsSpecial());
uint64_t bits = V8_2PART_UINT64_C(0xFFF12345, 00000000);
CHECK(Double(bits).IsSpecial());
// Denormals are not special:
CHECK(!Double(5e-324).IsSpecial());
CHECK(!Double(-5e-324).IsSpecial());
// And some random numbers:
CHECK(!Double(0.0).IsSpecial());
CHECK(!Double(-0.0).IsSpecial());
CHECK(!Double(1.0).IsSpecial());
CHECK(!Double(-1.0).IsSpecial());
CHECK(!Double(1000000.0).IsSpecial());
CHECK(!Double(-1000000.0).IsSpecial());
CHECK(!Double(1e23).IsSpecial());
CHECK(!Double(-1e23).IsSpecial());
CHECK(!Double(1.7976931348623157e308).IsSpecial());
CHECK(!Double(-1.7976931348623157e308).IsSpecial());
}
TEST(IsInfinite) {
CHECK(Double(V8_INFINITY).IsInfinite());
CHECK(Double(-V8_INFINITY).IsInfinite());
CHECK(!Double(OS::nan_value()).IsInfinite());
CHECK(!Double(0.0).IsInfinite());
CHECK(!Double(-0.0).IsInfinite());
CHECK(!Double(1.0).IsInfinite());
CHECK(!Double(-1.0).IsInfinite());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK(!Double(min_double64).IsInfinite());
}
TEST(IsNan) {
CHECK(Double(OS::nan_value()).IsNan());
uint64_t other_nan = V8_2PART_UINT64_C(0xFFFFFFFF, 00000001);
CHECK(Double(other_nan).IsNan());
CHECK(!Double(V8_INFINITY).IsNan());
CHECK(!Double(-V8_INFINITY).IsNan());
CHECK(!Double(0.0).IsNan());
CHECK(!Double(-0.0).IsNan());
CHECK(!Double(1.0).IsNan());
CHECK(!Double(-1.0).IsNan());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK(!Double(min_double64).IsNan());
}
TEST(Sign) {
CHECK_EQ(1, Double(1.0).Sign());
CHECK_EQ(1, Double(V8_INFINITY).Sign());
CHECK_EQ(-1, Double(-V8_INFINITY).Sign());
CHECK_EQ(1, Double(0.0).Sign());
CHECK_EQ(-1, Double(-0.0).Sign());
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
CHECK_EQ(1, Double(min_double64).Sign());
}
TEST(NormalizedBoundaries) {
DiyFp boundary_plus;
DiyFp boundary_minus;
DiyFp diy_fp = Double(1.5).AsNormalizedDiyFp();
Double(1.5).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// 1.5 does not have a significand of the form 2^p (for some p).
// Therefore its boundaries are at the same distance.
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 10) == diy_fp.f() - boundary_minus.f()); // NOLINT
diy_fp = Double(1.0).AsNormalizedDiyFp();
Double(1.0).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// 1.0 does have a significand of the form 2^p (for some p).
// Therefore its lower boundary is twice as close as the upper boundary.
CHECK_GT(boundary_plus.f() - diy_fp.f(), diy_fp.f() - boundary_minus.f());
CHECK((1 << 9) == diy_fp.f() - boundary_minus.f()); // NOLINT
CHECK((1 << 10) == boundary_plus.f() - diy_fp.f()); // NOLINT
uint64_t min_double64 = V8_2PART_UINT64_C(0x00000000, 00000001);
diy_fp = Double(min_double64).AsNormalizedDiyFp();
Double(min_double64).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// min-value does not have a significand of the form 2^p (for some p).
// Therefore its boundaries are at the same distance.
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
// Denormals have their boundaries much closer.
CHECK((static_cast<uint64_t>(1) << 62) ==
diy_fp.f() - boundary_minus.f()); // NOLINT
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
diy_fp = Double(smallest_normal64).AsNormalizedDiyFp();
Double(smallest_normal64).NormalizedBoundaries(&boundary_minus,
&boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// Even though the significand is of the form 2^p (for some p), its boundaries
// are at the same distance. (This is the only exception).
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 10) == diy_fp.f() - boundary_minus.f()); // NOLINT
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
diy_fp = Double(largest_denormal64).AsNormalizedDiyFp();
Double(largest_denormal64).NormalizedBoundaries(&boundary_minus,
&boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 11) == diy_fp.f() - boundary_minus.f()); // NOLINT
uint64_t max_double64 = V8_2PART_UINT64_C(0x7fefffff, ffffffff);
diy_fp = Double(max_double64).AsNormalizedDiyFp();
Double(max_double64).NormalizedBoundaries(&boundary_minus, &boundary_plus);
CHECK_EQ(diy_fp.e(), boundary_minus.e());
CHECK_EQ(diy_fp.e(), boundary_plus.e());
// max-value does not have a significand of the form 2^p (for some p).
// Therefore its boundaries are at the same distance.
CHECK(diy_fp.f() - boundary_minus.f() == boundary_plus.f() - diy_fp.f());
CHECK((1 << 10) == diy_fp.f() - boundary_minus.f()); // NOLINT
}

116
deps/v8/test/cctest/test-grisu3.cc

@ -1,116 +0,0 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#include <stdlib.h>
#include "v8.h"
#include "platform.h"
#include "cctest.h"
#include "diy_fp.h"
#include "double.h"
#include "gay_shortest.h"
#include "grisu3.h"
using namespace v8::internal;
static const int kBufferSize = 100;
TEST(GrisuVariousDoubles) {
char buffer[kBufferSize];
int sign;
int length;
int point;
int status;
double min_double = 5e-324;
status = grisu3(min_double, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("5", buffer);
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
status = grisu3(max_double, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("17976931348623157", buffer);
CHECK_EQ(309, point);
status = grisu3(4294967272.0, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("4294967272", buffer);
CHECK_EQ(10, point);
status = grisu3(4.1855804968213567e298, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("4185580496821357", buffer);
CHECK_EQ(299, point);
status = grisu3(5.5626846462680035e-309, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("5562684646268003", buffer);
CHECK_EQ(-308, point);
status = grisu3(2147483648.0, buffer, &sign, &length, &point);
CHECK(status);
CHECK_EQ(0, sign);
CHECK_EQ("2147483648", buffer);
CHECK_EQ(10, point);
status = grisu3(3.5844466002796428e+298, buffer, &sign, &length, &point);
if (status) { // Not all grisu3 variants manage to compute this number.
CHECK_EQ("35844466002796428", buffer);
CHECK_EQ(0, sign);
CHECK_EQ(299, point);
}
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
status = grisu3(v, buffer, &sign, &length, &point);
if (status) {
CHECK_EQ(0, sign);
CHECK_EQ("22250738585072014", buffer);
CHECK_EQ(-307, point);
}
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
v = Double(largest_denormal64).value();
status = grisu3(v, buffer, &sign, &length, &point);
if (status) {
CHECK_EQ(0, sign);
CHECK_EQ("2225073858507201", buffer);
CHECK_EQ(-307, point);
}
}
TEST(GrisuGayShortest) {
char buffer[kBufferSize];
bool status;
int sign;
int length;
int point;
int succeeded = 0;
int total = 0;
bool needed_max_length = false;
Vector<const GayShortest> precomputed = PrecomputedShortestRepresentations();
for (int i = 0; i < precomputed.length(); ++i) {
const GayShortest current_test = precomputed[i];
total++;
double v = current_test.v;
status = grisu3(v, buffer, &sign, &length, &point);
CHECK_GE(kGrisu3MaximalLength, length);
if (!status) continue;
if (length == kGrisu3MaximalLength) needed_max_length = true;
succeeded++;
CHECK_EQ(0, sign); // All precomputed numbers are positive.
CHECK_EQ(current_test.decimal_point, point);
CHECK_EQ(current_test.representation, buffer);
}
CHECK_GT(succeeded*1.0/total, 0.99);
CHECK(needed_max_length);
}

362
deps/v8/test/cctest/test-profile-generator.cc

@ -0,0 +1,362 @@
// Copyright 2010 the V8 project authors. All rights reserved.
//
// Tests of profiles generator and utilities.
#include "v8.h"
#include "profile-generator-inl.h"
#include "cctest.h"
namespace i = v8::internal;
using i::CodeEntry;
using i::CodeMap;
using i::ProfileNode;
using i::ProfileTree;
using i::StaticNameCodeEntry;
using i::Vector;
TEST(ProfileNodeFindOrAddChild) {
ProfileNode node(NULL);
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
ProfileNode* childNode1 = node.FindOrAddChild(&entry1);
CHECK_NE(NULL, childNode1);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
ProfileNode* childNode2 = node.FindOrAddChild(&entry2);
CHECK_NE(NULL, childNode2);
CHECK_NE(childNode1, childNode2);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
CHECK_EQ(childNode2, node.FindOrAddChild(&entry2));
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
ProfileNode* childNode3 = node.FindOrAddChild(&entry3);
CHECK_NE(NULL, childNode3);
CHECK_NE(childNode1, childNode3);
CHECK_NE(childNode2, childNode3);
CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
CHECK_EQ(childNode2, node.FindOrAddChild(&entry2));
CHECK_EQ(childNode3, node.FindOrAddChild(&entry3));
}
namespace {
class ProfileTreeTestHelper {
public:
explicit ProfileTreeTestHelper(ProfileTree* tree)
: tree_(tree) { }
ProfileNode* Walk(CodeEntry* entry1,
CodeEntry* entry2 = NULL,
CodeEntry* entry3 = NULL) {
ProfileNode* node = tree_->root();
node = node->FindChild(entry1);
if (node == NULL) return NULL;
if (entry2 != NULL) {
node = node->FindChild(entry2);
if (node == NULL) return NULL;
}
if (entry3 != NULL) {
node = node->FindChild(entry3);
}
return node;
}
private:
ProfileTree* tree_;
};
} // namespace
TEST(ProfileTreeAddPathFromStart) {
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
ProfileTree tree;
ProfileTreeTestHelper helper(&tree);
CHECK_EQ(NULL, helper.Walk(&entry1));
CHECK_EQ(NULL, helper.Walk(&entry2));
CHECK_EQ(NULL, helper.Walk(&entry3));
CodeEntry* path[] = {NULL, &entry1, NULL, &entry2, NULL, NULL, &entry3, NULL};
Vector<CodeEntry*> path_vec(path, sizeof(path) / sizeof(path[0]));
tree.AddPathFromStart(path_vec);
CHECK_EQ(NULL, helper.Walk(&entry2));
CHECK_EQ(NULL, helper.Walk(&entry3));
ProfileNode* node1 = helper.Walk(&entry1);
CHECK_NE(NULL, node1);
CHECK_EQ(0, node1->total_ticks());
CHECK_EQ(0, node1->self_ticks());
CHECK_EQ(NULL, helper.Walk(&entry1, &entry1));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry3));
ProfileNode* node2 = helper.Walk(&entry1, &entry2);
CHECK_NE(NULL, node2);
CHECK_NE(node1, node2);
CHECK_EQ(0, node2->total_ticks());
CHECK_EQ(0, node2->self_ticks());
CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry1));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry2));
ProfileNode* node3 = helper.Walk(&entry1, &entry2, &entry3);
CHECK_NE(NULL, node3);
CHECK_NE(node1, node3);
CHECK_NE(node2, node3);
CHECK_EQ(0, node3->total_ticks());
CHECK_EQ(1, node3->self_ticks());
tree.AddPathFromStart(path_vec);
CHECK_EQ(node1, helper.Walk(&entry1));
CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
CHECK_EQ(0, node1->total_ticks());
CHECK_EQ(0, node1->self_ticks());
CHECK_EQ(0, node2->total_ticks());
CHECK_EQ(0, node2->self_ticks());
CHECK_EQ(0, node3->total_ticks());
CHECK_EQ(2, node3->self_ticks());
CodeEntry* path2[] = {&entry1, &entry2, &entry2};
Vector<CodeEntry*> path2_vec(path2, sizeof(path2) / sizeof(path2[0]));
tree.AddPathFromStart(path2_vec);
CHECK_EQ(NULL, helper.Walk(&entry2));
CHECK_EQ(NULL, helper.Walk(&entry3));
CHECK_EQ(node1, helper.Walk(&entry1));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry1));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry3));
CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry1));
CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
CHECK_EQ(0, node3->total_ticks());
CHECK_EQ(2, node3->self_ticks());
ProfileNode* node4 = helper.Walk(&entry1, &entry2, &entry2);
CHECK_NE(NULL, node4);
CHECK_NE(node3, node4);
CHECK_EQ(0, node4->total_ticks());
CHECK_EQ(1, node4->self_ticks());
}
TEST(ProfileTreeAddPathFromEnd) {
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
ProfileTree tree;
ProfileTreeTestHelper helper(&tree);
CHECK_EQ(NULL, helper.Walk(&entry1));
CHECK_EQ(NULL, helper.Walk(&entry2));
CHECK_EQ(NULL, helper.Walk(&entry3));
CodeEntry* path[] = {NULL, &entry3, NULL, &entry2, NULL, NULL, &entry1, NULL};
Vector<CodeEntry*> path_vec(path, sizeof(path) / sizeof(path[0]));
tree.AddPathFromEnd(path_vec);
CHECK_EQ(NULL, helper.Walk(&entry2));
CHECK_EQ(NULL, helper.Walk(&entry3));
ProfileNode* node1 = helper.Walk(&entry1);
CHECK_NE(NULL, node1);
CHECK_EQ(0, node1->total_ticks());
CHECK_EQ(0, node1->self_ticks());
CHECK_EQ(NULL, helper.Walk(&entry1, &entry1));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry3));
ProfileNode* node2 = helper.Walk(&entry1, &entry2);
CHECK_NE(NULL, node2);
CHECK_NE(node1, node2);
CHECK_EQ(0, node2->total_ticks());
CHECK_EQ(0, node2->self_ticks());
CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry1));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry2));
ProfileNode* node3 = helper.Walk(&entry1, &entry2, &entry3);
CHECK_NE(NULL, node3);
CHECK_NE(node1, node3);
CHECK_NE(node2, node3);
CHECK_EQ(0, node3->total_ticks());
CHECK_EQ(1, node3->self_ticks());
tree.AddPathFromEnd(path_vec);
CHECK_EQ(node1, helper.Walk(&entry1));
CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
CHECK_EQ(0, node1->total_ticks());
CHECK_EQ(0, node1->self_ticks());
CHECK_EQ(0, node2->total_ticks());
CHECK_EQ(0, node2->self_ticks());
CHECK_EQ(0, node3->total_ticks());
CHECK_EQ(2, node3->self_ticks());
CodeEntry* path2[] = {&entry2, &entry2, &entry1};
Vector<CodeEntry*> path2_vec(path2, sizeof(path2) / sizeof(path2[0]));
tree.AddPathFromEnd(path2_vec);
CHECK_EQ(NULL, helper.Walk(&entry2));
CHECK_EQ(NULL, helper.Walk(&entry3));
CHECK_EQ(node1, helper.Walk(&entry1));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry1));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry3));
CHECK_EQ(node2, helper.Walk(&entry1, &entry2));
CHECK_EQ(NULL, helper.Walk(&entry1, &entry2, &entry1));
CHECK_EQ(node3, helper.Walk(&entry1, &entry2, &entry3));
CHECK_EQ(0, node3->total_ticks());
CHECK_EQ(2, node3->self_ticks());
ProfileNode* node4 = helper.Walk(&entry1, &entry2, &entry2);
CHECK_NE(NULL, node4);
CHECK_NE(node3, node4);
CHECK_EQ(0, node4->total_ticks());
CHECK_EQ(1, node4->self_ticks());
}
TEST(ProfileTreeCalculateTotalTicks) {
ProfileTree empty_tree;
CHECK_EQ(0, empty_tree.root()->total_ticks());
CHECK_EQ(0, empty_tree.root()->self_ticks());
empty_tree.CalculateTotalTicks();
CHECK_EQ(0, empty_tree.root()->total_ticks());
CHECK_EQ(0, empty_tree.root()->self_ticks());
empty_tree.root()->IncrementSelfTicks();
CHECK_EQ(0, empty_tree.root()->total_ticks());
CHECK_EQ(1, empty_tree.root()->self_ticks());
empty_tree.CalculateTotalTicks();
CHECK_EQ(1, empty_tree.root()->total_ticks());
CHECK_EQ(1, empty_tree.root()->self_ticks());
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry* e1_path[] = {&entry1};
Vector<CodeEntry*> e1_path_vec(
e1_path, sizeof(e1_path) / sizeof(e1_path[0]));
CodeEntry* e1_e2_path[] = {&entry1, &entry2};
Vector<CodeEntry*> e1_e2_path_vec(
e1_e2_path, sizeof(e1_e2_path) / sizeof(e1_e2_path[0]));
ProfileTree flat_tree;
ProfileTreeTestHelper flat_helper(&flat_tree);
flat_tree.AddPathFromStart(e1_path_vec);
flat_tree.AddPathFromStart(e1_path_vec);
flat_tree.AddPathFromStart(e1_e2_path_vec);
flat_tree.AddPathFromStart(e1_e2_path_vec);
flat_tree.AddPathFromStart(e1_e2_path_vec);
// Results in {root,0,0} -> {entry1,0,2} -> {entry2,0,3}
CHECK_EQ(0, flat_tree.root()->total_ticks());
CHECK_EQ(0, flat_tree.root()->self_ticks());
ProfileNode* node1 = flat_helper.Walk(&entry1);
CHECK_NE(NULL, node1);
CHECK_EQ(0, node1->total_ticks());
CHECK_EQ(2, node1->self_ticks());
ProfileNode* node2 = flat_helper.Walk(&entry1, &entry2);
CHECK_NE(NULL, node2);
CHECK_EQ(0, node2->total_ticks());
CHECK_EQ(3, node2->self_ticks());
flat_tree.CalculateTotalTicks();
// Must calculate {root,5,0} -> {entry1,5,2} -> {entry2,3,3}
CHECK_EQ(5, flat_tree.root()->total_ticks());
CHECK_EQ(0, flat_tree.root()->self_ticks());
CHECK_EQ(5, node1->total_ticks());
CHECK_EQ(2, node1->self_ticks());
CHECK_EQ(3, node2->total_ticks());
CHECK_EQ(3, node2->self_ticks());
CodeEntry* e2_path[] = {&entry2};
Vector<CodeEntry*> e2_path_vec(
e2_path, sizeof(e2_path) / sizeof(e2_path[0]));
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
CodeEntry* e3_path[] = {&entry3};
Vector<CodeEntry*> e3_path_vec(
e3_path, sizeof(e3_path) / sizeof(e3_path[0]));
ProfileTree wide_tree;
ProfileTreeTestHelper wide_helper(&wide_tree);
wide_tree.AddPathFromStart(e1_path_vec);
wide_tree.AddPathFromStart(e1_path_vec);
wide_tree.AddPathFromStart(e1_e2_path_vec);
wide_tree.AddPathFromStart(e2_path_vec);
wide_tree.AddPathFromStart(e2_path_vec);
wide_tree.AddPathFromStart(e2_path_vec);
wide_tree.AddPathFromStart(e3_path_vec);
wide_tree.AddPathFromStart(e3_path_vec);
wide_tree.AddPathFromStart(e3_path_vec);
wide_tree.AddPathFromStart(e3_path_vec);
// Results in -> {entry1,0,2} -> {entry2,0,1}
// {root,0,0} -> {entry2,0,3}
// -> {entry3,0,4}
CHECK_EQ(0, wide_tree.root()->total_ticks());
CHECK_EQ(0, wide_tree.root()->self_ticks());
node1 = wide_helper.Walk(&entry1);
CHECK_NE(NULL, node1);
CHECK_EQ(0, node1->total_ticks());
CHECK_EQ(2, node1->self_ticks());
ProfileNode* node1_2 = wide_helper.Walk(&entry1, &entry2);
CHECK_NE(NULL, node1_2);
CHECK_EQ(0, node1_2->total_ticks());
CHECK_EQ(1, node1_2->self_ticks());
node2 = wide_helper.Walk(&entry2);
CHECK_NE(NULL, node2);
CHECK_EQ(0, node2->total_ticks());
CHECK_EQ(3, node2->self_ticks());
ProfileNode* node3 = wide_helper.Walk(&entry3);
CHECK_NE(NULL, node3);
CHECK_EQ(0, node3->total_ticks());
CHECK_EQ(4, node3->self_ticks());
wide_tree.CalculateTotalTicks();
// Calculates -> {entry1,3,2} -> {entry2,1,1}
// {root,10,0} -> {entry2,3,3}
// -> {entry3,4,4}
CHECK_EQ(10, wide_tree.root()->total_ticks());
CHECK_EQ(0, wide_tree.root()->self_ticks());
CHECK_EQ(3, node1->total_ticks());
CHECK_EQ(2, node1->self_ticks());
CHECK_EQ(1, node1_2->total_ticks());
CHECK_EQ(1, node1_2->self_ticks());
CHECK_EQ(3, node2->total_ticks());
CHECK_EQ(3, node2->self_ticks());
CHECK_EQ(4, node3->total_ticks());
CHECK_EQ(4, node3->self_ticks());
}
static inline i::Address ToAddress(int n) {
return reinterpret_cast<i::Address>(n);
}
TEST(CodeMapAddCode) {
CodeMap code_map;
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
StaticNameCodeEntry entry3(i::Logger::FUNCTION_TAG, "ccc");
StaticNameCodeEntry entry4(i::Logger::FUNCTION_TAG, "ddd");
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
code_map.AddCode(ToAddress(0x1900), &entry3, 0x50);
code_map.AddCode(ToAddress(0x1950), &entry4, 0x10);
CHECK_EQ(NULL, code_map.FindEntry(0));
CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1500 - 1)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500 + 0x100)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500 + 0x200 - 1)));
CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700)));
CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700 + 0x50)));
CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700 + 0x100 - 1)));
CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1700 + 0x100)));
CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1900 - 1)));
CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1900)));
CHECK_EQ(&entry3, code_map.FindEntry(ToAddress(0x1900 + 0x28)));
CHECK_EQ(&entry4, code_map.FindEntry(ToAddress(0x1950)));
CHECK_EQ(&entry4, code_map.FindEntry(ToAddress(0x1950 + 0x7)));
CHECK_EQ(&entry4, code_map.FindEntry(ToAddress(0x1950 + 0x10 - 1)));
CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1950 + 0x10)));
CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0xFFFFFFFF)));
}
TEST(CodeMapMoveAndDeleteCode) {
CodeMap code_map;
StaticNameCodeEntry entry1(i::Logger::FUNCTION_TAG, "aaa");
StaticNameCodeEntry entry2(i::Logger::FUNCTION_TAG, "bbb");
code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700)));
code_map.MoveCode(ToAddress(0x1500), ToAddress(0x1800));
CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1500)));
CHECK_EQ(&entry2, code_map.FindEntry(ToAddress(0x1700)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1800)));
code_map.DeleteCode(ToAddress(0x1700));
CHECK_EQ(NULL, code_map.FindEntry(ToAddress(0x1700)));
CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1800)));
}

37
deps/v8/test/mjsunit/array-push.js

@ -66,3 +66,40 @@
"after .push(22, 23, 24, 25, 26, 27, 28)");
}
})();
// Excerises various pushes to the array at the end of new space.
(function() {
var a = undefined;
for (var i = 0; i < 7; i++) {
a = [];
assertEquals(1, a.push(1));
assertEquals(2, a.push(2));
assertEquals(3, a.push(3));
assertEquals(4, a.push(4));
assertEquals(5, a.push(5));
assertEquals(6, a.push(6));
assertEquals(7, a.push(7));
assertEquals(8, a.push(8));
assertEquals(9, a.push(9));
assertEquals(10, a.push(10));
assertEquals(11, a.push(11));
assertEquals(12, a.push(12));
assertEquals(13, a.push(13));
assertEquals(14, a.push(14));
assertEquals(15, a.push(15));
assertEquals(16, a.push(16));
assertEquals(17, a.push(17));
assertEquals(18, a.push(18));
assertEquals(19, a.push(19));
assertEquals(20, a.push(20));
assertEquals(21, a.push(21));
assertEquals(22, a.push(22));
assertEquals(23, a.push(23));
assertEquals(24, a.push(24));
assertEquals(25, a.push(25));
assertEquals(26, a.push(26));
assertEquals(27, a.push(27));
assertEquals(28, a.push(28));
assertEquals(29, a.push(29));
}
})();

56
deps/v8/src/grisu3.h → deps/v8/test/mjsunit/compiler/loopcount.js

@ -25,31 +25,31 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_GRISU3_H_
#define V8_GRISU3_H_
namespace v8 {
namespace internal {
// Grisu3 will produce at most kGrisu3MaximalLength digits. This does not
// include the terminating '\0' character.
static const int kGrisu3MaximalLength = 17;
// Provides a decimal representation of v.
// v must satisfy v != 0 and it must not be Infinity or NaN.
// Returns true if it succeeds, otherwise the result can not be trusted.
// There will be *length digits inside the buffer followed by a null terminator.
// If the function returns true then
// v == (double) (buffer * 10^(decimal-point - length)).
// The digits in the buffer are the shortest representation possible: no
// 0.099999999999 instead of 0.1.
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the buffer will contain the
// one closest to v.
// The variable 'sign' will be '0' if the given number is positive, and '1'
// otherwise.
bool grisu3(double d, char* buffer, int* sign, int* length, int* decimal_point);
} } // namespace v8::internal
#endif // V8_GRISU3_H_
// Test postfix count operations with smis.
function f1() { var x = 0x3fffffff; x++; return x; }
assertEquals(0x40000000, f1());
function f2() { var x = -0x40000000; x--; return x; }
assertEquals(-0x40000001, f2());
function f3(x) { x = x & 0x3fffffff; x++; return x; }
assertEquals(0x40000000, f3(0x3fffffff));
function f4() {
var i;
for (i = 0x3ffffffe; i <= 0x3fffffff; i++) {}
return i;
}
assertEquals(0x40000000, f4());
function f5() {
var i;
for (i = -0x3fffffff; i >= -0x40000000; i--) {}
return i;
}
assertEquals(-0x40000001, f5());

83
deps/v8/test/mjsunit/debug-liveedit-patch-positions-replace.js

@ -0,0 +1,83 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
// Get the Debug object exposed from the debug context global object.
// Scenario: a function is being changed, which causes enclosing function to
// have its positions patched; position changing requires new instance of Code
// object to be introduced; the function happens to be on stack at this moment;
// later it will resume over new instance of Code.
// Before the change 2 rinfo are 22 characters away from each other. After the
// change they are 114 characters away from each other. New instance of Code is
// required when those numbers cross the border value of 64 (in any direction).
Debug = debug.Debug
eval(
"function BeingReplaced(changer, opt_x, opt_y) {\n" +
" changer();\n" +
" var res = new Object();\n" +
" if (opt_x) { res.y = opt_y; }\n" +
" res.a = (function() {})();\n" +
" return res.a;\n" +
"}"
);
var script = Debug.findScript(BeingReplaced);
var orig_body = "{}";
var patch_pos = script.source.indexOf(orig_body);
// Line long enough to change rinfo encoding.
var new_body_patch = "{return 'Capybara';" +
" " +
"}";
var change_log = new Array();
function Changer() {
Debug.LiveEditChangeScript(script, patch_pos, orig_body.length, new_body_patch, change_log);
print("Change log: " + JSON.stringify(change_log) + "\n");
}
function NoOp() {
}
function CallM(changer) {
// We expect call IC here after several function runs.
return BeingReplaced(changer);
}
// This several iterations should cause call IC for BeingReplaced call. This IC
// will keep reference to code object of BeingRepalced function. This reference
// should also be patched. Unfortunately, this is a manually checked fact (from
// debugger or debug print) and doesn't work as an automatic test.
CallM(NoOp);
CallM(NoOp);
CallM(NoOp);
var res = CallM(Changer);
assertEquals("Capybara", res);

93
deps/v8/test/mjsunit/debug-liveedit-patch-positions.js

@ -0,0 +1,93 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug
// Get the Debug object exposed from the debug context global object.
// Scenario: some function is being edited; the outer function has to have its
// positions patched. Accoring to a special markup of function text
// corresponding byte-code PCs should conicide before change and after it.
Debug = debug.Debug
eval(
"function F1() { return 5; }\n" +
"function ChooseAnimal(/*$*/ ) {\n" +
"/*$*/ var x = F1(/*$*/ );\n" +
"/*$*/ var res/*$*/ =/*$*/ (function() { return 'Cat'; } )();\n" +
"/*$*/ var y/*$*/ = F2(/*$*/ F1()/*$*/ , F1(/*$*/ )/*$*/ );\n" +
"/*$*/ if (/*$*/ x.toString(/*$*/ )) { /*$*/ y = 3;/*$*/ } else {/*$*/ y = 8;/*$*/ }\n" +
"/*$*/ var z = /*$*/ x * y;\n" +
"/*$*/ return/*$*/ res/*$*/ + z;/*$*/ }\n" +
"function F2(x, y) { return x + y; }"
);
// Find all *$* markers in text of the function and read corresponding statement
// PCs.
function ReadMarkerPositions(func) {
var text = func.toString();
var positions = new Array();
var match;
var pattern = /\/\*\$\*\//g;
while ((match = pattern.exec(text)) != null) {
positions.push(match.index);
}
return positions;
}
function ReadPCMap(func, positions) {
var res = new Array();
for (var i = 0; i < positions.length; i++) {
res.push(Debug.LiveEditChangeScript.GetPcFromSourcePos(func, positions[i]));
}
return res;
}
var res = ChooseAnimal();
assertEquals("Cat15", res);
var markerPositionsBefore = ReadMarkerPositions(ChooseAnimal);
var pcArrayBefore = ReadPCMap(ChooseAnimal, markerPositionsBefore);
var script = Debug.findScript(ChooseAnimal);
var orig_animal = "'Cat'";
var patch_pos = script.source.indexOf(orig_animal);
var new_animal_patch = "'Capybara'";
var change_log = new Array();
Debug.LiveEditChangeScript(script, patch_pos, orig_animal.length, new_animal_patch, change_log);
print("Change log: " + JSON.stringify(change_log) + "\n");
var res = ChooseAnimal();
assertEquals("Capybara15", res);
var markerPositionsAfter = ReadMarkerPositions(ChooseAnimal);
var pcArrayAfter = ReadPCMap(ChooseAnimal, markerPositionsAfter);
assertArrayEquals(pcArrayBefore, pcArrayAfter);

26
deps/v8/test/cctest/gay_shortest.h → deps/v8/test/mjsunit/regexp-cache-replace.js

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -25,20 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef GAY_SHORTEST_H_
#define GAY_SHORTEST_H_
// Tests that regexp caching isn't messing things up.
namespace v8 {
namespace internal {
struct GayShortest {
double v;
const char* representation;
int decimal_point;
};
Vector<const GayShortest> PrecomputedShortestRepresentations();
} } // namespace v8::internal
#endif // GAY_SHORTEST_H_
var re1 = /(o)/g;
assertEquals("FxxBar", "FooBar".replace(re1, "x"));
assertEquals("o", RegExp.$1);
assertTrue(/(x)/.test("abcxdef"));
assertEquals("x", RegExp.$1);
assertEquals("FxxBar", "FooBar".replace(re1, "x"));
assertEquals("o", RegExp.$1);

286
deps/v8/tools/generate-ten-powers.scm

@ -1,286 +0,0 @@
;; Copyright 2010 the V8 project authors. All rights reserved.
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are
;; met:
;;
;; * Redistributions of source code must retain the above copyright
;; notice, this list of conditions and the following disclaimer.
;; * Redistributions in binary form must reproduce the above
;; copyright notice, this list of conditions and the following
;; disclaimer in the documentation and/or other materials provided
;; with the distribution.
;; * Neither the name of Google Inc. nor the names of its
;; contributors may be used to endorse or promote products derived
;; from this software without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;; This is a Scheme script for the Bigloo compiler. Bigloo must be compiled with
;; support for bignums. The compilation of the script can be done as follows:
;; bigloo -static-bigloo -o generate-ten-powers generate-ten-powers.scm
;;
;; Generate approximations of 10^k.
(module gen-ten-powers
(static (class Cached-Fast
v::bignum
e::bint
exact?::bool))
(main my-main))
;;----------------bignum shifts -----------------------------------------------
(define (bit-lshbx::bignum x::bignum by::bint)
(if (<fx by 0)
#z0
(*bx x (exptbx #z2 (fixnum->bignum by)))))
(define (bit-rshbx::bignum x::bignum by::bint)
(if (<fx by 0)
#z0
(/bx x (exptbx #z2 (fixnum->bignum by)))))
;;----------------the actual power generation -------------------------------
;; e should be an indication. it might be too small.
(define (round-n-cut n e nb-bits)
(define max-container (- (bit-lshbx #z1 nb-bits) 1))
(define (round n)
(case *round*
((down) n)
((up)
(+bx n
;; with the -1 it will only round up if the cut off part is
;; non-zero
(-bx (bit-lshbx #z1
(-fx (+fx e nb-bits) 1))
#z1)))
((round)
(+bx n
(bit-lshbx #z1
(-fx (+fx e nb-bits) 2))))))
(let* ((shift (-fx (+fx e nb-bits) 1))
(cut (bit-rshbx (round n) shift))
(exact? (=bx n (bit-lshbx cut shift))))
(if (<=bx cut max-container)
(values cut e exact?)
(round-n-cut n (+fx e 1) nb-bits))))
(define (rounded-/bx x y)
(case *round*
((down) (/bx x y))
((up) (+bx (/bx x y) #z1))
((round) (let ((tmp (/bx (*bx #z2 x) y)))
(if (zerobx? (remainderbx tmp #z2))
(/bx tmp #z2)
(+bx (/bx tmp #z2) #z1))))))
(define (generate-powers from to mantissa-size)
(let* ((nb-bits mantissa-size)
(offset (- from))
(nb-elements (+ (- from) to 1))
(vec (make-vector nb-elements))
(max-container (- (bit-lshbx #z1 nb-bits) 1)))
;; the negative ones. 10^-1, 10^-2, etc.
;; We already know, that we can't be exact, so exact? will always be #f.
;; Basically we will have a ten^i that we will *10 at each iteration. We
;; want to create the matissa of 1/ten^i. However the mantissa must be
;; normalized (start with a 1). -> we have to shift the number.
;; We shift by multiplying with two^e. -> We encode two^e*(1/ten^i) ==
;; two^e/ten^i.
(let loop ((i 1)
(ten^i #z10)
(two^e #z1)
(e 0))
(unless (< (- i) from)
(if (>bx (/bx (*bx #z2 two^e) ten^i) max-container)
;; another shift would make the number too big. We are
;; hence normalized now.
(begin
(vector-set! vec (-fx offset i)
(instantiate::Cached-Fast
(v (rounded-/bx two^e ten^i))
(e (negfx e))
(exact? #f)))
(loop (+fx i 1) (*bx ten^i #z10) two^e e))
(loop i ten^i (bit-lshbx two^e 1) (+fx e 1)))))
;; the positive ones 10^0, 10^1, etc.
;; start with 1.0. mantissa: 10...0 (1 followed by nb-bits-1 bits)
;; -> e = -(nb-bits-1)
;; exact? is true when the container can still hold the complete 10^i
(let loop ((i 0)
(n (bit-lshbx #z1 (-fx nb-bits 1)))
(e (-fx 1 nb-bits)))
(when (<= i to)
(receive (cut e exact?)
(round-n-cut n e nb-bits)
(vector-set! vec (+fx i offset)
(instantiate::Cached-Fast
(v cut)
(e e)
(exact? exact?)))
(loop (+fx i 1) (*bx n #z10) e))))
vec))
(define (print-c powers from to struct-type
cache-name max-distance-name offset-name macro64)
(define (display-power power k)
(with-access::Cached-Fast power (v e exact?)
(let ((tmp-p (open-output-string)))
;; really hackish way of getting the digits
(display (format "~x" v) tmp-p)
(let ((str (close-output-port tmp-p)))
(printf " {~a(0x~a, ~a), ~a, ~a},\n"
macro64
(substring str 0 8)
(substring str 8 16)
e
k)))))
(define (print-powers-reduced n)
(print "static const " struct-type " " cache-name
"(" n ")"
"[] = {")
(let loop ((i 0)
(nb-elements 0)
(last-e 0)
(max-distance 0))
(cond
((>= i (vector-length powers))
(print " };")
(print "static const int " max-distance-name "(" n ") = "
max-distance ";")
(print "// nb elements (" n "): " nb-elements))
(else
(let* ((power (vector-ref powers i))
(e (Cached-Fast-e power)))
(display-power power (+ i from))
(loop (+ i n)
(+ nb-elements 1)
e
(cond
((=fx i 0) max-distance)
((> (- e last-e) max-distance) (- e last-e))
(else max-distance))))))))
(print "// Copyright 2010 the V8 project authors. All rights reserved.")
(print "// ------------ GENERATED FILE ----------------")
(print "// command used:")
(print "// "
(apply string-append (map (lambda (str)
(string-append " " str))
*main-args*))
" // NOLINT")
(print)
(print
"// This file is intended to be included inside another .h or .cc files\n"
"// with the following defines set:\n"
"// GRISU_CACHE_STRUCT: should expand to the name of a struct that will\n"
"// hold the cached powers of ten. Each entry will hold a 64-bit\n"
"// significand, a 16-bit signed binary exponent, and a 16-bit\n"
"// signed decimal exponent. Each entry will be constructed as follows:\n"
"// { significand, binary_exponent, decimal_exponent }.\n"
"// GRISU_CACHE_NAME(i): generates the name for the different caches.\n"
"// The parameter i will be a number in the range 1-20. A cache will\n"
"// hold every i'th element of a full cache. GRISU_CACHE_NAME(1) will\n"
"// thus hold all elements. The higher i the fewer elements it has.\n"
"// Ideally the user should only reference one cache and let the\n"
"// compiler remove the unused ones.\n"
"// GRISU_CACHE_MAX_DISTANCE(i): generates the name for the maximum\n"
"// binary exponent distance between all elements of a given cache.\n"
"// GRISU_CACHE_OFFSET: is used as variable name for the decimal\n"
"// exponent offset. It is equal to -cache[0].decimal_exponent.\n"
"// GRISU_UINT64_C: used to construct 64-bit values in a platform\n"
"// independent way. In order to encode 0x123456789ABCDEF0 the macro\n"
"// will be invoked as follows: GRISU_UINT64_C(0x12345678,9ABCDEF0).\n")
(print)
(print-powers-reduced 1)
(print-powers-reduced 2)
(print-powers-reduced 3)
(print-powers-reduced 4)
(print-powers-reduced 5)
(print-powers-reduced 6)
(print-powers-reduced 7)
(print-powers-reduced 8)
(print-powers-reduced 9)
(print-powers-reduced 10)
(print-powers-reduced 11)
(print-powers-reduced 12)
(print-powers-reduced 13)
(print-powers-reduced 14)
(print-powers-reduced 15)
(print-powers-reduced 16)
(print-powers-reduced 17)
(print-powers-reduced 18)
(print-powers-reduced 19)
(print-powers-reduced 20)
(print "static const int GRISU_CACHE_OFFSET = " (- from) ";"))
;;----------------main --------------------------------------------------------
(define *main-args* #f)
(define *mantissa-size* #f)
(define *dest* #f)
(define *round* #f)
(define *from* #f)
(define *to* #f)
(define (my-main args)
(set! *main-args* args)
(args-parse (cdr args)
(section "Help")
(("?") (args-parse-usage #f))
((("-h" "--help") (help "?, -h, --help" "This help message"))
(args-parse-usage #f))
(section "Misc")
(("-o" ?file (help "The output file"))
(set! *dest* file))
(("--mantissa-size" ?size (help "Container-size in bits"))
(set! *mantissa-size* (string->number size)))
(("--round" ?direction (help "Round bignums (down, round or up)"))
(set! *round* (string->symbol direction)))
(("--from" ?from (help "start at 10^from"))
(set! *from* (string->number from)))
(("--to" ?to (help "go up to 10^to"))
(set! *to* (string->number to)))
(else
(print "Illegal argument `" else "'. Usage:")
(args-parse-usage #f)))
(when (not *from*)
(error "generate-ten-powers"
"Missing from"
#f))
(when (not *to*)
(error "generate-ten-powers"
"Missing to"
#f))
(when (not *mantissa-size*)
(error "generate-ten-powers"
"Missing mantissa size"
#f))
(when (not (memv *round* '(up down round)))
(error "generate-ten-powers"
"Missing round-method"
*round*))
(let ((dividers (generate-powers *from* *to* *mantissa-size*))
(p (if (not *dest*)
(current-output-port)
(open-output-file *dest*))))
(unwind-protect
(with-output-to-port p
(lambda ()
(print-c dividers *from* *to*
"GRISU_CACHE_STRUCT" "GRISU_CACHE_NAME"
"GRISU_CACHE_MAX_DISTANCE" "GRISU_CACHE_OFFSET"
"GRISU_UINT64_C"
)))
(if *dest*
(close-output-port p)))))

9
deps/v8/tools/gyp/v8.gyp

@ -229,7 +229,6 @@
'../../src/builtins.cc',
'../../src/builtins.h',
'../../src/bytecodes-irregexp.h',
'../../src/cached_powers.h',
'../../src/char-predicates-inl.h',
'../../src/char-predicates.h',
'../../src/checks.cc',
@ -265,8 +264,6 @@
'../../src/disassembler.cc',
'../../src/disassembler.h',
'../../src/dtoa-config.c',
'../../src/diy_fp.h',
'../../src/double.h',
'../../src/execution.cc',
'../../src/execution.h',
'../../src/factory.cc',
@ -287,8 +284,6 @@
'../../src/global-handles.cc',
'../../src/global-handles.h',
'../../src/globals.h',
'../../src/grisu3.h',
'../../src/grisu3.cc',
'../../src/handles-inl.h',
'../../src/handles.cc',
'../../src/handles.h',
@ -335,11 +330,13 @@
'../../src/parser.cc',
'../../src/parser.h',
'../../src/platform.h',
'../../src/powers_ten.h',
'../../src/prettyprinter.cc',
'../../src/prettyprinter.h',
'../../src/property.cc',
'../../src/property.h',
'../../src/profile-generator-inl.h',
'../../src/profile-generator.cc',
'../../src/profile-generator.h',
'../../src/regexp-macro-assembler-irregexp-inl.h',
'../../src/regexp-macro-assembler-irregexp.cc',
'../../src/regexp-macro-assembler-irregexp.h',

10
deps/v8/tools/v8.xcodeproj/project.pbxproj

@ -212,6 +212,8 @@
9F11D9A1105AF0A300EBE5B2 /* heap-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F11D99E105AF0A300EBE5B2 /* heap-profiler.cc */; };
9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F4B7B870FCC877A00DC4117 /* log-utils.cc */; };
9F73E3B1114E61A100F84A5A /* profile-generator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F73E3AF114E61A100F84A5A /* profile-generator.cc */; };
9F73E3B2114E61A100F84A5A /* profile-generator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F73E3AF114E61A100F84A5A /* profile-generator.cc */; };
9F92FAA90F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
9F92FAAA0F8F28AD0089F02C /* func-name-inferrer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */; };
9FBE03DE10BD409900F8BFBA /* fast-codegen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FBE03DC10BD409900F8BFBA /* fast-codegen.cc */; };
@ -552,6 +554,9 @@
9F11D99F105AF0A300EBE5B2 /* heap-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "heap-profiler.h"; sourceTree = "<group>"; };
9F4B7B870FCC877A00DC4117 /* log-utils.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "log-utils.cc"; sourceTree = "<group>"; };
9F4B7B880FCC877A00DC4117 /* log-utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-utils.h"; sourceTree = "<group>"; };
9F73E3AE114E61A100F84A5A /* profile-generator-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "profile-generator-inl.h"; sourceTree = "<group>"; };
9F73E3AF114E61A100F84A5A /* profile-generator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "profile-generator.cc"; sourceTree = "<group>"; };
9F73E3B0114E61A100F84A5A /* profile-generator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "profile-generator.h"; sourceTree = "<group>"; };
9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "func-name-inferrer.cc"; sourceTree = "<group>"; };
9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "func-name-inferrer.h"; sourceTree = "<group>"; };
9FBE03DC10BD409900F8BFBA /* fast-codegen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "fast-codegen.cc"; sourceTree = "<group>"; };
@ -804,6 +809,9 @@
897FF16A0E719B8F00D62E90 /* platform.h */,
897FF16B0E719B8F00D62E90 /* prettyprinter.cc */,
897FF16C0E719B8F00D62E90 /* prettyprinter.h */,
9F73E3AE114E61A100F84A5A /* profile-generator-inl.h */,
9F73E3AF114E61A100F84A5A /* profile-generator.cc */,
9F73E3B0114E61A100F84A5A /* profile-generator.h */,
897FF16D0E719B8F00D62E90 /* property.cc */,
897FF16E0E719B8F00D62E90 /* property.h */,
89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */,
@ -1239,6 +1247,7 @@
9F11D9A0105AF0A300EBE5B2 /* heap-profiler.cc in Sources */,
9FBE03DE10BD409900F8BFBA /* fast-codegen.cc in Sources */,
9FBE03E210BD40EA00F8BFBA /* fast-codegen-ia32.cc in Sources */,
9F73E3B2114E61A100F84A5A /* profile-generator.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@ -1348,6 +1357,7 @@
9F11D9A1105AF0A300EBE5B2 /* heap-profiler.cc in Sources */,
9FBE03DF10BD409900F8BFBA /* fast-codegen.cc in Sources */,
9FBE03E510BD412600F8BFBA /* fast-codegen-arm.cc in Sources */,
9F73E3B1114E61A100F84A5A /* profile-generator.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};

12
deps/v8/tools/visual_studio/v8_base.vcproj

@ -684,6 +684,18 @@
RelativePath="..\..\src\parser.h"
>
</File>
<File
RelativePath="..\..\src\profile-generator.cc"
>
</File>
<File
RelativePath="..\..\src\profile-generator.h"
>
</File>
<File
RelativePath="..\..\src\profile-generator-inl.h"
>
</File>
<File
RelativePath="..\..\src\platform-win32.cc"
>

12
deps/v8/tools/visual_studio/v8_base_arm.vcproj

@ -688,6 +688,18 @@
RelativePath="..\..\src\parser.h"
>
</File>
<File
RelativePath="..\..\src\profile-generator.cc"
>
</File>
<File
RelativePath="..\..\src\profile-generator.h"
>
</File>
<File
RelativePath="..\..\src\profile-generator-inl.h"
>
</File>
<File
RelativePath="..\..\src\platform-win32.cc"
>

12
deps/v8/tools/visual_studio/v8_base_x64.vcproj

@ -685,6 +685,18 @@
RelativePath="..\..\src\parser.h"
>
</File>
<File
RelativePath="..\..\src\profile-generator.cc"
>
</File>
<File
RelativePath="..\..\src\profile-generator.h"
>
</File>
<File
RelativePath="..\..\src\profile-generator-inl.h"
>
</File>
<File
RelativePath="..\..\src\platform-win32.cc"
>

2
doc/api.txt

@ -687,7 +687,7 @@ Asynchronously reads the entire contents of a file. Example:
--------------------------------
fs.readFile("/etc/passwd", function (err, data) {
if (err) throw err;
sys.puts(content);
sys.puts(data);
});
--------------------------------
+

26
lib/dns.js

@ -1,4 +1,4 @@
process.binding('dns');
var dns = process.binding('dns');
exports.resolve = function (domain, type_, callback_) {
var type, callback;
@ -19,32 +19,32 @@ exports.resolve = function (domain, type_, callback_) {
}
}
exports.resolve4 = process.dns.resolve4;
exports.resolve6 = process.dns.resolve6;
exports.resolveMx = process.dns.resolveMx;
exports.resolveTxt = process.dns.resolveTxt;
exports.resolveSrv = process.dns.resolveSrv;
exports.reverse = process.dns.reverse;
exports.resolve4 = dns.resolve4;
exports.resolve6 = dns.resolve6;
exports.resolveMx = dns.resolveMx;
exports.resolveTxt = dns.resolveTxt;
exports.resolveSrv = dns.resolveSrv;
exports.reverse = dns.reverse;
// ERROR CODES
// timeout, SERVFAIL or similar.
exports.TEMPFAIL = process.dns.TEMPFAIL;
exports.TEMPFAIL = dns.TEMPFAIL;
// got garbled reply.
exports.PROTOCOL = process.dns.PROTOCOL;
exports.PROTOCOL = dns.PROTOCOL;
// domain does not exists.
exports.NXDOMAIN = process.dns.NXDOMAIN;
exports.NXDOMAIN = dns.NXDOMAIN;
// domain exists but no data of reqd type.
exports.NODATA = process.dns.NODATA;
exports.NODATA = dns.NODATA;
// out of memory while processing.
exports.NOMEM = process.dns.NOMEM;
exports.NOMEM = dns.NOMEM;
// the query is malformed.
exports.BADQUERY = process.dns.BADQUERY;
exports.BADQUERY = dns.BADQUERY;
var resolveMap = {
'A': exports.resolve4,

16
src/node.cc

@ -875,6 +875,9 @@ Handle<Value> EvalCX(const Arguments& args) {
// Create the new context
Persistent<Context> context = Context::New();
// Enter and compile script
context->Enter();
// Copy objects from global context, to our brand new context
Handle<Array> keys = sandbox->GetPropertyNames();
@ -882,12 +885,9 @@ Handle<Value> EvalCX(const Arguments& args) {
for (i = 0; i < keys->Length(); i++) {
Handle<String> key = keys->Get(Integer::New(i))->ToString();
Handle<Value> value = sandbox->Get(key);
context->Global()->Set(key, value->ToObject()->Clone());
context->Global()->Set(key, value);
}
// Enter and compile script
context->Enter();
// Catch errors
TryCatch try_catch;
@ -900,6 +900,14 @@ Handle<Value> EvalCX(const Arguments& args) {
result = script->Run();
if (result.IsEmpty()) {
result = ThrowException(try_catch.Exception());
} else {
// success! copy changes back onto the sandbox object.
keys = context->Global()->GetPropertyNames();
for (i = 0; i < keys->Length(); i++) {
Handle<String> key = keys->Get(Integer::New(i))->ToString();
Handle<Value> value = context->Global()->Get(key);
sandbox->Set(key, value);
}
}
}

10
src/node_dns.cc

@ -315,6 +315,11 @@ static Handle<Value> ResolveA(int type, const Arguments& args) {
String::New("Argument must be a string.")));
}
if (!args[1]->IsFunction()) {
return ThrowException(Exception::Error(
String::New("Missing callback argument")));
}
String::Utf8Value name(args[0]->ToString());
struct dns_query *query;
@ -421,6 +426,11 @@ static Handle<Value> Reverse(const Arguments& args) {
String::New("Argument must be a string.")));
}
if (!args[1]->IsFunction()) {
return ThrowException(Exception::Error(
String::New("Missing callback argument")));
}
String::Utf8Value ip_address(args[0]->ToString());
union {

4
src/node_stat_watcher.cc

@ -21,7 +21,7 @@ void StatWatcher::Initialize(Handle<Object> target) {
constructor_template = Persistent<FunctionTemplate>::New(t);
constructor_template->Inherit(EventEmitter::constructor_template);
constructor_template->InstanceTemplate()->SetInternalFieldCount(1);
constructor_template->SetClassName(String::NewSymbol("StatWatcherWatcher"));
constructor_template->SetClassName(String::NewSymbol("StatWatcher"));
change_symbol = NODE_PSYMBOL("change");
stop_symbol = NODE_PSYMBOL("stop");
@ -29,7 +29,7 @@ void StatWatcher::Initialize(Handle<Object> target) {
NODE_SET_PROTOTYPE_METHOD(constructor_template, "start", StatWatcher::Start);
NODE_SET_PROTOTYPE_METHOD(constructor_template, "stop", StatWatcher::Stop);
target->Set(String::NewSymbol("StatWatcherWatcher"), constructor_template->GetFunction());
target->Set(String::NewSymbol("StatWatcher"), constructor_template->GetFunction());
}

49
test/disabled/test-dns.js

@ -3,6 +3,17 @@ require("../common");
var dns = require("dns"),
sys = require("sys");
// Try resolution without callback
assert.throws(function () {
dns.resolve('google.com', 'A');
});
assert.throws(function () {
dns.resolve('127.0.0.1', 'PTR');
});
var hosts = ['example.com', 'example.org',
'ietf.org', // AAAA
'google.com', // MX, multiple A records
@ -21,22 +32,24 @@ while (i--) {
"| sed -E 's/[[:space:]]+/ /g' | cut -d ' ' -f 5- " +
"| sed -e 's/\\.$//'";
sys.exec(hostCmd).addCallback(checkDnsRecord(hosts[i], records[j]));
sys.exec(hostCmd, checkDnsRecord(hosts[i], records[j]));
}
}
function checkDnsRecord(host, record) {
var myHost = host,
myRecord = record;
return function(stdout) {
var expected = stdout.substr(0, stdout.length - 1).split("\n");
var resolution = dns.resolve(myHost, myRecord);
return function(err, stdout) {
var expected = [];
if(stdout.length)
expected = stdout.substr(0, stdout.length - 1).split("\n");
switch (myRecord) {
case "A":
case "AAAA":
resolution.addCallback(function (result, ttl, cname) {
dns.resolve(myHost, myRecord, function (error, result, ttl, cname) {
if(error) result = [];
cmpResults(expected, result, ttl, cname);
// do reverse lookup check
@ -48,12 +61,14 @@ function checkDnsRecord(host, record) {
"| cut -d \" \" -f 5-" +
"| sed -e 's/\\.$//'";
sys.exec(reverseCmd).addCallback(checkReverse(ip));
sys.exec(reverseCmd, checkReverse(ip));
}
});
break;
case "MX":
resolution.addCallback(function (result, ttl, cname) {
dns.resolve(myHost, myRecord, function (error, result, ttl, cname) {
if(error) result = [];
var strResult = [];
var ll = result.length;
while (ll--) {
@ -64,7 +79,9 @@ function checkDnsRecord(host, record) {
});
break;
case "TXT":
resolution.addCallback(function (result, ttl, cname) {
dns.resolve(myHost, myRecord, function (error, result, ttl, cname) {
if(error) result = [];
var strResult = [];
var ll = result.length;
while (ll--) {
@ -74,7 +91,9 @@ function checkDnsRecord(host, record) {
});
break;
case "SRV":
resolution.addCallback(function (result, ttl, cname) {
dns.resolve(myHost, myRecord, function (error, result, ttl, cname) {
if(error) result = [];
var strResult = [];
var ll = result.length;
while (ll--) {
@ -93,13 +112,11 @@ function checkDnsRecord(host, record) {
function checkReverse(ip) {
var myIp = ip;
return function (stdout) {
return function (errr, stdout) {
var expected = stdout.substr(0, stdout.length - 1).split("\n");
var reversing = dns.reverse(myIp);
reversing.addCallback(
function (domains, ttl, cname) {
reversing = dns.reverse(myIp, function (error, domains, ttl, cname) {
if(error) domains = [];
cmpResults(expected, domains, ttl, cname);
});
}
@ -114,6 +131,6 @@ function cmpResults(expected, result, ttl, cname) {
ll = expected.length;
while (ll--) {
assert.equal(result[ll], expected[ll]);
// puts("Result " + result[ll] + " was equal to expected " + expected[ll]);
puts("Result " + result[ll] + " was equal to expected " + expected[ll]);
}
}

24
test/simple/test-eval-cx.js

@ -14,14 +14,24 @@ process.evalcx('hello = 2');
assert.equal(5, hello);
code = "foo = 1; bar = 2;";
debug("pass values in and out");
code = "foo = 1;"
+ "bar = 2;"
+ "if (baz !== 3) throw new Error('test fail');";
foo = 2;
obj = { foo : 0 };
process.evalcx(code, obj);
/* TODO?
obj = { foo : 0, baz : 3 };
var baz = process.evalcx(code, obj);
assert.equal(1, obj.foo);
assert.equal(2, obj.bar);
*/
assert.equal(2, foo);
debug("call a function by reference");
function changeFoo () { foo = 100 }
process.evalcx("f()", { f : changeFoo });
assert.equal(foo, 100);
debug("modify an object by reference");
var f = { a : 1 };
process.evalcx("f.a = 2", { f : f });
assert.equal(f.a, 2);

Loading…
Cancel
Save