Upgrade V8 to 3.0.3

v0.7.4-release
Ryan Dahl, 14 years ago
parent commit 7d425a0a16
  1. deps/v8/ChangeLog (12)
  2. deps/v8/include/v8-preparser.h (7)
  3. deps/v8/include/v8-profiler.h (4)
  4. deps/v8/include/v8.h (29)
  5. deps/v8/preparser/preparser-process.cc (26)
  6. deps/v8/samples/samples.gyp (51)
  7. deps/v8/samples/shell.cc (6)
  8. deps/v8/src/allocation.h (3)
  9. deps/v8/src/api.cc (34)
  10. deps/v8/src/arm/codegen-arm.cc (8)
  11. deps/v8/src/arm/full-codegen-arm.cc (39)
  12. deps/v8/src/arm/ic-arm.cc (10)
  13. deps/v8/src/arm/lithium-arm.cc (54)
  14. deps/v8/src/arm/lithium-arm.h (59)
  15. deps/v8/src/arm/lithium-codegen-arm.cc (27)
  16. deps/v8/src/arm/lithium-codegen-arm.h (9)
  17. deps/v8/src/arm/macro-assembler-arm.cc (5)
  18. deps/v8/src/arm/stub-cache-arm.cc (8)
  19. deps/v8/src/array.js (13)
  20. deps/v8/src/assembler.cc (52)
  21. deps/v8/src/assembler.h (8)
  22. deps/v8/src/ast-inl.h (12)
  23. deps/v8/src/ast.cc (18)
  24. deps/v8/src/ast.h (69)
  25. deps/v8/src/bootstrapper.cc (25)
  26. deps/v8/src/checks.h (3)
  27. deps/v8/src/code-stubs.h (16)
  28. deps/v8/src/compiler.cc (26)
  29. deps/v8/src/d8.gyp (85)
  30. deps/v8/src/date.js (80)
  31. deps/v8/src/debug-debugger.js (36)
  32. deps/v8/src/execution.cc (1)
  33. deps/v8/src/extensions/experimental/i18n-extension.cc (263)
  34. deps/v8/src/extensions/experimental/i18n-extension.h (64)
  35. deps/v8/src/flag-definitions.h (4)
  36. deps/v8/src/full-codegen.cc (35)
  37. deps/v8/src/full-codegen.h (13)
  38. deps/v8/src/globals.h (2)
  39. deps/v8/src/heap-inl.h (2)
  40. deps/v8/src/heap-profiler.cc (35)
  41. deps/v8/src/heap-profiler.h (18)
  42. deps/v8/src/heap.cc (18)
  43. deps/v8/src/heap.h (7)
  44. deps/v8/src/hydrogen-instructions.cc (101)
  45. deps/v8/src/hydrogen-instructions.h (74)
  46. deps/v8/src/hydrogen.cc (1162)
  47. deps/v8/src/hydrogen.h (119)
  48. deps/v8/src/ia32/assembler-ia32.cc (41)
  49. deps/v8/src/ia32/assembler-ia32.h (14)
  50. deps/v8/src/ia32/builtins-ia32.cc (1)
  51. deps/v8/src/ia32/code-stubs-ia32.cc (274)
  52. deps/v8/src/ia32/code-stubs-ia32.h (25)
  53. deps/v8/src/ia32/codegen-ia32.cc (9)
  54. deps/v8/src/ia32/disasm-ia32.cc (68)
  55. deps/v8/src/ia32/full-codegen-ia32.cc (186)
  56. deps/v8/src/ia32/ic-ia32.cc (54)
  57. deps/v8/src/ia32/lithium-codegen-ia32.cc (164)
  58. deps/v8/src/ia32/lithium-codegen-ia32.h (11)
  59. deps/v8/src/ia32/lithium-ia32.cc (80)
  60. deps/v8/src/ia32/lithium-ia32.h (61)
  61. deps/v8/src/ia32/macro-assembler-ia32.cc (50)
  62. deps/v8/src/ia32/macro-assembler-ia32.h (28)
  63. deps/v8/src/ia32/stub-cache-ia32.cc (8)
  64. deps/v8/src/ic.cc (21)
  65. deps/v8/src/ic.h (5)
  66. deps/v8/src/json.js (105)
  67. deps/v8/src/lithium-allocator.cc (377)
  68. deps/v8/src/lithium-allocator.h (79)
  69. deps/v8/src/log-utils.cc (179)
  70. deps/v8/src/log-utils.h (64)
  71. deps/v8/src/log.cc (182)
  72. deps/v8/src/log.h (101)
  73. deps/v8/src/macros.py (7)
  74. deps/v8/src/mark-compact.cc (5)
  75. deps/v8/src/math.js (8)
  76. deps/v8/src/messages.js (1)
  77. deps/v8/src/mirror-debugger.js (4)
  78. deps/v8/src/objects-inl.h (21)
  79. deps/v8/src/objects.cc (3)
  80. deps/v8/src/objects.h (82)
  81. deps/v8/src/parser.cc (119)
  82. deps/v8/src/parser.h (33)
  83. deps/v8/src/platform-freebsd.cc (14)
  84. deps/v8/src/platform-linux.cc (29)
  85. deps/v8/src/platform-openbsd.cc (12)
  86. deps/v8/src/platform.h (3)
  87. deps/v8/src/preparser-api.cc (109)
  88. deps/v8/src/preparser.cc (1)
  89. deps/v8/src/profile-generator-inl.h (14)
  90. deps/v8/src/profile-generator.cc (359)
  91. deps/v8/src/profile-generator.h (41)
  92. deps/v8/src/regexp.js (62)
  93. deps/v8/src/runtime-profiler.cc (103)
  94. deps/v8/src/runtime.cc (127)
  95. deps/v8/src/runtime.h (5)
  96. deps/v8/src/runtime.js (20)
  97. deps/v8/src/scanner-base.cc (22)
  98. deps/v8/src/scanner-base.h (86)
  99. deps/v8/src/scanner.cc (365)
  100. deps/v8/src/scanner.h (191)

deps/v8/ChangeLog (12)

@@ -1,3 +1,15 @@
2010-12-17: Version 3.0.3

        Reapplied all changes for version 3.0.1.

        Improved debugger protocol for remote debugging.

        Added experimental support for using gyp to generate build files
        for V8.

        Fixed implementation of String::Write in the API (issue 975).


2010-12-15: Version 3.0.2

        Revert version 3.0.1 and patch 3.0.1.1.

deps/v8/include/v8-preparser.h (7)

@@ -99,13 +99,6 @@ class UnicodeInputStream {
// Returns the next Unicode code-point in the input, or a negative value when
// there is no more input in the stream.
virtual int32_t Next() = 0;
// Pushes a read character back into the stream, so that it will be the next
// to be read by Advance(). The character pushed back must be the most
// recently read character that hasn't already been pushed back (i.e., if
// pushing back more than one character, they must occur in the opposite order
// of the one they were read in).
virtual void PushBack(int32_t ch) = 0;
};
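With PushBack() gone, implementing this interface only requires producing code points in order. A minimal sketch of an implementation (my own illustration, not part of the commit; the class name is hypothetical, and a real embedder would decode UTF-8 where this serves plain ASCII):

#include <cstddef>
#include "v8-preparser.h"

// Hypothetical adapter: serves Next() from a NUL-terminated ASCII buffer.
class BufferInputStream : public v8::UnicodeInputStream {
 public:
  explicit BufferInputStream(const char* data) : data_(data), pos_(0) {}
  virtual int32_t Next() {
    // A negative return value signals end of input, per the contract above.
    if (data_[pos_] == '\0') return -1;
    return static_cast<int32_t>(data_[pos_++]);
  }
 private:
  const char* data_;
  size_t pos_;
};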

deps/v8/include/v8-profiler.h (4)

@@ -245,7 +245,6 @@ class V8EXPORT HeapGraphPath {
class V8EXPORT HeapGraphNode {
public:
enum Type {
kInternal = 0, // For compatibility, will be removed.
kHidden = 0, // Hidden node, may be filtered when shown to user.
kArray = 1, // An array of elements.
kString = 2, // A string.
@@ -413,7 +412,8 @@ class V8EXPORT HeapProfiler {
*/
static const HeapSnapshot* TakeSnapshot(
Handle<String> title,
HeapSnapshot::Type type = HeapSnapshot::kFull);
HeapSnapshot::Type type = HeapSnapshot::kFull,
ActivityControl* control = NULL);
};

deps/v8/include/v8.h (29)

@@ -992,18 +992,23 @@ class String : public Primitive {
* the contents of the string and the NULL terminator into the
* buffer.
*
* WriteUtf8 will not write partial UTF-8 sequences, preferring to stop
* before the end of the buffer.
*
* Copies up to length characters into the output buffer.
* Only null-terminates if there is enough space in the buffer.
*
* \param buffer The buffer into which the string will be copied.
* \param start The starting position within the string at which
* copying begins.
* \param length The number of bytes to copy from the string.
* \param length The number of characters to copy from the string. For
* WriteUtf8 the number of bytes in the buffer.
* \param nchars_ref The number of characters written, can be NULL.
* \param hints Various hints that might affect performance of this or
* subsequent operations.
* \return The number of bytes copied to the buffer
* excluding the NULL terminator.
* \return The number of characters copied to the buffer excluding the null
* terminator. For WriteUtf8: The number of bytes copied to the buffer
* including the null terminator.
*/
enum WriteHints {
NO_HINTS = 0,
@@ -3281,6 +3286,24 @@ class V8EXPORT OutputStream { // NOLINT
};
/**
* An interface for reporting progress and controlling long-running
* activities.
*/
class V8EXPORT ActivityControl { // NOLINT
public:
enum ControlOption {
kContinue = 0,
kAbort = 1
};
virtual ~ActivityControl() {}
/**
* Notify about current progress. The activity can be stopped by
* returning kAbort as the callback result.
*/
virtual ControlOption ReportProgressValue(int done, int total) = 0;
};
// --- I m p l e m e n t a t i o n ---
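The new ActivityControl hook pairs with the extra HeapProfiler::TakeSnapshot parameter added in v8-profiler.h above. A hedged usage sketch (the interface and the TakeSnapshot signature are from this commit; the logging and the abort budget are invented for illustration):

#include <cstdio>
#include <v8.h>
#include <v8-profiler.h>

// Reports snapshot progress and aborts once an arbitrary budget is spent.
class ProgressLogger : public v8::ActivityControl {
 public:
  virtual ControlOption ReportProgressValue(int done, int total) {
    std::printf("heap snapshot: %d/%d\n", done, total);
    return (done > 100000) ? kAbort : kContinue;  // made-up budget
  }
};

// Usage, inside an entered context:
//   ProgressLogger control;
//   const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
//       v8::String::New("dump"), v8::HeapSnapshot::kFull, &control);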

deps/v8/preparser/preparser-process.cc (26)

@@ -127,7 +127,7 @@ uint32_t ReadUInt32(FILE* source, bool* ok) {
bool ReadBuffer(FILE* source, void* buffer, size_t length) {
size_t actually_read = fread(buffer, 1, length, stdin);
size_t actually_read = fread(buffer, 1, length, source);
return (actually_read == length);
}
@@ -150,22 +150,25 @@ class ScopedPointer {
};
// Preparse stdin and output result on stdout.
int PreParseIO() {
// Preparse input and output result on stdout.
int PreParseIO(FILE* input) {
fprintf(stderr, "LOG: Enter parsing loop\n");
bool ok = true;
uint32_t length = ReadUInt32(stdin, &ok);
uint32_t length = ReadUInt32(input, &ok);
fprintf(stderr, "LOG: Input length: %d\n", length);
if (!ok) return kErrorReading;
ScopedPointer<uint8_t> buffer(new uint8_t[length]);
if (!ReadBuffer(stdin, *buffer, length)) {
if (!ReadBuffer(input, *buffer, length)) {
return kErrorReading;
}
UTF8InputStream input_buffer(*buffer, static_cast<size_t>(length));
v8::PreParserData data =
v8::Preparse(&input_buffer, 64 * sizeof(void*)); // NOLINT
v8::Preparse(&input_buffer, 64 * 1024 * sizeof(void*)); // NOLINT
if (data.stack_overflow()) {
fprintf(stderr, "LOG: Stack overflow\n");
fflush(stderr);
// Report stack overflow error/no-preparser-data.
WriteUInt32(stdout, 0, &ok);
if (!ok) return kErrorWriting;
@@ -173,6 +176,8 @@ int PreParseIO() {
}
uint32_t size = data.size();
fprintf(stderr, "LOG: Success, data size: %u\n", size);
fflush(stderr);
WriteUInt32(stdout, size, &ok);
if (!ok) return kErrorWriting;
if (!WriteBuffer(stdout, data.data(), size)) {
@@ -185,10 +190,17 @@ int PreParseIO() {
int main(int argc, char* argv[]) {
FILE* input = stdin;
if (argc > 1) {
char* arg = argv[1];
input = fopen(arg, "rb");
if (input == NULL) return EXIT_FAILURE;
}
int status = 0;
do {
status = v8::internal::PreParseIO();
status = v8::internal::PreParseIO(input);
} while (status == 0);
fprintf(stderr, "EXIT: Failure %d\n", status);
fflush(stderr);
return EXIT_FAILURE;
}
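The loop above now works against any FILE*, so the length-prefixed framing (a 32-bit length, then that many bytes of UTF-8 source; the reply is a 32-bit size plus the preparse data, with size 0 signalling stack overflow) can also be driven from a file or a pipe. A hedged driver sketch; the raw host-order uint32 framing is my assumption about what ReadUInt32/WriteUInt32 do, and the plumbing that connects the FILE* to the process is omitted:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Writes one length-prefixed request to a FILE* connected to
// preparser-process. Returns false on any short write.
bool SendSource(FILE* out, const char* source) {
  uint32_t length = static_cast<uint32_t>(strlen(source));
  if (fwrite(&length, sizeof(length), 1, out) != 1) return false;
  if (fwrite(source, 1, length, out) != length) return false;
  fflush(out);
  return true;
}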

deps/v8/samples/samples.gyp (51)

@@ -0,0 +1,51 @@
# Copyright 2010 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'targets': [
{
'target_name': 'shell',
'type': 'executable',
'dependencies': [
'../tools/gyp/v8.gyp:v8',
],
'sources': [
'shell.cc',
],
},
{
'target_name': 'process',
'type': 'executable',
'dependencies': [
'../tools/gyp/v8.gyp:v8',
],
'sources': [
'process.cc',
],
}
],
}

deps/v8/samples/shell.cc (6)

@@ -45,7 +45,6 @@ v8::Handle<v8::Value> Quit(const v8::Arguments& args);
v8::Handle<v8::Value> Version(const v8::Arguments& args);
v8::Handle<v8::String> ReadFile(const char* name);
void ReportException(v8::TryCatch* handler);
void SetFlagsFromString(const char* flags);
int RunMain(int argc, char* argv[]) {
@@ -345,8 +344,3 @@ void ReportException(v8::TryCatch* try_catch) {
}
}
}
void SetFlagsFromString(const char* flags) {
v8::V8::SetFlagsFromString(flags, strlen(flags));
}

deps/v8/src/allocation.h (3)

@@ -28,6 +28,9 @@
#ifndef V8_ALLOCATION_H_
#define V8_ALLOCATION_H_
#include "checks.h"
#include "globals.h"
namespace v8 {
namespace internal {

deps/v8/src/api.cc (34)

@@ -1165,14 +1165,22 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
unibrow::Utf8InputBuffer<> buf(input, length);
return i::ParserApi::PreParse(i::Handle<i::String>(), &buf, NULL);
i::Utf8ToUC16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
return i::ParserApi::PreParse(&stream, NULL);
}
ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
i::Handle<i::String> str = Utils::OpenHandle(*source);
return i::ParserApi::PreParse(str, NULL, NULL);
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUC16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
return i::ParserApi::PreParse(&stream, NULL);
} else {
i::GenericStringUC16CharacterStream stream(str, 0, str->length());
return i::ParserApi::PreParse(&stream, NULL);
}
}
@@ -3119,14 +3127,15 @@ int String::Write(uint16_t* buffer,
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlatten();
}
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
int end = start + length;
if ((length == -1) || (length > str->length() - start))
end = str->length();
if (end < 0) return 0;
i::String::WriteToFlat(*str, buffer, start, end);
if (length == -1 || end < length)
buffer[end] = '\0';
return end;
if (length == -1 || end - start < length) {
buffer[end - start] = '\0';
}
return end - start;
}
@@ -4939,7 +4948,8 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
HeapSnapshot::Type type) {
HeapSnapshot::Type type,
ActivityControl* control) {
IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
switch (type) {
@@ -4953,7 +4963,8 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
UNREACHABLE();
}
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title), internal_type));
i::HeapProfiler::TakeSnapshot(
*Utils::OpenHandle(*title), internal_type, control));
}
#endif // ENABLE_LOGGING_AND_PROFILING
@@ -4968,6 +4979,7 @@ void Testing::SetStressRunType(Testing::StressType type) {
}
int Testing::GetStressRuns() {
if (internal::FLAG_stress_runs != 0) return internal::FLAG_stress_runs;
#ifdef DEBUG
// In debug mode the code runs much slower so stressing will only make two
// runs.
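The rewritten String::Write above makes the copy window [start, start + length), clamped to the string's end, and returns the number of characters actually copied (this is the fix for issue 975 noted in the ChangeLog). A small sketch of the fixed semantics; the example string and indices are invented:

#include <v8.h>

// For str == "abcdef", copies 'c', 'd', 'e' into buf and returns 3.
// No NUL terminator is written because the buffer is exactly filled.
int CopyWindow(v8::Handle<v8::String> str, uint16_t* buf) {
  return str->Write(buf, /* start */ 2, /* length */ 3);
}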

deps/v8/src/arm/codegen-arm.cc (8)

@@ -5592,6 +5592,12 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
__ tst(tmp2, Operand(kSmiTagMask));
deferred->Branch(nz);
// Check that both indices are valid.
__ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
__ cmp(tmp2, index1);
__ cmp(tmp2, index2, hi);
deferred->Branch(ls);
// Bring the offsets into the fixed array in tmp1 into index1 and
// index2.
__ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -6463,7 +6469,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::INSTANCEOF: {
Load(left);
Load(right);
InstanceofStub stub;
InstanceofStub stub(InstanceofStub::kNoFlags);
frame_->CallStub(&stub, 2);
// At this point if instanceof succeeded then r0 == 0.
__ tst(r0, Operand(r0));

deps/v8/src/arm/full-codegen-arm.cc (39)

@@ -206,6 +206,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
void FullCodeGenerator::ClearAccumulator() {
__ mov(r0, Operand(Smi::FromInt(0)));
}
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
@@ -890,7 +895,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&update_each);
__ mov(result_register(), r3);
// Perform the assignment as if via '='.
EmitAssignment(stmt->each());
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->AssignmentId());
}
// Generate code for the body of the loop.
Visit(stmt->body());
@@ -1444,7 +1451,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// For property compound assignments we need another deoptimization
// point after the property load.
if (property != NULL) {
PrepareForBailoutForId(expr->compound_bailout_id(), TOS_REG);
PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
}
Token::Value op = expr->binary_op();
@@ -1487,6 +1494,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@@ -1536,7 +1545,7 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@@ -1584,6 +1593,8 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
}
PrepareForBailoutForId(bailout_ast_id, TOS_REG);
context()->Plug(r0);
}
@@ -1657,8 +1668,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
__ bind(&done);
}
context()->Plug(result_register());
}
@@ -1701,10 +1710,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
context()->DropAndPlug(1, r0);
} else {
context()->Plug(r0);
__ Drop(1);
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
}
@@ -1745,10 +1754,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ push(ip);
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(r0);
context()->DropAndPlug(1, r0);
} else {
context()->Plug(r0);
__ Drop(1);
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
}
@@ -3200,6 +3209,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(r0);
}
// For all contexts except EffectConstant We have the result on
// top of the stack.
@@ -3209,6 +3220,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(r0);
}
break;
case NAMED_PROPERTY: {
@@ -3216,6 +3229,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(r1);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3230,6 +3244,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(r2); // Receiver.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@@ -3415,7 +3430,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
InstanceofStub stub;
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
// The stub returns 0 for true.

deps/v8/src/arm/ic-arm.cc (10)

@@ -2360,10 +2360,8 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
#ifdef DEBUG
State previous_state = GetState();
#endif
State state = TargetState(x, y);
State state = TargetState(previous_state, false, x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
rewritten = stub.GetCode();
@@ -2383,6 +2381,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
#endif
}
void PatchInlinedSmiCode(Address address) {
UNIMPLEMENTED();
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

deps/v8/src/arm/lithium-arm.cc (54)

@@ -206,6 +206,13 @@ void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
}
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
stream->Add("if is_object(");
input()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
stream->Add("if is_smi(");
input()->PrintTo(stream);
@@ -460,12 +467,6 @@ int LChunk::NearestGapPos(int index) const {
}
int LChunk::NearestNextGapPos(int index) const {
while (!IsGapAt(index)) index++;
return index;
}
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
}
@@ -1244,6 +1245,17 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
temp,
first_id,
second_id);
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
temp1,
temp2,
first_id,
second_id);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@@ -1347,7 +1359,7 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
MathFunctionId op = instr->op();
BuiltinFunctionId op = instr->op();
LOperand* input = UseRegisterAtStart(instr->value());
LInstruction* result = new LUnaryMathOperation(input);
switch (op) {
@@ -1357,6 +1369,12 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathPowHalf:
Abort("MathPowHalf LUnaryMathOperation not implemented");
return NULL;
case kMathLog:
Abort("MathLog LUnaryMathOperation not implemented");
return NULL;
default:
UNREACHABLE();
return NULL;
@@ -1554,6 +1572,12 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Abort("LPower instruction not implemented on ARM");
return NULL;
}
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
Token::Value op = instr->token();
if (instr->left()->representation().IsInteger32()) {
@@ -1594,6 +1618,14 @@ LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsObject(value, TempRegister()));
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseAtStart(instr->value());
@@ -1688,11 +1720,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (from.IsDouble()) {
if (to.IsTagged()) {
LOperand* value = UseRegister(instr->value());
LOperand* temp = TempRegister();
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
// Make sure that temp and result_temp are different registers.
// Make sure that the temp and result_temp registers are
// different.
LUnallocated* result_temp = TempRegister();
LInstruction* result = new LNumberTagD(value, temp);
LInstruction* result = new LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
} else {

deps/v8/src/arm/lithium-arm.h (59)

@@ -121,6 +121,8 @@ class Translation;
// LInteger32ToDouble
// LIsNull
// LIsNullAndBranch
// LIsObject
// LIsObjectAndBranch
// LIsSmi
// LIsSmiAndBranch
// LLoadNamedField
@@ -203,6 +205,8 @@ class Translation;
V(Integer32ToDouble) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(HasInstanceType) \
@@ -665,7 +669,7 @@ class LUnaryMathOperation: public LUnaryOperation {
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
virtual void PrintDataTo(StringStream* stream) const;
MathFunctionId op() const { return hydrogen()->op(); }
BuiltinFunctionId op() const { return hydrogen()->op(); }
};
@@ -742,6 +746,48 @@ class LIsNullAndBranch: public LIsNull {
};
class LIsObject: public LUnaryOperation {
public:
LIsObject(LOperand* value, LOperand* temp)
: LUnaryOperation(value), temp_(temp) {}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
LOperand* temp() const { return temp_; }
private:
LOperand* temp_;
};
class LIsObjectAndBranch: public LIsObject {
public:
LIsObjectAndBranch(LOperand* value,
LOperand* temp,
LOperand* temp2,
int true_block_id,
int false_block_id)
: LIsObject(value, temp),
temp2_(temp2),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
virtual void PrintDataTo(StringStream* stream) const;
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
LOperand* temp2() const { return temp2_; }
private:
LOperand* temp2_;
int true_block_id_;
int false_block_id_;
};
class LIsSmi: public LUnaryOperation {
public:
explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
@@ -1395,15 +1441,17 @@ class LNumberTagI: public LUnaryOperation {
class LNumberTagD: public LUnaryOperation {
public:
explicit LNumberTagD(LOperand* value, LOperand* temp)
: LUnaryOperation(value), temp_(temp) { }
LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2)
: LUnaryOperation(value), temp1_(temp1), temp2_(temp2) { }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
LOperand* temp() const { return temp_; }
LOperand* temp1() const { return temp1_; }
LOperand* temp2() const { return temp2_; }
private:
LOperand* temp_;
LOperand* temp1_;
LOperand* temp2_;
};
@@ -1887,7 +1935,6 @@ class LChunk: public ZoneObject {
LGap* GetGapAt(int index) const;
bool IsGapAt(int index) const;
int NearestGapPos(int index) const;
int NearestNextGapPos(int index) const;
void MarkEmptyBlocks();
const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
LLabel* GetLabel(int block_id) const {

deps/v8/src/arm/lithium-codegen-arm.cc (27)

@@ -136,7 +136,7 @@ bool LCodeGen::GeneratePrologue() {
Label loop;
__ bind(&loop);
__ push(r2);
__ sub(r0, r0, Operand(1));
__ sub(r0, r0, Operand(1), SetCC);
__ b(ne, &loop);
} else {
__ sub(sp, sp, Operand(slots * kPointerSize));
@@ -1213,6 +1213,26 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
}
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Register temp2,
Label* is_not_object,
Label* is_object) {
Abort("EmitIsObject unimplemented.");
return ne;
}
void LCodeGen::DoIsObject(LIsObject* instr) {
Abort("DoIsObject unimplemented.");
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Abort("DoIsObjectAndBranch unimplemented.");
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Register result = ToRegister(instr->result());
@@ -1733,13 +1753,14 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DoubleRegister input_reg = ToDoubleRegister(instr->input());
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
Register scratch = r9;
DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(reg, tmp, ip, scratch, deferred->entry());
__ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
} else {
__ jmp(deferred->entry());
}

deps/v8/src/arm/lithium-codegen-arm.h (9)

@@ -208,6 +208,15 @@ class LCodeGen BASE_EMBEDDED {
Condition EmitTypeofIs(Label* true_label, Label* false_label,
Register input, Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
Register temp2,
Label* is_not_object,
Label* is_object);
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;

deps/v8/src/arm/macro-assembler-arm.cc (5)

@@ -1060,9 +1060,14 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
return;
}
// Assert that the register arguments are different and that none of
// them are ip. ip is used explicitly in the code generated below.
ASSERT(!result.is(scratch1));
ASSERT(!result.is(scratch2));
ASSERT(!scratch1.is(scratch2));
ASSERT(!result.is(ip));
ASSERT(!scratch1.is(ip));
ASSERT(!scratch2.is(ip));
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDM.

deps/v8/src/arm/stub-cache-arm.cc (8)

@@ -2112,8 +2112,8 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// -- lr : return address
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
const int id = function_info->custom_call_generator_id();
if (function_info->HasBuiltinFunctionId()) {
BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, NULL, function, name);
Object* result;
@@ -2323,8 +2323,8 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
const int id = function_info->custom_call_generator_id();
if (function_info->HasBuiltinFunctionId()) {
BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, cell, function, name);
Object* result;

deps/v8/src/array.js (13)

@@ -159,9 +159,11 @@ function Join(array, length, separator, convert) {
}
function ConvertToString(e) {
if (e == null) return '';
else return ToString(e);
function ConvertToString(x) {
if (IS_STRING(x)) return x;
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x));
}
@@ -365,14 +367,13 @@ function ArrayJoin(separator) {
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
separator = ToString(separator);
separator = NonStringToString(separator);
}
var result = %_FastAsciiArrayJoin(this, separator);
if (!IS_UNDEFINED(result)) return result;
var length = TO_UINT32(this.length);
return Join(this, length, separator, ConvertToString);
return Join(this, TO_UINT32(this.length), separator, ConvertToString);
}

deps/v8/src/assembler.cc (52)

@@ -66,6 +66,7 @@ namespace internal {
const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
const double DoubleConstant::negative_infinity = -V8_INFINITY;
// -----------------------------------------------------------------------------
@@ -722,6 +723,12 @@ ExternalReference ExternalReference::address_of_one_half() {
}
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::negative_infinity)));
}
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state() {
@@ -793,6 +800,51 @@ static double mod_two_doubles(double x, double y) {
}
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
// S. Warren, Jr., figure 11-6, page 213.
double power_double_int(double x, int y) {
double m = (y < 0) ? 1 / x : x;
unsigned n = (y < 0) ? -y : y;
double p = 1;
while (n != 0) {
if ((n & 1) != 0) p *= m;
m *= m;
if ((n & 2) != 0) p *= m;
m *= m;
n >>= 2;
}
return p;
}
double power_double_double(double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
return power_double_int(x, y_int); // Returns 1.0 for exponent 0.
}
if (!isinf(x)) {
if (y == 0.5) return sqrt(x);
if (y == -0.5) return 1.0 / sqrt(x);
}
if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
return OS::nan_value();
}
return pow(x, y);
}
ExternalReference ExternalReference::power_double_double_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double)));
}
ExternalReference ExternalReference::power_double_int_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int)));
}
static int native_compare_doubles(double y, double x) {
if (x == y) return EQUAL;
return x < y ? LESS : GREATER;
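power_double_int consumes two exponent bits per loop iteration, which is why both n & 1 and n & 2 are tested before n >>= 2. A standalone copy of the loop for experimentation; the main() harness is mine, not part of the commit:

#include <cmath>
#include <cstdio>

double PowerDoubleIntSketch(double x, int y) {
  double m = (y < 0) ? 1 / x : x;
  unsigned n = (y < 0) ? -y : y;
  double p = 1;
  while (n != 0) {
    if ((n & 1) != 0) p *= m;  // low bit of this pair
    m *= m;
    if ((n & 2) != 0) p *= m;  // high bit of this pair
    m *= m;
    n >>= 2;                   // advance two bits at a time
  }
  return p;
}

int main() {
  // 10 = 0b1010, so p picks up x^2 and x^8: 4 * 256 = 1024.
  std::printf("%g vs %g\n", PowerDoubleIntSketch(2.0, 10), std::pow(2.0, 10));
  return 0;
}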

deps/v8/src/assembler.h (8)

@@ -50,6 +50,7 @@ class DoubleConstant: public AllStatic {
public:
static const double min_int;
static const double one_half;
static const double negative_infinity;
};
@@ -539,6 +540,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference double_fp_operation(Token::Value operation);
static ExternalReference compare_doubles();
static ExternalReference power_double_double_function();
static ExternalReference power_double_int_function();
static ExternalReference handle_scope_next_address();
static ExternalReference handle_scope_limit_address();
@@ -549,6 +552,7 @@ class ExternalReference BASE_EMBEDDED {
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
static ExternalReference address_of_negative_infinity();
Address address() const {return reinterpret_cast<Address>(address_);}
@@ -710,6 +714,10 @@ static inline int NumberOfBitsSet(uint32_t x) {
return num_bits_set;
}
// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_double_int(double x, int y);
double power_double_double(double x, double y);
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_

deps/v8/src/ast-inl.h (12)

@@ -71,14 +71,16 @@ DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
cond_(NULL),
condition_position_(-1),
next_id_(GetNextId()) {
continue_id_(GetNextId()),
back_edge_id_(GetNextId()) {
}
WhileStatement::WhileStatement(ZoneStringList* labels)
: IterationStatement(labels),
cond_(NULL),
may_have_function_literal_(true) {
may_have_function_literal_(true),
body_id_(GetNextId()) {
}
@@ -89,12 +91,14 @@ ForStatement::ForStatement(ZoneStringList* labels)
next_(NULL),
may_have_function_literal_(true),
loop_variable_(NULL),
next_id_(GetNextId()) {
continue_id_(GetNextId()),
body_id_(GetNextId()) {
}
ForInStatement::ForInStatement(ZoneStringList* labels)
: IterationStatement(labels), each_(NULL), enumerable_(NULL) {
: IterationStatement(labels), each_(NULL), enumerable_(NULL),
assignment_id_(GetNextId()) {
}

deps/v8/src/ast.cc (18)

@@ -32,6 +32,7 @@
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -125,17 +126,18 @@ Assignment::Assignment(Token::Value op,
target_(target),
value_(value),
pos_(pos),
compound_bailout_id_(kNoNumber),
binary_operation_(NULL),
compound_load_id_(kNoNumber),
assignment_id_(GetNextId()),
block_start_(false),
block_end_(false),
is_monomorphic_(false),
receiver_types_(NULL) {
ASSERT(Token::IsAssignmentOp(op));
binary_operation_ = is_compound()
? new BinaryOperation(binary_op(), target, value, pos + 1)
: NULL;
if (is_compound()) {
compound_bailout_id_ = GetNextId();
binary_operation_ =
new BinaryOperation(binary_op(), target, value, pos + 1);
compound_load_id_ = GetNextId();
}
}
@@ -558,16 +560,18 @@ void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
static bool CallWithoutIC(Handle<JSFunction> target, int arity) {
SharedFunctionInfo* info = target->shared();
if (target->NeedsArgumentsAdaption()) {
// If the number of formal parameters of the target function
// does not match the number of arguments we're passing, we
// don't want to deal with it.
return target->shared()->formal_parameter_count() == arity;
return info->formal_parameter_count() == arity;
} else {
// If the target doesn't need arguments adaption, we can call
// it directly, but we avoid doing so if it has a custom call
// generator, because that is likely to generate better code.
return !target->shared()->HasCustomCallGenerator();
return !info->HasBuiltinFunctionId() ||
!CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
}
}

deps/v8/src/ast.h (69)

@@ -435,7 +435,6 @@ class IterationStatement: public BreakableStatement {
virtual IterationStatement* AsIterationStatement() { return this; }
Statement* body() const { return body_; }
void set_body(Statement* stmt) { body_ = stmt; }
// Bailout support.
int OsrEntryId() const { return osr_entry_id_; }
@@ -477,12 +476,14 @@ class DoWhileStatement: public IterationStatement {
void set_condition_position(int pos) { condition_position_ = pos; }
// Bailout support.
virtual int ContinueId() const { return next_id_; }
virtual int ContinueId() const { return continue_id_; }
int BackEdgeId() const { return back_edge_id_; }
private:
Expression* cond_;
int condition_position_;
int next_id_;
int continue_id_;
int back_edge_id_;
};
@@ -507,11 +508,13 @@ class WhileStatement: public IterationStatement {
// Bailout support.
virtual int ContinueId() const { return EntryId(); }
int BodyId() const { return body_id_; }
private:
Expression* cond_;
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
int body_id_;
};
@@ -532,11 +535,8 @@ class ForStatement: public IterationStatement {
}
Statement* init() const { return init_; }
void set_init(Statement* stmt) { init_ = stmt; }
Expression* cond() const { return cond_; }
void set_cond(Expression* expr) { cond_ = expr; }
Statement* next() const { return next_; }
void set_next(Statement* stmt) { next_ = stmt; }
bool may_have_function_literal() const {
return may_have_function_literal_;
@@ -546,7 +546,8 @@ }
}
// Bailout support.
virtual int ContinueId() const { return next_id_; }
virtual int ContinueId() const { return continue_id_; }
int BodyId() const { return body_id_; }
bool is_fast_smi_loop() { return loop_variable_ != NULL; }
Variable* loop_variable() { return loop_variable_; }
@@ -559,7 +560,8 @@
// True if there is a function literal subexpression in the condition.
bool may_have_function_literal_;
Variable* loop_variable_;
int next_id_;
int continue_id_;
int body_id_;
};
@@ -579,11 +581,13 @@ class ForInStatement: public IterationStatement {
Expression* enumerable() const { return enumerable_; }
// Bailout support.
int AssignmentId() const { return assignment_id_; }
virtual int ContinueId() const { return EntryId(); }
private:
Expression* each_;
Expression* enumerable_;
int assignment_id_;
};
@@ -737,7 +741,10 @@ class IfStatement: public Statement {
Statement* else_statement)
: condition_(condition),
then_statement_(then_statement),
else_statement_(else_statement) { }
else_statement_(else_statement),
then_id_(GetNextId()),
else_id_(GetNextId()) {
}
DECLARE_NODE_TYPE(IfStatement)
@@ -748,14 +755,17 @@ class IfStatement: public Statement {
Expression* condition() const { return condition_; }
Statement* then_statement() const { return then_statement_; }
void set_then_statement(Statement* stmt) { then_statement_ = stmt; }
Statement* else_statement() const { return else_statement_; }
void set_else_statement(Statement* stmt) { else_statement_ = stmt; }
int ThenId() const { return then_id_; }
int ElseId() const { return else_id_; }
private:
Expression* condition_;
Statement* then_statement_;
Statement* else_statement_;
int then_id_;
int else_id_;
};
@@ -1380,6 +1390,9 @@ class BinaryOperation: public Expression {
int pos)
: op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
ASSERT(Token::IsBinaryOp(op));
right_id_ = (op == Token::AND || op == Token::OR)
? GetNextId()
: AstNode::kNoNumber;
}
// Create the binary operation corresponding to a compound assignment.
@@ -1400,12 +1413,18 @@ class BinaryOperation: public Expression {
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiOnly() const { return is_smi_only_; }
// Bailout support.
int RightId() const { return right_id_; }
private:
Token::Value op_;
Expression* left_;
Expression* right_;
int pos_;
bool is_smi_only_;
// The short-circuit logical operations have an AST ID for their
// right-hand subexpression.
int right_id_;
};
@@ -1432,7 +1451,9 @@ class IncrementOperation: public Expression {
class CountOperation: public Expression {
public:
CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
: is_prefix_(is_prefix), increment_(increment), pos_(pos) { }
: is_prefix_(is_prefix), increment_(increment), pos_(pos),
assignment_id_(GetNextId()) {
}
DECLARE_NODE_TYPE(CountOperation)
@@ -1452,10 +1473,14 @@ class CountOperation: public Expression {
virtual bool IsInlineable() const;
// Bailout support.
int AssignmentId() const { return assignment_id_; }
private:
bool is_prefix_;
IncrementOperation* increment_;
int pos_;
int assignment_id_;
};
@@ -1524,7 +1549,10 @@ class Conditional: public Expression {
then_expression_(then_expression),
else_expression_(else_expression),
then_expression_position_(then_expression_position),
else_expression_position_(else_expression_position) { }
else_expression_position_(else_expression_position),
then_id_(GetNextId()),
else_id_(GetNextId()) {
}
DECLARE_NODE_TYPE(Conditional)
@@ -1534,8 +1562,11 @@ class Conditional: public Expression {
Expression* then_expression() const { return then_expression_; }
Expression* else_expression() const { return else_expression_; }
int then_expression_position() { return then_expression_position_; }
int else_expression_position() { return else_expression_position_; }
int then_expression_position() const { return then_expression_position_; }
int else_expression_position() const { return else_expression_position_; }
int ThenId() const { return then_id_; }
int ElseId() const { return else_id_; }
private:
Expression* condition_;
@@ -1543,6 +1574,8 @@ class Conditional: public Expression {
Expression* else_expression_;
int then_expression_position_;
int else_expression_position_;
int then_id_;
int else_id_;
};
@@ -1585,7 +1618,8 @@ class Assignment: public Expression {
}
// Bailout support.
int compound_bailout_id() const { return compound_bailout_id_; }
int CompoundLoadId() const { return compound_load_id_; }
int AssignmentId() const { return assignment_id_; }
private:
Token::Value op_;
@@ -1593,7 +1627,8 @@ class Assignment: public Expression {
Expression* value_;
int pos_;
BinaryOperation* binary_operation_;
int compound_bailout_id_;
int compound_load_id_;
int assignment_id_;
bool block_start_;
bool block_end_;

deps/v8/src/bootstrapper.cc (25)

@@ -38,7 +38,6 @@
#include "natives.h"
#include "objects-visiting.h"
#include "snapshot.h"
#include "stub-cache.h"
#include "extensions/externalize-string-extension.h"
#include "extensions/gc-extension.h"
@@ -234,7 +233,7 @@ class Genesis BASE_EMBEDDED {
// Used for creating a context from scratch.
void InstallNativeFunctions();
bool InstallNatives();
void InstallCustomCallGenerators();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
void InitializeNormalizedMapCaches();
// Used both for deserialized and from-scratch contexts to add the extensions
@@ -1270,7 +1269,7 @@ bool Genesis::InstallNatives() {
global_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
InstallCustomCallGenerators();
InstallBuiltinFunctionIds();
// Install Function.prototype.call and apply.
{ Handle<String> key = Factory::function_class_symbol();
@@ -1369,7 +1368,7 @@ bool Genesis::InstallNatives() {
}
static Handle<JSObject> ResolveCustomCallGeneratorHolder(
static Handle<JSObject> ResolveBuiltinIdHolder(
Handle<Context> global_context,
const char* holder_expr) {
Handle<GlobalObject> global(global_context->global());
@@ -1387,9 +1386,9 @@ static Handle<JSObject> ResolveCustomCallGeneratorHolder(
}
static void InstallCustomCallGenerator(Handle<JSObject> holder,
static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
int id) {
BuiltinFunctionId id) {
Handle<String> name = Factory::LookupAsciiSymbol(function_name);
Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
Handle<JSFunction> function(JSFunction::cast(function_object));
@@ -1397,17 +1396,17 @@ static void InstallCustomCallGenerator(Handle<JSObject> holder,
}
void Genesis::InstallCustomCallGenerators() {
void Genesis::InstallBuiltinFunctionIds() {
HandleScope scope;
#define INSTALL_CALL_GENERATOR(holder_expr, fun_name, name) \
#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
{ \
Handle<JSObject> holder = ResolveCustomCallGeneratorHolder( \
Handle<JSObject> holder = ResolveBuiltinIdHolder( \
global_context(), #holder_expr); \
const int id = CallStubCompiler::k##name##CallGenerator; \
InstallCustomCallGenerator(holder, #fun_name, id); \
BuiltinFunctionId id = k##name; \
InstallBuiltinFunctionId(holder, #fun_name, id); \
}
CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
#undef INSTALL_CALL_GENERATOR
FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)
#undef INSTALL_BUILTIN_ID
}
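For a single FUNCTIONS_WITH_ID_LIST entry, the macro wires a builtin id onto an already-installed native function. Assuming the list contains an entry like V(Math, pow, MathPow) (the list itself lives in objects.h and is not shown in this diff, so the entry is an assumption), the expansion is roughly:

{
  // Macro expansion sketch, not compilable on its own:
  Handle<JSObject> holder = ResolveBuiltinIdHolder(global_context(), "Math");
  BuiltinFunctionId id = kMathPow;
  InstallBuiltinFunctionId(holder, "pow", id);
}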

deps/v8/src/checks.h (3)

@@ -30,6 +30,7 @@
#include <string.h>
#include "../include/v8stdint.h"
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -231,6 +232,8 @@ static inline void CheckNonEqualsHelper(const char* file,
#define CHECK_GT(a, b) CHECK((a) > (b))
#define CHECK_GE(a, b) CHECK((a) >= (b))
#define CHECK_LT(a, b) CHECK((a) < (b))
#define CHECK_LE(a, b) CHECK((a) <= (b))
// This is inspired by the static assertion facility in boost. This

deps/v8/src/code-stubs.h (16)

@@ -47,6 +47,7 @@ namespace internal {
V(Compare) \
V(CompareIC) \
V(MathPow) \
V(TranscendentalCacheSSE2) \
V(RecordWrite) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
@@ -325,13 +326,24 @@ class FastCloneShallowArrayStub : public CodeStub {
class InstanceofStub: public CodeStub {
public:
InstanceofStub() { }
enum Flags {
kNoFlags = 0,
kArgsInRegisters = 1 << 0
};
explicit InstanceofStub(Flags flags) : flags_(flags) { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return Instanceof; }
int MinorKey() { return 0; }
int MinorKey() { return args_in_registers() ? 1 : 0; }
bool args_in_registers() {
return (flags_ & kArgsInRegisters) != 0;
}
Flags flags_;
};

deps/v8/src/compiler.cc (26)

@@ -116,13 +116,26 @@ static bool AlwaysFullCompiler() {
static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
if (!FLAG_trace_opt) return;
double ms = static_cast<double>(OS::Ticks() - start) / 1000;
if (FLAG_trace_opt) {
PrintF("[optimizing: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
PrintF(" - took %0.3f ms]\n", ms);
}
if (FLAG_trace_opt_stats) {
static double compilation_time = 0.0;
static int compiled_functions = 0;
static int code_size = 0;
compilation_time += ms;
compiled_functions++;
code_size += function->shared()->SourceSize();
PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
compiled_functions,
code_size,
compilation_time);
}
}
@@ -461,7 +474,14 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
ScriptDataImpl* pre_data = input_pre_data;
if (pre_data == NULL
&& source_length >= FLAG_min_preparse_length) {
pre_data = ParserApi::PartialPreParse(source, NULL, extension);
if (source->IsExternalTwoByteString()) {
ExternalTwoByteStringUC16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
pre_data = ParserApi::PartialPreParse(&stream, extension);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
pre_data = ParserApi::PartialPreParse(&stream, extension);
}
}
// Create a script object describing the script to be compiled.

deps/v8/src/d8.gyp (85)

@@ -0,0 +1,85 @@
# Copyright 2010 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'targets': [
{
'target_name': 'd8',
'type': 'executable',
'dependencies': [
'd8_js2c#host',
'../tools/gyp/v8.gyp:v8',
],
'include_dirs+': [
'../src',
],
'defines': [
'ENABLE_DEBUGGER_SUPPORT',
],
'sources': [
'd8.cc',
'd8-debug.cc',
'<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
],
'conditions': [
[ 'OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'sources': [ 'd8-posix.cc', ]
}],
],
},
{
'target_name': 'd8_js2c',
'type': 'none',
'toolsets': ['host'],
'variables': {
'js_files': [
'd8.js',
],
},
'actions': [
{
'action_name': 'd8_js2c',
'inputs': [
'../tools/js2c.py',
'<@(js_files)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
'<(SHARED_INTERMEDIATE_DIR)/d8-js-empty.cc',
],
'action': [
'python',
'../tools/js2c.py',
'<@(_outputs)',
'D8',
'<@(js_files)'
],
},
],
}
],
}

deps/v8/src/date.js (80)

@@ -81,12 +81,12 @@ function TimeFromYear(year) {
function InLeapYear(time) {
return DaysInYear(YEAR_FROM_TIME(time)) == 366 ? 1 : 0;
return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
}
function DayWithinYear(time) {
return DAY(time) - DayFromYear(YEAR_FROM_TIME(time));
return DAY(time) - DayFromYear(YearFromTime(time));
}
@@ -114,9 +114,9 @@ function EquivalentTime(t) {
// the actual year if it is in the range 1970..2037
if (t >= 0 && t <= 2.1e12) return t;
var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)),
MONTH_FROM_TIME(t),
DATE_FROM_TIME(t));
var day = MakeDay(EquivalentYear(YearFromTime(t)),
MonthFromTime(t),
DateFromTime(t));
return MakeDate(day, TimeWithinDay(t));
}
@@ -253,9 +253,6 @@ var ltcache = {
function LocalTimeNoCheck(time) {
var ltc = ltcache;
if (%_ObjectEquals(time, ltc.key)) return ltc.val;
if (time < -MAX_TIME_MS || time > MAX_TIME_MS) {
return $NaN;
}
// Inline the DST offset cache checks for speed.
// The cache is hit, or DaylightSavingsOffset is called,
@@ -371,16 +368,21 @@ function MakeDay(year, month, date) {
// ECMA 262 - 15.9.1.13
function MakeDate(day, time) {
if (!$isFinite(day)) return $NaN;
if (!$isFinite(time)) return $NaN;
return day * msPerDay + time;
var time = day * msPerDay + time;
// Some of our runtime functions for computing UTC(time) rely on
// times not being significantly larger than MAX_TIME_MS. If there
// is no way that the time can be within range even after UTC
// conversion we return NaN immediately instead of relying on
// TimeClip to do it.
if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN;
return time;
}
// ECMA 262 - 15.9.1.14
function TimeClip(time) {
if (!$isFinite(time)) return $NaN;
if ($abs(time) > 8.64E15) return $NaN;
if ($abs(time) > MAX_TIME_MS) return $NaN;
return TO_INTEGER(time);
}
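TimeClip implements ES5 15.9.1.14: any time value more than 8.64e15 ms (plus or minus 100,000,000 days) from the epoch becomes NaN, and finite values are truncated toward zero. A hedged C++ rendering of the same arithmetic (the constant mirrors the MAX_TIME_MS macro used in date.js; the helper itself is my illustration):

#include <cmath>
#include <limits>

const double kMaxTimeMs = 8.64e15;  // +/- 100,000,000 days in milliseconds

double TimeClipSketch(double time) {
  if (!std::isfinite(time) || std::fabs(time) > kMaxTimeMs) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  // TO_INTEGER truncates toward zero.
  return (time < 0) ? std::ceil(time) : std::floor(time);
}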
@@ -424,7 +426,7 @@ var Date_cache = {
value = DateParse(year);
if (!NUMBER_IS_NAN(value)) {
cache.time = value;
cache.year = YEAR_FROM_TIME(LocalTimeNoCheck(value));
cache.year = YearFromTime(LocalTimeNoCheck(value));
cache.string = year;
}
}
@@ -642,7 +644,7 @@ function DateGetFullYear() {
if (NUMBER_IS_NAN(t)) return t;
var cache = Date_cache;
if (cache.time === t) return cache.year;
return YEAR_FROM_TIME(LocalTimeNoCheck(t));
return YearFromTime(LocalTimeNoCheck(t));
}
@@ -650,7 +652,7 @@ function DateGetFullYear() {
function DateGetUTCFullYear() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return YEAR_FROM_TIME(t);
return YearFromTime(t);
}
@@ -658,7 +660,7 @@ function DateGetUTCFullYear() {
function DateGetMonth() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return MONTH_FROM_TIME(LocalTimeNoCheck(t));
return MonthFromTime(LocalTimeNoCheck(t));
}
@@ -666,7 +668,7 @@ function DateGetMonth() {
function DateGetUTCMonth() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return MONTH_FROM_TIME(t);
return MonthFromTime(t);
}
@@ -674,7 +676,7 @@ function DateGetUTCMonth() {
function DateGetDate() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return t;
return DATE_FROM_TIME(LocalTimeNoCheck(t));
return DateFromTime(LocalTimeNoCheck(t));
}
@@ -869,7 +871,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
function DateSetDate(date) {
var t = LocalTime(DATE_VALUE(this));
date = ToNumber(date);
var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -878,7 +880,7 @@ function DateSetDate(date) {
function DateSetUTCDate(date) {
var t = DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -888,7 +890,7 @@ function DateSetMonth(month, date) {
var t = LocalTime(DATE_VALUE(this));
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(YEAR_FROM_TIME(t), month, date);
var day = MakeDay(YearFromTime(t), month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -898,7 +900,7 @@ function DateSetUTCMonth(month, date) {
var t = DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(YEAR_FROM_TIME(t), month, date);
var day = MakeDay(YearFromTime(t), month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -909,8 +911,8 @@ function DateSetFullYear(year, month, date) {
t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
year = ToNumber(year);
var argc = %_ArgumentsLength();
month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
date = argc < 3 ? DateFromTime(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -922,8 +924,8 @@ function DateSetUTCFullYear(year, month, date) {
if (NUMBER_IS_NAN(t)) t = 0;
var argc = %_ArgumentsLength();
year = ToNumber(year);
month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
date = argc < 3 ? DateFromTime(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -935,9 +937,9 @@ function DateToUTCString() {
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
return WeekDays[WeekDay(t)] + ', '
+ TwoDigitString(DATE_FROM_TIME(t)) + ' '
+ Months[MONTH_FROM_TIME(t)] + ' '
+ YEAR_FROM_TIME(t) + ' '
+ TwoDigitString(DateFromTime(t)) + ' '
+ Months[MonthFromTime(t)] + ' '
+ YearFromTime(t) + ' '
+ TimeString(t) + ' GMT';
}
@ -946,7 +948,7 @@ function DateToUTCString() {
function DateGetYear() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return $NaN;
return YEAR_FROM_TIME(LocalTimeNoCheck(t)) - 1900;
return YearFromTime(LocalTimeNoCheck(t)) - 1900;
}
@ -958,7 +960,7 @@ function DateSetYear(year) {
if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
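For reference, the legacy setYear keeps its two-digit-year behavior: values from 0 through 99 are offset by 1900, everything else passes through. A quick sketch of the observable result (illustrative values):
var d = new Date(0);
d.setYear(99); // two-digit years are offset by 1900
d.getFullYear(); // 1999
d.setYear(2000); // values outside 0..99 pass through unchanged
d.getFullYear(); // 2000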
@ -984,16 +986,24 @@ function PadInt(n, digits) {
function DateToISOString() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1, 2) +
'-' + PadInt(this.getUTCDate(), 2) + 'T' + PadInt(this.getUTCHours(), 2) +
':' + PadInt(this.getUTCMinutes(), 2) + ':' + PadInt(this.getUTCSeconds(), 2) +
return this.getUTCFullYear() +
'-' + PadInt(this.getUTCMonth() + 1, 2) +
'-' + PadInt(this.getUTCDate(), 2) +
'T' + PadInt(this.getUTCHours(), 2) +
':' + PadInt(this.getUTCMinutes(), 2) +
':' + PadInt(this.getUTCSeconds(), 2) +
'.' + PadInt(this.getUTCMilliseconds(), 3) +
'Z';
}
function DateToJSON(key) {
return CheckJSONPrimitive(this.toISOString());
var o = ToObject(this);
var tv = DefaultNumber(o);
if (IS_NUMBER(tv) && !$isFinite(tv)) {
return null;
}
return o.toISOString();
}
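The rewritten toJSON follows the ES5 algorithm: convert the receiver to an object, take its primitive time value, and return null when that value is not finite rather than serializing an invalid date. A short sketch of the resulting behavior:
new Date(Date.UTC(2010, 11, 7)).toJSON(); // "2010-12-07T00:00:00.000Z"
new Date(NaN).toJSON(); // null; the time value is not finite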

36
deps/v8/src/debug-debugger.js

@ -858,6 +858,7 @@ Debug.debuggerFlags = function() {
return debugger_flags;
};
Debug.MakeMirror = MakeMirror;
function MakeExecutionState(break_id) {
return new ExecutionState(break_id);
@ -876,9 +877,11 @@ ExecutionState.prototype.prepareStep = function(opt_action, opt_count) {
return %PrepareStep(this.break_id, action, count);
}
ExecutionState.prototype.evaluateGlobal = function(source, disable_break) {
return MakeMirror(
%DebugEvaluateGlobal(this.break_id, source, Boolean(disable_break)));
ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
opt_additional_context) {
return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
Boolean(disable_break),
opt_additional_context));
};
ExecutionState.prototype.frameCount = function() {
@ -1837,6 +1840,7 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
var frame = request.arguments.frame;
var global = request.arguments.global;
var disable_break = request.arguments.disable_break;
var additional_context = request.arguments.additional_context;
// The expression argument could be an integer so we convert it to a
// string.
@ -1851,11 +1855,29 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
return response.failed('Arguments "frame" and "global" are exclusive');
}
var additional_context_object;
if (additional_context) {
additional_context_object = {};
for (var i = 0; i < additional_context.length; i++) {
var mapping = additional_context[i];
if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) {
return response.failed("Context element #" + i +
" must contain name:string and handle:number");
}
var context_value_mirror = LookupMirror(mapping.handle);
if (!context_value_mirror) {
return response.failed("Context object '" + mapping.name +
"' #" + mapping.handle + "# not found");
}
additional_context_object[mapping.name] = context_value_mirror.value();
}
}
// Global evaluate.
if (global) {
// Evaluate in the global context.
response.body =
this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
response.body = this.exec_state_.evaluateGlobal(
expression, Boolean(disable_break), additional_context_object);
return;
}
@ -1877,12 +1899,12 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
}
// Evaluate in the specified frame.
response.body = this.exec_state_.frame(frame_number).evaluate(
expression, Boolean(disable_break));
expression, Boolean(disable_break), additional_context_object);
return;
} else {
// Evaluate in the selected frame.
response.body = this.exec_state_.frame().evaluate(
expression, Boolean(disable_break));
expression, Boolean(disable_break), additional_context_object);
return;
}
};
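A protocol request exercising the new argument might look like the sketch below; the handle value is hypothetical and must refer to a mirror returned by an earlier response:
var request = {
seq: 117,
type: 'request',
command: 'evaluate',
arguments: {
expression: 'obj.name',
global: true,
disable_break: true,
// Binds the free variable 'obj' to the mirror with handle 25.
additional_context: [ { name: 'obj', handle: 25 } ]
}
};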

1
deps/v8/src/execution.cc

@ -720,7 +720,6 @@ MaybeObject* Execution::HandleStackGuardInterrupt() {
return Top::TerminateExecution();
}
if (StackGuard::IsInterrupted()) {
// interrupt
StackGuard::Continue(INTERRUPT);
return Top::StackOverflow();
}

263
deps/v8/src/extensions/experimental/i18n-extension.cc

@ -0,0 +1,263 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "i18n-extension.h"
#include <algorithm>
#include <string>
#include "unicode/locid.h"
#include "unicode/uloc.h"
namespace v8 {
namespace internal {
I18NExtension* I18NExtension::extension_ = NULL;
// TODO(cira): maybe move JS code to a .js file and generate cc files from it?
const char* const I18NExtension::kSource =
"Locale = function(optLocale) {"
" native function NativeJSLocale();"
" var properties = NativeJSLocale(optLocale);"
" this.locale = properties.locale;"
" this.language = properties.language;"
" this.script = properties.script;"
" this.region = properties.region;"
"};"
"Locale.availableLocales = function() {"
" native function NativeJSAvailableLocales();"
" return NativeJSAvailableLocales();"
"};"
"Locale.prototype.maximizedLocale = function() {"
" native function NativeJSMaximizedLocale();"
" return new Locale(NativeJSMaximizedLocale(this.locale));"
"};"
"Locale.prototype.minimizedLocale = function() {"
" native function NativeJSMinimizedLocale();"
" return new Locale(NativeJSMinimizedLocale(this.locale));"
"};"
"Locale.prototype.displayLocale_ = function(displayLocale) {"
" var result = this.locale;"
" if (displayLocale !== undefined) {"
" result = displayLocale.locale;"
" }"
" return result;"
"};"
"Locale.prototype.displayLanguage = function(optDisplayLocale) {"
" var displayLocale = this.displayLocale_(optDisplayLocale);"
" native function NativeJSDisplayLanguage();"
" return NativeJSDisplayLanguage(this.locale, displayLocale);"
"};"
"Locale.prototype.displayScript = function(optDisplayLocale) {"
" var displayLocale = this.displayLocale_(optDisplayLocale);"
" native function NativeJSDisplayScript();"
" return NativeJSDisplayScript(this.locale, displayLocale);"
"};"
"Locale.prototype.displayRegion = function(optDisplayLocale) {"
" var displayLocale = this.displayLocale_(optDisplayLocale);"
" native function NativeJSDisplayRegion();"
" return NativeJSDisplayRegion(this.locale, displayLocale);"
"};"
"Locale.prototype.displayName = function(optDisplayLocale) {"
" var displayLocale = this.displayLocale_(optDisplayLocale);"
" native function NativeJSDisplayName();"
" return NativeJSDisplayName(this.locale, displayLocale);"
"};";
v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
v8::Handle<v8::String> name) {
if (name->Equals(v8::String::New("NativeJSLocale"))) {
return v8::FunctionTemplate::New(JSLocale);
} else if (name->Equals(v8::String::New("NativeJSAvailableLocales"))) {
return v8::FunctionTemplate::New(JSAvailableLocales);
} else if (name->Equals(v8::String::New("NativeJSMaximizedLocale"))) {
return v8::FunctionTemplate::New(JSMaximizedLocale);
} else if (name->Equals(v8::String::New("NativeJSMinimizedLocale"))) {
return v8::FunctionTemplate::New(JSMinimizedLocale);
} else if (name->Equals(v8::String::New("NativeJSDisplayLanguage"))) {
return v8::FunctionTemplate::New(JSDisplayLanguage);
} else if (name->Equals(v8::String::New("NativeJSDisplayScript"))) {
return v8::FunctionTemplate::New(JSDisplayScript);
} else if (name->Equals(v8::String::New("NativeJSDisplayRegion"))) {
return v8::FunctionTemplate::New(JSDisplayRegion);
} else if (name->Equals(v8::String::New("NativeJSDisplayName"))) {
return v8::FunctionTemplate::New(JSDisplayName);
}
return v8::Handle<v8::FunctionTemplate>();
}
v8::Handle<v8::Value> I18NExtension::JSLocale(const v8::Arguments& args) {
// TODO(cira): Fetch browser locale. Accept en-US as a good default for now.
// We could possibly pass browser locale as a parameter in the constructor.
std::string locale_name("en-US");
if (args.Length() == 1 && args[0]->IsString()) {
locale_name = *v8::String::Utf8Value(args[0]->ToString());
}
v8::Local<v8::Object> locale = v8::Object::New();
locale->Set(v8::String::New("locale"), v8::String::New(locale_name.c_str()));
icu::Locale icu_locale(locale_name.c_str());
const char* language = icu_locale.getLanguage();
locale->Set(v8::String::New("language"), v8::String::New(language));
const char* script = icu_locale.getScript();
if (strlen(script)) {
locale->Set(v8::String::New("script"), v8::String::New(script));
}
const char* region = icu_locale.getCountry();
if (strlen(region)) {
locale->Set(v8::String::New("region"), v8::String::New(region));
}
return locale;
}
// TODO(cira): Filter out locales that Chrome doesn't support.
v8::Handle<v8::Value> I18NExtension::JSAvailableLocales(
const v8::Arguments& args) {
v8::Local<v8::Array> all_locales = v8::Array::New();
int count = 0;
const Locale* icu_locales = icu::Locale::getAvailableLocales(count);
for (int i = 0; i < count; ++i) {
all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
}
return all_locales;
}
// Use '-' as the tag separator rather than the '_' that ICU uses.
static std::string NormalizeLocale(const std::string& locale) {
std::string result(locale);
// TODO(cira): remove STL dependency.
std::replace(result.begin(), result.end(), '_', '-');
return result;
}
v8::Handle<v8::Value> I18NExtension::JSMaximizedLocale(
const v8::Arguments& args) {
if (!args.Length() || !args[0]->IsString()) {
return v8::Undefined();
}
UErrorCode status = U_ZERO_ERROR;
std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
char max_locale[ULOC_FULLNAME_CAPACITY];
uloc_addLikelySubtags(locale_name.c_str(), max_locale,
sizeof(max_locale), &status);
if (U_FAILURE(status)) {
return v8::Undefined();
}
return v8::String::New(NormalizeLocale(max_locale).c_str());
}
v8::Handle<v8::Value> I18NExtension::JSMinimizedLocale(
const v8::Arguments& args) {
if (!args.Length() || !args[0]->IsString()) {
return v8::Undefined();
}
UErrorCode status = U_ZERO_ERROR;
std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
char min_locale[ULOC_FULLNAME_CAPACITY];
uloc_minimizeSubtags(locale_name.c_str(), min_locale,
sizeof(min_locale), &status);
if (U_FAILURE(status)) {
return v8::Undefined();
}
return v8::String::New(NormalizeLocale(min_locale).c_str());
}
// Common code for JSDisplayXXX methods.
static v8::Handle<v8::Value> GetDisplayItem(const v8::Arguments& args,
const std::string& item) {
if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
return v8::Undefined();
}
std::string base_locale = *v8::String::Utf8Value(args[0]->ToString());
icu::Locale icu_locale(base_locale.c_str());
icu::Locale display_locale =
icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
UnicodeString result;
if (item == "language") {
icu_locale.getDisplayLanguage(display_locale, result);
} else if (item == "script") {
icu_locale.getDisplayScript(display_locale, result);
} else if (item == "region") {
icu_locale.getDisplayCountry(display_locale, result);
} else if (item == "name") {
icu_locale.getDisplayName(display_locale, result);
} else {
return v8::Undefined();
}
if (result.length()) {
return v8::String::New(
reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
}
return v8::Undefined();
}
v8::Handle<v8::Value> I18NExtension::JSDisplayLanguage(
const v8::Arguments& args) {
return GetDisplayItem(args, "language");
}
v8::Handle<v8::Value> I18NExtension::JSDisplayScript(
const v8::Arguments& args) {
return GetDisplayItem(args, "script");
}
v8::Handle<v8::Value> I18NExtension::JSDisplayRegion(
const v8::Arguments& args) {
return GetDisplayItem(args, "region");
}
v8::Handle<v8::Value> I18NExtension::JSDisplayName(const v8::Arguments& args) {
return GetDisplayItem(args, "name");
}
I18NExtension* I18NExtension::get() {
if (!extension_) {
extension_ = new I18NExtension();
}
return extension_;
}
void I18NExtension::Register() {
static v8::DeclareExtension i18n_extension_declaration(I18NExtension::get());
}
} } // namespace v8::internal
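With the extension registered, scripts gain a global Locale constructor backed by the native functions above; a usage sketch (exact outputs depend on the ICU data V8 is built with):
var loc = new Locale('sr-Latn-RS');
loc.language; // 'sr'
loc.script; // 'Latn'
loc.region; // 'RS'
new Locale('sr').maximizedLocale().locale; // e.g. 'sr-Cyrl-RS'
loc.displayLanguage(new Locale('en')); // e.g. 'Serbian'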

64
deps/v8/src/extensions/experimental/i18n-extension.h

@ -0,0 +1,64 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
#include <v8.h>
namespace v8 {
namespace internal {
class I18NExtension : public v8::Extension {
public:
I18NExtension() : v8::Extension("v8/i18n", kSource) {}
virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
v8::Handle<v8::String> name);
// Implementations of window.Locale methods.
static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
static v8::Handle<v8::Value> JSAvailableLocales(const v8::Arguments& args);
static v8::Handle<v8::Value> JSMaximizedLocale(const v8::Arguments& args);
static v8::Handle<v8::Value> JSMinimizedLocale(const v8::Arguments& args);
static v8::Handle<v8::Value> JSDisplayLanguage(const v8::Arguments& args);
static v8::Handle<v8::Value> JSDisplayScript(const v8::Arguments& args);
static v8::Handle<v8::Value> JSDisplayRegion(const v8::Arguments& args);
static v8::Handle<v8::Value> JSDisplayName(const v8::Arguments& args);
// V8 code prefers Register, while Chrome and WebKit use get-style methods.
static void Register();
static I18NExtension* get();
private:
static const char* const kSource;
static I18NExtension* extension_;
};
} } // namespace v8::internal
#endif // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_

4
deps/v8/src/flag-definitions.h

@ -141,6 +141,7 @@ DEFINE_bool(use_osr, true, "use on-stack replacement")
DEFINE_bool(use_osr, false, "use on-stack replacement")
#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_bool(debug_code, false,
@ -194,6 +195,7 @@ DEFINE_bool(mask_constants_with_cookie,
// codegen.cc
DEFINE_bool(lazy, true, "use lazy compilation")
DEFINE_bool(trace_opt, false, "trace lazy optimization")
DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
DEFINE_bool(opt, true, "use adaptive optimizations")
DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
DEFINE_bool(always_opt, false, "always try to optimize functions")
@ -456,8 +458,6 @@ DEFINE_bool(log_snapshot_positions, false,
"log positions of (de)serialized objects in the snapshot.")
DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
DEFINE_bool(compress_log, false,
"Compress log to save space (makes log less human-readable).")
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
DEFINE_bool(prof_auto, true,

35
deps/v8/src/full-codegen.cc

@ -761,6 +761,7 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
context()->EmitLogicalLeft(expr, &eval_right, &done);
PrepareForBailoutForId(expr->RightId(), NO_REGISTERS);
__ bind(&eval_right);
if (context()->IsTest()) ForwardBailoutToChild(expr);
context()->HandleExpression(expr->right());
@ -925,16 +926,21 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
if (stmt->HasElseStatement()) {
VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
__ bind(&then_part);
Visit(stmt->then_statement());
__ jmp(&done);
PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
__ bind(&else_part);
Visit(stmt->else_statement());
} else {
VisitForControl(stmt->condition(), &then_part, &done, &then_part);
PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
__ bind(&then_part);
Visit(stmt->then_statement());
PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
}
__ bind(&done);
PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
@ -946,6 +952,11 @@ void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
// When continuing, we clobber the unpredictable value in the accumulator
// with one that's safe for GC. If we hit an exit from the try block of
// try...finally on our way out, we will unconditionally preserve the
// accumulator on the stack.
ClearAccumulator();
while (!current->IsContinueTarget(stmt->target())) {
stack_depth = current->Exit(stack_depth);
current = current->outer();
@ -962,6 +973,11 @@ void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
// When breaking, we clobber the unpredictable value in the accumulator
// with one that's safe for GC. If we hit an exit from the try block of
// try...finally on our way out, we will unconditionally preserve the
// accumulator on the stack.
ClearAccumulator();
while (!current->IsBreakTarget(stmt->target())) {
stack_depth = current->Exit(stack_depth);
current = current->outer();
@ -1043,12 +1059,13 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
&stack_check);
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
__ bind(&stack_check);
EmitStackCheck(stmt);
__ jmp(&body);
__ bind(loop_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(loop_statement.break_target());
decrement_loop_depth();
}
@ -1063,6 +1080,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
// Emit the test at the bottom of the loop.
__ jmp(&test);
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&body);
Visit(stmt->body());
@ -1080,8 +1098,8 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
loop_statement.break_target(),
loop_statement.break_target());
__ bind(loop_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(loop_statement.break_target());
decrement_loop_depth();
}
@ -1099,12 +1117,12 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
// Emit the test at the bottom of the loop (even if empty).
__ jmp(&test);
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&body);
Visit(stmt->body());
__ bind(loop_statement.continue_target());
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
__ bind(loop_statement.continue_target());
SetStatementPosition(stmt);
if (stmt->next() != NULL) {
Visit(stmt->next());
@ -1127,8 +1145,8 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
__ jmp(&body);
}
__ bind(loop_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ bind(loop_statement.break_target());
decrement_loop_depth();
}
@ -1235,7 +1253,10 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
Visit(stmt->try_block());
__ PopTryHandler();
}
// Execute the finally block on the way out.
// Execute the finally block on the way out. Clobber the unpredictable
// value in the accumulator with one that's safe for GC. The finally
// block will unconditionally preserve the accumulator on the stack.
ClearAccumulator();
__ Call(&finally_entry);
}
@ -1256,6 +1277,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
Label true_case, false_case, done;
VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
__ bind(&true_case);
SetExpressionPosition(expr->then_expression(),
expr->then_expression_position());
@ -1270,6 +1292,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
__ jmp(&done);
}
PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
__ bind(&false_case);
if (context()->IsTest()) ForwardBailoutToChild(expr);
SetExpressionPosition(expr->else_expression(),

13
deps/v8/src/full-codegen.h

@ -38,6 +38,9 @@
namespace v8 {
namespace internal {
// Forward declarations.
class JumpPatchSite;
// AST node visitor which can tell whether a given statement will be breakable
// when the code is compiled by the full compiler in the debugger. This means
// that there will be an IC (load/store/call) in the code generated for the
@ -283,6 +286,10 @@ class FullCodeGenerator: public AstVisitor {
static const InlineFunctionGenerator kInlineFunctionGenerators[];
// A platform-specific utility to overwrite the accumulator register
// with a GC-safe value.
void ClearAccumulator();
// Compute the frame pointer relative offset for a given local or
// parameter slot.
int SlotOffset(Slot* slot);
@ -481,7 +488,7 @@ class FullCodeGenerator: public AstVisitor {
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator.
void EmitAssignment(Expression* expr);
void EmitAssignment(Expression* expr, int bailout_ast_id);
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
@ -533,6 +540,10 @@ class FullCodeGenerator: public AstVisitor {
// Helper for calling an IC stub.
void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
// Calling an IC stub with a patch site. Passing NULL for patch_site
// indicates no inlined smi code and emits a nop after the IC call.
void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
// Set fields in the stack frame. Offsets are the frame pointer relative
// offsets defined in, e.g., StandardFrameConstants.
void StoreToFrameField(int frame_offset, Register value);

2
deps/v8/src/globals.h

@ -28,6 +28,8 @@
#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_
#include "../include/v8stdint.h"
namespace v8 {
namespace internal {

2
deps/v8/src/heap-inl.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

35
deps/v8/src/heap-profiler.cc

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2009-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -348,27 +348,34 @@ void HeapProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name, int type) {
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control) {
ASSERT(singleton_ != NULL);
return singleton_->TakeSnapshotImpl(name, type);
return singleton_->TakeSnapshotImpl(name, type, control);
}
HeapSnapshot* HeapProfiler::TakeSnapshot(String* name, int type) {
HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
int type,
v8::ActivityControl* control) {
ASSERT(singleton_ != NULL);
return singleton_->TakeSnapshotImpl(name, type);
return singleton_->TakeSnapshotImpl(name, type, control);
}
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, int type) {
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
int type,
v8::ActivityControl* control) {
Heap::CollectAllGarbage(true);
HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
HeapSnapshot* result =
snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
HeapSnapshotGenerator generator(result);
generator.GenerateSnapshot();
HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;
}
case HeapSnapshot::kAggregated: {
@ -381,13 +388,19 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, int type) {
default:
UNREACHABLE();
}
snapshots_->SnapshotGenerationFinished();
if (!generation_completed) {
delete result;
result = NULL;
}
snapshots_->SnapshotGenerationFinished(result);
return result;
}
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name, int type) {
return TakeSnapshotImpl(snapshots_->GetName(name), type);
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
int type,
v8::ActivityControl* control) {
return TakeSnapshotImpl(snapshots_->GetName(name), type, control);
}

18
deps/v8/src/heap-profiler.h

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2009-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -56,8 +56,12 @@ class HeapProfiler {
static void TearDown();
#ifdef ENABLE_LOGGING_AND_PROFILING
static HeapSnapshot* TakeSnapshot(const char* name, int type);
static HeapSnapshot* TakeSnapshot(String* name, int type);
static HeapSnapshot* TakeSnapshot(const char* name,
int type,
v8::ActivityControl* control);
static HeapSnapshot* TakeSnapshot(String* name,
int type,
v8::ActivityControl* control);
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
@ -75,8 +79,12 @@ class HeapProfiler {
private:
HeapProfiler();
~HeapProfiler();
HeapSnapshot* TakeSnapshotImpl(const char* name, int type);
HeapSnapshot* TakeSnapshotImpl(String* name, int type);
HeapSnapshot* TakeSnapshotImpl(const char* name,
int type,
v8::ActivityControl* control);
HeapSnapshot* TakeSnapshotImpl(String* name,
int type,
v8::ActivityControl* control);
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;

18
deps/v8/src/heap.cc

@ -3757,14 +3757,21 @@ bool Heap::IdleNotification() {
static const int kIdlesBeforeScavenge = 4;
static const int kIdlesBeforeMarkSweep = 7;
static const int kIdlesBeforeMarkCompact = 8;
static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
static const int kGCsBetweenCleanup = 4;
static int number_idle_notifications = 0;
static int last_gc_count = gc_count_;
bool uncommit = true;
bool finished = false;
if (last_gc_count == gc_count_) {
number_idle_notifications++;
// Reset the number of idle notifications received when a number of
// GCs have taken place. This allows another round of cleanup based
// on idle notifications if enough work has been carried out to
// provoke a number of garbage collections.
if (gc_count_ < last_gc_count + kGCsBetweenCleanup) {
number_idle_notifications =
Min(number_idle_notifications + 1, kMaxIdleCount);
} else {
number_idle_notifications = 0;
last_gc_count = gc_count_;
@ -3779,7 +3786,6 @@ bool Heap::IdleNotification() {
}
new_space_.Shrink();
last_gc_count = gc_count_;
} else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
// Before doing the mark-sweep collections we clear the
// compilation cache to avoid hanging on to source code and
@ -3794,7 +3800,6 @@ bool Heap::IdleNotification() {
CollectAllGarbage(true);
new_space_.Shrink();
last_gc_count = gc_count_;
number_idle_notifications = 0;
finished = true;
} else if (contexts_disposed_ > 0) {
@ -3813,6 +3818,11 @@ bool Heap::IdleNotification() {
number_idle_notifications = 0;
uncommit = false;
}
} else if (number_idle_notifications > kIdlesBeforeMarkCompact) {
// If we have received more than kIdlesBeforeMarkCompact idle
// notifications we do not perform any cleanup because we don't
// expect to gain much by doing so.
finished = true;
}
// Make sure that we have no pending context disposals and

7
deps/v8/src/heap.h

@ -1119,9 +1119,9 @@ class Heap : public AllStatic {
static int contexts_disposed_;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 512*KB;
static const int kMaxObjectSizeInNewSpace = 1024*KB;
#else
static const int kMaxObjectSizeInNewSpace = 256*KB;
static const int kMaxObjectSizeInNewSpace = 512*KB;
#endif
static NewSpace new_space_;
@ -2054,8 +2054,9 @@ class TranscendentalCache {
// Allow access to the caches_ array as an ExternalReference.
friend class ExternalReference;
// Inline implementation of the caching.
// Inline implementation of the cache.
friend class TranscendentalCacheStub;
friend class TranscendentalCacheSSE2Stub;
static TranscendentalCache* caches_[kNumberOfCaches];
Element elements_[kCacheSize];

101
deps/v8/src/hydrogen-instructions.cc

@ -64,69 +64,34 @@ const char* Representation::Mnemonic() const {
}
static int32_t AddAssertNoOverflow(int32_t a, int32_t b) {
ASSERT(static_cast<int64_t>(a + b) == (static_cast<int64_t>(a) +
static_cast<int64_t>(b)));
return a + b;
}
static int32_t SubAssertNoOverflow(int32_t a, int32_t b) {
ASSERT(static_cast<int64_t>(a - b) == (static_cast<int64_t>(a) -
static_cast<int64_t>(b)));
return a - b;
}
static int32_t MulAssertNoOverflow(int32_t a, int32_t b) {
ASSERT(static_cast<int64_t>(a * b) == (static_cast<int64_t>(a) *
static_cast<int64_t>(b)));
return a * b;
}
static int32_t AddWithoutOverflow(int32_t a, int32_t b) {
if (b > 0) {
if (a <= kMaxInt - b) return AddAssertNoOverflow(a, b);
static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
if (result > kMaxInt) {
*overflow = true;
return kMaxInt;
} else {
if (a >= kMinInt - b) return AddAssertNoOverflow(a, b);
}
if (result < kMinInt) {
*overflow = true;
return kMinInt;
}
return static_cast<int32_t>(result);
}
static int32_t SubWithoutOverflow(int32_t a, int32_t b) {
if (b < 0) {
if (a <= kMaxInt + b) return SubAssertNoOverflow(a, b);
return kMaxInt;
} else {
if (a >= kMinInt + b) return SubAssertNoOverflow(a, b);
return kMinInt;
}
static int32_t AddWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
return ConvertAndSetOverflow(result, overflow);
}
static int32_t MulWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
if (b == 0 || a == 0) return 0;
if (a == 1) return b;
if (b == 1) return a;
int sign = 1;
if ((a < 0 && b > 0) || (a > 0 && b < 0)) sign = -1;
if (a < 0) a = -a;
if (b < 0) b = -b;
static int32_t SubWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
return ConvertAndSetOverflow(result, overflow);
}
if (kMaxInt / b > a && a != kMinInt && b != kMinInt) {
return MulAssertNoOverflow(a, b) * sign;
}
*overflow = true;
if (sign == 1) {
return kMaxInt;
} else {
return kMinInt;
}
static int32_t MulWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
return ConvertAndSetOverflow(result, overflow);
}
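All three helpers now funnel through one saturating conversion: compute in 64 bits, clamp to the int32 range, and record whether clamping happened. The same logic in JavaScript (a sketch; doubles represent these intermediates exactly for 32-bit inputs):
var kMaxInt = 2147483647, kMinInt = -2147483648;
function convertAndSetOverflow(result, status) {
if (result > kMaxInt) { status.overflow = true; return kMaxInt; }
if (result < kMinInt) { status.overflow = true; return kMinInt; }
return result;
}
var status = { overflow: false };
convertAndSetOverflow(kMaxInt + 1, status); // kMaxInt, with status.overflow set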
@ -143,39 +108,32 @@ int32_t Range::Mask() const {
}
void Range::Add(int32_t value) {
void Range::AddConstant(int32_t value) {
if (value == 0) return;
lower_ = AddWithoutOverflow(lower_, value);
upper_ = AddWithoutOverflow(upper_, value);
bool may_overflow = false; // Overflow is ignored here.
lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
Verify();
}
// Returns whether the add may overflow.
bool Range::AddAndCheckOverflow(Range* other) {
int old_lower = lower_;
int old_upper = upper_;
lower_ = AddWithoutOverflow(lower_, other->lower());
upper_ = AddWithoutOverflow(upper_, other->upper());
bool r = (old_lower + other->lower() != lower_ ||
old_upper + other->upper() != upper_);
bool may_overflow = false;
lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
KeepOrder();
Verify();
return r;
return may_overflow;
}
// Returns whether the sub may overflow.
bool Range::SubAndCheckOverflow(Range* other) {
int old_lower = lower_;
int old_upper = upper_;
lower_ = SubWithoutOverflow(lower_, other->lower());
upper_ = SubWithoutOverflow(upper_, other->upper());
bool r = (old_lower - other->lower() != lower_ ||
old_upper - other->upper() != upper_);
bool may_overflow = false;
lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
KeepOrder();
Verify();
return r;
return may_overflow;
}
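The subtraction pairs each bound with the opposite bound of the other range, which is the standard interval rule: [l1, u1] - [l2, u2] = [l1 - u2, u1 - l2]. A quick check with illustrative bounds:
var l1 = 0, u1 = 5, l2 = 1, u2 = 3;
var lower = l1 - u2; // -3, the smallest possible difference
var upper = u1 - l2; // 4, the largest possible difference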
@ -193,7 +151,6 @@ void Range::Verify() const {
}
// Returns whether the mul may overflow.
bool Range::MulAndCheckOverflow(Range* other) {
bool may_overflow = false;
int v1 = MulWithoutOverflow(lower_, other->lower(), &may_overflow);

74
deps/v8/src/hydrogen-instructions.h

@ -77,6 +77,7 @@ class LChunkBuilder;
// HLoadKeyedFastElement
// HLoadKeyedGeneric
// HLoadNamedGeneric
// HPower
// HStoreNamed
// HStoreNamedField
// HStoreNamedGeneric
@ -93,13 +94,13 @@ class LChunkBuilder;
// HCallStub
// HConstant
// HControlInstruction
// HDeoptimize
// HGoto
// HUnaryControlInstruction
// HBranch
// HCompareMapAndBranch
// HReturn
// HThrow
// HDeoptimize
// HEnterInlined
// HFunctionLiteral
// HGlobalObject
@ -139,6 +140,7 @@ class LChunkBuilder;
// HHasCachedArrayIndex
// HHasInstanceType
// HIsNull
// HIsObject
// HIsSmi
// HValueOf
// HUnknownOSRValue
@ -207,6 +209,7 @@ class LChunkBuilder;
V(Goto) \
V(InstanceOf) \
V(IsNull) \
V(IsObject) \
V(IsSmi) \
V(HasInstanceType) \
V(HasCachedArrayIndex) \
@ -223,6 +226,7 @@ class LChunkBuilder;
V(ObjectLiteral) \
V(OsrEntry) \
V(Parameter) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@ -330,6 +334,9 @@ class Range: public ZoneObject {
set_can_be_minus_zero(false);
}
// Adds a constant to the lower and upper bound of the range.
void AddConstant(int32_t value);
void StackUpon(Range* other) {
Intersect(other);
next_ = other;
@ -349,7 +356,8 @@ class Range: public ZoneObject {
set_can_be_minus_zero(b);
}
void Add(int32_t value);
// Compute a new result range and return true, if the operation
// can overflow.
bool AddAndCheckOverflow(Range* other);
bool SubAndCheckOverflow(Range* other);
bool MulAndCheckOverflow(Range* other);
@ -1364,7 +1372,7 @@ class HBitNot: public HUnaryOperation {
class HUnaryMathOperation: public HUnaryOperation {
public:
HUnaryMathOperation(HValue* value, MathFunctionId op)
HUnaryMathOperation(HValue* value, BuiltinFunctionId op)
: HUnaryOperation(value), op_(op) {
switch (op) {
case kMathFloor:
@ -1377,8 +1385,12 @@ class HUnaryMathOperation: public HUnaryOperation {
SetFlag(kFlexibleRepresentation);
break;
case kMathSqrt:
default:
case kMathPowHalf:
case kMathLog:
set_representation(Representation::Double());
break;
default:
UNREACHABLE();
}
SetFlag(kUseGVN);
}
@ -1395,6 +1407,8 @@ class HUnaryMathOperation: public HUnaryOperation {
case kMathRound:
case kMathCeil:
case kMathSqrt:
case kMathPowHalf:
case kMathLog:
return Representation::Double();
break;
case kMathAbs:
@ -1415,13 +1429,19 @@ class HUnaryMathOperation: public HUnaryOperation {
return this;
}
MathFunctionId op() const { return op_; }
BuiltinFunctionId op() const { return op_; }
const char* OpName() const;
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation")
protected:
virtual bool DataEquals(HValue* other) const {
HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
return op_ == b->op();
}
private:
MathFunctionId op_;
BuiltinFunctionId op_;
};
@ -2087,11 +2107,25 @@ class HIsNull: public HUnaryPredicate {
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null")
protected:
virtual bool DataEquals(HValue* other) const {
HIsNull* b = HIsNull::cast(other);
return is_strict_ == b->is_strict();
}
private:
bool is_strict_;
};
class HIsObject: public HUnaryPredicate {
public:
explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
};
class HIsSmi: public HUnaryPredicate {
public:
explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
@ -2116,6 +2150,12 @@ class HHasInstanceType: public HUnaryPredicate {
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type")
protected:
virtual bool DataEquals(HValue* other) const {
HHasInstanceType* b = HHasInstanceType::cast(other);
return (from_ == b->from()) && (to_ == b->to());
}
private:
InstanceType from_;
InstanceType to_; // Inclusive range, not all combinations work.
@ -2141,6 +2181,12 @@ class HClassOfTest: public HUnaryPredicate {
Handle<String> class_name() const { return class_name_; }
protected:
virtual bool DataEquals(HValue* other) const {
HClassOfTest* b = HClassOfTest::cast(other);
return class_name_.is_identical_to(b->class_name_);
}
private:
Handle<String> class_name_;
};
@ -2184,6 +2230,22 @@ class HInstanceOf: public HBinaryOperation {
};
class HPower: public HBinaryOperation {
public:
HPower(HValue* left, HValue* right)
: HBinaryOperation(left, right) {
set_representation(Representation::Double());
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) const {
return (index == 1) ? Representation::None() : Representation::Double();
}
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
};
class HAdd: public HArithmeticBinaryOperation {
public:
HAdd(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {

1162
deps/v8/src/hydrogen.cc

File diff suppressed because it is too large

119
deps/v8/src/hydrogen.h

@ -136,14 +136,6 @@ class HBasicBlock: public ZoneObject {
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
// If this block is a successor of a branch, this flag tells whether the
// preceding branch was inverted or not.
bool inverted() { return inverted_; }
void set_inverted(bool b) { inverted_ = b; }
HBasicBlock* deopt_predecessor() { return deopt_predecessor_; }
void set_deopt_predecessor(HBasicBlock* block) { deopt_predecessor_ = block; }
Handle<Object> cond() { return cond_; }
void set_cond(Handle<Object> value) { cond_ = value; }
@ -176,8 +168,6 @@ class HBasicBlock: public ZoneObject {
ZoneList<int> deleted_phis_;
SetOncePointer<HBasicBlock> parent_loop_header_;
bool is_inline_return_target_;
bool inverted_;
HBasicBlock* deopt_predecessor_;
Handle<Object> cond_;
};
@ -557,10 +547,29 @@ class AstContext {
bool IsValue() const { return kind_ == Expression::kValue; }
bool IsTest() const { return kind_ == Expression::kTest; }
// 'Fill' this context with a hydrogen value. The value is assumed to
// have already been inserted in the instruction stream (or, as with HPhi,
// not to need insertion). Call this function in tail position in the Visit
// functions for expressions.
virtual void ReturnValue(HValue* value) = 0;
// Add a hydrogen instruction to the instruction stream (recording an
// environment simulation if necessary) and then fill this context with
// the instruction as value.
virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
protected:
AstContext(HGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
HGraphBuilder* owner() const { return owner_; }
// We want to be able to assert, in a context-specific way, that the stack
// height makes sense when the context is filled.
#ifdef DEBUG
int original_count_;
#endif
private:
HGraphBuilder* owner_;
Expression::Context kind_;
@ -573,6 +582,10 @@ class EffectContext: public AstContext {
explicit EffectContext(HGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
virtual ~EffectContext();
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
};
@ -581,6 +594,10 @@ class ValueContext: public AstContext {
explicit ValueContext(HGraphBuilder* owner)
: AstContext(owner, Expression::kValue) {
}
virtual ~ValueContext();
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
};
@ -588,16 +605,15 @@ class TestContext: public AstContext {
public:
TestContext(HGraphBuilder* owner,
HBasicBlock* if_true,
HBasicBlock* if_false,
bool invert_true,
bool invert_false)
HBasicBlock* if_false)
: AstContext(owner, Expression::kTest),
if_true_(if_true),
if_false_(if_false),
invert_true_(invert_true),
invert_false_(invert_false) {
if_false_(if_false) {
}
virtual void ReturnValue(HValue* value);
virtual void ReturnInstruction(HInstruction* instr, int ast_id);
static TestContext* cast(AstContext* context) {
ASSERT(context->IsTest());
return reinterpret_cast<TestContext*>(context);
@ -606,14 +622,13 @@ class TestContext: public AstContext {
HBasicBlock* if_true() const { return if_true_; }
HBasicBlock* if_false() const { return if_false_; }
bool invert_true() { return invert_true_; }
bool invert_false() { return invert_false_; }
private:
// Build the shared core part of the translation unpacking a value into
// control flow.
void BuildBranch(HValue* value);
HBasicBlock* if_true_;
HBasicBlock* if_false_;
bool invert_true_;
bool invert_false_;
};
@ -631,9 +646,25 @@ class HGraphBuilder: public AstVisitor {
HGraph* CreateGraph(CompilationInfo* info);
// Simple accessors.
HGraph* graph() const { return graph_; }
HSubgraph* subgraph() const { return current_subgraph_; }
HEnvironment* environment() const { return subgraph()->environment(); }
HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); }
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
void AddSimulate(int id);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
HValue* Pop() { return environment()->Pop(); }
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count);
typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count,
int ast_id);
// Forward declarations for inner scope classes.
class SubgraphScope;
@ -650,19 +681,14 @@ class HGraphBuilder: public AstVisitor {
// Simple accessors.
TypeFeedbackOracle* oracle() const { return oracle_; }
HGraph* graph() const { return graph_; }
HSubgraph* subgraph() const { return current_subgraph_; }
AstContext* ast_context() const { return ast_context_; }
void set_ast_context(AstContext* context) { ast_context_ = context; }
AstContext* call_context() const { return call_context_; }
HBasicBlock* function_return() const { return function_return_; }
HEnvironment* environment() const { return subgraph()->environment(); }
HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); }
// Generators for inline runtime functions.
#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \
void Generate##Name(int argument_count);
void Generate##Name(int argument_count, int ast_id);
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
@ -678,13 +704,7 @@ class HGraphBuilder: public AstVisitor {
void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
void AddToSubgraph(HSubgraph* graph, Statement* stmt);
void AddToSubgraph(HSubgraph* graph, Expression* expr);
void AddConditionToSubgraph(HSubgraph* subgraph,
Expression* expr,
HSubgraph* true_graph,
HSubgraph* false_graph);
void Push(HValue* value) { environment()->Push(value); }
HValue* Pop() { return environment()->Pop(); }
HValue* Top() const { return environment()->Top(); }
void Drop(int n) { environment()->Drop(n); }
void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
@ -693,33 +713,21 @@ class HGraphBuilder: public AstVisitor {
void VisitForEffect(Expression* expr);
void VisitForControl(Expression* expr,
HBasicBlock* true_block,
HBasicBlock* false_block,
bool invert_true,
bool invert_false);
// Visit an expression in a 'condition' context, i.e., in a control
// context but not a subexpression of logical and, or, or not.
void VisitCondition(Expression* expr,
HBasicBlock* true_graph,
HBasicBlock* false_graph,
bool invert_true,
bool invert_false);
HBasicBlock* false_block);
// Visit an argument and wrap it in a PushArgument instruction.
HValue* VisitArgument(Expression* expr);
void VisitArgumentList(ZoneList<Expression*>* arguments);
HInstruction* AddInstruction(HInstruction* instr);
void AddSimulate(int id);
void AddPhi(HPhi* phi);
void PushAndAdd(HInstruction* instr);
void PushAndAdd(HInstruction* instr, int position);
void PushArgumentsForStubCall(int argument_count);
// Initialize the arguments to the call based on the environment, add it
// to the graph, and drop the arguments from the environment.
void ProcessCall(HCall* call, int source_position);
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
void ProcessCall(HCall* call);
void AssumeRepresentation(HValue* value, Representation r);
static Representation ToRepresentation(TypeInfo info);
@ -743,7 +751,7 @@ class HGraphBuilder: public AstVisitor {
FunctionLiteral* function);
// Helpers for flow graph construction.
void LookupGlobalPropertyCell(VariableProxy* expr,
void LookupGlobalPropertyCell(Variable* var,
LookupResult* lookup,
bool is_store);
@ -753,10 +761,11 @@ class HGraphBuilder: public AstVisitor {
bool TryMathFunctionInline(Call* expr);
void TraceInline(Handle<JSFunction> target, bool result);
void HandleGlobalVariableAssignment(VariableProxy* proxy,
void HandleGlobalVariableAssignment(Variable* var,
HValue* value,
int position);
void HandleGlobalVariableLoad(VariableProxy* expr);
int position,
int ast_id);
void HandlePropertyAssignment(Assignment* expr);
void HandleCompoundAssignment(Assignment* expr);
void HandlePolymorphicLoadNamedField(Property* expr,

41
deps/v8/src/ia32/assembler-ia32.cc

@ -2409,6 +2409,7 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@ -2431,6 +2432,17 @@ void Assembler::movd(XMMRegister dst, const Operand& src) {
}
void Assembler::movd(const Operand& dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x7E);
emit_sse_operand(src, dst);
}
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@ -2465,7 +2477,7 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) {
}
void Assembler::psllq(XMMRegister reg, int8_t imm8) {
void Assembler::psllq(XMMRegister reg, int8_t shift) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -2473,7 +2485,32 @@ void Assembler::psllq(XMMRegister reg, int8_t imm8) {
EMIT(0x0F);
EMIT(0x73);
emit_sse_operand(esi, reg); // esi == 6
EMIT(imm8);
EMIT(shift);
}
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x70);
emit_sse_operand(dst, src);
EMIT(shuffle);
}
void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x3A);
EMIT(0x16);
emit_sse_operand(src, dst);
EMIT(offset);
}

14
deps/v8/src/ia32/assembler-ia32.h

@ -571,6 +571,15 @@ class Assembler : public Malloced {
static const byte kTestEaxByte = 0xA9;
// One byte opcode for test al, 0xXX.
static const byte kTestAlByte = 0xA8;
// One byte opcode for nop.
static const byte kNopByte = 0x90;
// One byte opcode for a short unconditional jump.
static const byte kJmpShortOpcode = 0xEB;
// One byte prefix for a short conditional jump.
static const byte kJccShortPrefix = 0x70;
static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
static const byte kJcShortOpcode = kJccShortPrefix | carry;
// ---------------------------------------------------------------------------
// Code generation
@ -905,13 +914,16 @@ class Assembler : public Malloced {
void movdbl(const Operand& dst, XMMRegister src);
void movd(XMMRegister dst, const Operand& src);
void movd(const Operand& src, XMMRegister dst);
void movsd(XMMRegister dst, XMMRegister src);
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
void psllq(XMMRegister reg, int8_t imm8);
void psllq(XMMRegister reg, int8_t shift);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);

1
deps/v8/src/ia32/builtins-ia32.cc

@ -29,7 +29,6 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "code-stubs.h"
#include "codegen-inl.h"
#include "deoptimizer.h"
#include "full-codegen.h"

274
deps/v8/src/ia32/code-stubs-ia32.cc

@ -2683,6 +2683,145 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
}
void TranscendentalCacheSSE2Stub::Generate(MacroAssembler* masm) {
// Input on stack:
// esp[0]: return address.
// Input in registers:
// xmm1: untagged double input argument.
// Output:
// xmm1: untagged double result.
Label skip_cache;
Label call_runtime;
// Input is an untagged double in xmm1.
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
__ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
} else {
__ pshufd(xmm0, xmm1, 0x1);
__ movd(Operand(edx), xmm0);
}
__ movd(Operand(ebx), xmm1);
// xmm1 = double value
// ebx = low 32 bits of double value
// edx = high 32 bits of double value
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ mov(ecx, ebx);
__ xor_(ecx, Operand(edx));
__ mov(eax, ecx);
__ sar(eax, 16);
__ xor_(ecx, Operand(eax));
__ mov(eax, ecx);
__ sar(eax, 8);
__ xor_(ecx, Operand(eax));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
__ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
// xmm1 = double value.
// ebx = low 32 bits of double value.
// edx = high 32 bits of double value.
// ecx = TranscendentalCache::hash(double value).
__ mov(eax,
Immediate(ExternalReference::transcendental_cache_array_address()));
// Eax points to cache array.
__ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
// Eax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
__ test(eax, Operand(eax));
__ j(zero, &call_runtime);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
{ TranscendentalCache::Element test_elem[2];
char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer.
CHECK_EQ(0, elem_in0 - elem_start);
CHECK_EQ(kIntSize, elem_in1 - elem_start);
CHECK_EQ(2 * kIntSize, elem_out - elem_start);
}
#endif
// Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
__ lea(ecx, Operand(ecx, ecx, times_2, 0));
__ lea(ecx, Operand(eax, ecx, times_4, 0));
// Check if cache matches: Double value is stored in uint32_t[2] array.
NearLabel cache_miss;
__ cmp(ebx, Operand(ecx, 0));
__ j(not_equal, &cache_miss);
__ cmp(edx, Operand(ecx, kIntSize));
__ j(not_equal, &cache_miss);
// Cache hit!
__ mov(eax, Operand(ecx, 2 * kIntSize));
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
__ bind(&cache_miss);
// Update cache with new value.
// We are short on registers, so use no_reg as scratch.
// This gives slightly larger code.
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(Operand(esp), Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
GenerateOperation(masm);
__ mov(Operand(ecx, 0), ebx);
__ mov(Operand(ecx, kIntSize), edx);
__ mov(Operand(ecx, 2 * kIntSize), eax);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
__ bind(&skip_cache);
__ sub(Operand(esp), Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm);
__ fstp_d(Operand(esp, 0));
__ movdbl(xmm1, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
__ Ret();
__ bind(&call_runtime);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
__ EnterInternalFrame();
__ push(eax);
__ CallRuntime(RuntimeFunction(), 1);
__ LeaveInternalFrame();
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
Runtime::FunctionId TranscendentalCacheSSE2Stub::RuntimeFunction() {
switch (type_) {
// Add more cases when necessary.
case TranscendentalCache::LOG: return Runtime::kMath_log;
default:
UNIMPLEMENTED();
return Runtime::kAbort;
}
}
void TranscendentalCacheSSE2Stub::GenerateOperation(MacroAssembler* masm) {
// Only free register is edi.
// Input value is on FP stack and in xmm1.
ASSERT(type_ == TranscendentalCache::LOG);
__ fldln2();
__ fxch();
__ fyl2x();
}
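The stub above hashes the raw bits of the input double before probing the cache; the same computation in JavaScript (a sketch assuming little-endian word order and a power-of-two cache size):
function transcendentalHash(x, cacheSize) {
var f64 = new Float64Array(1);
var u32 = new Uint32Array(f64.buffer);
f64[0] = x;
var low = u32[0], high = u32[1]; // the two 32-bit halves of the double
var h = low ^ high; // XOR coerces to int32
h ^= h >> 16; // arithmetic shifts, matching the sar instructions
h ^= h >> 8;
return h & (cacheSize - 1);
}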
// Get the integer part of a heap number. Surprisingly, all this bit twiddling
// is faster than using the built-in instructions on floating point registers.
// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
@ -4901,76 +5040,125 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
void InstanceofStub::Generate(MacroAssembler* masm) {
// Get the object - go slow case if it's a smi.
Label slow;
__ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
// Fixed register usage throughout the stub.
Register object = eax; // Object (lhs).
Register map = ebx; // Map of the object.
Register function = edx; // Function (rhs).
Register prototype = edi; // Prototype of the function.
Register scratch = ecx;
// Check that the left hand is a JS object.
__ IsObjectJSObjectType(eax, eax, edx, &slow);
// Get the object and function - they are always both needed.
Label slow, not_js_object;
if (!args_in_registers()) {
__ mov(object, Operand(esp, 2 * kPointerSize));
__ mov(function, Operand(esp, 1 * kPointerSize));
}
// Get the prototype of the function.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
// edx is function, eax is map.
// Check that the left hand is a JS object.
__ test(object, Immediate(kSmiTagMask));
__ j(zero, &not_js_object, not_taken);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// Look up the function and the map in the instanceof cache.
NearLabel miss;
ExternalReference roots_address = ExternalReference::roots_address();
__ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(function,
Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss);
__ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ cmp(map, Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss);
__ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
__ ret(2 * kPointerSize);
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(eax, Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ IncrementCounter(&Counters::instance_of_cache, 1);
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
__ bind(&miss);
__ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
// Get the prototype of the function.
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
__ test(ebx, Immediate(kSmiTagMask));
__ test(prototype, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
__ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
// Register mapping:
// eax is object map.
// edx is function.
// ebx is function prototype.
__ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
__ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
__ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
// Loop through the prototype chain looking for the function prototype.
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
// Update the global instanceof cache with the current map and function. The
// cached answer will be set when it is known.
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
function);
// Loop through the prototype chain of the object looking for the function
// prototype.
__ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
NearLabel loop, is_instance, is_not_instance;
__ bind(&loop);
__ cmp(ecx, Operand(ebx));
__ cmp(scratch, Operand(prototype));
__ j(equal, &is_instance);
__ cmp(Operand(ecx), Immediate(Factory::null_value()));
__ cmp(Operand(scratch), Immediate(Factory::null_value()));
__ j(equal, &is_not_instance);
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
__ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
__ jmp(&loop);
__ bind(&is_instance);
__ IncrementCounter(&Counters::instance_of_stub_true, 1);
__ Set(eax, Immediate(0));
__ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
__ ret(2 * kPointerSize);
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
__ bind(&is_not_instance);
__ IncrementCounter(&Counters::instance_of_stub_false, 1);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
__ ret(2 * kPointerSize);
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
Label object_not_null, object_not_null_or_smi;
__ bind(&not_js_object);
// Before the null, smi and string value checks, check that the rhs is a
// function; for a non-function rhs an exception needs to be thrown.
__ test(function, Immediate(kSmiTagMask));
__ j(zero, &slow, not_taken);
__ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
__ j(not_equal, &slow, not_taken);
// Null is not an instance of anything.
__ cmp(object, Factory::null_value());
__ j(not_equal, &object_not_null);
__ IncrementCounter(&Counters::instance_of_stub_false_null, 1);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ test(object, Immediate(kSmiTagMask));
__ j(not_zero, &object_not_null_or_smi, not_taken);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
__ j(NegateCondition(is_string), &slow);
__ IncrementCounter(&Counters::instance_of_stub_false_string, 1);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
if (args_in_registers()) {
// Push arguments below return address.
__ pop(scratch);
__ push(object);
__ push(function);
__ push(scratch);
}
__ IncrementCounter(&Counters::instance_of_slow, 1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
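Stripped of register allocation and the root-array cache, the fast path above is the usual prototype-chain walk. A hedged, self-contained sketch with stand-in object types (hypothetical, illustration only); the 0/1 results match the Smi answers the stub leaves in eax:

#include <cstdio>

// Minimal stand-in for a heap object -- purely illustrative.
struct Obj { const Obj* prototype; };

// 0 = is instance, 1 = is not, matching the Smi answers stored in eax.
int Instanceof(const Obj* object, const Obj* fn_prototype) {
  for (const Obj* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == fn_prototype) return 0;  // is_instance
  }
  return 1;  // walked to null: is_not_instance
}

int main() {
  Obj proto = { nullptr };
  Obj obj = { &proto };
  std::printf("%d %d\n", Instanceof(&obj, &proto), Instanceof(&proto, &proto));
  return 0;
}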

25
deps/v8/src/ia32/code-stubs-ia32.h

@ -45,6 +45,7 @@ class TranscendentalCacheStub: public CodeStub {
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
Runtime::FunctionId RuntimeFunction();
@ -52,6 +53,24 @@ class TranscendentalCacheStub: public CodeStub {
};
// Check the transcendental cache, or generate the result, using SSE2.
// The argument and result will be in xmm1.
// Only supports TranscendentalCache::LOG at this point.
class TranscendentalCacheSSE2Stub: public CodeStub {
public:
explicit TranscendentalCacheSSE2Stub(TranscendentalCache::Type type)
: type_(type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
Major MajorKey() { return TranscendentalCacheSSE2; }
int MinorKey() { return type_; }
Runtime::FunctionId RuntimeFunction();
void GenerateOperation(MacroAssembler* masm);
};
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
@ -231,7 +250,8 @@ class TypeRecordingBinaryOpStub: public CodeStub {
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
TypeRecordingBinaryOpStub(int key,
TypeRecordingBinaryOpStub(
int key,
TRBinaryOpIC::TypeInfo operands_type,
TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
@ -239,8 +259,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
use_sse3_(SSE3Bits::decode(key)),
operands_type_(operands_type),
result_type_(result_type),
name_(NULL) {
}
name_(NULL) { }
// Generate code to call the stub with the supplied arguments. This will add
// code at the call site to prepare arguments either in registers or on the

9
deps/v8/src/ia32/codegen-ia32.cc

@ -7676,6 +7676,13 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
__ test(tmp2.reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
// Check that both indices are valid.
__ mov(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
__ cmp(tmp2.reg(), Operand(index1.reg()));
deferred->Branch(below_equal);
__ cmp(tmp2.reg(), Operand(index2.reg()));
deferred->Branch(below_equal);
// Bring addresses into index1 and index2.
__ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
__ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
@ -9133,7 +9140,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::INSTANCEOF: {
if (!left_already_loaded) Load(left);
Load(right);
InstanceofStub stub;
InstanceofStub stub(InstanceofStub::kNoFlags);
Result answer = frame_->CallStub(&stub, 2);
answer.ToRegister();
__ test(answer.reg(), Operand(answer.reg()));

68
deps/v8/src/ia32/disasm-ia32.cc

@ -1107,6 +1107,21 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else {
UnimplementedInstruction();
}
} else if (*data == 0x3A) {
data++;
if (*data == 0x16) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pextrd %s,%s,%d",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else {
UnimplementedInstruction();
}
} else if (*data == 0x2E || *data == 0x2F) {
const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
data++;
@ -1129,6 +1144,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x54) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("andpd %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x57) {
data++;
int mod, regop, rm;
@ -1149,6 +1172,25 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
} else if (*data == 0x70) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pshufd %s,%s,%d",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x73) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("psllq %s,%d",
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x7F) {
AppendToBuffer("movdqa ");
data++;
@ -1156,21 +1198,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0xE7) {
AppendToBuffer("movntdq ");
} else if (*data == 0x7E) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movd ");
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0xEF) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("pxor %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0xDB) {
data++;
int mod, regop, rm;
@ -1179,20 +1213,18 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x73) {
} else if (*data == 0xE7) {
AppendToBuffer("movntdq ");
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("psllq %s,%d",
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x54) {
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (*data == 0xEF) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("andpd %s,%s",
AppendToBuffer("pxor %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
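All the SSE opcodes added above are decoded the same way: the 0x66 prefix and 0x0F escape byte, the opcode, then a ModR/M byte split by get_modrm. A minimal sketch of that split (the helper's signature is assumed from its call sites):

#include <cassert>

// Assumed contract of the disassembler's helper: ModR/M = mod(2) reg(3) rm(3).
static void get_modrm(unsigned char data, int* mod, int* regop, int* rm) {
  *mod = (data >> 6) & 3;
  *regop = (data >> 3) & 7;
  *rm = data & 7;
}

int main() {
  int mod, regop, rm;
  get_modrm(0xC1, &mod, &regop, &rm);  // 11 000 001: register form, xmm0, xmm1
  assert(mod == 3 && regop == 0 && rm == 1);
  return 0;
}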

186
deps/v8/src/ia32/full-codegen-ia32.cc

@ -41,8 +41,61 @@
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm)
: masm_(masm) {
#ifdef DEBUG
info_emitted_ = false;
#endif
}
~JumpPatchSite() {
ASSERT(patch_site_.is_bound() == info_emitted_);
}
void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
__ test(reg, Immediate(kSmiTagMask));
EmitJump(not_carry, target); // Always taken before patched.
}
void EmitJumpIfSmi(Register reg, NearLabel* target) {
__ test(reg, Immediate(kSmiTagMask));
EmitJump(carry, target); // Never taken before patched.
}
void EmitPatchInfo() {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
ASSERT(is_int8(delta_to_patch_site));
__ test(eax, Immediate(delta_to_patch_site));
#ifdef DEBUG
info_emitted_ = true;
#endif
}
bool is_bound() const { return patch_site_.is_bound(); }
private:
// jc will be patched with jz, jnc will become jnz.
void EmitJump(Condition cc, NearLabel* target) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
ASSERT(cc == carry || cc == not_carry);
__ bind(&patch_site_);
__ j(cc, target);
}
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
bool info_emitted_;
#endif
};
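Until it is patched, the site is inert: test always clears the carry flag, so the jc emitted by EmitJumpIfSmi never fires and the jnc from EmitJumpIfNotSmi always does. Once patched to jz/jnz, the jumps key off the zero flag, which test sets exactly for smis. A sketch of the flag computation, assuming ia32 tagging with kSmiTag == 0 and kSmiTagMask == 1:

#include <cassert>
#include <cstdint>

// ZF after 'test value, kSmiTagMask': set exactly for smis, whose low tag
// bit is 0 (assumption: ia32 tagging, kSmiTag == 0, kSmiTagMask == 1).
static bool ZeroFlagAfterSmiTest(uint32_t value) { return (value & 1) == 0; }

int main() {
  uint32_t smi = 42u << 1;     // smi payload lives in the upper 31 bits
  uint32_t heap_ptr = 0x1001;  // heap object pointers carry tag bit 1
  assert(ZeroFlagAfterSmiTest(smi));
  assert(!ZeroFlagAfterSmiTest(heap_ptr));
  return 0;
}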
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@ -198,6 +251,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
}
void FullCodeGenerator::ClearAccumulator() {
__ Set(eax, Immediate(Smi::FromInt(0)));
}
void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
Comment cmnt(masm_, "[ Stack check");
NearLabel ok;
@ -687,10 +745,9 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
@ -715,12 +772,13 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Perform the comparison as if via '==='.
__ mov(edx, Operand(esp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
NearLabel slow_case;
__ mov(ecx, edx);
__ or_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow_case, not_taken);
patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
__ cmp(edx, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@ -730,9 +788,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ call(ic, RelocInfo::CODE_TARGET);
EmitCallIC(ic, &patch_site);
__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
@ -911,7 +968,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&update_each);
__ mov(result_register(), ebx);
// Perform the assignment as if via '='.
EmitAssignment(stmt->each());
{ EffectContext context(this);
EmitAssignment(stmt->each(), stmt->AssignmentId());
}
// Generate code for the body of the loop.
Visit(stmt->body());
@ -1478,7 +1537,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// For property compound assignments we need another deoptimization
// point after the property load.
if (property != NULL) {
PrepareForBailoutForId(expr->compound_bailout_id(), TOS_REG);
PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
}
Token::Value op = expr->binary_op();
@ -1521,6 +1580,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@ -1552,12 +1613,11 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
NearLabel call_stub;
Label done;
NearLabel call_stub, done;
__ add(Operand(eax), Immediate(value));
__ j(overflow, &call_stub);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &done);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &done);
// Undo the optimistic add operation and call the shared stub.
__ bind(&call_stub);
@ -1570,7 +1630,8 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
__ mov(edx, eax);
__ mov(eax, Immediate(value));
}
__ CallStub(&stub);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
context()->Plug(eax);
}
@ -1580,7 +1641,7 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
Label call_stub, done;
NearLabel call_stub, done;
if (left_is_constant_smi) {
__ mov(ecx, eax);
__ mov(eax, Immediate(value));
@ -1589,8 +1650,8 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
__ sub(Operand(eax), Immediate(value));
}
__ j(overflow, &call_stub);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &done);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &done);
__ bind(&call_stub);
if (left_is_constant_smi) {
@ -1603,7 +1664,8 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
}
Token::Value op = Token::SUB;
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
context()->Plug(eax);
}
@ -1613,19 +1675,21 @@ void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Smi* value) {
Label call_stub, smi_case, done;
NearLabel call_stub, smi_case, done;
int shift_value = value->value() & 0x1f;
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &smi_case);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case);
// Call stub.
__ bind(&call_stub);
__ mov(edx, eax);
__ mov(eax, Immediate(value));
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
// Smi case.
__ bind(&smi_case);
switch (op) {
case Token::SHL:
@ -1675,17 +1739,19 @@ void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Smi* value) {
Label smi_case, done;
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &smi_case);
NearLabel smi_case, done;
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case);
// The order of the arguments does not matter for bit-ops with a
// constant operand.
__ mov(edx, Immediate(value));
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
// Smi case.
__ bind(&smi_case);
switch (op) {
case Token::BIT_OR:
@ -1753,19 +1819,20 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
// Do combined smi check of the operands. Left operand is on the
// stack. Right operand is in eax.
Label done, stub_call, smi_case;
NearLabel done, smi_case, stub_call;
__ pop(edx);
__ mov(ecx, eax);
__ or_(eax, Operand(edx));
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &smi_case);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case);
__ bind(&stub_call);
__ mov(eax, ecx);
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
// Smi case.
__ bind(&smi_case);
__ mov(eax, edx); // Copy left operand in case of a stub call.
@ -1844,12 +1911,12 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
__ pop(edx);
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
EmitCallIC(stub.GetCode(), NULL); // NULL signals no inlined smi code.
context()->Plug(eax);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
// Invalid left-hand sides are rewritten to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
@ -1897,6 +1964,8 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
}
PrepareForBailoutForId(bailout_ast_id, TOS_REG);
context()->Plug(eax);
}
@ -1969,8 +2038,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
__ bind(&done);
}
context()->Plug(eax);
}
@ -2007,10 +2074,10 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ push(Operand(esp, kPointerSize)); // Receiver is under value.
__ CallRuntime(Runtime::kToFastProperties, 1);
__ pop(eax);
context()->DropAndPlug(1, eax);
} else {
context()->Plug(eax);
__ Drop(1);
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@ -2048,6 +2115,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
__ pop(eax);
}
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
@ -3103,6 +3171,13 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
__ test(temp, Immediate(kSmiTagMask));
__ j(not_zero, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
__ cmp(temp, Operand(index_1));
__ j(below_equal, &slow_case);
__ cmp(temp, Operand(index_2));
__ j(below_equal, &slow_case);
// Bring addresses into index1 and index2.
__ lea(index_1, CodeGenerator::FixedArrayElementOperand(elements, index_1));
__ lea(index_2, CodeGenerator::FixedArrayElementOperand(elements, index_2));
@ -3708,8 +3783,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Inline smi case if we are in a loop.
NearLabel stub_call;
Label done;
NearLabel stub_call, done;
JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
@ -3719,8 +3795,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ j(overflow, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &done);
patch_site.EmitJumpIfSmi(eax, &done);
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
@ -3738,7 +3814,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(eax, Immediate(Smi::FromInt(1)));
TypeRecordingBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE);
__ CallStub(&stub);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
// Store the value returned in eax.
@ -3749,6 +3825,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context.Plug(eax);
}
// For all contexts except EffectContext we have the result on
// top of the stack.
@ -3759,6 +3837,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Perform the assignment as if via '='.
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(eax);
}
break;
case NAMED_PROPERTY: {
@ -3766,6 +3846,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
context()->PlugTOS();
@ -3780,6 +3861,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
// Result is on the stack
if (!context()->IsEffect()) {
@ -3957,7 +4039,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
InstanceofStub stub;
__ IncrementCounter(&Counters::instance_of_full, 1);
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
@ -4005,21 +4088,22 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
NearLabel slow_case;
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow_case, not_taken);
patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
__ cmp(edx, Operand(eax));
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
// Record position and call the compare IC.
Handle<Code> ic = CompareIC::GetUninitialized(op);
SetSourcePosition(expr->position());
__ call(ic, RelocInfo::CODE_TARGET);
Handle<Code> ic = CompareIC::GetUninitialized(op);
EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ test(eax, Operand(eax));
Split(cc, if_true, if_false, fall_through);
@ -4123,6 +4207,16 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
__ nop(); // Signals no inlined code.
}
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ mov(Operand(ebp, frame_offset), value);

54
deps/v8/src/ia32/ic-ia32.cc

@ -2049,13 +2049,23 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
static bool HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test al, nothing
// was inlined.
return *test_instruction_address == Assembler::kTestAlByte;
}
void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
HandleScope scope;
Handle<Code> rewritten;
#ifdef DEBUG
State previous_state = GetState();
#endif
State state = TargetState(x, y);
State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
if (state == GENERIC) {
CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
rewritten = stub.GetCode();
@ -2073,6 +2083,44 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
Token::Name(op_));
}
#endif
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address());
}
}
void PatchInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a test al, nothing
// was inlined.
if (*test_instruction_address != Assembler::kTestAlByte) {
ASSERT(*test_instruction_address == Assembler::kNopByte);
return;
}
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction and the
// condition code used at the patched jump.
int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, test=%p, delta=%d\n",
address, test_instruction_address, delta);
}
// Patch with a short conditional jump. There must be a
// short jump-if-carry/not-carry at this position.
Address jmp_address = test_instruction_address - delta;
ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
*jmp_address == Assembler::kJcShortOpcode);
Condition cc = *jmp_address == Assembler::kJncShortOpcode
? not_zero
: zero;
*jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
}
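The patch itself is a single byte: short conditional jumps encode as 0x70 | cc, so rewriting jc into jz keeps the displacement byte intact and only swaps the condition code. A sketch under those assumed ia32 encodings (cc(carry) == 2, cc(not_carry) == 3, cc(zero) == 4, cc(not_zero) == 5):

#include <cassert>
#include <cstdint>

int main() {
  const uint8_t kJccShortPrefix = 0x70;
  assert((kJccShortPrefix | 2) == 0x72);  // jc, as emitted by EmitJumpIfSmi
  assert((kJccShortPrefix | 3) == 0x73);  // jnc, from EmitJumpIfNotSmi
  assert((kJccShortPrefix | 4) == 0x74);  // jz: what jc becomes when patched
  assert((kJccShortPrefix | 5) == 0x75);  // jnz: what jnc becomes
  return 0;
}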

164
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -315,6 +315,13 @@ void LCodeGen::CallCode(Handle<Code> code,
__ call(code, mode);
RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
}
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
code->kind() == Code::COMPARE_IC) {
__ nop();
}
}
@ -1403,6 +1410,71 @@ void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
}
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Register temp2,
Label* is_not_object,
Label* is_object) {
ASSERT(!input.is(temp1));
ASSERT(!input.is(temp2));
ASSERT(!temp1.is(temp2));
__ test(input, Immediate(kSmiTagMask));
__ j(equal, is_not_object);
__ cmp(input, Factory::null_value());
__ j(equal, is_object);
__ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
__ test(temp2, Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, is_not_object);
__ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
__ cmp(temp2, FIRST_JS_OBJECT_TYPE);
__ j(below, is_not_object);
__ cmp(temp2, LAST_JS_OBJECT_TYPE);
return below_equal;
}
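What EmitIsObject computes is the "object" half of the typeof test: null qualifies, undetectable objects are treated as undefined, and the instance-type range excludes functions (which report as "function"). A hypothetical flattening of the same checks; the type bounds and flags are stand-ins for the Map fields read above:

#include <cassert>

struct Value {
  bool is_smi;
  bool is_null;
  bool is_undetectable;
  int instance_type;
};

const int FIRST_JS_OBJECT_TYPE = 0;  // hypothetical bounds
const int LAST_JS_OBJECT_TYPE = 10;

bool IsObjectForTypeof(const Value& v) {
  if (v.is_smi) return false;
  if (v.is_null) return true;           // null counts as an object here
  if (v.is_undetectable) return false;  // behaves like undefined
  return FIRST_JS_OBJECT_TYPE <= v.instance_type &&
         v.instance_type <= LAST_JS_OBJECT_TYPE;
}

int main() {
  assert(IsObjectForTypeof({false, true, false, 0}));
  assert(!IsObjectForTypeof({true, false, false, 0}));
  return 0;
}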
void LCodeGen::DoIsObject(LIsObject* instr) {
Register reg = ToRegister(instr->input());
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
Label is_false, is_true, done;
Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
__ j(true_cond, &is_true);
__ bind(&is_false);
__ mov(result, Handle<Object>(Heap::false_value()));
__ jmp(&done);
__ bind(&is_true);
__ mov(result, Handle<Object>(Heap::true_value()));
__ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->input());
Register temp = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
Operand input = ToOperand(instr->input());
Register result = ToRegister(instr->result());
@ -1627,9 +1699,8 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
InstanceofStub stub;
__ push(ToOperand(instr->left()));
__ push(ToOperand(instr->right()));
// Object and function are in fixed registers eax and edx.
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
NearLabel true_value, done;
@ -1647,9 +1718,7 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
InstanceofStub stub;
__ push(ToOperand(instr->left()));
__ push(ToOperand(instr->right()));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ test(eax, Operand(eax));
EmitBranch(true_block, false_block, zero);
@ -2174,6 +2243,82 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->input());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
ExternalReference negative_infinity =
ExternalReference::address_of_negative_infinity();
__ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
__ ucomisd(xmm_scratch, input_reg);
DeoptimizeIf(equal, instr->environment());
__ sqrtsd(input_reg, input_reg);
}
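The deoptimization on exactly -Infinity is there because Math.pow(x, 0.5) and a plain square root disagree at that single input: IEEE pow maps -Infinity to +Infinity, while sqrtsd produces NaN. A quick standalone check of the corner case:

#include <cassert>
#include <cmath>

int main() {
  double ninf = -INFINITY;
  assert(std::pow(ninf, 0.5) == INFINITY);  // IEEE: pow(-inf, 0.5) is +inf
  assert(std::isnan(std::sqrt(ninf)));      // but sqrtsd(-inf) is NaN
  return 0;
}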
void LCodeGen::DoPower(LPower* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
if (exponent_type.IsDouble()) {
// It is safe to use ebx directly since the instruction is marked
// as a call.
__ PrepareCallCFunction(4, ebx);
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
__ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
__ CallCFunction(ExternalReference::power_double_double_function(), 4);
} else if (exponent_type.IsInteger32()) {
// It is safe to use ebx directly since the instruction is marked
// as a call.
ASSERT(!ToRegister(right).is(ebx));
__ PrepareCallCFunction(4, ebx);
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
__ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
__ CallCFunction(ExternalReference::power_double_int_function(), 4);
} else {
ASSERT(exponent_type.IsTagged());
CpuFeatures::Scope scope(SSE2);
Register right_reg = ToRegister(right);
Label non_smi, call;
__ test(right_reg, Immediate(kSmiTagMask));
__ j(not_zero, &non_smi);
__ SmiUntag(right_reg);
__ cvtsi2sd(result_reg, Operand(right_reg));
__ jmp(&call);
__ bind(&non_smi);
// It is safe to use ebx directly since the instruction is marked
// as a call.
ASSERT(!right_reg.is(ebx));
__ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, ebx);
DeoptimizeIf(not_equal, instr->environment());
__ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
__ bind(&call);
__ PrepareCallCFunction(4, ebx);
__ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
__ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
__ CallCFunction(ExternalReference::power_double_double_function(), 4);
}
// Return value is in st(0) on ia32.
// Store it into the (fixed) result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
__ movdbl(result_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
}
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheSSE2Stub stub(TranscendentalCache::LOG);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
switch (instr->op()) {
case kMathAbs:
@ -2188,6 +2333,13 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathSqrt:
DoMathSqrt(instr);
break;
case kMathPowHalf:
DoMathPowHalf(instr);
break;
case kMathLog:
DoMathLog(instr);
break;
default:
UNREACHABLE();
}

11
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -175,6 +175,8 @@ class LCodeGen BASE_EMBEDDED {
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
@ -195,6 +197,15 @@ class LCodeGen BASE_EMBEDDED {
Condition EmitTypeofIs(Label* true_label, Label* false_label,
Register input, Handle<String> type_name);
// Emits optimized code for %_IsObject(x). Preserves input register.
// Returns the condition on which a final split to
// true and false labels should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
Register temp2,
Label* is_not_object,
Label* is_object);
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;

80
deps/v8/src/ia32/lithium-ia32.cc

@ -206,6 +206,13 @@ void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
}
void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
stream->Add("if is_object(");
input()->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
stream->Add("if is_smi(");
input()->PrintTo(stream);
@ -460,12 +467,6 @@ int LChunk::NearestGapPos(int index) const {
}
int LChunk::NearestNextGapPos(int index) const {
while (!IsGapAt(index)) index++;
return index;
}
void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
}
@ -880,19 +881,6 @@ LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
HBasicBlock* deopt_predecessor = instr->block()->deopt_predecessor();
if (deopt_predecessor != NULL &&
deopt_predecessor->inverted()) {
HEnvironment* env = current_block_->last_environment();
HValue* value = env->Pop();
ASSERT(value->IsConstant());
Handle<Object> obj = HConstant::cast(value)->handle();
ASSERT(*obj == *Factory::true_value() || *obj == *Factory::false_value());
env->Push(*obj == *Factory::true_value()
? current_block_->graph()->GetConstantFalse()
: current_block_->graph()->GetConstantTrue());
}
return new LLabel(instr->block());
}
@ -1257,6 +1245,17 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
temp,
first_id,
second_id);
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
temp1,
temp2,
first_id,
second_id);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@ -1266,8 +1265,8 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstruction* result =
new LInstanceOfAndBranch(Use(instance_of->left()),
Use(instance_of->right()),
new LInstanceOfAndBranch(UseFixed(instance_of->left(), eax),
UseFixed(instance_of->right(), edx),
first_id,
second_id);
return MarkAsCall(result, instr);
@ -1317,7 +1316,8 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* result =
new LInstanceOf(Use(instr->left()), Use(instr->right()));
new LInstanceOf(UseFixed(instr->left(), eax),
UseFixed(instr->right(), edx));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@ -1337,7 +1337,7 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
++argument_count_;
LOperand* argument = Use(instr->argument());
LOperand* argument = UseOrConstant(instr->argument());
return new LPushArgument(argument);
}
@ -1360,7 +1360,12 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
MathFunctionId op = instr->op();
BuiltinFunctionId op = instr->op();
if (op == kMathLog) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
LInstruction* result = new LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LInstruction* result = new LUnaryMathOperation(input);
switch (op) {
@ -1372,10 +1377,13 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathPowHalf:
return AssignEnvironment(DefineSameAsFirst(result));
default:
UNREACHABLE();
return NULL;
}
}
}
@ -1572,6 +1580,22 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
// We need to use a fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), xmm1);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), xmm2) :
UseFixed(instr->right(), eax);
LPower* result = new LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
CAN_DEOPTIMIZE_EAGERLY);
}
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
Token::Value op = instr->token();
if (instr->left()->representation().IsInteger32()) {
@ -1612,6 +1636,14 @@ LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LIsObject(value, TempRegister()));
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseAtStart(instr->value());

61
deps/v8/src/ia32/lithium-ia32.h

@ -67,6 +67,7 @@ class LGapNode;
// LLoadKeyedGeneric
// LModI
// LMulI
// LPower
// LShiftI
// LSubI
// LCallConstantFunction
@ -123,6 +124,8 @@ class LGapNode;
// LInteger32ToDouble
// LIsNull
// LIsNullAndBranch
// LIsObject
// LIsObjectAndBranch
// LIsSmi
// LIsSmiAndBranch
// LLoadNamedField
@ -205,6 +208,8 @@ class LGapNode;
V(Integer32ToDouble) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(HasInstanceType) \
@ -229,6 +234,7 @@ class LGapNode;
V(ObjectLiteral) \
V(OsrEntry) \
V(Parameter) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@ -668,7 +674,7 @@ class LUnaryMathOperation: public LUnaryOperation {
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
virtual void PrintDataTo(StringStream* stream) const;
MathFunctionId op() const { return hydrogen()->op(); }
BuiltinFunctionId op() const { return hydrogen()->op(); }
};
@ -745,6 +751,48 @@ class LIsNullAndBranch: public LIsNull {
};
class LIsObject: public LUnaryOperation {
public:
LIsObject(LOperand* value, LOperand* temp)
: LUnaryOperation(value), temp_(temp) {}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
LOperand* temp() const { return temp_; }
private:
LOperand* temp_;
};
class LIsObjectAndBranch: public LIsObject {
public:
LIsObjectAndBranch(LOperand* value,
LOperand* temp,
LOperand* temp2,
int true_block_id,
int false_block_id)
: LIsObject(value, temp),
temp2_(temp2),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
virtual void PrintDataTo(StringStream* stream) const;
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
LOperand* temp2() const { return temp2_; }
private:
LOperand* temp2_;
int true_block_id_;
int false_block_id_;
};
class LIsSmi: public LUnaryOperation {
public:
explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
@ -1154,6 +1202,16 @@ class LAddI: public LBinaryOperation {
};
class LPower: public LBinaryOperation {
public:
LPower(LOperand* left, LOperand* right)
: LBinaryOperation(left, right) { }
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
class LArithmeticD: public LBinaryOperation {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@ -1890,7 +1948,6 @@ class LChunk: public ZoneObject {
LGap* GetGapAt(int index) const;
bool IsGapAt(int index) const;
int NearestGapPos(int index) const;
int NearestNextGapPos(int index) const;
void MarkEmptyBlocks();
const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
LLabel* GetLabel(int block_id) const {

50
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -74,30 +74,6 @@ void MacroAssembler::RecordWriteHelper(Register object,
}
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
Label* branch) {
ASSERT(cc == equal || cc == not_equal);
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
mov(scratch, Operand(object));
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
// and the running system.
and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
j(cc, branch);
} else {
int32_t new_space_start = reinterpret_cast<int32_t>(
ExternalReference::new_space_start().address());
lea(scratch, Operand(object, -new_space_start));
and_(scratch, Heap::NewSpaceMask());
j(cc, branch);
}
}
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
@ -109,7 +85,7 @@ void MacroAssembler::RecordWrite(Register object,
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
NearLabel done;
// Skip barrier if writing a smi.
ASSERT_EQ(0, kSmiTag);
@ -1216,25 +1192,29 @@ MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
}
// If true, a Handle<T> passed by value is passed and returned by
// using the location_ field directly. If false, it is passed and
// returned as a pointer to a handle.
#ifdef USING_BSD_ABI
static const bool kPassHandlesDirectly = true;
// If true, a Handle<T> returned by value from a function with cdecl calling
// convention will be returned directly as the value of the location_ field,
// in register eax.
// If false, it is returned as a pointer to a memory region preallocated by
// the caller. A pointer to this region must be passed to the function as an
// implicit first argument.
#if defined(USING_BSD_ABI) || defined(__MINGW32__)
static const bool kReturnHandlesDirectly = true;
#else
static const bool kPassHandlesDirectly = false;
static const bool kReturnHandlesDirectly = false;
#endif
Operand ApiParameterOperand(int index) {
return Operand(esp, (index + (kPassHandlesDirectly ? 0 : 1)) * kPointerSize);
return Operand(
esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
}
void MacroAssembler::PrepareCallApiFunction(int argc, Register scratch) {
if (kPassHandlesDirectly) {
if (kReturnHandlesDirectly) {
EnterApiExitFrame(argc);
// When handles as passed directly we don't have to allocate extra
// When handles are returned directly we don't have to allocate extra
// space for and pass an out parameter.
} else {
// We allocate two additional slots: return value and pointer to it.
@ -1279,7 +1259,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
// Call the api function!
call(function->address(), RelocInfo::RUNTIME_ENTRY);
if (!kPassHandlesDirectly) {
if (!kReturnHandlesDirectly) {
// The returned value is a pointer to the handle holding the result.
// Dereference this to get to the location.
mov(eax, Operand(eax, 0));

28
deps/v8/src/ia32/macro-assembler-ia32.h

@ -70,10 +70,11 @@ class MacroAssembler: public Assembler {
// Check if object is in new space.
// scratch can be object itself, but it will be clobbered.
template <typename LabelType>
void InNewSpace(Register object,
Register scratch,
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);
LabelType* branch);
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
@ -658,6 +659,31 @@ class MacroAssembler: public Assembler {
};
template <typename LabelType>
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
LabelType* branch) {
ASSERT(cc == equal || cc == not_equal);
if (Serializer::enabled()) {
// Can't do arithmetic on external references if it might get serialized.
mov(scratch, Operand(object));
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
// and the running system.
and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
j(cc, branch);
} else {
int32_t new_space_start = reinterpret_cast<int32_t>(
ExternalReference::new_space_start().address());
lea(scratch, Operand(object, -new_space_start));
and_(scratch, Heap::NewSpaceMask());
j(cc, branch);
}
}
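The non-serializer branch relies on new space being a single power-of-two-sized, size-aligned region, so (object - start) masked with the complement of (size - 1) is zero exactly for addresses inside it. A sketch with hypothetical constants (the real start, size, and mask come from Heap, and the alignment is an assumption stated here):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kNewSpaceStart = 0x20000000;  // hypothetical, size-aligned
  const uint32_t kNewSpaceSize  = 0x00800000;  // hypothetical 8 MB
  const uint32_t kNewSpaceMask  = ~(kNewSpaceSize - 1);
  uint32_t inside  = kNewSpaceStart + 0x1234;
  uint32_t outside = kNewSpaceStart + kNewSpaceSize + 0x1234;
  assert(((inside - kNewSpaceStart) & kNewSpaceMask) == 0);
  assert(((outside - kNewSpaceStart) & kNewSpaceMask) != 0);
  return 0;
}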
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit

8
deps/v8/src/ia32/stub-cache-ia32.cc

@ -2133,8 +2133,8 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
const int id = function_info->custom_call_generator_id();
if (function_info->HasBuiltinFunctionId()) {
BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, NULL, function, name);
Object* result;
@ -2375,8 +2375,8 @@ MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
const int id = function_info->custom_call_generator_id();
if (function_info->HasBuiltinFunctionId()) {
BuiltinFunctionId id = function_info->builtin_function_id();
MaybeObject* maybe_result = CompileCustomCall(
id, object, holder, cell, function, name);
Object* result;

21
deps/v8/src/ic.cc

@ -2041,6 +2041,11 @@ MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
TRBinaryOpIC::GetName(result_type),
Token::Name(op));
}
// Activate inlined smi code.
if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
PatchInlinedSmiCode(ic.address());
}
}
Handle<JSBuiltinsObject> builtins = Top::builtins();
@ -2127,13 +2132,17 @@ const char* CompareIC::GetStateName(State state) {
}
CompareIC::State CompareIC::TargetState(Handle<Object> x, Handle<Object> y) {
State state = GetState();
if (state != UNINITIALIZED) return GENERIC;
if (x->IsSmi() && y->IsSmi()) return SMIS;
if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
CompareIC::State CompareIC::TargetState(State state,
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
if (!has_inlined_smi_code && state != UNINITIALIZED) return GENERIC;
if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
if (x->IsJSObject() && y->IsJSObject()) return OBJECTS;
if (state == UNINITIALIZED &&
x->IsJSObject() && y->IsJSObject()) return OBJECTS;
return GENERIC;
}
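Restated as a hypothetical transition function (booleans stand in for the Handle<Object> predicates), the rules make one thing visible: the only widening step, SMIS to HEAP_NUMBERS, is taken only while the call site still carries the patchable inlined smi check.

enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, OBJECTS, GENERIC };

State Target(State s, bool inlined, bool both_smis, bool both_numbers,
             bool both_objects, bool is_equality) {
  if (!inlined && s != UNINITIALIZED) return GENERIC;
  if (s == UNINITIALIZED && both_smis) return SMIS;
  if ((s == UNINITIALIZED || (s == SMIS && inlined)) && both_numbers)
    return HEAP_NUMBERS;
  if (!is_equality) return GENERIC;
  if (s == UNINITIALIZED && both_objects) return OBJECTS;
  return GENERIC;
}

int main() {
  return Target(SMIS, true, false, true, false, true) == HEAP_NUMBERS ? 0 : 1;
}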

5
deps/v8/src/ic.h

@ -582,7 +582,8 @@ class CompareIC: public IC {
static const char* GetStateName(State state);
private:
State TargetState(Handle<Object> x, Handle<Object> y);
State TargetState(State state, bool has_inlined_smi_code,
Handle<Object> x, Handle<Object> y);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
@ -591,6 +592,8 @@ class CompareIC: public IC {
Token::Value op_;
};
// Helper for TRBinaryOpIC and CompareIC.
void PatchInlinedSmiCode(Address address);
} } // namespace v8::internal

105
deps/v8/src/json.js

@ -66,21 +66,10 @@ function JSONParse(text, reviver) {
}
}
function StackContains(stack, val) {
var length = stack.length;
for (var i = 0; i < length; i++) {
if (stack[i] === val) {
return true;
}
}
return false;
}
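The removed StackContains scan and the push that followed it are fused below into a single %PushIfAbsent runtime call: push the value if it is not already on the stack, and report whether the push happened. A hypothetical C++ analogue of those semantics:

#include <cassert>
#include <vector>

// Hypothetical analogue of the %PushIfAbsent runtime function.
bool PushIfAbsent(std::vector<const void*>* stack, const void* value) {
  for (const void* v : *stack) {
    if (v == value) return false;  // already on the stack: a cycle
  }
  stack->push_back(value);
  return true;
}

int main() {
  std::vector<const void*> stack;
  int obj = 0;
  assert(PushIfAbsent(&stack, &obj));   // first visit succeeds
  assert(!PushIfAbsent(&stack, &obj));  // revisiting signals a cycle
  return 0;
}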
function SerializeArray(value, replacer, stack, indent, gap) {
if (StackContains(stack, value)) {
if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
stack.push(value);
var stepback = indent;
indent += gap;
var partial = [];
@ -108,10 +97,9 @@ function SerializeArray(value, replacer, stack, indent, gap) {
}
function SerializeObject(value, replacer, stack, indent, gap) {
if (StackContains(stack, value)) {
if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
stack.push(value);
var stepback = indent;
indent += gap;
var partial = [];
@ -158,49 +146,47 @@ function SerializeObject(value, replacer, stack, indent, gap) {
function JSONSerialize(key, holder, replacer, stack, indent, gap) {
var value = holder[key];
if (IS_OBJECT(value) && value) {
if (IS_SPEC_OBJECT(value)) {
var toJSON = value.toJSON;
if (IS_FUNCTION(toJSON)) {
value = toJSON.call(value, key);
value = %_CallFunction(value, key, toJSON);
}
}
if (IS_FUNCTION(replacer)) {
value = replacer.call(holder, key, value);
value = %_CallFunction(holder, key, value, replacer);
}
// Unwrap value if necessary
if (IS_OBJECT(value)) {
if (IS_NUMBER_WRAPPER(value)) {
value = $Number(value);
} else if (IS_STRING_WRAPPER(value)) {
value = $String(value);
} else if (IS_BOOLEAN_WRAPPER(value)) {
value = %_ValueOf(value);
}
}
switch (typeof value) {
case "string":
if (IS_STRING(value)) {
return %QuoteJSONString(value);
case "object":
if (!value) {
} else if (IS_NUMBER(value)) {
return $isFinite(value) ? $String(value) : "null";
} else if (IS_BOOLEAN(value)) {
return value ? "true" : "false";
} else if (IS_NULL(value)) {
return "null";
} else if (IS_ARRAY(value)) {
} else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
// Non-callable object. If it's a primitive wrapper, it must be unwrapped.
if (IS_ARRAY(value)) {
return SerializeArray(value, replacer, stack, indent, gap);
} else if (IS_NUMBER_WRAPPER(value)) {
value = ToNumber(value);
return $isFinite(value) ? ToString(value) : "null";
} else if (IS_STRING_WRAPPER(value)) {
return %QuoteJSONString(ToString(value));
} else if (IS_BOOLEAN_WRAPPER(value)) {
return %_ValueOf(value) ? "true" : "false";
} else {
return SerializeObject(value, replacer, stack, indent, gap);
}
case "number":
return $isFinite(value) ? $String(value) : "null";
case "boolean":
return value ? "true" : "false";
}
// Undefined or a callable object.
return void 0;
}
function BasicSerializeArray(value, stack, builder) {
if (StackContains(stack, value)) {
if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
stack.push(value);
builder.push("[");
var len = value.length;
for (var i = 0; i < len; i++) {
@ -220,10 +206,9 @@ function BasicSerializeArray(value, stack, builder) {
function BasicSerializeObject(value, stack, builder) {
if (StackContains(stack, value)) {
if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
stack.push(value);
builder.push("{");
for (var p in value) {
if (%HasLocalProperty(value, p)) {
@ -250,40 +235,41 @@ function BasicSerializeObject(value, stack, builder) {
function BasicJSONSerialize(key, holder, stack, builder) {
var value = holder[key];
if (IS_OBJECT(value) && value) {
if (IS_SPEC_OBJECT(value)) {
var toJSON = value.toJSON;
if (IS_FUNCTION(toJSON)) value = toJSON.call(value, $String(key));
if (IS_FUNCTION(toJSON)) {
value = %_CallFunction(value, ToString(key), toJSON);
}
}
if (IS_STRING(value)) {
builder.push(%QuoteJSONString(value));
} else if (IS_NUMBER(value)) {
builder.push(($isFinite(value) ? %_NumberToString(value) : "null"));
} else if (IS_BOOLEAN(value)) {
builder.push((value ? "true" : "false"));
} else if (IS_OBJECT(value)) {
builder.push(value ? "true" : "false");
} else if (IS_NULL(value)) {
builder.push("null");
} else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
// Value is a non-callable object.
// Unwrap value if necessary
if (IS_NUMBER_WRAPPER(value)) {
value = %_ValueOf(value);
value = ToNumber(value);
builder.push(($isFinite(value) ? %_NumberToString(value) : "null"));
} else if (IS_STRING_WRAPPER(value)) {
builder.push(%QuoteJSONString(%_ValueOf(value)));
builder.push(%QuoteJSONString(ToString(value)));
} else if (IS_BOOLEAN_WRAPPER(value)) {
builder.push((%_ValueOf(value) ? "true" : "false"));
} else {
// Regular non-wrapped object
if (!value) {
builder.push("null");
builder.push(%_ValueOf(value) ? "true" : "false");
} else if (IS_ARRAY(value)) {
BasicSerializeArray(value, stack, builder);
} else {
BasicSerializeObject(value, stack, builder);
}
}
}
}
function JSONStringify(value, replacer, space) {
if (IS_UNDEFINED(replacer) && IS_UNDEFINED(space)) {
if (%_ArgumentsLength() == 1) {
var builder = [];
BasicJSONSerialize('', {'': value}, [], builder);
if (builder.length == 0) return;
@ -294,21 +280,18 @@ function JSONStringify(value, replacer, space) {
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
if (IS_NUMBER_WRAPPER(space)) {
space = $Number(space);
space = ToNumber(space);
} else if (IS_STRING_WRAPPER(space)) {
space = $String(space);
space = ToString(space);
}
}
var gap;
if (IS_NUMBER(space)) {
space = $Math.min(ToInteger(space), 10);
gap = "";
for (var i = 0; i < space; i++) {
gap += " ";
}
space = MathMax(0, MathMin(ToInteger(space), 10));
gap = SubString(" ", 0, space);
} else if (IS_STRING(space)) {
if (space.length > 10) {
gap = space.substring(0, 10);
gap = SubString(space, 0, 10);
} else {
gap = space;
}
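As an aside, the rewritten gap handling clamps a numeric space argument into [0, 10] and slices a prefix of a ten-space string instead of appending in a loop. A stand-alone sketch of the same computation (ComputeGap is a hypothetical helper, not V8 code):

    #include <algorithm>
    #include <string>

    std::string ComputeGap(int space) {
      // ECMA-262 caps the indentation at ten characters.
      space = std::max(0, std::min(space, 10));
      return std::string(10, ' ').substr(0, space);
    }

For example, ComputeGap(4) yields four spaces, while negative and oversized inputs are clamped to "" and ten spaces respectively.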

377
deps/v8/src/lithium-allocator.cc

@ -247,7 +247,7 @@ LOperand* LiveRange::CreateAssignedOperand() {
LOperand* op = NULL;
if (HasRegisterAssigned()) {
ASSERT(!IsSpilled());
if (assigned_double_) {
if (IsDouble()) {
op = LDoubleRegister::Create(assigned_register());
} else {
op = LRegister::Create(assigned_register());
@ -290,19 +290,27 @@ void LiveRange::AdvanceLastProcessedMarker(
void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
ASSERT(Start().Value() <= position.Value());
ASSERT(Start().Value() < position.Value());
ASSERT(result->IsEmpty());
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
// split that interval and use the first part.
UseInterval* current = FirstSearchIntervalForPosition(position);
// If the split position coincides with the beginning of a use interval
// we need to split use positions in a special way.
bool split_at_start = false;
while (current != NULL) {
if (current->Contains(position)) {
current->SplitAt(position);
break;
}
UseInterval* next = current->next();
if (next->start().Value() >= position.Value()) break;
if (next->start().Value() >= position.Value()) {
split_at_start = (next->start().Value() == position.Value());
break;
}
current = next;
}
@ -319,10 +327,20 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
// position after it.
UsePosition* use_after = first_pos_;
UsePosition* use_before = NULL;
if (split_at_start) {
// The split position coincides with the beginning of a use interval (the
// end of a lifetime hole). A use at this position should be attributed to
// the split child, because the split child owns the use interval covering it.
while (use_after != NULL && use_after->pos().Value() < position.Value()) {
use_before = use_after;
use_after = use_after->next();
}
} else {
while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
use_before = use_after;
use_after = use_after->next();
}
}
// Partition the original use positions between the two live ranges.
if (use_before != NULL) {
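A minimal model of the partitioning rule introduced in this hunk, with use positions as sorted ints (all names here are hypothetical, not V8 API):

    #include <cstddef>
    #include <vector>

    // Returns the index of the first use owned by the split child. When the
    // split lands on an interval start, a use at exactly that position goes
    // to the child (strict '<'); otherwise it stays with the parent ('<=').
    std::size_t PartitionUses(const std::vector<int>& use_positions,
                              int split_pos,
                              bool split_at_start) {
      std::size_t i = 0;
      while (i < use_positions.size() &&
             (split_at_start ? use_positions[i] < split_pos
                             : use_positions[i] <= split_pos)) {
        ++i;  // these uses stay with the original range
      }
      return i;
    }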
@ -508,7 +526,7 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
}
if (a->start().Value() < b->start().Value()) {
a = a->next();
if (a == NULL && a->start().Value() > other->End().Value()) break;
if (a == NULL || a->start().Value() > other->End().Value()) break;
AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
} else {
b = b->next();
@ -567,17 +585,12 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
LifetimePosition start = LifetimePosition::FromInstructionIndex(
block->first_instruction_index());
LifetimePosition end = LifetimePosition::FromInstructionIndex(
block->last_instruction_index());
block->last_instruction_index()).NextInstruction();
BitVector::Iterator iterator(live_out);
while (!iterator.Done()) {
int operand_index = iterator.Current();
LiveRange* range = LiveRangeFor(operand_index);
if (!range->IsEmpty() &&
range->Start().Value() == end.NextInstruction().Value()) {
range->AddUseInterval(start, end.NextInstruction());
} else {
range->AddUseInterval(start, end);
}
iterator.Advance();
}
}
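The change above makes the live-out interval always extend one instruction past the block end, instead of special-casing ranges that start right after the block. A tiny model of that computation, assuming the two-positions-per-instruction encoding (raw value = index * 2) used by LifetimePosition:

    #include <utility>

    // Returns the [start, end[ interval for a value live out of the block.
    std::pair<int, int> BlockLiveOutInterval(int first_index, int last_index) {
      int start = first_index * 2;      // start of the first instruction
      int end = (last_index + 1) * 2;   // one instruction past the last one
      return std::make_pair(start, end);
    }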
@ -625,7 +638,7 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
if (result == NULL) {
result = new LiveRange(FixedLiveRangeID(index));
ASSERT(result->IsFixed());
result->set_assigned_register(index, false);
result->set_assigned_register(index, GENERAL_REGISTERS);
fixed_live_ranges_[index] = result;
}
return result;
@ -642,7 +655,7 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
if (result == NULL) {
result = new LiveRange(FixedDoubleLiveRangeID(index));
ASSERT(result->IsFixed());
result->set_assigned_register(index, true);
result->set_assigned_register(index, DOUBLE_REGISTERS);
fixed_double_live_ranges_[index] = result;
}
return result;
@ -960,8 +973,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
}
}
Use(block_start_position, curr_position, temp, NULL);
Define(curr_position.PrevInstruction(), temp, NULL);
Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
Define(curr_position, temp, NULL);
}
}
}
@ -1258,14 +1271,6 @@ void LAllocator::BuildLiveRanges() {
}
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("Allocate general registers", this);
num_registers_ = Register::kNumAllocatableRegisters;
mode_ = CPU_REGISTERS;
AllocateRegisters();
}
bool LAllocator::SafePointsAreInOrder() const {
const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
int safe_point = 0;
@ -1397,10 +1402,18 @@ void LAllocator::ProcessOsrEntry() {
}
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("Allocate general registers", this);
num_registers_ = Register::kNumAllocatableRegisters;
mode_ = GENERAL_REGISTERS;
AllocateRegisters();
}
void LAllocator::AllocateDoubleRegisters() {
HPhase phase("Allocate double registers", this);
num_registers_ = DoubleRegister::kNumAllocatableRegisters;
mode_ = XMM_REGISTERS;
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@ -1411,7 +1424,7 @@ void LAllocator::AllocateRegisters() {
for (int i = 0; i < live_ranges_.length(); ++i) {
if (live_ranges_[i] != NULL) {
if (HasDoubleValue(live_ranges_[i]->id()) == (mode_ == XMM_REGISTERS)) {
if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
AddToUnhandledUnsorted(live_ranges_[i]);
}
}
@ -1422,7 +1435,7 @@ void LAllocator::AllocateRegisters() {
ASSERT(active_live_ranges_.is_empty());
ASSERT(inactive_live_ranges_.is_empty());
if (mode_ == XMM_REGISTERS) {
if (mode_ == DOUBLE_REGISTERS) {
for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
@ -1463,11 +1476,7 @@ void LAllocator::AllocateRegisters() {
current->Start().NextInstruction().Value()) {
// Do not spill the live range eagerly if the use position that can benefit
// from the register is too close to the start of the live range.
LiveRange* part = Split(current,
current->Start().NextInstruction(),
pos->pos());
Spill(current);
AddToUnhandledSorted(part);
SpillBetween(current, current->Start(), pos->pos());
ASSERT(UnhandledIsSorted());
continue;
}
@ -1521,6 +1530,16 @@ void LAllocator::Setup() {
}
const char* LAllocator::RegisterName(int allocation_index) {
ASSERT(mode_ != NONE);
if (mode_ == GENERAL_REGISTERS) {
return Register::AllocationIndexToString(allocation_index);
} else {
return DoubleRegister::AllocationIndexToString(allocation_index);
}
}
void LAllocator::TraceAlloc(const char* msg, ...) {
if (FLAG_trace_alloc) {
va_list arguments;
@ -1544,10 +1563,12 @@ bool LAllocator::HasTaggedValue(int virtual_register) const {
}
bool LAllocator::HasDoubleValue(int virtual_register) const {
RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
HValue* value = graph()->LookupValue(virtual_register);
if (value == NULL) return false;
return value->representation().IsDouble();
if (value != NULL && value->representation().IsDouble()) {
return DOUBLE_REGISTERS;
}
return GENERAL_REGISTERS;
}
@ -1728,16 +1749,22 @@ void LAllocator::InactiveToActive(LiveRange* range) {
}
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
LifetimePosition max_pos = LifetimePosition::FromInstructionIndex(
chunk_->instructions()->length() + 1);
ASSERT(DoubleRegister::kNumAllocatableRegisters >=
Register::kNumAllocatableRegisters);
EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
free_pos(max_pos);
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
Register::kNumAllocatableRegisters);
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* cur_active = active_live_ranges_.at(i);
free_pos[cur_active->assigned_register()] =
free_until_pos[cur_active->assigned_register()] =
LifetimePosition::FromInstructionIndex(0);
}
@ -1748,67 +1775,83 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
free_pos[cur_reg] = Min(free_pos[cur_reg], next_intersection);
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
}
UsePosition* pos = current->FirstPosWithHint();
if (pos != NULL) {
LOperand* hint = pos->hint();
UsePosition* hinted_use = current->FirstPosWithHint();
if (hinted_use != NULL) {
LOperand* hint = hinted_use->hint();
if (hint->IsRegister() || hint->IsDoubleRegister()) {
int register_index = hint->index();
TraceAlloc("Found reg hint %d for live range %d (free [%d, end %d[)\n",
register_index,
TraceAlloc(
"Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
RegisterName(register_index),
free_until_pos[register_index].Value(),
current->id(),
free_pos[register_index].Value(),
current->End().Value());
if (free_pos[register_index].Value() >= current->End().Value()) {
TraceAlloc("Assigning preferred reg %d to live range %d\n",
register_index,
// The desired register is free until the end of the current live range.
if (free_until_pos[register_index].Value() >= current->End().Value()) {
TraceAlloc("Assigning preferred reg %s to live range %d\n",
RegisterName(register_index),
current->id());
current->set_assigned_register(register_index, mode_ == XMM_REGISTERS);
current->set_assigned_register(register_index, mode_);
return true;
}
}
}
int max_reg = 0;
// Find the register which stays free for the longest time.
int reg = 0;
for (int i = 1; i < RegisterCount(); ++i) {
if (free_pos[i].Value() > free_pos[max_reg].Value()) {
max_reg = i;
if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
reg = i;
}
}
if (free_pos[max_reg].InstructionIndex() == 0) {
// All registers are blocked.
return false;
} else if (free_pos[max_reg].Value() >= current->End().Value()) {
TraceAlloc("Assigning reg %d to live range %d\n", max_reg, current->id());
current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
} else {
// Split the interval at the nearest gap and never split an interval at its
// start position.
LifetimePosition pos =
LifetimePosition::FromInstructionIndex(
chunk_->NearestGapPos(free_pos[max_reg].InstructionIndex()));
if (pos.Value() <= current->Start().Value()) return false;
LiveRange* second_range = Split(current, pos);
AddToUnhandledSorted(second_range);
current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
}
LifetimePosition pos = free_until_pos[reg];
if (pos.Value() <= current->Start().Value()) {
// All registers are blocked.
return false;
}
if (pos.Value() < current->End().Value()) {
// Register reg is available at the range start but becomes blocked before
// the range end. Split current at position where it becomes blocked.
LiveRange* tail = SplitAt(current, pos);
AddToUnhandledSorted(tail);
}
// Register reg is available at the range start and is free until
// the range end.
ASSERT(pos.Value() >= current->End().Value());
TraceAlloc("Assigning free reg %s to live range %d\n",
RegisterName(reg),
current->id());
current->set_assigned_register(reg, mode_);
return true;
}
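The renamed free_until_pos array drives a simple selection rule: take the register that stays free the longest, and only if it stays free past the range start. A stand-alone sketch, under the assumption that positions are plain ints (PickFreeRegister is illustrative, not V8 API):

    #include <cstddef>
    #include <vector>

    // free_until_pos[r] is the first position at which register r stops being
    // free. Returns the register that stays free the longest, or -1 if every
    // register is already blocked at 'start'.
    int PickFreeRegister(const std::vector<int>& free_until_pos, int start) {
      std::size_t reg = 0;
      for (std::size_t i = 1; i < free_until_pos.size(); ++i) {
        if (free_until_pos[i] > free_until_pos[reg]) reg = i;
      }
      return free_until_pos[reg] > start ? static_cast<int>(reg) : -1;
    }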
void LAllocator::AllocateBlockedReg(LiveRange* current) {
LifetimePosition max_pos =
LifetimePosition::FromInstructionIndex(
chunk_->instructions()->length() + 1);
ASSERT(DoubleRegister::kNumAllocatableRegisters >=
Register::kNumAllocatableRegisters);
EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
use_pos(max_pos);
EmbeddedVector<LifetimePosition, DoubleRegister::kNumAllocatableRegisters>
block_pos(max_pos);
UsePosition* register_use = current->NextRegisterPosition(current->Start());
if (register_use == NULL) {
// There is no use in the current live range that requires a register.
// We can just spill it.
Spill(current);
return;
}
LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* range = active_live_ranges_[i];
@ -1841,47 +1884,63 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
}
int max_reg = 0;
int reg = 0;
for (int i = 1; i < RegisterCount(); ++i) {
if (use_pos[i].Value() > use_pos[max_reg].Value()) {
max_reg = i;
if (use_pos[i].Value() > use_pos[reg].Value()) {
reg = i;
}
}
UsePosition* first_usage = current->NextRegisterPosition(current->Start());
if (first_usage == NULL) {
Spill(current);
} else if (use_pos[max_reg].Value() < first_usage->pos().Value()) {
SplitAndSpill(current, current->Start(), first_usage->pos());
} else {
if (block_pos[max_reg].Value() < current->End().Value()) {
// Split current before blocked position.
LiveRange* second_range = Split(current,
current->Start(),
block_pos[max_reg]);
AddToUnhandledSorted(second_range);
}
current->set_assigned_register(max_reg, mode_ == XMM_REGISTERS);
LifetimePosition pos = use_pos[reg];
if (pos.Value() < register_use->pos().Value()) {
// All registers are blocked before the first use that requires a register.
// Spill the starting part of the live range up to that use.
//
// Corner case: the first use position is equal to the start of the range.
// In this case we have nothing to spill and SpillBetween will just return
// this range to the list of unhandled ones, which would lead to an infinite
// loop.
ASSERT(current->Start().Value() < register_use->pos().Value());
SpillBetween(current, current->Start(), register_use->pos());
return;
}
if (block_pos[reg].Value() < current->End().Value()) {
// Register becomes blocked before the current range end. Split before that
// position.
LiveRange* tail = SplitBetween(current,
current->Start(),
block_pos[reg].InstructionStart());
AddToUnhandledSorted(tail);
}
// Register reg is not blocked for the whole range.
ASSERT(block_pos[reg].Value() >= current->End().Value());
TraceAlloc("Assigning blocked reg %s to live range %d\n",
RegisterName(reg),
current->id());
current->set_assigned_register(reg, mode_);
// This register was not free. Thus we need to find and spill
// parts of active and inactive live ranges that use the same register
// at the same lifetime positions as current.
SplitAndSpillIntersecting(current);
}
}
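The rewritten AllocateBlockedReg boils down to a three-way decision; a compressed sketch of it, with plain ints standing in for LifetimePosition (names and types here are hypothetical):

    // Mirrors the three branches above: spill up to the first register use,
    // split before the position where the register gets blocked, or assign
    // outright.
    enum class BlockedRegAction {
      kSpillUntilFirstUse,   // best use_pos < first use needing a register
      kSplitTailThenAssign,  // reg blocked before the range ends
      kAssignWholeRange      // reg usable for the whole range
    };

    BlockedRegAction DecideBlockedReg(int farthest_use_pos,
                                      int block_pos,
                                      int first_register_use,
                                      int range_end) {
      if (farthest_use_pos < first_register_use) {
        return BlockedRegAction::kSpillUntilFirstUse;
      }
      if (block_pos < range_end) {
        return BlockedRegAction::kSplitTailThenAssign;
      }
      return BlockedRegAction::kAssignWholeRange;
    }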
void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
ASSERT(current->HasRegisterAssigned());
int reg = current->assigned_register();
LifetimePosition split_pos =
LifetimePosition::FromInstructionIndex(
chunk_->NearestGapPos(current->Start().InstructionIndex()));
LifetimePosition split_pos = current->Start();
for (int i = 0; i < active_live_ranges_.length(); ++i) {
LiveRange* range = active_live_ranges_[i];
if (range->assigned_register() == reg) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == NULL) {
SplitAndSpill(range, split_pos);
SpillAfter(range, split_pos);
} else {
SplitAndSpill(range, split_pos, next_pos->pos());
SpillBetween(range, split_pos, next_pos->pos());
}
ActiveToHandled(range);
--i;
@ -1896,10 +1955,10 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
if (next_intersection.IsValid()) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
if (next_pos == NULL) {
SplitAndSpill(range, split_pos);
SpillAfter(range, split_pos);
} else {
next_intersection = Min(next_intersection, next_pos->pos());
SplitAndSpill(range, split_pos, next_intersection);
SpillBetween(range, split_pos, next_intersection);
}
InactiveToHandled(range);
--i;
@ -1909,19 +1968,50 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
}
LiveRange* LAllocator::Split(LiveRange* range,
bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() &&
chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
}
void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
UsePosition* prev_pos = prev->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
UsePosition* next_pos = next->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
LOperand* prev_operand = prev_pos->operand();
LOperand* next_operand = next_pos->operand();
LGap* gap = chunk_->GetGapAt(pos);
gap->GetOrCreateParallelMove(LGap::START)->
AddMove(prev_operand, next_operand);
next_pos->set_hint(prev_operand);
}
LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
ASSERT(!range->IsFixed());
TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
if (pos.Value() <= range->Start().Value()) return range;
LiveRange* result = LiveRangeFor(next_virtual_register_++);
range->SplitAt(pos, result);
return result;
}
LiveRange* LAllocator::SplitBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
ASSERT(!range->IsFixed());
TraceAlloc("Splitting live range %d in position between [%d, %d[\n",
TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
range->id(),
start.Value(),
end.Value());
LifetimePosition split_pos = FindOptimalSplitPos(
start, end.PrevInstruction().InstructionEnd());
LifetimePosition split_pos = FindOptimalSplitPos(start, end);
ASSERT(split_pos.Value() >= start.Value());
return Split(range, split_pos);
return SplitAt(range, split_pos);
}
@ -1944,78 +2034,49 @@ LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
}
HBasicBlock* block = end_block;
// Move to the most outside loop header.
// Find header of outermost loop.
while (block->parent_loop_header() != NULL &&
block->parent_loop_header()->block_id() > start_block->block_id()) {
block = block->parent_loop_header();
}
if (block == end_block) {
return end;
}
if (block == end_block) return end;
return LifetimePosition::FromInstructionIndex(
block->first_instruction_index());
}
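A compact model of the loop-header walk above, hoisting the split out of any loop whose header lies after the start block (Block is a hypothetical stand-in for HBasicBlock):

    struct Block {
      int id;
      Block* parent_loop_header;
    };

    // Climbs to the header of the outermost loop covered by the interval;
    // returns end_block unchanged when no such loop exists.
    Block* OutermostLoopHeader(Block* start_block, Block* end_block) {
      Block* block = end_block;
      while (block->parent_loop_header != nullptr &&
             block->parent_loop_header->id > start_block->id) {
        block = block->parent_loop_header;
      }
      return block;
    }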
bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() &&
chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
}
void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
UsePosition* prev_pos = prev->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
UsePosition* next_pos = next->AddUsePosition(
LifetimePosition::FromInstructionIndex(pos));
LOperand* prev_operand = prev_pos->operand();
LOperand* next_operand = next_pos->operand();
LGap* gap = chunk_->GetGapAt(pos);
gap->GetOrCreateParallelMove(LGap::START)->
AddMove(prev_operand, next_operand);
next_pos->set_hint(prev_operand);
}
LiveRange* LAllocator::Split(LiveRange* range, LifetimePosition pos) {
ASSERT(!range->IsFixed());
TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
if (pos.Value() <= range->Start().Value()) {
return range;
}
LiveRange* result = LiveRangeFor(next_virtual_register_++);
range->SplitAt(pos, result);
return result;
void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
LiveRange* second_part = SplitAt(range, pos);
Spill(second_part);
}
void LAllocator::SplitAndSpill(LiveRange* range,
void LAllocator::SpillBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
// We have a live range and want to make sure that it is
// spilled at start and spilled at most until end.
ASSERT(start.Value() < end.Value());
LiveRange* tail_part = Split(range, start);
if (tail_part->Start().Value() < end.Value()) {
LiveRange* third_part = Split(tail_part,
tail_part->Start().NextInstruction(),
end);
Spill(tail_part);
ASSERT(third_part != tail_part);
AddToUnhandledSorted(third_part);
} else {
AddToUnhandledSorted(tail_part);
}
}
LiveRange* second_part = SplitAt(range, start);
if (second_part->Start().Value() < end.Value()) {
// The split result intersects with [start, end[.
// Split it at a position inside ]start+1, end[, spill the middle part
// and put the rest back on the unhandled list.
LiveRange* third_part = SplitBetween(
second_part,
second_part->Start().InstructionEnd(),
end.PrevInstruction().InstructionEnd());
ASSERT(third_part != second_part);
Spill(second_part);
AddToUnhandledSorted(third_part);
} else {
// The split result does not intersect with [start, end[.
// Nothing to spill. Just put it on the unhandled list as a whole.
AddToUnhandledSorted(second_part);
}
}
void LAllocator::SplitAndSpill(LiveRange* range, LifetimePosition at) {
at = LifetimePosition::FromInstructionIndex(
chunk_->NearestGapPos(at.InstructionIndex()));
LiveRange* second_part = Split(range, at);
Spill(second_part);
}
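The net effect of the new SpillBetween can be summarized as a three-way partition of the range; a rough sketch with ints for positions (SpillPlan and friends are illustrative only, and degenerate cases are glossed over):

    struct Interval { int start, end; };  // half-open [start, end[

    struct SpillPlan {
      Interval keep;       // prefix that keeps its current location
      Interval spilled;    // middle part that lives in a stack slot
      Interval unhandled;  // tail that goes back on the allocation queue
    };

    SpillPlan PlanSpillBetween(Interval range, int start, int end) {
      SpillPlan plan;
      plan.keep = Interval{range.start, start};
      if (start < range.end && start < end) {
        int middle_end = end < range.end ? end : range.end;
        plan.spilled = Interval{start, middle_end};
        plan.unhandled = Interval{middle_end, range.end};
      } else {
        plan.spilled = Interval{0, 0};  // nothing intersects [start, end[
        plan.unhandled = Interval{start, range.end};
      }
      return plan;
    }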
@ -2026,7 +2087,7 @@ void LAllocator::Spill(LiveRange* range) {
if (!first->HasAllocatedSpillOperand()) {
LOperand* op = TryReuseSpillSlot(range);
if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == XMM_REGISTERS);
if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
first->SetSpillOperand(op);
}
range->MakeSpilled();

79
deps/v8/src/lithium-allocator.h

@ -55,6 +55,7 @@ class LPointerMap;
class LStackSlot;
class LRegister;
// This class represents a single point of a LOperand's lifetime.
// For each lithium instruction there are exactly two lifetime positions:
// the beginning and the end of the instruction. Lifetime positions for
@ -121,7 +122,13 @@ class LifetimePosition {
// instruction.
bool IsValid() const { return value_ != -1; }
static LifetimePosition Invalid() { return LifetimePosition(); }
static inline LifetimePosition Invalid() { return LifetimePosition(); }
static inline LifetimePosition MaxPosition() {
// We have to use this kind of getter instead of a static member due to
// a crash bug in GDB.
return LifetimePosition(kMaxInt);
}
private:
static const int kStep = 2;
@ -135,6 +142,13 @@ class LifetimePosition {
};
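A toy model of this encoding may help: there are two lifetime positions per instruction, so the raw value advances in steps of two (kStep == 2 in the real class). Pos below is a sketch, not the V8 type:

    struct Pos {
      int value;
      static Pos FromInstructionIndex(int index) { return Pos{index * 2}; }
      int InstructionIndex() const { return value / 2; }
      bool IsInstructionStart() const { return (value & 1) == 0; }
      Pos InstructionStart() const { return Pos{value & ~1}; }
      Pos InstructionEnd() const { return Pos{value | 1}; }
      Pos NextInstruction() const { return Pos{(value & ~1) + 2}; }
    };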
enum RegisterKind {
NONE,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
class LOperand: public ZoneObject {
public:
enum Kind {
@ -594,8 +608,8 @@ class LiveRange: public ZoneObject {
explicit LiveRange(int id)
: id_(id),
spilled_(false),
assigned_double_(false),
assigned_register_(kInvalidAssignment),
assigned_register_kind_(NONE),
last_interval_(NULL),
first_interval_(NULL),
first_pos_(NULL),
@ -620,10 +634,10 @@ class LiveRange: public ZoneObject {
LOperand* CreateAssignedOperand();
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
void set_assigned_register(int reg, bool double_reg) {
void set_assigned_register(int reg, RegisterKind register_kind) {
ASSERT(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
assigned_double_ = double_reg;
assigned_register_kind_ = register_kind;
ConvertOperands();
}
void MakeSpilled() {
@ -652,9 +666,13 @@ class LiveRange: public ZoneObject {
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos);
// Split this live range at the given position which must follow the start of
// the range.
// All uses following the given position will be moved from this
// live range to the result live range.
void SplitAt(LifetimePosition position, LiveRange* result);
bool IsDouble() const { return assigned_double_; }
bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
bool HasRegisterAssigned() const {
return assigned_register_ != kInvalidAssignment;
}
@ -721,8 +739,8 @@ class LiveRange: public ZoneObject {
int id_;
bool spilled_;
bool assigned_double_;
int assigned_register_;
RegisterKind assigned_register_kind_;
UseInterval* last_interval_;
UseInterval* first_interval_;
UsePosition* first_pos_;
@ -774,8 +792,8 @@ class LAllocator BASE_EMBEDDED {
// Checks whether the value of a given virtual register is tagged.
bool HasTaggedValue(int virtual_register) const;
// Checks whether the value of a given virtual register is a double.
bool HasDoubleValue(int virtual_register) const;
// Returns the register kind required by the given virtual register.
RegisterKind RequiredRegisterKind(int virtual_register) const;
// Begin a new instruction.
void BeginInstruction();
@ -814,12 +832,6 @@ class LAllocator BASE_EMBEDDED {
#endif
private:
enum OperationMode {
NONE,
CPU_REGISTERS,
XMM_REGISTERS
};
void MeetRegisterConstraints();
void ResolvePhis();
void BuildLiveRanges();
@ -871,17 +883,38 @@ class LAllocator BASE_EMBEDDED {
// Helper methods for allocating registers.
bool TryAllocateFreeReg(LiveRange* range);
void AllocateBlockedReg(LiveRange* range);
void SplitAndSpillIntersecting(LiveRange* range);
LifetimePosition FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end);
LiveRange* Split(LiveRange* range,
// Live range splitting helpers.
// Split the given range at the given position.
// If range starts at or after the given position then the
// original range is returned.
// Otherwise returns the live range that starts at pos and contains
// all uses from the original range that follow pos. Uses at pos will
// still be owned by the original range after splitting.
LiveRange* SplitAt(LiveRange* range, LifetimePosition pos);
// Split the given range at a position from the interval [start, end].
LiveRange* SplitBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end);
LiveRange* Split(LiveRange* range, LifetimePosition split_pos);
void SplitAndSpill(LiveRange* range,
// Find a lifetime position in the interval [start, end] which
// is optimal for splitting: it is either the header of the outermost
// loop covered by this interval or the latest possible position.
LifetimePosition FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end);
// Spill the given live range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos);
// Spill the given live range after position start and up to position end.
void SpillBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end);
void SplitAndSpill(LiveRange* range, LifetimePosition at);
void SplitAndSpillIntersecting(LiveRange* range);
void Spill(LiveRange* range);
bool IsBlockBoundary(LifetimePosition pos);
void AddGapMove(int pos, LiveRange* prev, LiveRange* next);
@ -914,6 +947,8 @@ class LAllocator BASE_EMBEDDED {
HPhi* LookupPhi(LOperand* operand) const;
LGap* GetLastGap(HBasicBlock* block) const;
const char* RegisterName(int allocation_index);
LChunk* chunk_;
ZoneList<InstructionSummary*> summaries_;
InstructionSummary* next_summary_;
@ -938,7 +973,7 @@ class LAllocator BASE_EMBEDDED {
// Next virtual register number to be assigned to temporaries.
int next_virtual_register_;
OperationMode mode_;
RegisterKind mode_;
int num_registers_;
HGraph* graph_;

179
deps/v8/src/log-utils.cc

@ -273,29 +273,7 @@ void LogMessageBuilder::Append(String* str) {
void LogMessageBuilder::AppendAddress(Address addr) {
static Address last_address_ = NULL;
AppendAddress(addr, last_address_);
last_address_ = addr;
}
void LogMessageBuilder::AppendAddress(Address addr, Address bias) {
if (!FLAG_compress_log) {
Append("0x%" V8PRIxPTR, addr);
} else if (bias == NULL) {
Append("%" V8PRIxPTR, addr);
} else {
uintptr_t delta;
char sign;
if (addr >= bias) {
delta = addr - bias;
sign = '+';
} else {
delta = bias - addr;
sign = '-';
}
Append("%c%" V8PRIxPTR, sign, delta);
}
}
@ -343,24 +321,6 @@ void LogMessageBuilder::AppendStringPart(const char* str, int len) {
}
bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) {
return compressor->Store(Vector<const char>(Log::message_buffer_, pos_));
}
bool LogMessageBuilder::RetrieveCompressedPrevious(
LogRecordCompressor* compressor, const char* prefix) {
pos_ = 0;
if (prefix[0] != '\0') Append(prefix);
Vector<char> prev_record(Log::message_buffer_ + pos_,
Log::kMessageBufferSize - pos_);
const bool has_prev = compressor->RetrievePreviousCompressed(&prev_record);
if (!has_prev) return false;
pos_ += prev_record.length();
return true;
}
void LogMessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize);
const int written = Log::Write(Log::message_buffer_, pos_);
@ -369,145 +329,6 @@ void LogMessageBuilder::WriteToLogFile() {
}
}
// Formatting string for back references to the whole line. E.g. "#2" means
// "the second line above".
const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";
// Formatting string for back references. E.g. "#2:10" means
// "the second line above, start from char 10 (0-based)".
const char* LogRecordCompressor::kBackwardReferenceFormat = "#%d:%d";
LogRecordCompressor::~LogRecordCompressor() {
for (int i = 0; i < buffer_.length(); ++i) {
buffer_[i].Dispose();
}
}
static int GetNumberLength(int number) {
ASSERT(number >= 0);
ASSERT(number < 10000);
if (number < 10) return 1;
if (number < 100) return 2;
if (number < 1000) return 3;
return 4;
}
int LogRecordCompressor::GetBackwardReferenceSize(int distance, int pos) {
// See kLineBackwardReferenceFormat and kBackwardReferenceFormat.
return pos == 0 ? GetNumberLength(distance) + 1
: GetNumberLength(distance) + GetNumberLength(pos) + 2;
}
void LogRecordCompressor::PrintBackwardReference(Vector<char> dest,
int distance,
int pos) {
if (pos == 0) {
OS::SNPrintF(dest, kLineBackwardReferenceFormat, distance);
} else {
OS::SNPrintF(dest, kBackwardReferenceFormat, distance, pos);
}
}
bool LogRecordCompressor::Store(const Vector<const char>& record) {
// Check if the record is the same as the last stored one.
if (curr_ != -1) {
Vector<const char>& curr = buffer_[curr_];
if (record.length() == curr.length()
&& strncmp(record.start(), curr.start(), record.length()) == 0) {
return false;
}
}
// buffer_ is circular.
prev_ = curr_++;
curr_ %= buffer_.length();
Vector<char> record_copy = Vector<char>::New(record.length());
memcpy(record_copy.start(), record.start(), record.length());
buffer_[curr_].Dispose();
buffer_[curr_] =
Vector<const char>(record_copy.start(), record_copy.length());
return true;
}
bool LogRecordCompressor::RetrievePreviousCompressed(
Vector<char>* prev_record) {
if (prev_ == -1) return false;
int index = prev_;
// Distance from prev_.
int distance = 0;
// Best compression result among records in the buffer.
struct {
intptr_t truncated_len;
int distance;
int copy_from_pos;
int backref_size;
} best = {-1, 0, 0, 0};
Vector<const char>& prev = buffer_[prev_];
const char* const prev_start = prev.start();
const char* const prev_end = prev.start() + prev.length();
do {
// We're moving backwards until we reach the current record.
// Remember that buffer_ is circular.
if (--index == -1) index = buffer_.length() - 1;
++distance;
if (index == curr_) break;
Vector<const char>& data = buffer_[index];
if (data.start() == NULL) break;
const char* const data_end = data.start() + data.length();
const char* prev_ptr = prev_end;
const char* data_ptr = data_end;
// Compare strings backwards, stop on the last matching character.
while (prev_ptr != prev_start && data_ptr != data.start()
&& *(prev_ptr - 1) == *(data_ptr - 1)) {
--prev_ptr;
--data_ptr;
}
const intptr_t truncated_len = prev_end - prev_ptr;
const int copy_from_pos = static_cast<int>(data_ptr - data.start());
// Skip if the matched tail is too short for a back reference to pay off.
if (truncated_len <= kMaxBackwardReferenceSize
&& truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
continue;
}
// Record compression results.
if (truncated_len > best.truncated_len) {
best.truncated_len = truncated_len;
best.distance = distance;
best.copy_from_pos = copy_from_pos;
best.backref_size = GetBackwardReferenceSize(distance, copy_from_pos);
}
} while (true);
if (best.distance == 0) {
// Can't compress the previous record. Return as is.
ASSERT(prev_record->length() >= prev.length());
memcpy(prev_record->start(), prev.start(), prev.length());
prev_record->Truncate(prev.length());
} else {
// Copy the incompressible part unchanged.
const intptr_t unchanged_len = prev.length() - best.truncated_len;
// + 1 for '\0'.
ASSERT(prev_record->length() >= unchanged_len + best.backref_size + 1);
memcpy(prev_record->start(), prev.start(), unchanged_len);
// Append the backward reference.
Vector<char> backref(
prev_record->start() + unchanged_len, best.backref_size + 1);
PrintBackwardReference(backref, best.distance, best.copy_from_pos);
ASSERT(strlen(backref.start()) - best.backref_size == 0);
prev_record->Truncate(static_cast<int>(unchanged_len + best.backref_size));
}
return true;
}
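For reference, a toy version of the suffix compression this commit removes: it finds the suffix shared with the previous line and replaces it with a "#distance:pos" back reference (here always "#1", one line back; the window search and the worth-it check from GetBackwardReferenceSize are omitted):

    #include <cstdio>
    #include <string>

    std::string CompressAgainstPrevious(const std::string& prev,
                                        const std::string& cur) {
      std::size_t p = prev.size();
      std::size_t c = cur.size();
      // Compare backwards to find the longest shared suffix.
      while (p > 0 && c > 0 && prev[p - 1] == cur[c - 1]) { --p; --c; }
      char backref[32];
      // "#1:p" means: copy from one line back, starting at char p (0-based).
      std::snprintf(backref, sizeof(backref), "#1:%zu", p);
      return cur.substr(0, c) + backref;
    }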
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

64
deps/v8/src/log-utils.h

@ -176,50 +176,6 @@ class Log : public AllStatic {
friend class Logger;
friend class LogMessageBuilder;
friend class LogRecordCompressor;
};
// A utility class for performing backward reference compression
// of string ends. It operates using a window of previous strings.
class LogRecordCompressor {
public:
// 'window_size' is the size of backward lookup window.
explicit LogRecordCompressor(int window_size)
: buffer_(window_size + kNoCompressionWindowSize),
kMaxBackwardReferenceSize(
GetBackwardReferenceSize(window_size, Log::kMessageBufferSize)),
curr_(-1), prev_(-1) {
}
~LogRecordCompressor();
// Fills vector with a compressed version of the previous record.
// Returns false if there is no previous record.
bool RetrievePreviousCompressed(Vector<char>* prev_record);
// Stores a record if it differs from a previous one (or there's no previous).
// Returns true, if the record has been stored.
bool Store(const Vector<const char>& record);
private:
// The minimum size of a buffer: room for the current and
// the previous record. Since there is no room for predecessors of the
// previous record, it can't be compressed at all.
static const int kNoCompressionWindowSize = 2;
// Formatting strings for back references.
static const char* kLineBackwardReferenceFormat;
static const char* kBackwardReferenceFormat;
static int GetBackwardReferenceSize(int distance, int pos);
static void PrintBackwardReference(Vector<char> dest, int distance, int pos);
ScopedVector< Vector<const char> > buffer_;
const int kMaxBackwardReferenceSize;
int curr_;
int prev_;
};
@ -244,32 +200,14 @@ class LogMessageBuilder BASE_EMBEDDED {
// Append a heap string.
void Append(String* str);
// Appends an address, compressing it if needed by offsetting
// from Logger::last_address_.
// Appends an address.
void AppendAddress(Address addr);
// Appends an address, compressing it if needed.
void AppendAddress(Address addr, Address bias);
void AppendDetailed(String* str, bool show_impl_info);
// Append a portion of a string.
void AppendStringPart(const char* str, int len);
// Stores log message into compressor, returns true if the message
// was stored (i.e. doesn't repeat the previous one).
bool StoreInCompressor(LogRecordCompressor* compressor);
// Sets the log message to a previous version of a compressed message.
// Returns false if there is no previous message.
bool RetrieveCompressedPrevious(LogRecordCompressor* compressor) {
return RetrieveCompressedPrevious(compressor, "");
}
// Does the same as the version without arguments, and sets a prefix.
bool RetrieveCompressedPrevious(LogRecordCompressor* compressor,
const char* prefix);
// Write the log message to the log file currently opened.
void WriteToLogFile();

182
deps/v8/src/log.cc

@ -303,7 +303,6 @@ void Profiler::Engage() {
Logger::ticker_->SetProfiler(this);
Logger::ProfilerBeginEvent();
Logger::LogAliases();
}
@ -343,43 +342,21 @@ void Profiler::Run() {
Ticker* Logger::ticker_ = NULL;
Profiler* Logger::profiler_ = NULL;
SlidingStateWindow* Logger::sliding_state_window_ = NULL;
const char** Logger::log_events_ = NULL;
CompressionHelper* Logger::compression_helper_ = NULL;
int Logger::logging_nesting_ = 0;
int Logger::cpu_profiler_nesting_ = 0;
int Logger::heap_profiler_nesting_ = 0;
#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_LONG_EVENT)
#define DECLARE_EVENT(ignore1, name) name,
const char* kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
};
#undef DECLARE_LONG_EVENT
#define DECLARE_SHORT_EVENT(ignore1, ignore2, short_name) short_name,
const char* kCompressedLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_SHORT_EVENT)
};
#undef DECLARE_SHORT_EVENT
#undef DECLARE_EVENT
void Logger::ProfilerBeginEvent() {
if (!Log::IsEnabled()) return;
LogMessageBuilder msg;
msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
if (FLAG_compress_log) {
msg.Append("profiler,\"compression\",%d\n", kCompressionWindowSize);
}
msg.WriteToLogFile();
}
void Logger::LogAliases() {
if (!Log::IsEnabled() || !FLAG_compress_log) return;
LogMessageBuilder msg;
for (int i = 0; i < NUMBER_OF_LOG_EVENTS; ++i) {
msg.Append("alias,%s,%s\n",
kCompressedLogEventsNames[i], kLongLogEventsNames[i]);
}
msg.WriteToLogFile();
}
@ -686,55 +663,16 @@ void Logger::DeleteEvent(const char* name, void* object) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
// A class that contains all common code dealing with record compression.
class CompressionHelper {
public:
explicit CompressionHelper(int window_size)
: compressor_(window_size), repeat_count_(0) { }
// Handles storing a message in the compressor, retrieving the previous one,
// and prefixing it with a repeat count if needed.
// Returns true if the message needs to be written to the log.
bool HandleMessage(LogMessageBuilder* msg) {
if (!msg->StoreInCompressor(&compressor_)) {
// Current message repeats the previous one, don't write it.
++repeat_count_;
return false;
}
if (repeat_count_ == 0) {
return msg->RetrieveCompressedPrevious(&compressor_);
}
OS::SNPrintF(prefix_, "%s,%d,",
Logger::log_events_[Logger::REPEAT_META_EVENT],
repeat_count_ + 1);
repeat_count_ = 0;
return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
}
private:
LogRecordCompressor compressor_;
int repeat_count_;
EmbeddedVector<char, 20> prefix_;
};
#endif // ENABLE_LOGGING_AND_PROFILING
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::CallbackEventInternal(const char* prefix, const char* name,
Address entry_point) {
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,%s,",
log_events_[CODE_CREATION_EVENT], log_events_[CALLBACK_TAG]);
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[CALLBACK_TAG]);
msg.AppendAddress(entry_point);
msg.Append(",1,\"%s%s\"", prefix, name);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
}
@ -771,6 +709,7 @@ void Logger::SetterCallbackEvent(String* name, Address entry_point) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static const char* ComputeMarker(Code* code) {
switch (code->kind()) {
case Code::FUNCTION: return code->optimizable() ? "~" : "";
@ -778,6 +717,7 @@ static const char* ComputeMarker(Code* code) {
default: return "";
}
}
#endif
void Logger::CodeCreateEvent(LogEventsAndTags tag,
@ -786,7 +726,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s", code->ExecutableSize(), ComputeMarker(code));
for (const char* p = comment; *p != '\0'; p++) {
@ -797,10 +739,6 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
msg.Append('"');
LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
#endif
@ -813,14 +751,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
LogMessageBuilder msg;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s%s\"", code->ExecutableSize(), ComputeMarker(code), *str);
LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
#endif
@ -837,7 +773,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> sourcestr =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s%s %s:%d\"",
code->ExecutableSize(),
@ -846,10 +784,6 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
*sourcestr,
line);
LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
#endif
@ -860,14 +794,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
#endif
@ -878,7 +810,7 @@ void Logger::CodeMovingGCEvent() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
LogMessageBuilder msg;
msg.Append("%s\n", log_events_[CODE_MOVING_GC]);
msg.Append("%s\n", kLogEventsNames[CODE_MOVING_GC]);
msg.WriteToLogFile();
OS::SignalCodeMovingGC();
#endif
@ -890,16 +822,13 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,%s,",
log_events_[CODE_CREATION_EVENT], log_events_[REG_EXP_TAG]);
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[REG_EXP_TAG]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(source, false);
msg.Append('\"');
LowLevelCodeCreateEvent(code, &msg);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
#endif
@ -924,13 +853,9 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_snapshot_positions) return;
LogMessageBuilder msg;
msg.Append("%s,", log_events_[SNAPSHOT_POSITION_EVENT]);
msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
msg.AppendAddress(addr);
msg.Append(",%d", pos);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
#endif
@ -942,18 +867,12 @@ void Logger::FunctionCreateEvent(JSFunction* function) {
// This function can be called from GC iterators (during Scavenge,
// MC, and MS), so marking bits can be set on objects. That's
// why unchecked accessors are used here.
static Address prev_code = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
msg.Append("%s,", kLogEventsNames[FUNCTION_CREATION_EVENT]);
msg.AppendAddress(function->address());
msg.Append(',');
msg.AppendAddress(function->unchecked_code()->address(), prev_code);
prev_code = function->unchecked_code()->address();
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.AppendAddress(function->unchecked_code()->address());
msg.Append('\n');
msg.WriteToLogFile();
#endif
@ -987,18 +906,12 @@ void Logger::FunctionDeleteEvent(Address from) {
void Logger::MoveEventInternal(LogEventsAndTags event,
Address from,
Address to) {
static Address prev_to_ = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,", log_events_[event]);
msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
msg.Append(',');
msg.AppendAddress(to, prev_to_);
prev_to_ = to;
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.AppendAddress(to);
msg.Append('\n');
msg.WriteToLogFile();
}
@ -1009,12 +922,8 @@ void Logger::MoveEventInternal(LogEventsAndTags event,
void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,", log_events_[event]);
msg.Append("%s,", kLogEventsNames[event]);
msg.AppendAddress(from);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile();
}
@ -1202,30 +1111,20 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!Log::IsEnabled() || !FLAG_prof) return;
static Address prev_sp = NULL;
static Address prev_function = NULL;
LogMessageBuilder msg;
msg.Append("%s,", log_events_[TICK_EVENT]);
Address prev_addr = sample->pc;
msg.AppendAddress(prev_addr);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
msg.Append(',');
msg.AppendAddress(sample->sp, prev_sp);
prev_sp = sample->sp;
msg.AppendAddress(sample->sp);
msg.Append(',');
msg.AppendAddress(sample->function, prev_function);
prev_function = sample->function;
msg.AppendAddress(sample->function);
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
}
for (int i = 0; i < sample->frames_count; ++i) {
msg.Append(',');
msg.AppendAddress(sample->stack[i], prev_addr);
prev_addr = sample->stack[i];
}
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
msg.AppendAddress(sample->stack[i]);
}
msg.Append('\n');
msg.WriteToLogFile();
@ -1654,12 +1553,6 @@ bool Logger::Setup() {
sliding_state_window_ = new SlidingStateWindow();
}
log_events_ = FLAG_compress_log ?
kCompressedLogEventsNames : kLongLogEventsNames;
if (FLAG_compress_log) {
compression_helper_ = new CompressionHelper(kCompressionWindowSize);
}
if (start_logging) {
logging_nesting_ = 1;
}
@ -1686,13 +1579,17 @@ bool Logger::Setup() {
void Logger::EnsureTickerStarted() {
#ifdef ENABLE_LOGGING_AND_PROFILING
ASSERT(ticker_ != NULL);
if (!ticker_->IsActive()) ticker_->Start();
#endif
}
void Logger::EnsureTickerStopped() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
#endif
}
@ -1707,9 +1604,6 @@ void Logger::TearDown() {
profiler_ = NULL;
}
delete compression_helper_;
compression_helper_ = NULL;
delete sliding_state_window_;
sliding_state_window_ = NULL;

101
deps/v8/src/log.h

@ -74,7 +74,6 @@ class Profiler;
class Semaphore;
class SlidingStateWindow;
class LogMessageBuilder;
class CompressionHelper;
#undef LOG
#ifdef ENABLE_LOGGING_AND_PROFILING
@ -88,58 +87,55 @@ class CompressionHelper;
#endif
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation", "cc") \
V(CODE_MOVE_EVENT, "code-move", "cm") \
V(CODE_DELETE_EVENT, "code-delete", "cd") \
V(CODE_MOVING_GC, "code-moving-gc", "cg") \
V(FUNCTION_CREATION_EVENT, "function-creation", "fc") \
V(FUNCTION_MOVE_EVENT, "function-move", "fm") \
V(FUNCTION_DELETE_EVENT, "function-delete", "fd") \
V(SNAPSHOT_POSITION_EVENT, "snapshot-pos", "sp") \
V(TICK_EVENT, "tick", "t") \
V(REPEAT_META_EVENT, "repeat", "r") \
V(BUILTIN_TAG, "Builtin", "bi") \
V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak", "cdb") \
V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn", "cdbsi") \
V(CALL_IC_TAG, "CallIC", "cic") \
V(CALL_INITIALIZE_TAG, "CallInitialize", "ci") \
V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic", "cmm") \
V(CALL_MISS_TAG, "CallMiss", "cm") \
V(CALL_NORMAL_TAG, "CallNormal", "cn") \
V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic", "cpm") \
V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak", "kcdb") \
V(CODE_CREATION_EVENT, "code-creation") \
V(CODE_MOVE_EVENT, "code-move") \
V(CODE_DELETE_EVENT, "code-delete") \
V(CODE_MOVING_GC, "code-moving-gc") \
V(FUNCTION_CREATION_EVENT, "function-creation") \
V(FUNCTION_MOVE_EVENT, "function-move") \
V(FUNCTION_DELETE_EVENT, "function-delete") \
V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \
V(TICK_EVENT, "tick") \
V(REPEAT_META_EVENT, "repeat") \
V(BUILTIN_TAG, "Builtin") \
V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak") \
V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn") \
V(CALL_IC_TAG, "CallIC") \
V(CALL_INITIALIZE_TAG, "CallInitialize") \
V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic") \
V(CALL_MISS_TAG, "CallMiss") \
V(CALL_NORMAL_TAG, "CallNormal") \
V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic") \
V(KEYED_CALL_DEBUG_BREAK_TAG, "KeyedCallDebugBreak") \
V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG, \
"KeyedCallDebugPrepareStepIn", \
"kcdbsi") \
V(KEYED_CALL_IC_TAG, "KeyedCallIC", "kcic") \
V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize", "kci") \
V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic", "kcmm") \
V(KEYED_CALL_MISS_TAG, "KeyedCallMiss", "kcm") \
V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal", "kcn") \
V(KEYED_CALL_PRE_MONOMORPHIC_TAG, \
"KeyedCallPreMonomorphic", \
"kcpm") \
V(CALLBACK_TAG, "Callback", "cb") \
V(EVAL_TAG, "Eval", "e") \
V(FUNCTION_TAG, "Function", "f") \
V(KEYED_LOAD_IC_TAG, "KeyedLoadIC", "klic") \
V(KEYED_STORE_IC_TAG, "KeyedStoreIC", "ksic") \
V(LAZY_COMPILE_TAG, "LazyCompile", "lc") \
V(LOAD_IC_TAG, "LoadIC", "lic") \
V(REG_EXP_TAG, "RegExp", "re") \
V(SCRIPT_TAG, "Script", "sc") \
V(STORE_IC_TAG, "StoreIC", "sic") \
V(STUB_TAG, "Stub", "s") \
V(NATIVE_FUNCTION_TAG, "Function", "f") \
V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile", "lc") \
V(NATIVE_SCRIPT_TAG, "Script", "sc")
"KeyedCallDebugPrepareStepIn") \
V(KEYED_CALL_IC_TAG, "KeyedCallIC") \
V(KEYED_CALL_INITIALIZE_TAG, "KeyedCallInitialize") \
V(KEYED_CALL_MEGAMORPHIC_TAG, "KeyedCallMegamorphic") \
V(KEYED_CALL_MISS_TAG, "KeyedCallMiss") \
V(KEYED_CALL_NORMAL_TAG, "KeyedCallNormal") \
V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic") \
V(CALLBACK_TAG, "Callback") \
V(EVAL_TAG, "Eval") \
V(FUNCTION_TAG, "Function") \
V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
V(LAZY_COMPILE_TAG, "LazyCompile") \
V(LOAD_IC_TAG, "LoadIC") \
V(REG_EXP_TAG, "RegExp") \
V(SCRIPT_TAG, "Script") \
V(STORE_IC_TAG, "StoreIC") \
V(STUB_TAG, "Stub") \
V(NATIVE_FUNCTION_TAG, "Function") \
V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \
V(NATIVE_SCRIPT_TAG, "Script")
// Note that 'NATIVE_' cases for functions and scripts are mapped onto
// original tags when writing to the log.
class Logger {
public:
#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
#define DECLARE_ENUM(enum_item, ignore) enum_item,
enum LogEventsAndTags {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
NUMBER_OF_LOG_EVENTS
@ -292,9 +288,6 @@ class Logger {
private:
// Size of window used for log records compression.
static const int kCompressionWindowSize = 4;
// Emits the profiler's first message.
static void ProfilerBeginEvent();
@ -312,9 +305,6 @@ class Logger {
static void DeleteEventInternal(LogEventsAndTags event,
Address from);
// Emits aliases for compressed messages.
static void LogAliases();
// Emits the source code of a regexp. Used by regexp events.
static void LogRegExpSource(Handle<JSRegExp> regexp);
@ -357,15 +347,8 @@ class Logger {
// recent VM states.
static SlidingStateWindow* sliding_state_window_;
// An array of log events names.
static const char** log_events_;
// An instance of helper created if log compression is enabled.
static CompressionHelper* compression_helper_;
// Internal implementation classes with access to
// private members.
friend class CompressionHelper;
friend class EventLog;
friend class TimeLog;
friend class Profiler;

7
deps/v8/src/macros.py

@ -140,15 +140,14 @@ macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
# Limit according to ECMA 262 15.9.1.1
const MAX_TIME_MS = 8640000000000000;
# Limit which is MAX_TIME_MS + msPerMonth.
const MAX_TIME_BEFORE_UTC = 8640002592000000;
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
macro DAY(time) = ($floor(time / 86400000));
macro MONTH_FROM_TIME(time) = (MonthFromTime(time));
macro DATE_FROM_TIME(time) = (DateFromTime(time));
macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DATE_FROM_TIME(time));
macro YEAR_FROM_TIME(time) = (YearFromTime(time));
macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DateFromTime(time));
macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
macro NAN_OR_MIN_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MIN_FROM_TIME(time));

5
deps/v8/src/mark-compact.cc

@ -1281,6 +1281,11 @@ void MarkCompactCollector::ProcessObjectGroups() {
void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
PostponeInterruptsScope postpone;
#ifdef DEBUG
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;

8
deps/v8/src/math.js

@ -258,14 +258,6 @@ function SetupMath() {
"max", MathMax,
"min", MathMin
));
// The values here are from the MathFunctionId enum in objects.h.
%SetMathFunctionId($Math.floor, 1);
%SetMathFunctionId($Math.round, 2);
%SetMathFunctionId($Math.abs, 4);
%SetMathFunctionId($Math.sqrt, 0xd);
// TODO(erikcorry): Set the id of the other functions so they can be
// optimized.
};

1
deps/v8/src/messages.js

@ -190,7 +190,6 @@ function FormatMessage(message) {
illegal_return: "Illegal return statement",
error_loading_debugger: "Error loading debugger",
no_input_to_regexp: "No input to %0",
result_not_primitive: "Result of %0 must be a primitive, was %1",
invalid_json: "String '%0' is not valid JSON",
circular_structure: "Converting circular structure to JSON",
obj_ctor_property_non_object: "Object.%0 called on non-object",

4
deps/v8/src/mirror-debugger.js

@ -1533,9 +1533,9 @@ FrameMirror.prototype.scope = function(index) {
};
FrameMirror.prototype.evaluate = function(source, disable_break) {
FrameMirror.prototype.evaluate = function(source, disable_break, opt_context_object) {
var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
source, Boolean(disable_break));
source, Boolean(disable_break), opt_context_object);
return MakeMirror(result);
};

21
deps/v8/src/objects-inl.h

@ -3036,27 +3036,20 @@ FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
}
bool SharedFunctionInfo::HasCustomCallGenerator() {
bool SharedFunctionInfo::HasBuiltinFunctionId() {
return function_data()->IsSmi();
}
MathFunctionId SharedFunctionInfo::math_function_id() {
return static_cast<MathFunctionId>(
(compiler_hints() >> kMathFunctionShift) & kMathFunctionMask);
bool SharedFunctionInfo::IsBuiltinMathFunction() {
return HasBuiltinFunctionId() &&
builtin_function_id() >= kFirstMathFunctionId;
}
void SharedFunctionInfo::set_math_function_id(int math_fn) {
ASSERT(math_fn <= max_math_id_number());
set_compiler_hints(compiler_hints() |
((math_fn & kMathFunctionMask) << kMathFunctionShift));
}
int SharedFunctionInfo::custom_call_generator_id() {
ASSERT(HasCustomCallGenerator());
return Smi::cast(function_data())->value();
BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
ASSERT(HasBuiltinFunctionId());
return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
}

3
deps/v8/src/objects.cc

@ -3097,8 +3097,9 @@ MaybeObject* JSObject::SetPropertyCallback(String* name,
MaybeObject* JSObject::DefineAccessor(String* name,
bool is_getter,
JSFunction* fun,
Object* fun,
PropertyAttributes attributes) {
ASSERT(fun->IsJSFunction() || fun->IsUndefined());
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
!Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {

82
deps/v8/src/objects.h

@ -1368,7 +1368,7 @@ class JSObject: public HeapObject {
MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
bool is_getter,
JSFunction* fun,
Object* fun,
PropertyAttributes attributes);
Object* LookupAccessor(String* name, bool is_getter);
@ -3714,22 +3714,49 @@ class Script: public Struct {
};
enum MathFunctionId {
kNotSpecialMathFunction = 0,
// These numbers must be kept in sync with the ones in math.js.
kMathFloor = 1,
kMathRound = 2,
kMathCeil = 3,
kMathAbs = 4,
kMathLog = 5,
kMathSin = 6,
kMathCos = 7,
kMathTan = 8,
kMathASin = 9,
kMathACos = 0xa,
kMathATan = 0xb,
kMathExp = 0xc,
kMathSqrt = 0xd
// List of builtin functions we want to identify to improve code
// generation.
//
// Each entry has a name of a global object property holding an object
// optionally followed by ".prototype", a name of a builtin function
// on the object (the one the id is set for), and a label.
//
// Installation of ids for the selected builtin functions is handled
// by the bootstrapper.
//
// NOTE: Order is important: math functions should be at the end of
// the list and MathFloor should be the first math function.
#define FUNCTIONS_WITH_ID_LIST(V) \
V(Array.prototype, push, ArrayPush) \
V(Array.prototype, pop, ArrayPop) \
V(String.prototype, charCodeAt, StringCharCodeAt) \
V(String.prototype, charAt, StringCharAt) \
V(String, fromCharCode, StringFromCharCode) \
V(Math, floor, MathFloor) \
V(Math, round, MathRound) \
V(Math, ceil, MathCeil) \
V(Math, abs, MathAbs) \
V(Math, log, MathLog) \
V(Math, sin, MathSin) \
V(Math, cos, MathCos) \
V(Math, tan, MathTan) \
V(Math, asin, MathASin) \
V(Math, acos, MathACos) \
V(Math, atan, MathATan) \
V(Math, exp, MathExp) \
V(Math, sqrt, MathSqrt) \
V(Math, pow, MathPow)
enum BuiltinFunctionId {
#define DECLARE_FUNCTION_ID(ignored1, ignore2, name) \
k##name,
FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
#undef DECLARE_FUNCTION_ID
// Fake id for a special case of Math.pow. Note that it continues the
// list of math functions.
kMathPowHalf,
kFirstMathFunctionId = kMathFloor
};
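FUNCTIONS_WITH_ID_LIST is an X-macro: one list is expanded with different per-entry macros wherever a parallel structure is needed, keeping the enum, name strings, and bootstrapper installation in sync by construction. A self-contained sketch of the technique with a truncated list:

// One list, expanded twice: once to declare enum values, once to build a
// matching name table. Adding an entry to DEMO_FUNCTIONS updates both.
#define DEMO_FUNCTIONS(V) \
  V(Math, floor, MathFloor) \
  V(Math, round, MathRound)

enum DemoFunctionId {
#define DECLARE_ID(obj, name, label) k##label,
  DEMO_FUNCTIONS(DECLARE_ID)
#undef DECLARE_ID
  kDemoFunctionCount
};

static const char* kDemoFunctionNames[] = {
#define DECLARE_NAME(obj, name, label) #obj "." #name,  // e.g. "Math.floor"
  DEMO_FUNCTIONS(DECLARE_NAME)
#undef DECLARE_NAME
};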
@ -3870,7 +3897,7 @@ class SharedFunctionInfo: public HeapObject {
// [function data]: This field holds some additional data for the function.
// Currently it holds either FunctionTemplateInfo, for the benefit of the API,
// or Smi identifying a custom call generator.
// or Smi identifying a builtin function.
// In the long run we don't want all functions to have this field but
// we can fix that when we have a better model for storing hidden data
// on objects.
@ -3878,8 +3905,9 @@ class SharedFunctionInfo: public HeapObject {
inline bool IsApiFunction();
inline FunctionTemplateInfo* get_api_func_data();
inline bool HasCustomCallGenerator();
inline int custom_call_generator_id();
inline bool HasBuiltinFunctionId();
inline bool IsBuiltinMathFunction();
inline BuiltinFunctionId builtin_function_id();
// [script info]: Script from which the function originates.
DECL_ACCESSORS(script, Object)
@ -4130,12 +4158,6 @@ class SharedFunctionInfo: public HeapObject {
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
// Get/set a special tag on the functions from math.js so we can inline
// efficient versions of them in the code.
inline MathFunctionId math_function_id();
inline void set_math_function_id(int id);
static inline int max_math_id_number() { return kMathFunctionMask; }
typedef FixedBodyDescriptor<kNameOffset,
kThisPropertyAssignmentsOffset + kPointerSize,
kSize> BodyDescriptor;
@ -4153,12 +4175,10 @@ class SharedFunctionInfo: public HeapObject {
static const int kHasOnlySimpleThisPropertyAssignments = 0;
static const int kTryFullCodegen = 1;
static const int kAllowLazyCompilation = 2;
static const int kMathFunctionShift = 3;
static const int kMathFunctionMask = 0xf;
static const int kLiveObjectsMayExist = 7;
static const int kCodeAgeShift = 8;
static const int kLiveObjectsMayExist = 3;
static const int kCodeAgeShift = 4;
static const int kCodeAgeMask = 0x7;
static const int kOptimizationDisabled = 11;
static const int kOptimizationDisabled = 7;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
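Dropping the four-bit math-function field (shift 3, mask 0xf) is what lets the later compiler-hint bits move down by four positions: kLiveObjectsMayExist from 7 to 3, kCodeAgeShift from 8 to 4, kOptimizationDisabled from 11 to 7. A hedged sketch of how such shift/mask pairs pack a multi-bit field into a hints word (simplified accessors, not the V8 code):

#include <cstdint>

static const int kCodeAgeShiftSketch = 4;
static const int kCodeAgeMaskSketch = 0x7;

// Extract the 3-bit code-age field from the packed hints word.
inline int CodeAge(uint32_t hints) {
  return (hints >> kCodeAgeShiftSketch) & kCodeAgeMaskSketch;
}
// Clear the field, then store the new value in its place.
inline uint32_t SetCodeAge(uint32_t hints, int age) {
  hints &= ~(static_cast<uint32_t>(kCodeAgeMaskSketch) << kCodeAgeShiftSketch);
  return hints |
      (static_cast<uint32_t>(age & kCodeAgeMaskSketch) << kCodeAgeShiftSketch);
}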

119
deps/v8/src/parser.cc

@ -609,7 +609,25 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
// Initialize parser state.
source->TryFlatten();
scanner_.Initialize(source);
if (source->IsExternalTwoByteString()) {
// Notice that the stream is destroyed at the end of each branch block.
// The last line of the two blocks can't be moved outside, even though
// the calls are identical.
ExternalTwoByteStringUC16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
scanner_.Initialize(&stream, JavaScriptScanner::kAllLiterals);
return DoParseProgram(source, in_global_context, &zone_scope);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
scanner_.Initialize(&stream, JavaScriptScanner::kAllLiterals);
return DoParseProgram(source, in_global_context, &zone_scope);
}
}
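The duplication the comment defends is a lifetime constraint: each character stream is a distinct stack object, so the parse call must happen inside the block that owns it. The same shape recurs in ParseLazy and JsonParser::Parse below. A stripped-down sketch with stand-in types:

struct CharacterStream { virtual ~CharacterStream() {} };
struct TwoByteStream : CharacterStream {};
struct GenericStream : CharacterStream {};

// Stand-in for DoParseProgram; takes a stream it must not outlive.
static int ParseWith(CharacterStream* stream) { (void)stream; return 0; }

static int ParseDispatch(bool is_two_byte) {
  if (is_two_byte) {
    TwoByteStream stream;       // destroyed at the end of this block...
    return ParseWith(&stream);  // ...so the call cannot be hoisted out
  } else {
    GenericStream stream;
    return ParseWith(&stream);
  }
}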
FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
bool in_global_context,
ZoneScope* zone_scope) {
ASSERT(target_stack_ == NULL);
if (pre_data_ != NULL) pre_data_->Initialize();
@ -655,25 +673,45 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source,
// If there was a syntax error we have to get rid of the AST
// and it is not safe to do so before the scope has been deleted.
if (result == NULL) zone_scope.DeleteOnExit();
if (result == NULL) zone_scope->DeleteOnExit();
return result;
}
FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info) {
CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
HistogramTimerScope timer(&Counters::parse_lazy);
Handle<String> source(String::cast(script_->source()));
Counters::total_parse_size.Increment(source->length());
// Initialize parser state.
source->TryFlatten();
if (source->IsExternalTwoByteString()) {
ExternalTwoByteStringUC16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source),
info->start_position(),
info->end_position());
FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
return result;
} else {
GenericStringUC16CharacterStream stream(source,
info->start_position(),
info->end_position());
FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
return result;
}
}
FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info,
UC16CharacterStream* source,
ZoneScope* zone_scope) {
scanner_.Initialize(source, JavaScriptScanner::kAllLiterals);
ASSERT(target_stack_ == NULL);
Handle<String> name(String::cast(info->name()));
fni_ = new FuncNameInferrer();
fni_->PushEnclosingName(name);
// Initialize parser state.
source->TryFlatten();
scanner_.Initialize(source, info->start_position(), info->end_position());
ASSERT(target_stack_ == NULL);
mode_ = PARSE_EAGERLY;
// Placeholder for the result.
@ -705,7 +743,7 @@ FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info) {
// not safe to do before scope has been deleted.
if (result == NULL) {
Top::StackOverflow();
zone_scope.DeleteOnExit();
zone_scope->DeleteOnExit();
} else {
Handle<String> inferred_name(info->inferred_name());
result->set_inferred_name(inferred_name);
@ -719,12 +757,12 @@ Handle<String> Parser::GetSymbol(bool* ok) {
if (pre_data() != NULL) {
symbol_id = pre_data()->GetSymbolIdentifier();
}
return LookupSymbol(symbol_id, scanner_.literal());
return LookupSymbol(symbol_id, scanner().literal());
}
void Parser::ReportMessage(const char* type, Vector<const char*> args) {
Scanner::Location source_location = scanner_.location();
Scanner::Location source_location = scanner().location();
ReportMessageAt(source_location, type, args);
}
@ -1641,7 +1679,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) {
Expect(Token::CONTINUE, CHECK_OK);
Handle<String> label = Handle<String>::null();
Token::Value tok = peek();
if (!scanner_.has_line_terminator_before_next() &&
if (!scanner().has_line_terminator_before_next() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
label = ParseIdentifier(CHECK_OK);
}
@ -1667,7 +1705,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
Expect(Token::BREAK, CHECK_OK);
Handle<String> label;
Token::Value tok = peek();
if (!scanner_.has_line_terminator_before_next() &&
if (!scanner().has_line_terminator_before_next() &&
tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
label = ParseIdentifier(CHECK_OK);
}
@ -1712,7 +1750,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
}
Token::Value tok = peek();
if (scanner_.has_line_terminator_before_next() ||
if (scanner().has_line_terminator_before_next() ||
tok == Token::SEMICOLON ||
tok == Token::RBRACE ||
tok == Token::EOS) {
@ -1844,7 +1882,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
Expect(Token::THROW, CHECK_OK);
int pos = scanner().location().beg_pos;
if (scanner_.has_line_terminator_before_next()) {
if (scanner().has_line_terminator_before_next()) {
ReportMessage("newline_after_throw", Vector<const char*>::empty());
*ok = false;
return NULL;
@ -2408,7 +2446,8 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// LeftHandSideExpression ('++' | '--')?
Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
if (!scanner_.has_line_terminator_before_next() && Token::IsCountOp(peek())) {
if (!scanner().has_line_terminator_before_next() &&
Token::IsCountOp(peek())) {
// Signal a reference error if the expression is an invalid
// left-hand side expression. We could report this as a syntax
// error here but for compatibility with JSC we choose to report the
@ -2677,7 +2716,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
case Token::NUMBER: {
Consume(Token::NUMBER);
double value =
StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
StringToDouble(scanner().literal(), ALLOW_HEX | ALLOW_OCTALS);
result = NewNumberLiteral(value);
break;
}
@ -3028,7 +3067,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
case Token::NUMBER: {
Consume(Token::NUMBER);
double value =
StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
StringToDouble(scanner().literal(), ALLOW_HEX | ALLOW_OCTALS);
key = NewNumberLiteral(value);
break;
}
@ -3089,7 +3128,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
if (!scanner_.ScanRegExpPattern(seen_equal)) {
if (!scanner().ScanRegExpPattern(seen_equal)) {
Next();
ReportMessage("unterminated_regexp", Vector<const char*>::empty());
*ok = false;
@ -3099,10 +3138,10 @@ Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
int literal_index = temp_scope_->NextMaterializedLiteralIndex();
Handle<String> js_pattern =
Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
scanner_.ScanRegExpFlags();
Factory::NewStringFromUtf8(scanner().next_literal(), TENURED);
scanner().ScanRegExpFlags();
Handle<String> js_flags =
Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
Factory::NewStringFromUtf8(scanner().next_literal(), TENURED);
Next();
return new RegExpLiteral(js_pattern, js_flags, literal_index);
@ -3158,7 +3197,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
int start_pos = scanner_.location().beg_pos;
int start_pos = scanner().location().beg_pos;
bool done = (peek() == Token::RPAREN);
while (!done) {
Handle<String> param_name = ParseIdentifier(CHECK_OK);
@ -3195,7 +3234,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
bool is_lazily_compiled =
mode() == PARSE_LAZILY && top_scope_->HasTrivialOuterContext();
int function_block_pos = scanner_.location().beg_pos;
int function_block_pos = scanner().location().beg_pos;
int materialized_literal_count;
int expected_property_count;
int end_pos;
@ -3212,7 +3251,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
ReportInvalidPreparseData(name, CHECK_OK);
}
Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
scanner_.SeekForward(end_pos);
// Seek to position just before terminal '}'.
scanner().SeekForward(end_pos - 1);
materialized_literal_count = entry.literal_count();
expected_property_count = entry.property_count();
only_simple_this_property_assignments = false;
@ -3228,7 +3268,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
this_property_assignments = temp_scope.this_property_assignments();
Expect(Token::RBRACE, CHECK_OK);
end_pos = scanner_.location().end_pos;
end_pos = scanner().location().end_pos;
}
FunctionLiteral* function_literal =
@ -3332,7 +3372,7 @@ void Parser::ExpectSemicolon(bool* ok) {
Next();
return;
}
if (scanner_.has_line_terminator_before_next() ||
if (scanner().has_line_terminator_before_next() ||
tok == Token::RBRACE ||
tok == Token::EOS) {
return;
@ -3383,8 +3423,8 @@ Handle<String> Parser::ParseIdentifierOrGetOrSet(bool* is_get,
bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return Handle<String>();
if (scanner_.literal_length() == 3) {
const char* token = scanner_.literal_string();
if (scanner().literal_length() == 3) {
const char* token = scanner().literal_string();
*is_get = strcmp(token, "get") == 0;
*is_set = !*is_get && strcmp(token, "set") == 0;
}
@ -3503,8 +3543,8 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
// ----------------------------------------------------------------------------
// JSON
Handle<Object> JsonParser::ParseJson(Handle<String> source) {
source->TryFlatten();
Handle<Object> JsonParser::ParseJson(Handle<String> script,
UC16CharacterStream* source) {
scanner_.Initialize(source);
stack_overflow_ = false;
Handle<Object> result = ParseJsonValue();
@ -3540,7 +3580,7 @@ Handle<Object> JsonParser::ParseJson(Handle<String> source) {
}
Scanner::Location source_location = scanner_.location();
MessageLocation location(Factory::NewScript(source),
MessageLocation location(Factory::NewScript(script),
source_location.beg_pos,
source_location.end_pos);
int argc = (name_opt == NULL) ? 0 : 1;
@ -4555,13 +4595,12 @@ int ScriptDataImpl::ReadNumber(byte** source) {
// Create a Scanner for the preparser to use as input, and preparse the source.
static ScriptDataImpl* DoPreParse(Handle<String> source,
unibrow::CharacterStream* stream,
static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
bool allow_lazy,
ParserRecorder* recorder,
int literal_flags) {
V8JavaScriptScanner scanner;
scanner.Initialize(source, stream, literal_flags);
scanner.Initialize(source, literal_flags);
intptr_t stack_limit = StackGuard::real_climit();
if (!preparser::PreParser::PreParseProgram(&scanner,
recorder,
@ -4580,8 +4619,7 @@ static ScriptDataImpl* DoPreParse(Handle<String> source,
// Preparse, but only collect data that is immediately useful,
// even if the preparser data is only used once.
ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
unibrow::CharacterStream* stream,
ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
v8::Extension* extension) {
bool allow_lazy = FLAG_lazy && (extension == NULL);
if (!allow_lazy) {
@ -4590,22 +4628,19 @@ ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
return NULL;
}
PartialParserRecorder recorder;
return DoPreParse(source, stream, allow_lazy, &recorder,
return DoPreParse(source, allow_lazy, &recorder,
JavaScriptScanner::kNoLiterals);
}
ScriptDataImpl* ParserApi::PreParse(Handle<String> source,
unibrow::CharacterStream* stream,
ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
v8::Extension* extension) {
Handle<Script> no_script;
bool allow_lazy = FLAG_lazy && (extension == NULL);
CompleteParserRecorder recorder;
int kPreParseLiteralsFlags =
JavaScriptScanner::kLiteralString | JavaScriptScanner::kLiteralIdentifier;
return DoPreParse(source, stream, allow_lazy,
&recorder, kPreParseLiteralsFlags);
return DoPreParse(source, allow_lazy, &recorder, kPreParseLiteralsFlags);
}

33
deps/v8/src/parser.h

@ -169,14 +169,12 @@ class ParserApi {
static bool Parse(CompilationInfo* info);
// Generic preparser generating full preparse data.
static ScriptDataImpl* PreParse(Handle<String> source,
unibrow::CharacterStream* stream,
static ScriptDataImpl* PreParse(UC16CharacterStream* source,
v8::Extension* extension);
// Preparser that only does the preprocessing that makes sense when the
// result is used immediately afterwards.
static ScriptDataImpl* PartialPreParse(Handle<String> source,
unibrow::CharacterStream* stream,
static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
v8::Extension* extension);
};
@ -435,18 +433,26 @@ class Parser {
Vector<const char*> args);
protected:
FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info,
UC16CharacterStream* source,
ZoneScope* zone_scope);
enum Mode {
PARSE_LAZILY,
PARSE_EAGERLY
};
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(Handle<String> source,
bool in_global_context,
ZoneScope* zone_scope);
// Report syntax error
void ReportUnexpectedToken(Token::Value token);
void ReportInvalidPreparseData(Handle<String> name, bool* ok);
void ReportMessage(const char* message, Vector<const char*> args);
bool inside_with() const { return with_nesting_level_ > 0; }
Scanner& scanner() { return scanner_; }
V8JavaScriptScanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
ScriptDataImpl* pre_data() const { return pre_data_; }
@ -548,7 +554,7 @@ class Parser {
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
return scanner_.peek();
return scanner().peek();
}
INLINE(Token::Value Next()) {
@ -560,9 +566,11 @@ class Parser {
}
if (StackLimitCheck().HasOverflowed()) {
// Any further calls to Next or peek will return the illegal token.
// The current call must return the next token, which might already
// have been peeked.
stack_overflow_ = true;
}
return scanner_.Next();
return scanner().Next();
}
INLINE(void Consume(Token::Value token));
@ -702,7 +710,14 @@ class JsonParser BASE_EMBEDDED {
// Parse JSON input as a single JSON value.
// Returns null handle and sets exception if parsing failed.
static Handle<Object> Parse(Handle<String> source) {
return JsonParser().ParseJson(source);
if (source->IsExternalTwoByteString()) {
ExternalTwoByteStringUC16CharacterStream stream(
Handle<ExternalTwoByteString>::cast(source), 0, source->length());
return JsonParser().ParseJson(source, &stream);
} else {
GenericStringUC16CharacterStream stream(source, 0, source->length());
return JsonParser().ParseJson(source, &stream);
}
}
private:
@ -710,7 +725,7 @@ class JsonParser BASE_EMBEDDED {
~JsonParser() { }
// Parse a string containing a single JSON value.
Handle<Object> ParseJson(Handle<String>);
Handle<Object> ParseJson(Handle<String> script, UC16CharacterStream* source);
// Parse a single JSON value from input (grammar production JSONValue).
// A JSON value is either a (double-quoted) string literal, a number literal,
// one of "true", "false", or "null", or an object or array literal.

14
deps/v8/src/platform-freebsd.cc

@ -500,16 +500,6 @@ class FreeBSDMutex : public Mutex {
return result;
}
virtual bool TryLock() {
int result = pthread_mutex_trylock(&mutex_);
// Return false if the lock is busy and locking failed.
if (result == EBUSY) {
return false;
}
ASSERT(result == 0); // Verify no other errors.
return true;
}
private:
pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
};
@ -587,12 +577,14 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
TickSample sample;
// We always sample the VM state.
sample.state = VMState::current_state();
// If profiling, we extract the current pc and sp.
if (active_sampler_->IsProfiling()) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
sample.state = Top::current_vm_state();
#if V8_HOST_ARCH_IA32
sample.pc = reinterpret_cast<Address>(mcontext.mc_eip);
sample.sp = reinterpret_cast<Address>(mcontext.mc_esp);

29
deps/v8/src/platform-linux.cc

@ -134,9 +134,7 @@ static bool CPUInfoContainsString(const char * search_string) {
}
bool OS::ArmCpuHasFeature(CpuFeature feature) {
const int max_items = 2;
const char* search_strings[max_items] = { NULL, NULL };
int search_items = 0;
const char* search_string = NULL;
// Simple detection of VFP at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to ARM (mid 2009), no similar
@ -144,25 +142,26 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
// so it's up to individual OSes to provide such.
switch (feature) {
case VFP3:
search_strings[0] = "vfpv3";
// Some old kernels will report vfp for A8, not vfpv3, so we check for
// A8 explicitly. The cpuinfo file reports the CPU Part, which for the
// Cortex-A8 is 0xc08.
search_strings[1] = "0xc08";
search_items = 2;
ASSERT(search_items <= max_items);
search_string = "vfpv3";
break;
case ARMv7:
search_strings[0] = "ARMv7" ;
search_items = 1;
ASSERT(search_items <= max_items);
search_string = "ARMv7";
break;
default:
UNREACHABLE();
}
for (int i = 0; i < search_items; ++i) {
if (CPUInfoContainsString(search_strings[i])) {
if (CPUInfoContainsString(search_string)) {
return true;
}
if (feature == VFP3) {
// Some old kernels will report vfp, not vfpv3. Here we make a last attempt
// to detect vfpv3 by checking for vfp *and* neon, since neon is only
// available on architectures with vfpv3.
// Checking neon on its own is not enough as it is possible to have neon
// without vfp.
if (CPUInfoContainsString("vfp") && CPUInfoContainsString("neon")) {
return true;
}
}
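CPUInfoContainsString itself is outside this hunk; a plausible sketch of such a /proc/cpuinfo substring probe looks like the following (illustrative only; the real helper may buffer and search differently):

#include <cstdio>
#include <cstring>

// Scan /proc/cpuinfo line by line for a feature substring.
static bool CpuInfoContains(const char* needle) {
  FILE* f = std::fopen("/proc/cpuinfo", "r");
  if (f == NULL) return false;
  char line[512];
  bool found = false;
  while (!found && std::fgets(line, sizeof(line), f) != NULL) {
    found = std::strstr(line, needle) != NULL;
  }
  std::fclose(f);
  return found;
}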

12
deps/v8/src/platform-openbsd.cc

@ -476,16 +476,6 @@ class OpenBSDMutex : public Mutex {
return result;
}
virtual bool TryLock() {
int result = pthread_mutex_trylock(&mutex_);
// Return false if the lock is busy and locking failed.
if (result == EBUSY) {
return false;
}
ASSERT(result == 0); // Verify no other errors.
return true;
}
private:
pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
};
@ -564,7 +554,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
TickSample sample;
// We always sample the VM state.
sample.state = Top::current_vm_state();
sample.state = VMState::current_state();
active_sampler_->Tick(&sample);
}

3
deps/v8/src/platform.h

@ -114,6 +114,8 @@ int signbit(double x);
#endif // __GNUC__
#include "atomicops.h"
#include "utils.h"
#include "v8globals.h"
namespace v8 {
namespace internal {
@ -123,6 +125,7 @@ namespace internal {
typedef intptr_t AtomicWord;
class Semaphore;
class Mutex;
double ceiling(double x);
double modulo(double x, double y);

109
deps/v8/src/preparser-api.cc

@ -39,39 +39,121 @@ namespace v8 {
namespace internal {
// UTF16Buffer based on a v8::UnicodeInputStream.
class InputStreamUTF16Buffer : public UTF16Buffer {
class InputStreamUTF16Buffer : public UC16CharacterStream {
public:
explicit InputStreamUTF16Buffer(UnicodeInputStream* stream)
: UTF16Buffer(),
stream_(stream) { }
/* The InputStreamUTF16Buffer maintains an internal buffer
 * that is filled in chunks from the v8::UnicodeInputStream.
 * It also maintains unlimited pushback capability, optimized
 * for small pushbacks.
 * The pushback_buffer_ pointer points to the limit of pushbacks
 * in the current buffer. There is room for a few pushed-back chars before
 * the buffer containing the most recently read chunk. If this is overflowed,
 * an external buffer is allocated/reused to hold further pushbacks, and
 * pushback_buffer_ and buffer_cursor_/buffer_end_ then point to the
 * new buffer. When this buffer has been read to the end again, the cursor
 * is switched back to the internal buffer.
 */
explicit InputStreamUTF16Buffer(v8::UnicodeInputStream* stream)
: UC16CharacterStream(),
stream_(stream),
pushback_buffer_(buffer_),
pushback_buffer_end_cache_(NULL),
pushback_buffer_backing_(NULL),
pushback_buffer_backing_size_(0) {
buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
}
virtual ~InputStreamUTF16Buffer() { }
virtual ~InputStreamUTF16Buffer() {
if (pushback_buffer_backing_ != NULL) {
DeleteArray(pushback_buffer_backing_);
}
}
virtual void PushBack(uc32 ch) {
stream_->PushBack(ch);
virtual void PushBack(uc16 ch) {
ASSERT(pos_ > 0);
if (buffer_cursor_ <= pushback_buffer_) {
// No more room in the current buffer to do pushbacks.
if (pushback_buffer_end_cache_ == NULL) {
// We have overflowed the pushback space at the beginning of buffer_.
// Switch to using a separate allocated pushback buffer.
if (pushback_buffer_backing_ == NULL) {
// Allocate a buffer the first time we need it.
pushback_buffer_backing_ = NewArray<uc16>(kPushBackSize);
pushback_buffer_backing_size_ = kPushBackSize;
}
pushback_buffer_ = pushback_buffer_backing_;
pushback_buffer_end_cache_ = buffer_end_;
buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
buffer_cursor_ = buffer_end_ - 1;
} else {
// Hit the bottom of the allocated pushback buffer.
// Double the buffer and continue.
uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
memcpy(new_buffer + pushback_buffer_backing_size_,
pushback_buffer_backing_,
pushback_buffer_backing_size_);
DeleteArray(pushback_buffer_backing_);
buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
}
}
pushback_buffer_[buffer_cursor_ - pushback_buffer_ - 1] = ch;
pos_--;
}
virtual uc32 Advance() {
uc32 result = stream_->Next();
if (result >= 0) pos_++;
return result;
protected:
virtual bool ReadBlock() {
if (pushback_buffer_end_cache_ != NULL) {
buffer_cursor_ = buffer_;
buffer_end_ = pushback_buffer_end_cache_;
pushback_buffer_end_cache_ = NULL;
return buffer_end_ > buffer_cursor_;
}
// Copy the top of the buffer into the pushback area.
int32_t value;
uc16* buffer_start = buffer_ + kPushBackSize;
buffer_cursor_ = buffer_end_ = buffer_start;
while ((value = stream_->Next()) >= 0) {
if (value > static_cast<int32_t>(unibrow::Utf8::kMaxThreeByteChar)) {
value = unibrow::Utf8::kBadChar;
}
// buffer_end_ is a const pointer, but buffer_ is writable.
buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
if (buffer_end_ == buffer_ + kPushBackSize + kBufferSize) break;
}
return buffer_end_ > buffer_start;
}
virtual void SeekForward(int pos) {
virtual unsigned SlowSeekForward(unsigned pos) {
// Seeking in the input is not used by preparsing.
// It's only used by the real parser based on preparser data.
UNIMPLEMENTED();
return 0;
}
private:
static const unsigned kBufferSize = 512;
static const unsigned kPushBackSize = 16;
v8::UnicodeInputStream* const stream_;
// Buffer holding first kPushBackSize characters of pushback buffer,
// then kBufferSize chars of read-ahead.
// The pushback buffer is only used if pushing back characters past
// the start of a block.
uc16 buffer_[kPushBackSize + kBufferSize];
// Limit of pushbacks before new allocation is necessary.
uc16* pushback_buffer_;
// The following fields are used only when the pushback area at the start
// of buffer_ is insufficient.
const uc16* pushback_buffer_end_cache_;
uc16* pushback_buffer_backing_;
unsigned pushback_buffer_backing_size_;
};
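The two-region layout is the key idea: reads start kPushBackSize characters into buffer_, so a handful of PushBack() calls just step the cursor backwards into the reserved prefix with no allocation; only deeper pushback spills to the heap-allocated backing. A compact sketch of the no-allocation fast path (it omits the overflow-to-heap machinery shown above):

#include <cstring>

class PushbackReader {
 public:
  static const int kPushBack = 4;   // reserved pushback prefix
  static const int kChunk = 16;     // read-ahead region
  PushbackReader() : cursor_(buf_ + kPushBack), end_(buf_ + kPushBack) {}
  bool HasNext() const { return cursor_ < end_; }
  char Next() { return *cursor_++; }          // caller checks HasNext()
  void PushBack(char c) { *--cursor_ = c; }   // safe for up to kPushBack chars
  void Fill(const char* data, int n) {        // refill read-ahead, n <= kChunk
    std::memcpy(buf_ + kPushBack, data, n);
    cursor_ = buf_ + kPushBack;
    end_ = cursor_ + n;
  }
 private:
  char buf_[kPushBack + kChunk];
  char* cursor_;
  char* end_;
};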
class StandAloneJavaScriptScanner : public JavaScriptScanner {
public:
void Initialize(UTF16Buffer* source) {
void Initialize(UC16CharacterStream* source) {
source_ = source;
literal_flags_ = kLiteralString | kLiteralIdentifier;
Init();
@ -92,7 +174,6 @@ void FatalProcessOutOfMemory(const char* reason) {
bool EnableSlowAsserts() { return true; }
} // namespace internal.

1
deps/v8/src/preparser.cc

@ -1078,6 +1078,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
Expect(i::Token::RBRACE, CHECK_OK);
// Position right after terminal '}'.
int end_pos = scanner_->location().end_pos;
log_->LogFunction(function_block_pos, end_pos,
function_scope.materialized_literal_count(),

14
deps/v8/src/profile-generator-inl.h

@ -122,7 +122,7 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
}
inline uint64_t HeapEntry::id() {
uint64_t HeapEntry::id() {
union {
Id stored_id;
uint64_t returned_id;
@ -146,6 +146,18 @@ void HeapEntriesMap::UpdateEntries(Visitor* visitor) {
}
}
bool HeapSnapshotGenerator::ReportProgress(bool force) {
const int kProgressReportGranularity = 10000;
if (control_ != NULL
&& (force || progress_counter_ % kProgressReportGranularity == 0)) {
return
control_->ReportProgressValue(progress_counter_, progress_total_) ==
v8::ActivityControl::kContinue;
}
return true;
}
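ReportProgress throttles embedder callbacks to every kProgressReportGranularity units of work and treats anything but kContinue as a cancellation request. The pattern in isolation (a sketch; ActivityControlSketch stands in for the real v8::ActivityControl interface):

struct ActivityControlSketch {
  enum Result { kContinue, kAbort };
  virtual Result ReportProgressValue(int done, int total) = 0;
  virtual ~ActivityControlSketch() {}
};

// Returns false if the embedder asked to stop.
static bool RunWork(ActivityControlSketch* control, int total) {
  const int kGranularity = 10000;
  for (int done = 0; done < total; ++done) {
    // ... one unit of work ...
    if (control != NULL && done % kGranularity == 0 &&
        control->ReportProgressValue(done, total) !=
            ActivityControlSketch::kContinue) {
      return false;  // interrupted by the embedder
    }
  }
  return true;
}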
} } // namespace v8::internal
#endif // ENABLE_LOGGING_AND_PROFILING

359
deps/v8/src/profile-generator.cc

@ -1382,86 +1382,6 @@ HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
}
void HeapSnapshot::FillReversePostorderIndexes(Vector<HeapEntry*>* entries) {
ClearPaint();
int current_entry = 0;
List<HeapEntry*> nodes_to_visit;
nodes_to_visit.Add(root());
root()->paint_reachable();
while (!nodes_to_visit.is_empty()) {
HeapEntry* entry = nodes_to_visit.last();
Vector<HeapGraphEdge> children = entry->children();
bool has_new_edges = false;
for (int i = 0; i < children.length(); ++i) {
if (children[i].type() == HeapGraphEdge::kShortcut) continue;
HeapEntry* child = children[i].to();
if (!child->painted_reachable()) {
nodes_to_visit.Add(child);
child->paint_reachable();
has_new_edges = true;
}
}
if (!has_new_edges) {
entry->set_ordered_index(current_entry);
(*entries)[current_entry++] = entry;
nodes_to_visit.RemoveLast();
}
}
entries->Truncate(current_entry);
}
static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) {
int finger1 = i1, finger2 = i2;
while (finger1 != finger2) {
while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index();
while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index();
}
return finger1;
}
// The algorithm is based on the article:
// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
// Softw. Pract. Exper. 4 (2001), pp. 1–10.
void HeapSnapshot::BuildDominatorTree(const Vector<HeapEntry*>& entries,
Vector<HeapEntry*>* dominators) {
if (entries.length() == 0) return;
const int root_index = entries.length() - 1;
for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL;
(*dominators)[root_index] = entries[root_index];
bool changed = true;
while (changed) {
changed = false;
for (int i = root_index - 1; i >= 0; --i) {
HeapEntry* new_idom = NULL;
Vector<HeapGraphEdge*> rets = entries[i]->retainers();
int j = 0;
for (; j < rets.length(); ++j) {
if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
HeapEntry* ret = rets[j]->From();
if (dominators->at(ret->ordered_index()) != NULL) {
new_idom = ret;
break;
}
}
for (++j; j < rets.length(); ++j) {
if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
HeapEntry* ret = rets[j]->From();
if (dominators->at(ret->ordered_index()) != NULL) {
new_idom = entries[Intersect(ret->ordered_index(),
new_idom->ordered_index(),
*dominators)];
}
}
if (new_idom != NULL && dominators->at(i) != new_idom) {
(*dominators)[i] = new_idom;
changed = true;
}
}
}
}
void HeapSnapshot::SetDominatorsToSelf() {
for (int i = 0; i < entries_.length(); ++i) {
HeapEntry* entry = entries_[i];
@ -1470,61 +1390,6 @@ void HeapSnapshot::SetDominatorsToSelf() {
}
void HeapSnapshot::SetEntriesDominators() {
// This array is used for maintaining reverse postorder of nodes.
ScopedVector<HeapEntry*> ordered_entries(entries_.length());
FillReversePostorderIndexes(&ordered_entries);
ScopedVector<HeapEntry*> dominators(ordered_entries.length());
BuildDominatorTree(ordered_entries, &dominators);
for (int i = 0; i < ordered_entries.length(); ++i) {
ASSERT(dominators[i] != NULL);
ordered_entries[i]->set_dominator(dominators[i]);
}
// For nodes unreachable from root, set dominator to itself.
SetDominatorsToSelf();
}
void HeapSnapshot::ApproximateRetainedSizes() {
SetEntriesDominators();
// Since for the dominator tree we only know parent nodes, not
// children, we sum up total sizes by traversing the tree level by
// level upwards, starting from the leaves.
for (int i = 0; i < entries_.length(); ++i) {
HeapEntry* entry = entries_[i];
entry->set_retained_size(entry->self_size());
entry->set_leaf();
}
while (true) {
bool onlyLeaves = true;
for (int i = 0; i < entries_.length(); ++i) {
HeapEntry *entry = entries_[i], *dominator = entry->dominator();
if (!entry->is_processed() && dominator != entry) {
dominator->set_non_leaf();
onlyLeaves = false;
}
}
if (onlyLeaves) break;
for (int i = 0; i < entries_.length(); ++i) {
HeapEntry *entry = entries_[i], *dominator = entry->dominator();
if (entry->is_leaf() && dominator != entry) {
dominator->add_retained_size(entry->retained_size());
}
}
// Mark all current leaves as processed, reset non-leaves back to leaves.
for (int i = 0; i < entries_.length(); ++i) {
HeapEntry* entry = entries_[i];
if (entry->is_leaf())
entry->set_processed();
else if (entry->is_non_leaf())
entry->set_leaf();
}
}
}
HeapEntry* HeapSnapshot::GetNextEntryToInit() {
if (entries_.length() > 0) {
HeapEntry* last_entry = entries_.last();
@ -1716,7 +1581,14 @@ HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
const char* name,
unsigned uid) {
is_tracking_objects_ = true; // Start watching for heap objects moves.
HeapSnapshot* snapshot = new HeapSnapshot(this, type, name, uid);
return new HeapSnapshot(this, type, name, uid);
}
void HeapSnapshotsCollection::SnapshotGenerationFinished(
HeapSnapshot* snapshot) {
ids_.SnapshotGenerationFinished();
if (snapshot != NULL) {
snapshots_.Add(snapshot);
HashMap::Entry* entry =
snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
@ -1724,7 +1596,7 @@ HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
true);
ASSERT(entry->value == NULL);
entry->value = snapshot;
return snapshot;
}
}
@ -1832,8 +1704,10 @@ void HeapObjectsSet::Insert(Object* obj) {
}
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
v8::ActivityControl* control)
: snapshot_(snapshot),
control_(control),
collection_(snapshot->collection()),
filler_(NULL) {
}
@ -1990,21 +1864,13 @@ class RootsReferencesExtractor : public ObjectVisitor {
};
void HeapSnapshotGenerator::GenerateSnapshot() {
bool HeapSnapshotGenerator::GenerateSnapshot() {
AssertNoAllocation no_alloc;
SetProgressTotal(4); // 2 passes + dominators + sizes.
// Pass 1. Iterate heap contents to count entries and references.
SnapshotCounter counter(&entries_);
filler_ = &counter;
filler_->AddEntry(HeapSnapshot::kInternalRootObject);
filler_->AddEntry(HeapSnapshot::kGcRootsObject);
HeapIterator iterator(HeapIterator::kPreciseFiltering);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
ExtractReferences(obj);
}
SetRootGcRootsReference();
RootsReferencesExtractor extractor(this);
Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
if (!CountEntriesAndReferences()) return false;
// Allocate and fill entries in the snapshot, allocate references.
snapshot_->AllocateEntries(entries_.entries_count(),
@ -2014,16 +1880,14 @@ void HeapSnapshotGenerator::GenerateSnapshot() {
entries_.UpdateEntries(&allocator);
// Pass 2. Fill references.
SnapshotFiller filler(snapshot_, &entries_);
filler_ = &filler;
iterator.reset();
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
ExtractReferences(obj);
}
SetRootGcRootsReference();
Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
if (!FillReferences()) return false;
snapshot_->ApproximateRetainedSizes();
if (!SetEntriesDominators()) return false;
if (!ApproximateRetainedSizes()) return false;
progress_counter_ = progress_total_;
if (!ReportProgress(true)) return false;
return true;
}
@ -2351,6 +2215,183 @@ void HeapSnapshotGenerator::SetGcRootsReference(Object* child_obj) {
}
void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
if (control_ == NULL) return;
HeapIterator iterator(HeapIterator::kPreciseFiltering);
int objects_count = 0;
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next(), ++objects_count) {}
progress_total_ = objects_count * iterations_count;
progress_counter_ = 0;
}
bool HeapSnapshotGenerator::CountEntriesAndReferences() {
SnapshotCounter counter(&entries_);
filler_ = &counter;
filler_->AddEntry(HeapSnapshot::kInternalRootObject);
filler_->AddEntry(HeapSnapshot::kGcRootsObject);
return IterateAndExtractReferences();
}
bool HeapSnapshotGenerator::FillReferences() {
SnapshotFiller filler(snapshot_, &entries_);
filler_ = &filler;
return IterateAndExtractReferences();
}
void HeapSnapshotGenerator::FillReversePostorderIndexes(
Vector<HeapEntry*>* entries) {
snapshot_->ClearPaint();
int current_entry = 0;
List<HeapEntry*> nodes_to_visit;
nodes_to_visit.Add(snapshot_->root());
snapshot_->root()->paint_reachable();
while (!nodes_to_visit.is_empty()) {
HeapEntry* entry = nodes_to_visit.last();
Vector<HeapGraphEdge> children = entry->children();
bool has_new_edges = false;
for (int i = 0; i < children.length(); ++i) {
if (children[i].type() == HeapGraphEdge::kShortcut) continue;
HeapEntry* child = children[i].to();
if (!child->painted_reachable()) {
nodes_to_visit.Add(child);
child->paint_reachable();
has_new_edges = true;
}
}
if (!has_new_edges) {
entry->set_ordered_index(current_entry);
(*entries)[current_entry++] = entry;
nodes_to_visit.RemoveLast();
}
}
entries->Truncate(current_entry);
}
static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) {
int finger1 = i1, finger2 = i2;
while (finger1 != finger2) {
while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index();
while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index();
}
return finger1;
}
// The algorithm is based on the article:
// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
// Softw. Pract. Exper. 4 (2001), pp. 1–10.
bool HeapSnapshotGenerator::BuildDominatorTree(
const Vector<HeapEntry*>& entries,
Vector<HeapEntry*>* dominators) {
if (entries.length() == 0) return true;
const int entries_length = entries.length(), root_index = entries_length - 1;
for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL;
(*dominators)[root_index] = entries[root_index];
int changed = 1;
const int base_progress_counter = progress_counter_;
while (changed != 0) {
changed = 0;
for (int i = root_index - 1; i >= 0; --i) {
HeapEntry* new_idom = NULL;
Vector<HeapGraphEdge*> rets = entries[i]->retainers();
int j = 0;
for (; j < rets.length(); ++j) {
if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
HeapEntry* ret = rets[j]->From();
if (dominators->at(ret->ordered_index()) != NULL) {
new_idom = ret;
break;
}
}
for (++j; j < rets.length(); ++j) {
if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
HeapEntry* ret = rets[j]->From();
if (dominators->at(ret->ordered_index()) != NULL) {
new_idom = entries[Intersect(ret->ordered_index(),
new_idom->ordered_index(),
*dominators)];
}
}
if (new_idom != NULL && dominators->at(i) != new_idom) {
(*dominators)[i] = new_idom;
++changed;
}
}
int remaining = entries_length - changed;
if (remaining < 0) remaining = 0;
progress_counter_ = base_progress_counter + remaining;
if (!ReportProgress(true)) return false;
}
return true;
}
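Intersect() is the core step of the Cooper–Harvey–Kennedy algorithm: entries are in reverse postorder, so every node's immediate dominator has a strictly higher index, and walking both fingers upward until they meet yields the nearest common dominator. A standalone sketch over plain index arrays:

// idom[i] is the current immediate-dominator index of node i; the root
// (highest index) dominates itself. Walking both fingers up always
// terminates because each step strictly increases the smaller index.
static int IntersectSketch(int i1, int i2, const int* idom) {
  int finger1 = i1, finger2 = i2;
  while (finger1 != finger2) {
    while (finger1 < finger2) finger1 = idom[finger1];
    while (finger2 < finger1) finger2 = idom[finger2];
  }
  return finger1;
}

For example, with idom = {2, 2, 2} (root at index 2), IntersectSketch(0, 1, idom) climbs both fingers to 2, the common dominator.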
bool HeapSnapshotGenerator::SetEntriesDominators() {
// This array is used for maintaining reverse postorder of nodes.
ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length());
FillReversePostorderIndexes(&ordered_entries);
ScopedVector<HeapEntry*> dominators(ordered_entries.length());
if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
for (int i = 0; i < ordered_entries.length(); ++i) {
ASSERT(dominators[i] != NULL);
ordered_entries[i]->set_dominator(dominators[i]);
}
// For nodes unreachable from root, set dominator to itself.
snapshot_->SetDominatorsToSelf();
return true;
}
bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
// Since for the dominator tree we only know parent nodes, not
// children, we sum up total sizes by "bubbling" each node's self
// size up, adding it to all of its dominators.
for (int i = 0; i < snapshot_->entries()->length(); ++i) {
HeapEntry* entry = snapshot_->entries()->at(i);
entry->set_retained_size(entry->self_size());
}
for (int i = 0;
i < snapshot_->entries()->length();
++i, IncProgressCounter()) {
HeapEntry* entry = snapshot_->entries()->at(i);
int entry_size = entry->self_size();
for (HeapEntry* dominator = entry->dominator();
dominator != entry;
entry = dominator, dominator = entry->dominator()) {
dominator->add_retained_size(entry_size);
}
if (!ReportProgress()) return false;
}
return true;
}
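The bubbling loop adds each entry's self size to every node on its dominator chain, terminating at the root, which dominates itself. The same walk over plain arrays (a sketch, not the V8 data structures):

// self_size[i] and dominator[i] describe node i; retained[] is the output.
static void ApproximateRetainedSizesSketch(int n, const int* self_size,
                                           const int* dominator,
                                           int* retained) {
  for (int i = 0; i < n; ++i) retained[i] = self_size[i];
  for (int i = 0; i < n; ++i) {
    int size = self_size[i];
    int node = i;
    int dom = dominator[i];
    while (dom != node) {  // stop at the root, which dominates itself
      retained[dom] += size;
      node = dom;
      dom = dominator[node];
    }
  }
}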
bool HeapSnapshotGenerator::IterateAndExtractReferences() {
HeapIterator iterator(HeapIterator::kPreciseFiltering);
bool interrupted = false;
// Heap iteration with precise filtering must be finished in any case.
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next(), IncProgressCounter()) {
if (!interrupted) {
ExtractReferences(obj);
if (!ReportProgress()) interrupted = true;
}
}
if (interrupted) return false;
SetRootGcRootsReference();
RootsReferencesExtractor extractor(this);
Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
return ReportProgress();
}
void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
raw_additions_root_ =
NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));

41
deps/v8/src/profile-generator.h

@ -526,7 +526,7 @@ class HeapEntry BASE_EMBEDDED {
HeapSnapshot* snapshot() { return snapshot_; }
Type type() { return static_cast<Type>(type_); }
const char* name() { return name_; }
uint64_t id();
inline uint64_t id();
int self_size() { return self_size_; }
int retained_size() { return retained_size_; }
void add_retained_size(int size) { retained_size_ += size; }
@ -558,13 +558,6 @@ class HeapEntry BASE_EMBEDDED {
void ApplyAndPaintAllReachable(Visitor* visitor);
void PaintAllReachable();
bool is_leaf() { return painted_ == kLeaf; }
void set_leaf() { painted_ = kLeaf; }
bool is_non_leaf() { return painted_ == kNonLeaf; }
void set_non_leaf() { painted_ = kNonLeaf; }
bool is_processed() { return painted_ == kProcessed; }
void set_processed() { painted_ = kProcessed; }
void SetIndexedReference(HeapGraphEdge::Type type,
int child_index,
int index,
@ -625,10 +618,6 @@ class HeapEntry BASE_EMBEDDED {
static const unsigned kUnpainted = 0;
static const unsigned kPainted = 1;
static const unsigned kPaintedReachableFromOthers = 2;
// Paints used for approximate retained sizes calculation.
static const unsigned kLeaf = 0;
static const unsigned kNonLeaf = 1;
static const unsigned kProcessed = 2;
static const int kExactRetainedSizeTag = 1;
@ -682,6 +671,7 @@ class HeapSnapshot {
unsigned uid() { return uid_; }
HeapEntry* root() { return root_entry_; }
HeapEntry* gc_roots() { return gc_roots_entry_; }
List<HeapEntry*>* entries() { return &entries_; }
void AllocateEntries(
int entries_count, int children_count, int retainers_count);
@ -693,7 +683,6 @@ class HeapSnapshot {
int size,
int children_count,
int retainers_count);
void ApproximateRetainedSizes();
void ClearPaint();
HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
HeapEntry* GetEntryById(uint64_t id);
@ -716,10 +705,6 @@ class HeapSnapshot {
int children_count,
int retainers_count);
HeapEntry* GetNextEntryToInit();
void BuildDominatorTree(const Vector<HeapEntry*>& entries,
Vector<HeapEntry*>* dominators);
void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
void SetEntriesDominators();
HeapSnapshotsCollection* collection_;
Type type_;
@ -845,7 +830,7 @@ class HeapSnapshotsCollection {
HeapSnapshot* NewSnapshot(
HeapSnapshot::Type type, const char* name, unsigned uid);
void SnapshotGenerationFinished() { ids_.SnapshotGenerationFinished(); }
void SnapshotGenerationFinished(HeapSnapshot* snapshot);
List<HeapSnapshot*>* snapshots() { return &snapshots_; }
HeapSnapshot* GetSnapshot(unsigned uid);
@ -968,16 +953,27 @@ class HeapSnapshotGenerator {
HeapEntry* child_entry) = 0;
};
explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
void GenerateSnapshot();
HeapSnapshotGenerator(HeapSnapshot* snapshot,
v8::ActivityControl* control);
bool GenerateSnapshot();
private:
bool ApproximateRetainedSizes();
bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
Vector<HeapEntry*>* dominators);
bool CountEntriesAndReferences();
HeapEntry* GetEntry(Object* obj);
void IncProgressCounter() { ++progress_counter_; }
void ExtractReferences(HeapObject* obj);
void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
bool FillReferences();
void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
bool IterateAndExtractReferences();
inline bool ReportProgress(bool force = false);
bool SetEntriesDominators();
void SetClosureReference(HeapObject* parent_obj,
HeapEntry* parent,
String* reference_name,
@ -1009,8 +1005,10 @@ class HeapSnapshotGenerator {
void SetRootShortcutReference(Object* child);
void SetRootGcRootsReference();
void SetGcRootsReference(Object* child);
void SetProgressTotal(int iterations_count);
HeapSnapshot* snapshot_;
v8::ActivityControl* control_;
HeapSnapshotsCollection* collection_;
// Mapping from HeapObject* pointers to HeapEntry* pointers.
HeapEntriesMap entries_;
@ -1018,6 +1016,9 @@ class HeapSnapshotGenerator {
// Used during references extraction to mark heap objects that
// are references via non-hidden properties.
HeapObjectsSet known_references_;
// Used during snapshot generation.
int progress_counter_;
int progress_total_;
friend class IndexedReferencesExtractor;
friend class RootsReferencesExtractor;

62
deps/v8/src/regexp.js

@ -120,24 +120,30 @@ function DoRegExpExec(regexp, string, index) {
function BuildResultFromMatchInfo(lastMatchInfo, s) {
var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
var result = %_RegExpConstructResult(numResults, lastMatchInfo[CAPTURE0], s);
if (numResults === 1) {
var matchStart = lastMatchInfo[CAPTURE(0)];
var matchEnd = lastMatchInfo[CAPTURE(1)];
result[0] = SubString(s, matchStart, matchEnd);
var start = lastMatchInfo[CAPTURE0];
var end = lastMatchInfo[CAPTURE1];
var result = %_RegExpConstructResult(numResults, start, s);
if (start + 1 == end) {
result[0] = %_StringCharAt(s, start);
} else {
for (var i = 0; i < numResults; i++) {
var matchStart = lastMatchInfo[CAPTURE(i << 1)];
var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
if (matchStart != -1 && matchEnd != -1) {
result[i] = SubString(s, matchStart, matchEnd);
result[0] = %_SubString(s, start, end);
}
var j = REGEXP_FIRST_CAPTURE + 2;
for (var i = 1; i < numResults; i++) {
start = lastMatchInfo[j++];
end = lastMatchInfo[j++];
if (end != -1) {
if (start + 1 == end) {
result[i] = %_StringCharAt(s, start);
} else {
result[i] = %_SubString(s, start, end);
}
} else {
// Make sure the element is present. Avoid reading the undefined
// property from the global object since this may change.
result[i] = void 0;
}
}
}
return result;
}
@ -166,12 +172,7 @@ function RegExpExec(string) {
}
string = regExpInput;
}
var s;
if (IS_STRING(string)) {
s = string;
} else {
s = ToString(string);
}
string = TO_STRING_INLINE(string);
var lastIndex = this.lastIndex;
// Conversion is required by the ES5 specification (RegExp.prototype.exec
@ -180,7 +181,7 @@ function RegExpExec(string) {
var global = this.global;
if (global) {
if (i < 0 || i > s.length) {
if (i < 0 || i > string.length) {
this.lastIndex = 0;
return null;
}
@ -188,9 +189,9 @@ function RegExpExec(string) {
i = 0;
}
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
if (matchIndices === null) {
if (global) this.lastIndex = 0;
@ -202,7 +203,7 @@ function RegExpExec(string) {
if (global) {
this.lastIndex = lastMatchInfo[CAPTURE1];
}
return BuildResultFromMatchInfo(matchIndices, s);
return BuildResultFromMatchInfo(matchIndices, string);
}
@ -227,12 +228,7 @@ function RegExpTest(string) {
string = regExpInput;
}
var s;
if (IS_STRING(string)) {
s = string;
} else {
s = ToString(string);
}
string = TO_STRING_INLINE(string);
var lastIndex = this.lastIndex;
@ -241,13 +237,13 @@ function RegExpTest(string) {
var i = TO_INTEGER(lastIndex);
if (this.global) {
if (i < 0 || i > s.length) {
if (i < 0 || i > string.length) {
this.lastIndex = 0;
return false;
}
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
if (matchIndices === null) {
this.lastIndex = 0;
return false;
@ -269,11 +265,11 @@ function RegExpTest(string) {
(this.ignoreCase ? 'i' : '')
+ (this.multiline ? 'm' : ''));
}
if (!regexp_val.test(s)) return false;
if (!regexp_val.test(string)) return false;
}
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(this, s, 0, lastMatchInfo);
var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
if (matchIndices === null) return false;
lastMatchInfoOverride = null;
return true;

103
deps/v8/src/runtime-profiler.cc

@ -68,12 +68,18 @@ class PendingListNode : public Malloced {
};
enum SamplerState {
IN_NON_JS_STATE = 0,
IN_JS_STATE = 1
};
// Optimization sampler constants.
static const int kSamplerFrameCount = 2;
static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
static const int kSamplerWindowSize = 16;
static const int kSamplerTicksDelta = 32;
static const int kSamplerTicksBetweenThresholdAdjustment = 32;
static const int kSamplerThresholdInit = 3;
static const int kSamplerThresholdMin = 1;
@ -88,6 +94,11 @@ static const int kSizeLimit = 1500;
static int sampler_threshold = kSamplerThresholdInit;
static int sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
static int sampler_ticks_until_threshold_adjustment =
kSamplerTicksBetweenThresholdAdjustment;
// The ratio of ticks spent in JS code in percent.
static Atomic32 js_ratio;
// The JSFunctions in the sampler window are not GC safe. Old-space
// pointers are not cleared during mark-sweep collection and therefore
@ -261,40 +272,71 @@ void RuntimeProfiler::OptimizeNow() {
// have a sample of the function, we mark it for optimizations
// (eagerly or lazily).
JSFunction* samples[kSamplerFrameCount];
int count = 0;
int sample_count = 0;
int frame_count = 0;
for (JavaScriptFrameIterator it;
count < kSamplerFrameCount && !it.done();
frame_count++ < kSamplerFrameCount && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
JSFunction* function = JSFunction::cast(frame->function());
int function_size = function->shared()->SourceSize();
int threshold_size_factor;
if (function_size > kSizeLimit) {
threshold_size_factor = sampler_threshold_size_factor;
} else {
threshold_size_factor = 1;
// Adjust threshold each time we have processed
// a certain number of ticks.
if (sampler_ticks_until_threshold_adjustment > 0) {
sampler_ticks_until_threshold_adjustment--;
if (sampler_ticks_until_threshold_adjustment <= 0) {
// If the threshold is not already at the minimum,
// lower it and reset the ticks until the next adjustment.
if (sampler_threshold > kSamplerThresholdMin) {
sampler_threshold -= kSamplerThresholdDelta;
sampler_ticks_until_threshold_adjustment =
kSamplerTicksBetweenThresholdAdjustment;
}
}
}
int threshold = sampler_threshold * threshold_size_factor;
samples[count++] = function;
if (function->IsMarkedForLazyRecompilation()) {
Code* unoptimized = function->shared()->code();
int nesting = unoptimized->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
} else if (LookupSample(function) >= threshold) {
if (IsOptimizable(function)) {
}
// Do not record non-optimizable functions.
if (!IsOptimizable(function)) continue;
samples[sample_count++] = function;
int function_size = function->shared()->SourceSize();
int threshold_size_factor = (function_size > kSizeLimit)
? sampler_threshold_size_factor
: 1;
int threshold = sampler_threshold * threshold_size_factor;
int current_js_ratio = NoBarrier_Load(&js_ratio);
// Adjust threshold depending on the ratio of time spent
// in JS code.
if (current_js_ratio < 20) {
// If we spend less than 20% of the time in JS code,
// do not optimize.
continue;
} else if (current_js_ratio < 75) {
// Below 75% of time spent in JS code, only optimize very
// frequently used functions.
threshold *= 3;
}
if (LookupSample(function) >= threshold) {
Optimize(function, false, 0);
CompilationCache::MarkForEagerOptimizing(Handle<JSFunction>(function));
}
}
}
// Add the collected functions as samples. It's important not to do
// this as part of collecting them because this will interfere with
// the sample lookup in case of recursive functions.
for (int i = 0; i < count; i++) {
for (int i = 0; i < sample_count; i++) {
AddSample(samples[i], kSamplerFrameWeight[i]);
}
}
@ -308,8 +350,35 @@ void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static void UpdateStateRatio(SamplerState current_state) {
static const int kStateWindowSize = 128;
static SamplerState state_window[kStateWindowSize];
static int state_window_position = 0;
static int state_counts[2] = { kStateWindowSize, 0 };
SamplerState old_state = state_window[state_window_position];
state_counts[old_state]--;
state_window[state_window_position] = current_state;
state_counts[current_state]++;
ASSERT(IsPowerOf2(kStateWindowSize));
state_window_position = (state_window_position + 1) &
(kStateWindowSize - 1);
NoBarrier_Store(&js_ratio, state_counts[IN_JS_STATE] * 100 /
kStateWindowSize);
}
#endif
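UpdateStateRatio maintains an O(1) sliding ratio: a 128-entry circular window of recent state samples plus per-state counts, with the power-of-two size turning wrap-around into a mask. The same mechanism isolated (a sketch; V8's version also publishes the result with an atomic store):

static const int kWindowSketchSize = 128;      // must be a power of two
static int window_sketch[kWindowSketchSize];   // 0 = non-JS, 1 = JS
static int counts_sketch[2] = { kWindowSketchSize, 0 };
static int position_sketch = 0;

// Record one sample and return the percentage of JS ticks in the window.
static int UpdateRatioSketch(int current_state) {
  counts_sketch[window_sketch[position_sketch]]--;  // drop oldest sample
  window_sketch[position_sketch] = current_state;   // record newest
  counts_sketch[current_state]++;
  position_sketch = (position_sketch + 1) & (kWindowSketchSize - 1);
  return counts_sketch[1] * 100 / kWindowSketchSize;
}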
void RuntimeProfiler::NotifyTick() {
#ifdef ENABLE_LOGGING_AND_PROFILING
// Record state sample.
SamplerState state = Top::IsInJSState()
? IN_JS_STATE
: IN_NON_JS_STATE;
UpdateStateRatio(state);
StackGuard::RequestRuntimeProfilerTick();
#endif
}
@ -341,6 +410,8 @@ void RuntimeProfiler::Setup() {
void RuntimeProfiler::Reset() {
sampler_threshold = kSamplerThresholdInit;
sampler_ticks_until_threshold_adjustment =
kSamplerTicksBetweenThresholdAdjustment;
sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
}
@ -361,6 +432,7 @@ int RuntimeProfiler::SamplerWindowSize() {
bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
#ifdef ENABLE_LOGGING_AND_PROFILING
static const int kNonJSTicksThreshold = 100;
// We suspend the runtime profiler thread when not running
// JavaScript. If the CPU profiler is active we must not do this
@ -378,6 +450,7 @@ bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
}
}
}
#endif
return false;
}

127
deps/v8/src/runtime.cc

@ -614,22 +614,6 @@ static MaybeObject* Runtime_SetHiddenPrototype(Arguments args) {
}
// Sets the magic number that identifies a function as one of the special
// math functions that can be inlined.
static MaybeObject* Runtime_SetMathFunctionId(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSFunction, function, args[0]);
CONVERT_CHECKED(Smi, id, args[1]);
RUNTIME_ASSERT(id->value() >= 0);
RUNTIME_ASSERT(id->value() < SharedFunctionInfo::max_math_id_number());
function->shared()->set_math_function_id(id->value());
return Heap::undefined_value();
}
static MaybeObject* Runtime_IsConstructCall(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 0);
@ -3516,7 +3500,8 @@ static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(Arguments args) {
CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_CHECKED(String, name, args[1]);
CONVERT_CHECKED(Smi, flag_setter, args[2]);
CONVERT_CHECKED(JSFunction, fun, args[3]);
Object* fun = args[3];
RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
CONVERT_CHECKED(Smi, flag_attr, args[4]);
int unchecked = flag_attr->value();
RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@ -3572,7 +3557,7 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
}
LookupResult result;
js_object->LocalLookupRealNamedProperty(*name, &result);
js_object->LookupRealNamedProperty(*name, &result);
// Take special care when attributes are different and there is already
// a property. For simplicity we normalize the property which enables us
@ -3580,7 +3565,8 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
// map. The current version of SetObjectProperty does not handle attributes
// correctly in the case where a property is a field and is reset with
// new attributes.
if (result.IsProperty() && attr != result.GetAttributes()) {
if (result.IsProperty() &&
(attr != result.GetAttributes() || result.type() == CALLBACKS)) {
// New attributes - normalize to avoid writing to instance descriptor
NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
// Use IgnoreAttributes version since a readonly property may be
@ -4689,6 +4675,13 @@ static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
if (!new_alloc->ToObject(&new_object)) {
return new_alloc;
}
if (!Heap::new_space()->Contains(new_object)) {
// Even if our string is small enough to fit in new space we still have to
// handle it being allocated in old space as may happen in the third
// attempt. See CALL_AND_RETRY in heap-inl.h and similar code in
// CEntryStub::GenerateCore.
return SlowQuoteJsonString<Char, StringType>(characters);
}
StringType* new_string = StringType::cast(new_object);
ASSERT(Heap::new_space()->Contains(new_string));
@ -4723,9 +4716,9 @@ static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
}
*(write_cursor++) = '"';
int final_length =
int final_length = static_cast<int>(
write_cursor - reinterpret_cast<Char*>(
new_string->address() + SeqAsciiString::kHeaderSize);
new_string->address() + SeqAsciiString::kHeaderSize));
Heap::new_space()->ShrinkStringAtAllocationBoundary<StringType>(new_string,
final_length);
return new_string;
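
QuoteJsonString writes through a raw cursor into a string allocated for the worst-case escaped length, then shrinks the result to what was actually produced; the added static_cast makes the ptrdiff_t-to-int narrowing explicit on 64-bit builds. A hedged sketch of the allocate-then-shrink pattern, using standard-library strings rather than V8 heap objects and simplified escaping:

    #include <string>

    std::string QuoteAscii(const std::string& in) {
      std::string out;
      out.resize(in.size() * 6 + 2);  // Worst case: every char -> \uXXXX.
      char* write_cursor = &out[0];
      *write_cursor++ = '"';
      for (size_t i = 0; i < in.size(); i++) {
        char c = in[i];
        if (c == '"' || c == '\\') *write_cursor++ = '\\';
        *write_cursor++ = c;          // Full JSON escaping elided here.
      }
      *write_cursor++ = '"';
      // Trim to the length actually written -- the analogue of
      // ShrinkStringAtAllocationBoundary(new_string, final_length).
      out.resize(static_cast<size_t>(write_cursor - &out[0]));
      return out;
    }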
@ -6022,37 +6015,6 @@ static MaybeObject* Runtime_Math_log(Arguments args) {
}
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
// S. Warren, Jr., figure 11-6, page 213.
static double powi(double x, int y) {
ASSERT(y != kMinInt);
unsigned n = (y < 0) ? -y : y;
double m = x;
double p = 1;
while (true) {
if ((n & 1) != 0) p *= m;
n >>= 1;
if (n == 0) {
if (y < 0) {
// Unfortunately, we have to be careful when p has reached
// infinity in the computation, because sometimes the higher
// internal precision in the pow() implementation would have
// given us a finite p. This happens very rarely.
double result = 1.0 / p;
return (result == 0 && isinf(p))
? pow(x, static_cast<double>(y)) // Avoid pow(double, int).
: result;
} else {
return p;
}
}
m *= m;
}
}
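
The removed powi() is square-and-multiply: m squares once per bit of y while p picks up the factors for the set bits, so the cost is logarithmic in y. A stand-alone trace of the positive-exponent core (the sign and infinity handling above are omitted):

    #include <cassert>

    // For n = 11 (binary 1011), p collects x^1, x^3, x^11 as the low
    // bits are consumed, while m squares through x^2, x^4, x^8.
    double powi_positive(double x, unsigned n) {
      double m = x;
      double p = 1;
      while (n != 0) {
        if ((n & 1) != 0) p *= m;
        n >>= 1;
        m *= m;
      }
      return p;
    }

    int main() {
      // Seven multiplies instead of ten (the original saves one more by
      // returning before the final squaring).
      assert(powi_positive(2.0, 11) == 2048.0);
      return 0;
    }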
static MaybeObject* Runtime_Math_pow(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@ -6064,31 +6026,11 @@ static MaybeObject* Runtime_Math_pow(Arguments args) {
// custom powi() function than the generic pow().
if (args[1]->IsSmi()) {
int y = Smi::cast(args[1])->value();
return Heap::NumberFromDouble(powi(x, y));
return Heap::NumberFromDouble(power_double_int(x, y));
}
CONVERT_DOUBLE_CHECKED(y, args[1]);
if (!isinf(x)) {
if (y == 0.5) {
// It's not uncommon to use Math.pow(x, 0.5) to compute the
// square root of a number. To speed up such computations, we
// explicitly check for this case and use the sqrt() function
// which is faster than pow().
return Heap::AllocateHeapNumber(sqrt(x));
} else if (y == -0.5) {
// Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5).
return Heap::AllocateHeapNumber(1.0 / sqrt(x));
}
}
if (y == 0) {
return Smi::FromInt(1);
} else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
return Heap::nan_value();
} else {
return Heap::AllocateHeapNumber(pow(x, y));
}
return Heap::AllocateHeapNumber(power_double_double(x, y));
}
// Fast version of Math.pow if we know that y is not an integer and
@ -6936,12 +6878,17 @@ static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
if (CompileOptimized(function, ast_id) && function->IsOptimized()) {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
function->code()->deoptimization_data());
if (data->OsrPcOffset()->value() >= 0) {
if (FLAG_trace_osr) {
PrintF("[on-stack replacement offset %d in optimized code]\n",
data->OsrPcOffset()->value());
}
ASSERT(data->OsrAstId()->value() == ast_id);
ASSERT(data->OsrPcOffset()->value() >= 0);
} else {
// We may never generate the desired OSR entry if we emit an
// early deoptimize.
succeeded = false;
}
} else {
succeeded = false;
}
@ -7716,13 +7663,13 @@ static MaybeObject* Runtime_AllocateInNewSpace(Arguments args) {
}
// Push an array onto an array of arrays if it is not already in the
// Push an object onto an array of objects if it is not already in the
// array. Returns true if the element was pushed onto the array and
// false otherwise.
static MaybeObject* Runtime_PushIfAbsent(Arguments args) {
ASSERT(args.length() == 2);
CONVERT_CHECKED(JSArray, array, args[0]);
CONVERT_CHECKED(JSArray, element, args[1]);
CONVERT_CHECKED(JSObject, element, args[1]);
RUNTIME_ASSERT(array->HasFastElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
@ -9733,7 +9680,7 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
// Check the execution state and decode arguments frame and source to be
// evaluated.
ASSERT(args.length() == 4);
ASSERT(args.length() == 5);
Object* check_result;
{ MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
if (!maybe_check_result->ToObject(&check_result)) {
@ -9743,6 +9690,7 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
CONVERT_ARG_CHECKED(String, source, 2);
CONVERT_BOOLEAN_CHECKED(disable_break, args[3]);
Handle<Object> additional_context(args[4]);
// Handle the processing of break.
DisableBreak disable_break_save(disable_break);
@ -9793,6 +9741,11 @@ static MaybeObject* Runtime_DebugEvaluate(Arguments args) {
Handle<Context> function_context(frame_context->fcontext());
context = CopyWithContextChain(frame_context, context);
if (additional_context->IsJSObject()) {
context = Factory::NewWithContext(context,
Handle<JSObject>::cast(additional_context), false);
}
// Wrap the evaluation statement in a new function compiled in the newly
// created context. The function has one parameter which has to be called
// 'arguments'. This is to have access to what would have been 'arguments' in
@ -9847,7 +9800,7 @@ static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
// Check the execution state and decode arguments frame and source to be
// evaluated.
ASSERT(args.length() == 3);
ASSERT(args.length() == 4);
Object* check_result;
{ MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
if (!maybe_check_result->ToObject(&check_result)) {
@ -9856,6 +9809,7 @@ static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
}
CONVERT_ARG_CHECKED(String, source, 1);
CONVERT_BOOLEAN_CHECKED(disable_break, args[2]);
Handle<Object> additional_context(args[3]);
// Handle the processing of break.
DisableBreak disable_break_save(disable_break);
@ -9874,11 +9828,24 @@ static MaybeObject* Runtime_DebugEvaluateGlobal(Arguments args) {
// debugger was invoked.
Handle<Context> context = Top::global_context();
bool is_global = true;
if (additional_context->IsJSObject()) {
// Create a function context first, then put a 'with' context on top of it.
Handle<JSFunction> go_between = Factory::NewFunction(
Factory::empty_string(), Factory::undefined_value());
go_between->set_context(*context);
context =
Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
context->set_extension(JSObject::cast(*additional_context));
is_global = false;
}
// Compile the source to be evaluated.
Handle<SharedFunctionInfo> shared =
Compiler::CompileEval(source,
context,
true);
is_global);
if (shared.is_null()) return Failure::Exception();
Handle<JSFunction> compiled_function =
Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,

5
deps/v8/src/runtime.h

@ -66,7 +66,6 @@ namespace internal {
\
F(IsInPrototypeChain, 2, 1) \
F(SetHiddenPrototype, 2, 1) \
F(SetMathFunctionId, 2, 1) \
\
F(IsConstructCall, 0, 1) \
\
@ -343,8 +342,8 @@ namespace internal {
F(IsBreakOnException, 1, 1) \
F(PrepareStep, 3, 1) \
F(ClearStepping, 0, 1) \
F(DebugEvaluate, 4, 1) \
F(DebugEvaluateGlobal, 3, 1) \
F(DebugEvaluate, 5, 1) \
F(DebugEvaluateGlobal, 4, 1) \
F(DebugGetLoadedScripts, 0, 1) \
F(DebugReferencedBy, 3, 1) \
F(DebugConstructedBy, 2, 1) \

20
deps/v8/src/runtime.js

@ -594,13 +594,15 @@ function IsPrimitive(x) {
// ECMA-262, section 8.6.2.6, page 28.
function DefaultNumber(x) {
if (IS_FUNCTION(x.valueOf)) {
var v = x.valueOf();
var valueOf = x.valueOf;
if (IS_FUNCTION(valueOf)) {
var v = %_CallFunction(x, valueOf);
if (%IsPrimitive(v)) return v;
}
if (IS_FUNCTION(x.toString)) {
var s = x.toString();
var toString = x.toString;
if (IS_FUNCTION(toString)) {
var s = %_CallFunction(x, toString);
if (%IsPrimitive(s)) return s;
}
@ -610,13 +612,15 @@ function DefaultNumber(x) {
// ECMA-262, section 8.6.2.6, page 28.
function DefaultString(x) {
if (IS_FUNCTION(x.toString)) {
var s = x.toString();
var toString = x.toString;
if (IS_FUNCTION(toString)) {
var s = %_CallFunction(x, toString);
if (%IsPrimitive(s)) return s;
}
if (IS_FUNCTION(x.valueOf)) {
var v = x.valueOf();
var valueOf = x.valueOf;
if (IS_FUNCTION(valueOf)) {
var v = %_CallFunction(x, valueOf);
if (%IsPrimitive(v)) return v;
}

22
deps/v8/src/scanner-base.cc

@ -34,12 +34,6 @@
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// UTF16Buffer
UTF16Buffer::UTF16Buffer()
: pos_(0), end_(kNoEndPosition) { }
// ----------------------------------------------------------------------------
// LiteralCollector
@ -92,7 +86,7 @@ bool ScannerConstants::IsIdentifier(unibrow::CharacterStream* buffer) {
// ----------------------------------------------------------------------------
// Scanner
Scanner::Scanner() : source_(NULL) {}
Scanner::Scanner() { }
uc32 Scanner::ScanHexEscape(uc32 c, int length) {
@ -142,8 +136,7 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
// ----------------------------------------------------------------------------
// JavaScriptScanner
JavaScriptScanner::JavaScriptScanner()
: has_line_terminator_before_next_(false) {}
JavaScriptScanner::JavaScriptScanner() : Scanner() {}
Token::Value JavaScriptScanner::Next() {
@ -503,12 +496,21 @@ void JavaScriptScanner::Scan() {
void JavaScriptScanner::SeekForward(int pos) {
source_->SeekForward(pos - 1);
// After this call, we will have the token at the given position as
// the "next" token. The "current" token will be invalid.
if (pos == next_.location.beg_pos) return;
int current_pos = source_pos();
ASSERT_EQ(next_.location.end_pos, current_pos);
// Positions inside the lookahead token aren't supported.
ASSERT(pos >= current_pos);
if (pos != current_pos) {
source_->SeekForward(pos - source_->pos());
Advance();
// This function is only called to seek to the location
// of the end of a function (at the "}" token). It doesn't matter
// whether there was a line terminator in the part we skip.
has_line_terminator_before_next_ = false;
}
Scan();
}

86
deps/v8/src/scanner-base.h

@ -52,31 +52,75 @@ inline int HexValue(uc32 c) {
return -1;
}
// ----------------------------------------------------------------------------
// UTF16Buffer - scanner input source with pushback.
class UTF16Buffer {
public:
UTF16Buffer();
virtual ~UTF16Buffer() {}
virtual void PushBack(uc32 ch) = 0;
// Returns a value < 0 when the buffer end is reached.
virtual uc32 Advance() = 0;
virtual void SeekForward(int pos) = 0;
int pos() const { return pos_; }
// ---------------------------------------------------------------------
// Buffered stream of characters, using an internal UC16 buffer.
static const int kNoEndPosition = 1;
class UC16CharacterStream {
public:
UC16CharacterStream() : pos_(0) { }
virtual ~UC16CharacterStream() { }
// Returns and advances past the next UC16 character in the input
// stream. If there are no more characters, it returns a negative
// value.
inline int32_t Advance() {
if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
pos_++;
return *(buffer_cursor_++);
}
// Note: currently the following increment is necessary to avoid a
// parser problem! The scanner treats the final kEndOfInput as
// a character with a position, and does math relative to that
// position.
pos_++;
return kEndOfInput;
}
// Return the current position in the character stream.
// Starts at zero.
inline unsigned pos() const { return pos_; }
// Skips forward past the next character_count UC16 characters
// in the input, or until the end of input if that comes sooner.
// Returns the number of characters actually skipped. If less
// than character_count, the end of the input was reached.
inline unsigned SeekForward(unsigned character_count) {
unsigned buffered_chars =
static_cast<unsigned>(buffer_end_ - buffer_cursor_);
if (character_count <= buffered_chars) {
buffer_cursor_ += character_count;
pos_ += character_count;
return character_count;
}
return SlowSeekForward(character_count);
}
// Pushes back the most recently read UC16 character, i.e.,
// the value returned by the most recent call to Advance.
// Must not be used right after calling SeekForward.
virtual void PushBack(uc16 character) = 0;
protected:
// Initial value of end_ before the input stream is initialized.
int pos_; // Current position in the buffer.
int end_; // Position where scanning should stop (EOF).
static const int32_t kEndOfInput = -1;
// Ensures that the buffer_cursor_ points to the character at
// position pos_ of the input, if possible. If the position
// is at or after the end of the input, return false. If there
// are more characters available, return true.
virtual bool ReadBlock() = 0;
virtual unsigned SlowSeekForward(unsigned character_count) = 0;
const uc16* buffer_cursor_;
const uc16* buffer_end_;
unsigned pos_;
};
// ---------------------------------------------------------------------
// Constants used by scanners.
class ScannerConstants : AllStatic {
public:
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
@ -277,7 +321,7 @@ class Scanner {
// Low-level scanning support.
void Advance() { c0_ = source_->Advance(); }
void PushBack(uc32 ch) {
source_->PushBack(ch);
source_->PushBack(c0_);
c0_ = ch;
}
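
The one-line change in PushBack matters because the scanner keeps one character of lookahead in c0_: pushing ch itself back (the removed line) would silently drop that buffered lookahead, while the new line returns c0_ to the stream and makes ch the current character. A minimal model of the invariant, with a std::deque standing in for the character stream (hypothetical names):

    #include <cassert>
    #include <deque>

    struct ScannerModel {
      int c0_;                  // One-character lookahead.
      std::deque<int> stream_;  // Remaining input.

      void Advance() {
        c0_ = stream_.empty() ? -1 : stream_.front();
        if (!stream_.empty()) stream_.pop_front();
      }
      void PushBack(int ch) {
        stream_.push_front(c0_);  // Return the lookahead, not ch.
        c0_ = ch;
      }
    };

    int main() {
      ScannerModel s;
      s.stream_.assign({'a', 'b', 'c'});
      s.Advance();      // c0_ == 'a'
      s.Advance();      // c0_ == 'b'
      s.PushBack('a');  // Un-read: 'b' goes back, 'a' is current again.
      assert(s.c0_ == 'a');
      s.Advance();
      assert(s.c0_ == 'b');  // 'b' is re-read; nothing was lost.
      return 0;
    }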
@ -307,8 +351,8 @@ class Scanner {
TokenDesc current_; // desc for current token (as returned by Next())
TokenDesc next_; // desc for next token (one token look-ahead)
// Input stream. Must be initialized to an UTF16Buffer.
UTF16Buffer* source_;
// Input stream. Must be initialized to an UC16CharacterStream.
UC16CharacterStream* source_;
// Buffer to hold literal values (identifiers, strings, numbers)
// using '\x00'-terminated UTF-8 encoding. Handles allocation internally.

365
deps/v8/src/scanner.cc

@ -36,63 +36,265 @@ namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// UTF16Buffer
// CharacterStreamUTF16Buffer
CharacterStreamUTF16Buffer::CharacterStreamUTF16Buffer()
: pushback_buffer_(0), last_(0), stream_(NULL) { }
// BufferedUC16CharacterStreams
BufferedUC16CharacterStream::BufferedUC16CharacterStream()
: UC16CharacterStream(),
pushback_limit_(NULL) {
// Initialize buffer as being empty. First read will fill the buffer.
buffer_cursor_ = buffer_;
buffer_end_ = buffer_;
}
BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
void CharacterStreamUTF16Buffer::Initialize(Handle<String> data,
unibrow::CharacterStream* input,
int start_position,
int end_position) {
stream_ = input;
if (start_position > 0) {
SeekForward(start_position);
void BufferedUC16CharacterStream::PushBack(uc16 character) {
if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
// buffer_ is writable; buffer_cursor_ is a const pointer.
buffer_[--buffer_cursor_ - buffer_] = character;
pos_--;
return;
}
end_ = end_position != kNoEndPosition ? end_position : kMaxInt;
SlowPushBack(character);
}
void CharacterStreamUTF16Buffer::PushBack(uc32 ch) {
pushback_buffer()->Add(last_);
last_ = ch;
void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
// In pushback mode, the end of the buffer contains pushback,
// and the start of the buffer (from buffer start to pushback_limit_)
// contains valid data that comes just after the pushback.
// We NULL the pushback_limit_ if pushing all the way back to the
// start of the buffer.
if (pushback_limit_ == NULL) {
// Enter pushback mode.
pushback_limit_ = buffer_end_;
buffer_end_ = buffer_ + kBufferSize;
buffer_cursor_ = buffer_end_;
}
ASSERT(pushback_limit_ > buffer_);
ASSERT(pos_ > 0);
buffer_[--buffer_cursor_ - buffer_] = character;
if (buffer_cursor_ == buffer_) {
pushback_limit_ = NULL;
} else if (buffer_cursor_ < pushback_limit_) {
pushback_limit_ = buffer_cursor_;
}
pos_--;
}
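
In pushback mode the single buffer is split in two, as the comment above describes: the pushed-back characters sit at the high end and are consumed first, while buffer_[0 .. pushback_limit_) still holds data that logically follows them and is restored by the next ReadBlock(). Roughly (an illustration, not from the source):

    buffer_           pushback_limit_     buffer_cursor_        buffer_end_
      |                     |                   |        (buffer_ + kBufferSize)
      v                     v                   v                  v
      [ data that follows   |   stale / free    |   pushed-back    ]
      [ the pushback        |                   |   characters     ]

Advance() reads from buffer_cursor_ toward buffer_end_; once the pushback is exhausted, ReadBlock() swings the cursor back to [buffer_, pushback_limit_) and clears pushback_limit_, leaving pushback mode.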
uc32 CharacterStreamUTF16Buffer::Advance() {
ASSERT(end_ != kNoEndPosition);
ASSERT(end_ >= 0);
// NOTE: It is of importance to Persian / Farsi resources that we do
// *not* strip format control characters in the scanner; see
//
// https://bugzilla.mozilla.org/show_bug.cgi?id=274152
//
// So, even though ECMA-262, section 7.1, page 11, dictates that we
// must remove Unicode format-control characters, we do not. This is
// in line with how IE and SpiderMonkey handle it.
if (!pushback_buffer()->is_empty()) {
pos_++;
return last_ = pushback_buffer()->RemoveLast();
} else if (stream_->has_more() && pos_ < end_) {
pos_++;
uc32 next = stream_->GetNext();
return last_ = next;
bool BufferedUC16CharacterStream::ReadBlock() {
if (pushback_limit_ != NULL) {
buffer_cursor_ = buffer_;
buffer_end_ = pushback_limit_;
pushback_limit_ = NULL;
ASSERT(buffer_cursor_ != buffer_end_);
return true;
}
unsigned length = FillBuffer(pos_, kBufferSize);
buffer_cursor_ = buffer_;
buffer_end_ = buffer_ + length;
return length > 0;
}
unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
// Leave pushback mode (i.e., ignore that there might be valid data
// in the buffer before the pushback_limit_ point).
pushback_limit_ = NULL;
return BufferSeekForward(delta);
}
// ----------------------------------------------------------------------------
// GenericStringUC16CharacterStream
GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
Handle<String> data,
unsigned start_position,
unsigned end_position)
: string_(data),
length_(end_position) {
ASSERT(end_position >= start_position);
buffer_cursor_ = buffer_;
buffer_end_ = buffer_;
pos_ = start_position;
}
GenericStringUC16CharacterStream::~GenericStringUC16CharacterStream() { }
unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
unsigned old_pos = pos_;
pos_ = Min(pos_ + delta, length_);
ReadBlock();
return pos_ - old_pos;
}
unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
unsigned length) {
if (from_pos >= length_) return 0;
if (from_pos + length > length_) {
length = length_ - from_pos;
}
String::WriteToFlat<uc16>(*string_, buffer_, from_pos, from_pos + length);
return length;
}
// ----------------------------------------------------------------------------
// Utf8ToUC16CharacterStream
Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
unsigned length)
: BufferedUC16CharacterStream(),
raw_data_(data),
raw_data_length_(length),
raw_data_pos_(0),
raw_character_position_(0) {
ReadBlock();
}
Utf8ToUC16CharacterStream::~Utf8ToUC16CharacterStream() { }
unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
unsigned old_pos = pos_;
unsigned target_pos = pos_ + delta;
SetRawPosition(target_pos);
pos_ = raw_character_position_;
ReadBlock();
return pos_ - old_pos;
}
unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
unsigned length) {
static const unibrow::uchar kMaxUC16Character = 0xffff;
SetRawPosition(char_position);
if (raw_character_position_ != char_position) {
// char_position was not a valid position in the stream (hit the end
// while spooling to it).
return 0u;
}
unsigned i = 0;
while (i < length) {
if (raw_data_pos_ == raw_data_length_) break;
unibrow::uchar c = raw_data_[raw_data_pos_];
if (c <= unibrow::Utf8::kMaxOneByteChar) {
raw_data_pos_++;
} else {
// Note: currently the following increment is necessary to avoid a
// test-parser problem!
pos_++;
return last_ = static_cast<uc32>(-1);
c = unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
raw_data_length_ - raw_data_pos_,
&raw_data_pos_);
// Don't allow characters outside of the BMP.
if (c > kMaxUC16Character) {
c = unibrow::Utf8::kBadChar;
}
}
buffer_[i++] = static_cast<uc16>(c);
}
raw_character_position_ = char_position + i;
return i;
}
static const byte kUtf8MultiByteMask = 0xC0;
static const byte kUtf8MultiByteCharStart = 0xC0;
static const byte kUtf8MultiByteCharFollower = 0x80;
#ifdef DEBUG
static bool IsUtf8MultiCharacterStart(byte first_byte) {
return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
}
#endif
void CharacterStreamUTF16Buffer::SeekForward(int pos) {
pos_ = pos;
ASSERT(pushback_buffer()->is_empty());
stream_->Seek(pos);
static bool IsUtf8MultiCharacterFollower(byte later_byte) {
return (later_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharFollower;
}
// Move the cursor back to point at the preceding UTF-8 character start
// in the buffer.
static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
byte character = buffer[--*cursor];
if (character > unibrow::Utf8::kMaxOneByteChar) {
ASSERT(IsUtf8MultiCharacterFollower(character));
// Last byte of a multi-byte character encoding. Step backwards until
// pointing to the first byte of the encoding, recognized by having the
// top two bits set.
while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { }
ASSERT(IsUtf8MultiCharacterStart(buffer[*cursor]));
}
}
// Move the cursor forward to point at the next following UTF-8 character start
// in the buffer.
static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
byte character = buffer[(*cursor)++];
if (character > unibrow::Utf8::kMaxOneByteChar) {
// First character of a multi-byte character encoding.
// The number of most-significant one-bits determines the length of the
// encoding:
// 110..... - (0xCx, 0xDx) one additional byte (minimum).
// 1110.... - (0xEx) two additional bytes.
// 11110... - (0xFx) three additional bytes (maximum).
ASSERT(IsUtf8MultiCharacterStart(character));
// The number of additional bytes is:
// 1 if value in range 0xC0 .. 0xDF.
// 2 if value in range 0xE0 .. 0xEF.
// 3 if value in range 0xF0 .. 0xF7.
// Encode that in a single value.
unsigned additional_bytes =
((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
*cursor += additional_bytes;
ASSERT(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
}
}
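
The 0x3211 constant packs the continuation-byte counts (1, 1, 2, 3, from the low nibble to the high one) for lead bytes 0xCx, 0xDx, 0xEx, 0xFx, so the branch table collapses to a shift and a mask. A quick stand-alone check of the lookup:

    #include <cassert>

    unsigned Utf8ExtraBytes(unsigned char lead) {
      return (0x3211u >> (((lead - 0xC0) >> 2) & 0xC)) & 0x03;
    }

    int main() {
      assert(Utf8ExtraBytes(0xC2) == 1);  // 110xxxxx: 2-byte sequence.
      assert(Utf8ExtraBytes(0xDF) == 1);
      assert(Utf8ExtraBytes(0xE2) == 2);  // 1110xxxx: 3-byte sequence.
      assert(Utf8ExtraBytes(0xF0) == 3);  // 11110xxx: 4-byte sequence.
      return 0;
    }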
void Utf8ToUC16CharacterStream::SetRawPosition(unsigned target_position) {
if (raw_character_position_ > target_position) {
// Spool backwards in utf8 buffer.
do {
Utf8CharacterBack(raw_data_, &raw_data_pos_);
raw_character_position_--;
} while (raw_character_position_ > target_position);
return;
}
// Spool forwards in the utf8 buffer.
while (raw_character_position_ < target_position) {
if (raw_data_pos_ == raw_data_length_) return;
Utf8CharacterForward(raw_data_, &raw_data_pos_);
raw_character_position_++;
}
}
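
SetRawPosition keeps the byte cursor and the character cursor in sync by spooling across the raw UTF-8 one encoded character at a time, in either direction. A one-shot version of the forward mapping it maintains incrementally (assumes well-formed input; an illustrative helper, not V8's):

    // Byte offset of the target_char-th character in well-formed UTF-8.
    unsigned ByteOffsetOfChar(const unsigned char* data, unsigned length,
                              unsigned target_char) {
      unsigned byte_pos = 0;
      for (unsigned chars = 0; chars < target_char && byte_pos < length;
           chars++) {
        unsigned char lead = data[byte_pos++];
        if (lead >= 0xC0) {  // Multi-byte start: skip continuation bytes.
          byte_pos += (0x3211u >> (((lead - 0xC0) >> 2) & 0xC)) & 0x03;
        }
      }
      return byte_pos;
    }

For the input bytes 0x61 0xC3 0x9F 0x63 ("a", sharp-s, "c"), ByteOffsetOfChar(data, 4, 2) lands on byte 3, the start of 'c', matching what the spool-forward loop above computes step by step.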
// ----------------------------------------------------------------------------
// ExternalTwoByteStringUC16CharacterStream
ExternalTwoByteStringUC16CharacterStream::
~ExternalTwoByteStringUC16CharacterStream() { }
ExternalTwoByteStringUC16CharacterStream
::ExternalTwoByteStringUC16CharacterStream(
Handle<ExternalTwoByteString> data,
int start_position,
int end_position)
: UC16CharacterStream(),
source_(data),
raw_data_(data->GetTwoByteData(start_position)) {
buffer_cursor_ = raw_data_;
buffer_end_ = raw_data_ + (end_position - start_position);
pos_ = start_position;
}
@ -115,46 +317,19 @@ void Scanner::LiteralScope::Complete() {
complete_ = true;
}
// ----------------------------------------------------------------------------
// V8JavaScriptScanner
void V8JavaScriptScanner::Initialize(Handle<String> source,
int literal_flags) {
source_ = stream_initializer_.Init(source, NULL, 0, source->length());
// Need to capture identifiers in order to recognize "get" and "set"
// in object literals.
literal_flags_ = literal_flags | kLiteralIdentifier;
Init();
// Skip initial whitespace allowing HTML comment ends just like
// after a newline and scan first token.
has_line_terminator_before_next_ = true;
SkipWhiteSpace();
Scan();
}
void V8JavaScriptScanner::Initialize(Handle<String> source,
unibrow::CharacterStream* stream,
int literal_flags) {
source_ = stream_initializer_.Init(source, stream,
0, UTF16Buffer::kNoEndPosition);
literal_flags_ = literal_flags | kLiteralIdentifier;
Init();
// Skip initial whitespace allowing HTML comment ends just like
// after a newline and scan first token.
has_line_terminator_before_next_ = true;
SkipWhiteSpace();
Scan();
}
V8JavaScriptScanner::V8JavaScriptScanner() : JavaScriptScanner() { }
void V8JavaScriptScanner::Initialize(Handle<String> source,
int start_position,
int end_position,
void V8JavaScriptScanner::Initialize(UC16CharacterStream* source,
int literal_flags) {
source_ = stream_initializer_.Init(source, NULL,
start_position, end_position);
source_ = source;
literal_flags_ = literal_flags | kLiteralIdentifier;
// Need to capture identifiers in order to recognize "get" and "set"
// in object literals.
Init();
// Skip initial whitespace allowing HTML comment ends just like
// after a newline and scan first token.
@ -164,48 +339,14 @@ void V8JavaScriptScanner::Initialize(Handle<String> source,
}
UTF16Buffer* StreamInitializer::Init(Handle<String> source,
unibrow::CharacterStream* stream,
int start_position,
int end_position) {
// Either initialize the scanner from a character stream or from a
// string.
ASSERT(source.is_null() || stream == NULL);
// Initialize the source buffer.
if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
two_byte_string_buffer_.Initialize(
Handle<ExternalTwoByteString>::cast(source),
start_position,
end_position);
return &two_byte_string_buffer_;
} else if (!source.is_null() && StringShape(*source).IsExternalAscii()) {
ascii_string_buffer_.Initialize(
Handle<ExternalAsciiString>::cast(source),
start_position,
end_position);
return &ascii_string_buffer_;
} else {
if (!source.is_null()) {
safe_string_input_buffer_.Reset(source.location());
stream = &safe_string_input_buffer_;
}
char_stream_buffer_.Initialize(source,
stream,
start_position,
end_position);
return &char_stream_buffer_;
}
}
// ----------------------------------------------------------------------------
// JsonScanner
JsonScanner::JsonScanner() {}
JsonScanner::JsonScanner() : Scanner() { }
void JsonScanner::Initialize(Handle<String> source) {
source_ = stream_initializer_.Init(source, NULL, 0, source->length());
void JsonScanner::Initialize(UC16CharacterStream* source) {
source_ = source;
Init();
// Skip initial whitespace.
SkipJsonWhiteSpace();

191
deps/v8/src/scanner.h

@ -35,67 +35,97 @@
namespace v8 {
namespace internal {
// UTF16 buffer to read characters from a character stream.
class CharacterStreamUTF16Buffer: public UTF16Buffer {
// A buffered character stream based on a random access character
// source (ReadBlock can be called with pos_ pointing to any position,
// even positions before the current).
class BufferedUC16CharacterStream: public UC16CharacterStream {
public:
CharacterStreamUTF16Buffer();
virtual ~CharacterStreamUTF16Buffer() {}
void Initialize(Handle<String> data,
unibrow::CharacterStream* stream,
int start_position,
int end_position);
virtual void PushBack(uc32 ch);
virtual uc32 Advance();
virtual void SeekForward(int pos);
BufferedUC16CharacterStream();
virtual ~BufferedUC16CharacterStream();
virtual void PushBack(uc16 character);
protected:
static const unsigned kBufferSize = 512;
static const unsigned kPushBackStepSize = 16;
virtual unsigned SlowSeekForward(unsigned delta);
virtual bool ReadBlock();
virtual void SlowPushBack(uc16 character);
private:
List<uc32> pushback_buffer_;
uc32 last_;
unibrow::CharacterStream* stream_;
virtual unsigned BufferSeekForward(unsigned delta) = 0;
virtual unsigned FillBuffer(unsigned position, unsigned length) = 0;
List<uc32>* pushback_buffer() { return &pushback_buffer_; }
const uc16* pushback_limit_;
uc16 buffer_[kBufferSize];
};
// UTF16 buffer to read characters from an external string.
template <typename StringType, typename CharType>
class ExternalStringUTF16Buffer: public UTF16Buffer {
// Generic string stream.
class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
public:
ExternalStringUTF16Buffer();
virtual ~ExternalStringUTF16Buffer() {}
void Initialize(Handle<StringType> data,
int start_position,
int end_position);
virtual void PushBack(uc32 ch);
virtual uc32 Advance();
virtual void SeekForward(int pos);
GenericStringUC16CharacterStream(Handle<String> data,
unsigned start_position,
unsigned end_position);
virtual ~GenericStringUC16CharacterStream();
protected:
virtual unsigned BufferSeekForward(unsigned delta);
virtual unsigned FillBuffer(unsigned position, unsigned length);
private:
const CharType* raw_data_; // Pointer to the actual array of characters.
Handle<String> string_;
unsigned start_position_;
unsigned length_;
};
// Initializes a UTF16Buffer as input stream, using one of a number
// of strategies depending on the available character sources.
class StreamInitializer {
// UC16 stream based on a literal UTF-8 string.
class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
public:
UTF16Buffer* Init(Handle<String> source,
unibrow::CharacterStream* stream,
Utf8ToUC16CharacterStream(const byte* data, unsigned length);
virtual ~Utf8ToUC16CharacterStream();
protected:
virtual unsigned BufferSeekForward(unsigned delta);
virtual unsigned FillBuffer(unsigned char_position, unsigned length);
void SetRawPosition(unsigned char_position);
const byte* raw_data_;
unsigned raw_data_length_; // Measured in bytes, not characters.
unsigned raw_data_pos_;
// The character position of the character at raw_data[raw_data_pos_].
// Not necessarily the same as pos_.
unsigned raw_character_position_;
};
// UTF16 buffer to read characters from an external string.
class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
public:
ExternalTwoByteStringUC16CharacterStream(Handle<ExternalTwoByteString> data,
int start_position,
int end_position);
private:
// Different UTF16 buffers used to pull characters from. Based on input one of
// these will be initialized as the actual data source.
CharacterStreamUTF16Buffer char_stream_buffer_;
ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
two_byte_string_buffer_;
ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
// Used to convert the source string into a character stream when a stream
// is not passed to the scanner.
SafeStringInputBuffer safe_string_input_buffer_;
virtual ~ExternalTwoByteStringUC16CharacterStream();
virtual void PushBack(uc16 character) {
ASSERT(buffer_cursor_ > raw_data_);
buffer_cursor_--;
pos_--;
}
protected:
virtual unsigned SlowSeekForward(unsigned delta) {
// Fast case always handles seeking.
return 0;
}
virtual bool ReadBlock() {
// Entire string is read at start.
return false;
}
Handle<ExternalTwoByteString> source_;
const uc16* raw_data_; // Pointer to the actual array of characters.
};
// ----------------------------------------------------------------------------
// V8JavaScriptScanner
// JavaScript scanner getting its input from either a V8 String or a unicode
@ -103,19 +133,9 @@ class StreamInitializer {
class V8JavaScriptScanner : public JavaScriptScanner {
public:
V8JavaScriptScanner() {}
// Initialize the Scanner to scan source.
void Initialize(Handle<String> source, int literal_flags = kAllLiterals);
void Initialize(Handle<String> source,
unibrow::CharacterStream* stream,
int literal_flags = kAllLiterals);
void Initialize(Handle<String> source,
int start_position, int end_position,
V8JavaScriptScanner();
void Initialize(UC16CharacterStream* source,
int literal_flags = kAllLiterals);
protected:
StreamInitializer stream_initializer_;
};
@ -123,8 +143,7 @@ class JsonScanner : public Scanner {
public:
JsonScanner();
// Initialize the Scanner to scan source.
void Initialize(Handle<String> source);
void Initialize(UC16CharacterStream* source);
// Returns the next token.
Token::Value Next();
@ -138,7 +157,7 @@ class JsonScanner : public Scanner {
// Recognizes all of the single-character tokens directly, or calls a function
// to scan a number, string or identifier literal.
// The only allowed whitespace characters between tokens are tab,
// carrige-return, newline and space.
// carriage-return, newline and space.
void ScanJson();
// A JSON number (production JSONNumber) is a subset of the valid JavaScript
@ -159,60 +178,8 @@ class JsonScanner : public Scanner {
// are the only valid JSON identifiers (productions JSONBooleanLiteral,
// JSONNullLiteral).
Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
StreamInitializer stream_initializer_;
};
// ExternalStringUTF16Buffer
template <typename StringType, typename CharType>
ExternalStringUTF16Buffer<StringType, CharType>::ExternalStringUTF16Buffer()
: raw_data_(NULL) { }
template <typename StringType, typename CharType>
void ExternalStringUTF16Buffer<StringType, CharType>::Initialize(
Handle<StringType> data,
int start_position,
int end_position) {
ASSERT(!data.is_null());
raw_data_ = data->resource()->data();
ASSERT(end_position <= data->length());
if (start_position > 0) {
SeekForward(start_position);
}
end_ =
end_position != kNoEndPosition ? end_position : data->length();
}
template <typename StringType, typename CharType>
uc32 ExternalStringUTF16Buffer<StringType, CharType>::Advance() {
if (pos_ < end_) {
return raw_data_[pos_++];
} else {
// note: currently the following increment is necessary to avoid a
// test-parser problem!
pos_++;
return static_cast<uc32>(-1);
}
}
template <typename StringType, typename CharType>
void ExternalStringUTF16Buffer<StringType, CharType>::PushBack(uc32 ch) {
pos_--;
ASSERT(pos_ >= Scanner::kCharacterLookaheadBufferSize);
ASSERT(raw_data_[pos_ - Scanner::kCharacterLookaheadBufferSize] == ch);
}
template <typename StringType, typename CharType>
void ExternalStringUTF16Buffer<StringType, CharType>::SeekForward(int pos) {
pos_ = pos;
}
} } // namespace v8::internal
#endif // V8_SCANNER_H_

Some files were not shown because too many files changed in this diff
