Browse Source

upgrade v8 to 1.2.8

v0.7.4-release
Ryan 16 years ago
parent
commit
84f9178e66
  1. 14
      deps/v8/ChangeLog
  2. 16
      deps/v8/include/v8.h
  3. 3
      deps/v8/src/SConscript
  4. 5
      deps/v8/src/api.cc
  5. 20
      deps/v8/src/arm/builtins-arm.cc
  6. 10
      deps/v8/src/arm/codegen-arm-inl.h
  7. 923
      deps/v8/src/arm/codegen-arm.cc
  8. 9
      deps/v8/src/arm/codegen-arm.h
  9. 20
      deps/v8/src/arm/constants-arm.h
  10. 4
      deps/v8/src/arm/cpu-arm.cc
  11. 18
      deps/v8/src/arm/disasm-arm.cc
  12. 242
      deps/v8/src/arm/frames-arm.h
  13. 30
      deps/v8/src/arm/ic-arm.cc
  14. 4
      deps/v8/src/arm/jump-target-arm.cc
  15. 126
      deps/v8/src/arm/macro-assembler-arm.cc
  16. 35
      deps/v8/src/arm/macro-assembler-arm.h
  17. 169
      deps/v8/src/arm/simulator-arm.cc
  18. 10
      deps/v8/src/arm/simulator-arm.h
  19. 16
      deps/v8/src/arm/stub-cache-arm.cc
  20. 8
      deps/v8/src/arm/virtual-frame-arm.cc
  21. 63
      deps/v8/src/array.js
  22. 31
      deps/v8/src/assembler.cc
  23. 42
      deps/v8/src/assembler.h
  24. 3
      deps/v8/src/bootstrapper.cc
  25. 3
      deps/v8/src/builtins.cc
  26. 2
      deps/v8/src/code-stubs.cc
  27. 2
      deps/v8/src/code-stubs.h
  28. 11
      deps/v8/src/codegen.cc
  29. 18
      deps/v8/src/codegen.h
  30. 28
      deps/v8/src/compiler.cc
  31. 2
      deps/v8/src/d8-debug.h
  32. 20
      deps/v8/src/d8.cc
  33. 117
      deps/v8/src/d8.js
  34. 73
      deps/v8/src/date-delay.js
  35. 67
      deps/v8/src/debug-delay.js
  36. 14
      deps/v8/src/debug.cc
  37. 2
      deps/v8/src/flag-definitions.h
  38. 25
      deps/v8/src/frames-inl.h
  39. 14
      deps/v8/src/frames.cc
  40. 8
      deps/v8/src/frames.h
  41. 35
      deps/v8/src/heap.cc
  42. 5
      deps/v8/src/heap.h
  43. 19
      deps/v8/src/ia32/assembler-ia32.cc
  44. 2
      deps/v8/src/ia32/assembler-ia32.h
  45. 10
      deps/v8/src/ia32/codegen-ia32-inl.h
  46. 676
      deps/v8/src/ia32/codegen-ia32.cc
  47. 9
      deps/v8/src/ia32/codegen-ia32.h
  48. 173
      deps/v8/src/ia32/frames-ia32.h
  49. 27
      deps/v8/src/ia32/ic-ia32.cc
  50. 6
      deps/v8/src/ia32/jump-target-ia32.cc
  51. 24
      deps/v8/src/ia32/macro-assembler-ia32.cc
  52. 7
      deps/v8/src/ia32/macro-assembler-ia32.h
  53. 20
      deps/v8/src/ia32/virtual-frame-ia32.cc
  54. 7
      deps/v8/src/ia32/virtual-frame-ia32.h
  55. 29
      deps/v8/src/ic.cc
  56. 11
      deps/v8/src/ic.h
  57. 173
      deps/v8/src/jump-target.cc
  58. 51
      deps/v8/src/jump-target.h
  59. 187
      deps/v8/src/log-utils.cc
  60. 67
      deps/v8/src/log-utils.h
  61. 169
      deps/v8/src/log.cc
  62. 57
      deps/v8/src/log.h
  63. 1
      deps/v8/src/macros.py
  64. 92
      deps/v8/src/math.js
  65. 63
      deps/v8/src/messages.js
  66. 154
      deps/v8/src/mirror-delay.js
  67. 58
      deps/v8/src/objects.cc
  68. 33
      deps/v8/src/objects.h
  69. 15
      deps/v8/src/parser.cc
  70. 1
      deps/v8/src/regexp-macro-assembler-irregexp.cc
  71. 488
      deps/v8/src/runtime.cc
  72. 4
      deps/v8/src/runtime.h
  73. 108
      deps/v8/src/runtime.js
  74. 32
      deps/v8/src/serialize.cc
  75. 60
      deps/v8/src/stub-cache.cc
  76. 5
      deps/v8/src/utils.h
  77. 2
      deps/v8/src/v8-counters.h
  78. 37
      deps/v8/src/v8.cc
  79. 13
      deps/v8/src/v8.h
  80. 4
      deps/v8/src/version.cc
  81. 28
      deps/v8/src/x64/assembler-x64-inl.h
  82. 314
      deps/v8/src/x64/assembler-x64.cc
  83. 127
      deps/v8/src/x64/assembler-x64.h
  84. 240
      deps/v8/src/x64/builtins-x64.cc
  85. 11
      deps/v8/src/x64/codegen-x64-inl.h
  86. 466
      deps/v8/src/x64/codegen-x64.cc
  87. 17
      deps/v8/src/x64/codegen-x64.h
  88. 45
      deps/v8/src/x64/frames-x64.cc
  89. 58
      deps/v8/src/x64/frames-x64.h
  90. 13
      deps/v8/src/x64/ic-x64.cc
  91. 599
      deps/v8/src/x64/macro-assembler-x64.cc
  92. 30
      deps/v8/src/x64/macro-assembler-x64.h
  93. 37
      deps/v8/src/x64/register-allocator-x64-inl.h
  94. 63
      deps/v8/src/x64/register-allocator-x64.cc
  95. 2
      deps/v8/src/x64/register-allocator-x64.h
  96. 1
      deps/v8/src/x64/simulator-x64.h
  97. 169
      deps/v8/src/x64/virtual-frame-x64.cc
  98. 4
      deps/v8/src/x64/virtual-frame-x64.h
  99. 75
      deps/v8/test/cctest/test-api.cc
  100. 15
      deps/v8/test/cctest/test-assembler-x64.cc

14
deps/v8/ChangeLog

@@ -1,3 +1,17 @@
2009-06-16: Version 1.2.8
Optimized math on ARM platforms.
Fixed two crash bugs in the handling of getters and setters.
Improved the debugger support by adding scope chain information.
Improved the profiler support by compressing log data transmitted
to clients.
Improved overall performance.
2009-06-08: Version 1.2.7 2009-06-08: Version 1.2.7
Improved debugger and profiler support. Improved debugger and profiler support.

16
deps/v8/include/v8.h

@@ -212,9 +212,9 @@ template <class T> class V8EXPORT_INLINE Handle {
*/ */
bool IsEmpty() const { return val_ == 0; } bool IsEmpty() const { return val_ == 0; }
T* operator->() const; T* operator->() const { return val_; }
T* operator*() const; T* operator*() const { return val_; }
/** /**
* Sets the handle to be empty. IsEmpty() will then return true. * Sets the handle to be empty. IsEmpty() will then return true.
@@ -2509,18 +2509,6 @@ void Persistent<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<void**>(**this)); V8::ClearWeak(reinterpret_cast<void**>(**this));
} }
template <class T>
T* Handle<T>::operator->() const {
return val_;
}
template <class T>
T* Handle<T>::operator*() const {
return val_;
}
Local<Value> Arguments::operator[](int i) const { Local<Value> Arguments::operator[](int i) const {
if (i < 0 || length_ <= i) return Local<Value>(*Undefined()); if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
return Local<Value>(reinterpret_cast<Value*>(values_ - i)); return Local<Value>(reinterpret_cast<Value*>(values_ - i));

3
deps/v8/src/SConscript

@@ -77,7 +77,8 @@ SOURCES = {
'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc', 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
# 'x64/regexp-macro-assembler-x64.cc', # 'x64/regexp-macro-assembler-x64.cc',
'x64/stub-cache-x64.cc' 'x64/register-allocator-x64.cc',
'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
], ],
'simulator:arm': ['arm/simulator-arm.cc'], 'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'], 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],

5
deps/v8/src/api.cc

@@ -2124,7 +2124,9 @@ int v8::Object::GetIdentityHash() {
} else { } else {
int attempts = 0; int attempts = 0;
do { do {
hash_value = random() & i::Smi::kMaxValue; // Limit range to fit a smi. // Generate a random 32-bit hash value but limit range to fit
// within a smi.
hash_value = i::V8::Random() & i::Smi::kMaxValue;
attempts++; attempts++;
} while (hash_value == 0 && attempts < 30); } while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0 hash_value = hash_value != 0 ? hash_value : 1; // never return 0
@@ -3382,6 +3384,7 @@ void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) { void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
EnsureInitialized("v8::Debug::SetMessageHandler"); EnsureInitialized("v8::Debug::SetMessageHandler");
ENTER_V8; ENTER_V8;
HandleScope scope;
i::Debugger::SetMessageHandler(handler); i::Debugger::SetMessageHandler(handler);
} }

20
deps/v8/src/arm/builtins-arm.cc

@@ -64,9 +64,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ tst(r1, Operand(kSmiTagMask)); __ tst(r1, Operand(kSmiTagMask));
__ b(eq, &non_function_call); __ b(eq, &non_function_call);
// Check that the function is a JSFunction. // Check that the function is a JSFunction.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_FUNCTION_TYPE));
__ b(ne, &non_function_call); __ b(ne, &non_function_call);
// Enter a construct frame. // Enter a construct frame.
@@ -159,9 +157,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// If the type of the result (stored in its map) is less than // If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense. // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
__ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset)); __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
__ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ cmp(r3, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, &exit); __ b(ge, &exit);
// Throw away the result of the constructor invocation and use the // Throw away the result of the constructor invocation and use the
@@ -290,9 +286,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ tst(r1, Operand(kSmiTagMask)); __ tst(r1, Operand(kSmiTagMask));
__ b(eq, &non_function); __ b(eq, &non_function);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_FUNCTION_TYPE));
__ b(eq, &function); __ b(eq, &function);
// Non-function called: Clear the function to force exception. // Non-function called: Clear the function to force exception.
@@ -328,9 +322,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ cmp(r2, r3); __ cmp(r2, r3);
__ b(eq, &use_global_receiver); __ b(eq, &use_global_receiver);
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
__ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ cmp(r3, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &call_to_object); __ b(lt, &call_to_object);
__ cmp(r3, Operand(LAST_JS_OBJECT_TYPE)); __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
__ b(le, &done); __ b(le, &done);
@@ -501,9 +493,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Check if the receiver is already a JavaScript object. // Check if the receiver is already a JavaScript object.
// r0: receiver // r0: receiver
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
__ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &call_to_object); __ b(lt, &call_to_object);
__ cmp(r1, Operand(LAST_JS_OBJECT_TYPE)); __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
__ b(le, &push_receiver); __ b(le, &push_receiver);

10
deps/v8/src/arm/codegen-arm-inl.h

@@ -39,6 +39,16 @@ namespace internal {
void DeferredCode::Jump() { __ jmp(&entry_label_); } void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); } void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
GenerateFastMathOp(SIN, args);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
GenerateFastMathOp(COS, args);
}
#undef __ #undef __
} } // namespace v8::internal } } // namespace v8::internal

923
deps/v8/src/arm/codegen-arm.cc

File diff suppressed because it is too large

9
deps/v8/src/arm/codegen-arm.h

@@ -349,6 +349,15 @@ class CodeGenerator: public AstVisitor {
void GenerateLog(ZoneList<Expression*>* args); void GenerateLog(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
enum MathOp { SIN, COS };
void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods and constants for fast case switch statement support. // Methods and constants for fast case switch statement support.
// //
// Only allow fast-case switch if the range of labels is at most // Only allow fast-case switch if the range of labels is at most

20
deps/v8/src/arm/constants-arm.h

@@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved. // Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@@ -28,6 +28,14 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_ #ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_ #define V8_ARM_CONSTANTS_ARM_H_
// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
// are not running on real ARM hardware. One reason for this is that the
// old ABI uses fp registers in the calling convention and the simulator does
// not simulate fp registers or coroutine instructions.
#if defined(__ARM_EABI__) || !defined(__arm__)
# define USE_ARM_EABI 1
#endif
namespace assembler { namespace assembler {
namespace arm { namespace arm {
@@ -104,15 +112,9 @@ enum Shift {
// simulator. // simulator.
enum SoftwareInterruptCodes { enum SoftwareInterruptCodes {
// transition to C code // transition to C code
call_rt_r5 = 0x10, call_rt_redirected = 0x10,
call_rt_r2 = 0x11,
// break point // break point
break_point = 0x20, break_point = 0x20
// FP operations. These simulate calling into C for a moment to do fp ops.
// They should trash all caller-save registers.
simulator_fp_add = 0x21,
simulator_fp_sub = 0x22,
simulator_fp_mul = 0x23
}; };

4
deps/v8/src/arm/cpu-arm.cc

@@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved. // Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@@ -46,6 +46,8 @@ void CPU::FlushICache(void* start, size_t size) {
#if !defined (__arm__) #if !defined (__arm__)
// Not generating ARM instructions for C-code. This means that we are // Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. No I$ flushes are necessary. // building an ARM emulator based target. No I$ flushes are necessary.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
#else #else
// Ideally, we would call // Ideally, we would call
// syscall(__ARM_NR_cacheflush, start, // syscall(__ARM_NR_cacheflush, start,

18
deps/v8/src/arm/disasm-arm.cc

@@ -1,4 +1,4 @@
// Copyright 2007-2008 the V8 project authors. All rights reserved. // Copyright 2007-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@@ -253,24 +253,12 @@ void Decoder::PrintPU(Instr* instr) {
// the FormatOption method. // the FormatOption method.
void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes swi) { void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes swi) {
switch (swi) { switch (swi) {
case call_rt_r5: case call_rt_redirected:
Print("call_rt_r5"); Print("call_rt_redirected");
return;
case call_rt_r2:
Print("call_rt_r2");
return; return;
case break_point: case break_point:
Print("break_point"); Print("break_point");
return; return;
case simulator_fp_add:
Print("simulator_fp_add");
return;
case simulator_fp_mul:
Print("simulator_fp_mul");
return;
case simulator_fp_sub:
Print("simulator_fp_sub");
return;
default: default:
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", "%d",

242
deps/v8/src/arm/frames-arm.h

@@ -68,7 +68,7 @@ static const RegList kCalleeSaved =
1 << 8 | // r8 v5 (cp in JavaScript code) 1 << 8 | // r8 v5 (cp in JavaScript code)
kR9Available kR9Available
<< 9 | // r9 v6 << 9 | // r9 v6
1 << 10 | // r10 v7 (pp in JavaScript code) 1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code) 1 << 11; // r11 v8 (fp in JavaScript code)
static const int kNumCalleeSaved = 7 + kR9Available; static const int kNumCalleeSaved = 7 + kR9Available;
@@ -79,15 +79,11 @@ static const int kNumCalleeSaved = 7 + kR9Available;
class StackHandlerConstants : public AllStatic { class StackHandlerConstants : public AllStatic {
public: public:
// TODO(1233780): Get rid of the code slot in stack handlers. static const int kNextOffset = 0 * kPointerSize;
static const int kCodeOffset = 0 * kPointerSize; static const int kStateOffset = 1 * kPointerSize;
static const int kNextOffset = 1 * kPointerSize; static const int kFPOffset = 2 * kPointerSize;
static const int kStateOffset = 2 * kPointerSize; static const int kPCOffset = 3 * kPointerSize;
static const int kPPOffset = 3 * kPointerSize;
static const int kFPOffset = 4 * kPointerSize;
static const int kPCOffset = 5 * kPointerSize;
static const int kAddressDisplacement = -1 * kPointerSize;
static const int kSize = kPCOffset + kPointerSize; static const int kSize = kPCOffset + kPointerSize;
}; };
@@ -108,14 +104,14 @@ class ExitFrameConstants : public AllStatic {
static const int kSavedRegistersOffset = 0 * kPointerSize; static const int kSavedRegistersOffset = 0 * kPointerSize;
// Let the parameters pointer for exit frames point just below the
// frame structure on the stack.
static const int kPPDisplacement = 3 * kPointerSize;
// The caller fields are below the frame pointer on the stack. // The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize; static const int kCallerFPOffset = +0 * kPointerSize;
static const int kCallerPPOffset = +1 * kPointerSize; // The calling JS function is between FP and PC.
static const int kCallerPCOffset = +2 * kPointerSize; static const int kCallerPCOffset = +2 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = +3 * kPointerSize;
}; };
@@ -137,7 +133,7 @@ class JavaScriptFrameConstants : public AllStatic {
static const int kSavedRegistersOffset = +2 * kPointerSize; static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset; static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// PP-relative. // Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize; static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize; static const int kReceiverOffset = -1 * kPointerSize;
}; };
@@ -161,220 +157,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
} }
// ----------------------------------------------------
// lower | Stack |
// addresses | ^ |
// | | |
// | |
// | JS frame |
// | |
// | |
// ----------- +=============+ <--- sp (stack pointer)
// | function |
// +-------------+
// +-------------+
// | |
// | expressions |
// | |
// +-------------+
// | |
// a | locals |
// c | |
// t +- - - - - - -+ <---
// i -4 | local0 | ^
// v +-------------+ |
// a -3 | code | |
// t +-------------+ | kLocal0Offset
// i -2 | context | |
// o +-------------+ |
// n -1 | args_length | v
// +-------------+ <--- fp (frame pointer)
// 0 | caller_pp |
// f +-------------+
// r 1 | caller_fp |
// a +-------------+
// m 2 | sp_on_exit | (pp if return, caller_sp if no return)
// e +-------------+
// 3 | caller_pc |
// +-------------+ <--- caller_sp (incl. parameters)
// | |
// | parameters |
// | |
// +- - - - - - -+ <---
// -2 | parameter0 | ^
// +-------------+ | kParam0Offset
// -1 | receiver | v
// ----------- +=============+ <--- pp (parameter pointer, r10)
// 0 | function |
// +-------------+
// | |
// |caller-saved | (must be valid JS values, traversed during GC)
// | regs |
// | |
// +-------------+
// | |
// | caller |
// higher | expressions |
// addresses | |
// | |
// | JS frame |
// Handler frames (part of expressions of JS frames):
// lower | Stack |
// addresses | ^ |
// | | |
// | |
// h | expressions |
// a | |
// n +-------------+
// d -1 | code |
// l +-------------+ <--- handler sp
// e 0 | next_sp | link to next handler (next handler's sp)
// r +-------------+
// 1 | state |
// f +-------------+
// r 2 | pp |
// a +-------------+
// m 3 | fp |
// e +-------------+
// 4 | pc |
// +-------------+
// | |
// higher | expressions |
// addresses | |
// JS entry frames: When calling from C to JS, we construct two extra
// frames: An entry frame (C) and a trampoline frame (JS). The
// following pictures shows the two frames:
// lower | Stack |
// addresses | ^ |
// | | |
// | |
// | JS frame |
// | |
// | |
// ----------- +=============+ <--- sp (stack pointer)
// | |
// | parameters |
// t | |
// r +- - - - - - -+
// a | parameter0 |
// m +-------------+
// p | receiver |
// o +-------------+
// l | function |
// i +-------------+
// n -3 | code |
// e +-------------+
// -2 | NULL | context is always NULL
// +-------------+
// f -1 | 0 | args_length is always zero
// r +-------------+ <--- fp (frame pointer)
// a 0 | NULL | caller pp is always NULL for entries
// m +-------------+
// e 1 | caller_fp |
// +-------------+
// 2 | sp_on_exit | (caller_sp)
// +-------------+
// 3 | caller_pc |
// ----------- +=============+ <--- caller_sp == pp
// . ^
// . | try-handler, fake, not GC'ed
// . v
// +-------------+ <---
// -2 | next top pp |
// +-------------+
// -1 | next top fp |
// +-------------+ <--- fp
// | r4 | r4-r9 holding non-JS values must be preserved
// +-------------+
// J | r5 | before being initialized not to confuse GC
// S +-------------+
// | r6 |
// +-------------+
// e | r7 |
// n +-------------+
// t | r8 |
// r +-------------+
// y [ | r9 | ] only if r9 available
// +-------------+
// | r10 |
// f +-------------+
// r | r11 |
// a +-------------+
// m | caller_sp |
// e +-------------+
// | caller_pc |
// +-------------+ <--- caller_sp
// | argv | passed on stack from C code
// +-------------+
// | |
// higher | |
// addresses | C frame |
// The first 4 args are passed from C in r0-r3 and are not spilled on entry:
// r0: code entry
// r1: function
// r2: receiver
// r3: argc
// [sp+0]: argv
// C entry frames: When calling from JS to C, we construct one extra
// frame:
// lower | Stack |
// addresses | ^ |
// | | |
// | |
// | C frame |
// | |
// | |
// ----------- +=============+ <--- sp (stack pointer)
// | |
// | parameters | (first 4 args are passed in r0-r3)
// | |
// +-------------+ <--- fp (frame pointer)
// f 4/5 | caller_fp |
// r +-------------+
// a 5/6 | sp_on_exit | (pp)
// m +-------------+
// e 6/7 | caller_pc |
// +-------------+ <--- caller_sp (incl. parameters)
// 7/8 | |
// | parameters |
// | |
// +- - - - - - -+ <---
// -2 | parameter0 | ^
// +-------------+ | kParam0Offset
// -1 | receiver | v
// ----------- +=============+ <--- pp (parameter pointer, r10)
// 0 | function |
// +-------------+
// | |
// |caller-saved |
// | regs |
// | |
// +-------------+
// | |
// | caller |
// | expressions |
// | |
// higher | |
// addresses | JS frame |
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_ARM_FRAMES_ARM_H_ #endif // V8_ARM_FRAMES_ARM_H_

30
deps/v8/src/arm/ic-arm.cc

@@ -223,9 +223,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Check for number. // Check for number.
__ tst(r1, Operand(kSmiTagMask)); __ tst(r1, Operand(kSmiTagMask));
__ b(eq, &number); __ b(eq, &number);
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
__ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ cmp(r3, Operand(HEAP_NUMBER_TYPE));
__ b(ne, &non_number); __ b(ne, &non_number);
__ bind(&number); __ bind(&number);
StubCompiler::GenerateLoadGlobalFunctionPrototype( StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -272,9 +270,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ b(eq, miss); __ b(eq, miss);
// Check that the value is a JSFunction. // Check that the value is a JSFunction.
__ ldr(r0, FieldMemOperand(r1, HeapObject::kMapOffset)); __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
__ cmp(r0, Operand(JS_FUNCTION_TYPE));
__ b(ne, miss); __ b(ne, miss);
// Check that the function has been loaded. // Check that the function has been loaded.
@@ -312,10 +308,8 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ tst(r1, Operand(kSmiTagMask)); __ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss); __ b(eq, &miss);
// Check that the receiver is a valid JS object. // Check that the receiver is a valid JS object. Put the map in r3.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); __ CompareObjectType(r1, r3, r0, FIRST_JS_OBJECT_TYPE);
__ ldrb(r0, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &miss); __ b(lt, &miss);
// If this assert fails, we have to check upper bound too. // If this assert fails, we have to check upper bound too.
@@ -392,9 +386,7 @@ void CallIC::Generate(MacroAssembler* masm,
__ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
__ tst(r2, Operand(kSmiTagMask)); __ tst(r2, Operand(kSmiTagMask));
__ b(eq, &invoke); __ b(eq, &invoke);
__ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
__ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, &global); __ b(eq, &global);
__ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE)); __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
__ b(ne, &invoke); __ b(ne, &invoke);
@@ -447,10 +439,8 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ tst(r0, Operand(kSmiTagMask)); __ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss); __ b(eq, &miss);
// Check that the receiver is a valid JS object. // Check that the receiver is a valid JS object. Put the map in r3.
__ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset)); __ CompareObjectType(r0, r3, r1, FIRST_JS_OBJECT_TYPE);
__ ldrb(r1, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &miss); __ b(lt, &miss);
// If this assert fails, we have to check upper bound too. // If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@@ -513,6 +503,12 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false; return false;
} }
void KeyedStoreIC::ClearInlinedVersion(Address address) {}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
return false;
}
Object* KeyedLoadIC_Miss(Arguments args); Object* KeyedLoadIC_Miss(Arguments args);

4
deps/v8/src/arm/jump-target-arm.cc

@@ -149,7 +149,7 @@ void JumpTarget::Call() {
} }
void JumpTarget::DoBind(int mergable_elements) { void JumpTarget::DoBind() {
ASSERT(!is_bound()); ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic // Live non-frame registers are not allowed at the start of a basic
@@ -207,7 +207,7 @@ void JumpTarget::DoBind(int mergable_elements) {
// Compute the frame to use for entry to the block. // Compute the frame to use for entry to the block.
if (entry_frame_ == NULL) { if (entry_frame_ == NULL) {
ComputeEntryFrame(mergable_elements); ComputeEntryFrame();
} }
// Some moves required to merge to an expected frame require purely // Some moves required to merge to an expected frame require purely

126
deps/v8/src/arm/macro-assembler-arm.cc

@@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved. // Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@@ -35,11 +35,6 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
// Give alias names to registers
Register cp = { 8 }; // JavaScript context pointer
Register pp = { 10 }; // parameter pointer
MacroAssembler::MacroAssembler(void* buffer, int size) MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size), : Assembler(buffer, size),
unresolved_(0), unresolved_(0),
@@ -128,26 +123,10 @@ void MacroAssembler::Call(Register target, Condition cond) {
void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
Condition cond) { Condition cond) {
#if !defined(__arm__)
if (rmode == RelocInfo::RUNTIME_ENTRY) {
mov(r2, Operand(target, rmode), LeaveCC, cond);
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
// Notify the simulator of the transition to C code.
swi(assembler::arm::call_rt_r2);
} else {
// set lr for return at current pc + 8
mov(lr, Operand(pc), LeaveCC, cond);
// emit a ldr<cond> pc, [pc + offset of target in constant pool]
mov(pc, Operand(target, rmode), LeaveCC, cond);
}
#else
// Set lr for return at current pc + 8. // Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond); mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool]. // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
mov(pc, Operand(target, rmode), LeaveCC, cond); mov(pc, Operand(target, rmode), LeaveCC, cond);
#endif // !defined(__arm__)
// If USE_BLX is defined, we could emit a 'mov ip, target', followed by a // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
// 'blx ip'; however, the code would not be shorter than the above sequence // 'blx ip'; however, the code would not be shorter than the above sequence
// and the target address of the call would be referenced by the first // and the target address of the call would be referenced by the first
@@ -301,8 +280,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
sub(r6, r6, Operand(kPointerSize)); sub(r6, r6, Operand(kPointerSize));
// Compute parameter pointer before making changes and save it as ip // Compute callee's stack pointer before making changes and save it as
// register so that it is restored as sp register on exit, thereby // ip register so that it is restored as sp register on exit, thereby
// popping the args. // popping the args.
// ip = sp + kPointerSize * #args; // ip = sp + kPointerSize * #args;
@@ -573,41 +552,48 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
} }
#endif #endif
void MacroAssembler::PushTryHandler(CodeLocation try_location, void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) { HandlerType type) {
ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code // Adjust this code if not the case.
ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// The pc (return address) is passed in register lr. // The pc (return address) is passed in register lr.
if (try_location == IN_JAVASCRIPT) { if (try_location == IN_JAVASCRIPT) {
stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
if (type == TRY_CATCH_HANDLER) { if (type == TRY_CATCH_HANDLER) {
mov(r3, Operand(StackHandler::TRY_CATCH)); mov(r3, Operand(StackHandler::TRY_CATCH));
} else { } else {
mov(r3, Operand(StackHandler::TRY_FINALLY)); mov(r3, Operand(StackHandler::TRY_FINALLY));
} }
push(r3); // state ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
&& StackHandlerConstants::kFPOffset == 2 * kPointerSize
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
// Save the current handler as the next handler.
mov(r3, Operand(ExternalReference(Top::k_handler_address))); mov(r3, Operand(ExternalReference(Top::k_handler_address)));
ldr(r1, MemOperand(r3)); ldr(r1, MemOperand(r3));
push(r1); // next sp ASSERT(StackHandlerConstants::kNextOffset == 0);
str(sp, MemOperand(r3)); // chain handler push(r1);
mov(r0, Operand(Smi::FromInt(StackHandler::kCodeNotPresent))); // new TOS // Link this handler as the new current one.
push(r0); str(sp, MemOperand(r3));
} else { } else {
// Must preserve r0-r4, r5-r7 are available. // Must preserve r0-r4, r5-r7 are available.
ASSERT(try_location == IN_JS_ENTRY); ASSERT(try_location == IN_JS_ENTRY);
// The parameter pointer is meaningless here and fp does not point to a JS // The frame pointer does not point to a JS frame so we save NULL
// frame. So we save NULL for both pp and fp. We expect the code throwing an // for fp. We expect the code throwing an exception to check fp
// exception to check fp before dereferencing it to restore the context. // before dereferencing it to restore the context.
mov(pp, Operand(0)); // set pp to NULL mov(ip, Operand(0)); // To save a NULL frame pointer.
mov(ip, Operand(0)); // to save a NULL fp
stm(db_w, sp, pp.bit() | ip.bit() | lr.bit());
mov(r6, Operand(StackHandler::ENTRY)); mov(r6, Operand(StackHandler::ENTRY));
push(r6); // state ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
&& StackHandlerConstants::kFPOffset == 2 * kPointerSize
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize);
stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
// Save the current handler as the next handler.
mov(r7, Operand(ExternalReference(Top::k_handler_address))); mov(r7, Operand(ExternalReference(Top::k_handler_address)));
ldr(r6, MemOperand(r7)); ldr(r6, MemOperand(r7));
push(r6); // next sp ASSERT(StackHandlerConstants::kNextOffset == 0);
str(sp, MemOperand(r7)); // chain handler push(r6);
mov(r5, Operand(Smi::FromInt(StackHandler::kCodeNotPresent))); // new TOS // Link this handler as the new current one.
push(r5); // flush TOS str(sp, MemOperand(r7));
} }
} }
@ -759,6 +745,62 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
} }
void MacroAssembler::CompareObjectType(Register function,
Register map,
Register type_reg,
InstanceType type) {
ldr(map, FieldMemOperand(function, HeapObject::kMapOffset));
ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
}
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
Label* miss) {
// Check that the receiver isn't a smi.
BranchOnSmi(function, miss);
// Check that the function really is a function. Load map into result reg.
CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
b(ne, miss);
// Make sure that the function has an instance prototype.
Label non_instance;
ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
b(ne, &non_instance);
// Get the prototype or initial map from the function.
ldr(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// If the prototype or initial map is the hole, don't return it and
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
cmp(result, Operand(Factory::the_hole_value()));
b(eq, miss);
// If the function does not have an initial map, we're done.
Label done;
CompareObjectType(result, scratch, scratch, MAP_TYPE);
b(ne, &done);
// Get the prototype from the initial map.
ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
jmp(&done);
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
// All done.
bind(&done);
}
void MacroAssembler::CallStub(CodeStub* stub) { void MacroAssembler::CallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
Call(stub->GetCode(), RelocInfo::CODE_TARGET); Call(stub->GetCode(), RelocInfo::CODE_TARGET);

35
deps/v8/src/arm/macro-assembler-arm.h

@ -35,8 +35,7 @@ namespace internal {
// Give alias names to registers // Give alias names to registers
extern Register cp; // JavaScript context pointer const Register cp = { 8 }; // JavaScript context pointer
extern Register pp; // parameter pointer
// Helper types to make boolean flag easier to read at call-site. // Helper types to make boolean flag easier to read at call-site.
@ -187,6 +186,38 @@ class MacroAssembler: public Assembler {
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Support functions. // Support functions.
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
// It leaves the map in the map register (unless the type_reg and map register
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
// other // registers.
void CompareObjectType(Register heap_object,
Register map,
Register type_reg,
InstanceType type);
inline void BranchOnSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
}
inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
tst(value, Operand(kSmiTagMask));
b(ne, not_smi_label);
}
// Generates code for reporting that an illegal operation has // Generates code for reporting that an illegal operation has
// occurred. // occurred.
void IllegalOperation(int num_arguments); void IllegalOperation(int num_arguments);

169
deps/v8/src/arm/simulator-arm.cc

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved. // Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -30,6 +30,7 @@
#include "v8.h" #include "v8.h"
#include "disasm.h" #include "disasm.h"
#include "assembler.h"
#include "arm/constants-arm.h" #include "arm/constants-arm.h"
#include "arm/simulator-arm.h" #include "arm/simulator-arm.h"
@ -380,7 +381,23 @@ void Debugger::Debug() {
} }
// Create one simulator per thread and keep it in thread local storage.
static v8::internal::Thread::LocalStorageKey simulator_key;
bool Simulator::initialized_ = false;
void Simulator::Initialize() {
if (initialized_) return;
simulator_key = v8::internal::Thread::CreateThreadLocalKey();
initialized_ = true;
::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
}
Simulator::Simulator() { Simulator::Simulator() {
ASSERT(initialized_);
// Setup simulator support first. Some of this information is needed to // Setup simulator support first. Some of this information is needed to
// setup the architecture state. // setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@ -412,9 +429,63 @@ Simulator::Simulator() {
} }
// Create one simulator per thread and keep it in thread local storage. // When the generated code calls an external reference we need to catch that in
static v8::internal::Thread::LocalStorageKey simulator_key = // the simulator. The external reference will be a function compiled for the
v8::internal::Thread::CreateThreadLocalKey(); // host architecture. We need to call that function instead of trying to
// execute it with the simulator. We do that by redirecting the external
// reference to a swi (software-interrupt) instruction that is handled by
// the simulator. We write the original destination of the jump just at a known
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
public:
Redirection(void* external_function, bool fp_return)
: external_function_(external_function),
swi_instruction_((AL << 28) | (0xf << 24) | call_rt_redirected),
fp_return_(fp_return),
next_(list_) {
list_ = this;
}
void* address_of_swi_instruction() {
return reinterpret_cast<void*>(&swi_instruction_);
}
void* external_function() { return external_function_; }
bool fp_return() { return fp_return_; }
static Redirection* Get(void* external_function, bool fp_return) {
Redirection* current;
for (current = list_; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
return new Redirection(external_function, fp_return);
}
static Redirection* FromSwiInstruction(Instr* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
private:
void* external_function_;
uint32_t swi_instruction_;
bool fp_return_;
Redirection* next_;
static Redirection* list_;
};
Redirection* Redirection::list_ = NULL;
void* Simulator::RedirectExternalReference(void* external_function,
bool fp_return) {
Redirection* redirection = Redirection::Get(external_function, fp_return);
return redirection->address_of_swi_instruction();
}
// Get the active Simulator for the current thread. // Get the active Simulator for the current thread.
Simulator* Simulator::current() { Simulator* Simulator::current() {
@ -921,7 +992,14 @@ void Simulator::HandleRList(Instr* instr, bool load) {
// 64-bit value. With the code below we assume that all runtime calls return // 64-bit value. With the code below we assume that all runtime calls return
// 64 bits of result. If they don't, the r1 result register contains a bogus // 64 bits of result. If they don't, the r1 result register contains a bogus
// value, which is fine because it is caller-saved. // value, which is fine because it is caller-saved.
typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1); typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
int32_t arg3);
typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
int32_t arg3);
// Software interrupt instructions are used by the simulator to call into the // Software interrupt instructions are used by the simulator to call into the
@ -929,30 +1007,51 @@ typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1);
void Simulator::SoftwareInterrupt(Instr* instr) { void Simulator::SoftwareInterrupt(Instr* instr) {
int swi = instr->SwiField(); int swi = instr->SwiField();
switch (swi) { switch (swi) {
case call_rt_r5: { case call_rt_redirected: {
SimulatorRuntimeCall target = Redirection* redirection = Redirection::FromSwiInstruction(instr);
reinterpret_cast<SimulatorRuntimeCall>(get_register(r5)); int32_t arg0 = get_register(r0);
intptr_t arg0 = get_register(r0); int32_t arg1 = get_register(r1);
intptr_t arg1 = get_register(r1); int32_t arg2 = get_register(r2);
int64_t result = target(arg0, arg1); int32_t arg3 = get_register(r3);
int32_t lo_res = static_cast<int32_t>(result); // This is dodgy but it works because the C entry stubs are never moved.
int32_t hi_res = static_cast<int32_t>(result >> 32); // See comment in codegen-arm.cc and bug 1242173.
set_register(r0, lo_res); int32_t saved_lr = get_register(lr);
set_register(r1, hi_res); if (redirection->fp_return()) {
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize); intptr_t external =
break; reinterpret_cast<intptr_t>(redirection->external_function());
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
if (::v8::internal::FLAG_trace_sim) {
double x, y;
GetFpArgs(&x, &y);
PrintF("Call to host function at %p with args %f, %f\n",
FUNCTION_ADDR(target), x, y);
} }
case call_rt_r2: { double result = target(arg0, arg1, arg2, arg3);
SetFpResult(result);
} else {
intptr_t external =
reinterpret_cast<int32_t>(redirection->external_function());
SimulatorRuntimeCall target = SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(get_register(r2)); reinterpret_cast<SimulatorRuntimeCall>(external);
intptr_t arg0 = get_register(r0); if (::v8::internal::FLAG_trace_sim) {
intptr_t arg1 = get_register(r1); PrintF(
int64_t result = target(arg0, arg1); "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
arg3);
}
int64_t result = target(arg0, arg1, arg2, arg3);
int32_t lo_res = static_cast<int32_t>(result); int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32); int32_t hi_res = static_cast<int32_t>(result >> 32);
set_register(r0, lo_res); set_register(r0, lo_res);
set_register(r1, hi_res); set_register(r1, hi_res);
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize); set_register(r0, result);
}
set_register(lr, saved_lr);
set_pc(get_register(lr));
break; break;
} }
case break_point: { case break_point: {
@ -960,30 +1059,6 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
dbg.Debug(); dbg.Debug();
break; break;
} }
{
double x, y, z;
case simulator_fp_add:
GetFpArgs(&x, &y);
z = x + y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
case simulator_fp_sub:
GetFpArgs(&x, &y);
z = x - y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
case simulator_fp_mul:
GetFpArgs(&x, &y);
z = x * y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
}
default: { default: {
UNREACHABLE(); UNREACHABLE();
break; break;

10
deps/v8/src/arm/simulator-arm.h

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved. // Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -106,6 +106,9 @@ class Simulator {
// Executes ARM instructions until the PC reaches end_sim_pc. // Executes ARM instructions until the PC reaches end_sim_pc.
void Execute(); void Execute();
// Call on program start.
static void Initialize();
// V8 generally calls into generated code with 5 parameters. This is a // V8 generally calls into generated code with 5 parameters. This is a
// convenience function, which sets up the simulator state and grabs the // convenience function, which sets up the simulator state and grabs the
// result on return. // result on return.
@ -175,6 +178,10 @@ class Simulator {
// Executes one instruction. // Executes one instruction.
void InstructionDecode(Instr* instr); void InstructionDecode(Instr* instr);
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
bool fp_return);
// For use in calls that take two double values, constructed from r0, r1, r2 // For use in calls that take two double values, constructed from r0, r1, r2
// and r3. // and r3.
void GetFpArgs(double* x, double* y); void GetFpArgs(double* x, double* y);
@ -192,6 +199,7 @@ class Simulator {
char* stack_; char* stack_;
bool pc_modified_; bool pc_modified_;
int icount_; int icount_;
static bool initialized_;
// registered breakpoints // registered breakpoints
Instr* break_pc_; Instr* break_pc_;

16
deps/v8/src/arm/stub-cache-arm.cc

@ -283,9 +283,7 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
__ b(eq, miss_label); __ b(eq, miss_label);
// Check that the object is a JS array. // Check that the object is a JS array.
__ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset)); __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ cmp(scratch, Operand(JS_ARRAY_TYPE));
__ b(ne, miss_label); __ b(ne, miss_label);
// Load length directly from the JS array. // Load length directly from the JS array.
@ -523,9 +521,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
__ tst(r1, Operand(kSmiTagMask)); __ tst(r1, Operand(kSmiTagMask));
__ b(eq, &miss); __ b(eq, &miss);
// Get the map. // Get the map.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_FUNCTION_TYPE));
__ b(ne, &miss); __ b(ne, &miss);
// Patch the receiver on the stack with the global proxy if // Patch the receiver on the stack with the global proxy if
@ -588,9 +584,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case STRING_CHECK: case STRING_CHECK:
// Check that the object is a two-byte string or a symbol. // Check that the object is a two-byte string or a symbol.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
__ b(hs, &miss); __ b(hs, &miss);
// Check that the maps starting from the prototype haven't changed. // Check that the maps starting from the prototype haven't changed.
GenerateLoadGlobalFunctionPrototype(masm(), GenerateLoadGlobalFunctionPrototype(masm(),
@ -605,9 +599,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// Check that the object is a smi or a heap number. // Check that the object is a smi or a heap number.
__ tst(r1, Operand(kSmiTagMask)); __ tst(r1, Operand(kSmiTagMask));
__ b(eq, &fast); __ b(eq, &fast);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(HEAP_NUMBER_TYPE));
__ b(ne, &miss); __ b(ne, &miss);
__ bind(&fast); __ bind(&fast);
// Check that the maps starting from the prototype haven't changed. // Check that the maps starting from the prototype haven't changed.

8
deps/v8/src/arm/virtual-frame-arm.cc

@ -156,9 +156,7 @@ void VirtualFrame::Enter() {
__ b(ne, &map_check); __ b(ne, &map_check);
__ stop("VirtualFrame::Enter - r1 is not a function (smi check)."); __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
__ bind(&map_check); __ bind(&map_check);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_FUNCTION_TYPE));
__ b(eq, &done); __ b(eq, &done);
__ stop("VirtualFrame::Enter - r1 is not a function (map check)."); __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
__ bind(&done); __ bind(&done);
@ -230,8 +228,8 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
void VirtualFrame::PushTryHandler(HandlerType type) { void VirtualFrame::PushTryHandler(HandlerType type) {
// Grow the expression stack by handler size less one (the return address // Grow the expression stack by handler size less one (the return
// is already pushed by a call instruction). // address in lr is already counted by a call instruction).
Adjust(kHandlerSize - 1); Adjust(kHandlerSize - 1);
__ PushTryHandler(IN_JAVASCRIPT, type); __ PushTryHandler(IN_JAVASCRIPT, type);
} }

63
deps/v8/src/array.js

@ -769,6 +769,63 @@ function ArraySort(comparefn) {
} }
} }
function SafeRemoveArrayHoles(obj) {
// Copy defined elements from the end to fill in all holes and undefineds
// in the beginning of the array. Write undefineds and holes at the end
// after loop is finished.
var first_undefined = 0;
var last_defined = length - 1;
var num_holes = 0;
while (first_undefined < last_defined) {
// Find first undefined element.
while (first_undefined < last_defined &&
!IS_UNDEFINED(obj[first_undefined])) {
first_undefined++;
}
// Maintain the invariant num_holes = the number of holes in the original
// array with indices <= first_undefined or > last_defined.
if (!obj.hasOwnProperty(first_undefined)) {
num_holes++;
}
// Find last defined element.
while (first_undefined < last_defined &&
IS_UNDEFINED(obj[last_defined])) {
if (!obj.hasOwnProperty(last_defined)) {
num_holes++;
}
last_defined--;
}
if (first_undefined < last_defined) {
// Fill in hole or undefined.
obj[first_undefined] = obj[last_defined];
obj[last_defined] = void 0;
}
}
// If there were any undefineds in the entire array, first_undefined
// points to one past the last defined element. Make this true if
// there were no undefineds, as well, so that first_undefined == number
// of defined elements.
if (!IS_UNDEFINED(obj[first_undefined])) first_undefined++;
// Fill in the undefineds and the holes. There may be a hole where
// an undefined should be and vice versa.
var i;
for (i = first_undefined; i < length - num_holes; i++) {
obj[i] = void 0;
}
for (i = length - num_holes; i < length; i++) {
// For compatability with Webkit, do not expose elements in the prototype.
if (i in obj.__proto__) {
obj[i] = void 0;
} else {
delete obj[i];
}
}
// Return the number of defined elements.
return first_undefined;
}
var length = ToUint32(this.length); var length = ToUint32(this.length);
if (length < 2) return this; if (length < 2) return this;
@ -787,6 +844,12 @@ function ArraySort(comparefn) {
} }
var num_non_undefined = %RemoveArrayHoles(this, length); var num_non_undefined = %RemoveArrayHoles(this, length);
if (num_non_undefined == -1) {
// There were indexed accessors in the array. Move array holes and
// undefineds to the end using a Javascript function that is safe
// in the presence of accessors.
num_non_undefined = SafeRemoveArrayHoles(this);
}
QuickSort(this, 0, num_non_undefined); QuickSort(this, 0, num_non_undefined);

31
deps/v8/src/assembler.cc

@ -30,7 +30,7 @@
// The original source code covered by the above license above has been // The original source code covered by the above license above has been
// modified significantly by Google Inc. // modified significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved. // Copyright 2006-2009 the V8 project authors. All rights reserved.
#include "v8.h" #include "v8.h"
@ -363,7 +363,7 @@ void RelocIterator::next() {
if (SetMode(DebugInfoModeFromTag(top_tag))) return; if (SetMode(DebugInfoModeFromTag(top_tag))) return;
} else { } else {
// Otherwise, just skip over the data. // Otherwise, just skip over the data.
Advance(kIntSize); Advance(kIntptrSize);
} }
} else { } else {
AdvanceReadPC(); AdvanceReadPC();
@ -508,7 +508,7 @@ void RelocInfo::Verify() {
// Implementation of ExternalReference // Implementation of ExternalReference
ExternalReference::ExternalReference(Builtins::CFunctionId id) ExternalReference::ExternalReference(Builtins::CFunctionId id)
: address_(Builtins::c_function_address(id)) {} : address_(Redirect(Builtins::c_function_address(id))) {}
ExternalReference::ExternalReference(Builtins::Name name) ExternalReference::ExternalReference(Builtins::Name name)
@ -516,15 +516,15 @@ ExternalReference::ExternalReference(Builtins::Name name)
ExternalReference::ExternalReference(Runtime::FunctionId id) ExternalReference::ExternalReference(Runtime::FunctionId id)
: address_(Runtime::FunctionForId(id)->entry) {} : address_(Redirect(Runtime::FunctionForId(id)->entry)) {}
ExternalReference::ExternalReference(Runtime::Function* f) ExternalReference::ExternalReference(Runtime::Function* f)
: address_(f->entry) {} : address_(Redirect(f->entry)) {}
ExternalReference::ExternalReference(const IC_Utility& ic_utility) ExternalReference::ExternalReference(const IC_Utility& ic_utility)
: address_(ic_utility.address()) {} : address_(Redirect(ic_utility.address())) {}
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference::ExternalReference(const Debug_Address& debug_address) ExternalReference::ExternalReference(const Debug_Address& debug_address)
@ -543,10 +543,21 @@ ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {} : address_(table_ref.address()) {}
ExternalReference ExternalReference::perform_gc_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(Runtime::PerformGC)));
}
ExternalReference ExternalReference::builtin_passed_function() { ExternalReference ExternalReference::builtin_passed_function() {
return ExternalReference(&Builtins::builtin_passed_function); return ExternalReference(&Builtins::builtin_passed_function);
} }
ExternalReference ExternalReference::random_positive_smi_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi)));
}
ExternalReference ExternalReference::the_hole_value_location() { ExternalReference ExternalReference::the_hole_value_location() {
return ExternalReference(Factory::the_hole_value().location()); return ExternalReference(Factory::the_hole_value().location());
} }
@ -614,13 +625,17 @@ ExternalReference ExternalReference::double_fp_operation(
default: default:
UNREACHABLE(); UNREACHABLE();
} }
return ExternalReference(FUNCTION_ADDR(function)); // Passing true as 2nd parameter indicates that they return an fp value.
return ExternalReference(Redirect(FUNCTION_ADDR(function), true));
} }
ExternalReferenceRedirector* ExternalReference::redirector_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break() { ExternalReference ExternalReference::debug_break() {
return ExternalReference(FUNCTION_ADDR(Debug::Break)); return ExternalReference(Redirect(FUNCTION_ADDR(Debug::Break)));
} }

42
deps/v8/src/assembler.h

@ -30,7 +30,7 @@
// The original source code covered by the above license above has been // The original source code covered by the above license above has been
// modified significantly by Google Inc. // modified significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved. // Copyright 2006-2009 the V8 project authors. All rights reserved.
#ifndef V8_ASSEMBLER_H_ #ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_ #define V8_ASSEMBLER_H_
@ -352,10 +352,15 @@ class SCTableReference;
class Debug_Address; class Debug_Address;
#endif #endif
// An ExternalReference represents a C++ address called from the generated
// code. All references to C++ functions and must be encapsulated in an typedef void* ExternalReferenceRedirector(void* original, bool fp_return);
// ExternalReference instance. This is done in order to track the origin of
// all external references in the code.
// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated in
// an ExternalReference instance. This is done in order to track the origin of
// all external references in the code so that they can be bound to the correct
// addresses when deserializing a heap.
class ExternalReference BASE_EMBEDDED { class ExternalReference BASE_EMBEDDED {
public: public:
explicit ExternalReference(Builtins::CFunctionId id); explicit ExternalReference(Builtins::CFunctionId id);
@ -382,7 +387,9 @@ class ExternalReference BASE_EMBEDDED {
// pattern. This means that they have to be added to the // pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually. // ExternalReferenceTable in serialize.cc manually.
static ExternalReference perform_gc_function();
static ExternalReference builtin_passed_function(); static ExternalReference builtin_passed_function();
static ExternalReference random_positive_smi_function();
// Static variable Factory::the_hole_value.location() // Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location(); static ExternalReference the_hole_value_location();
@ -403,7 +410,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference double_fp_operation(Token::Value operation); static ExternalReference double_fp_operation(Token::Value operation);
Address address() const {return address_;} Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
// Function Debug::Break() // Function Debug::Break()
@ -413,11 +420,30 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference debug_step_in_fp_address(); static ExternalReference debug_step_in_fp_address();
#endif #endif
// This lets you register a function that rewrites all external references.
// Used by the ARM simulator to catch calls to external references.
static void set_redirector(ExternalReferenceRedirector* redirector) {
ASSERT(redirector_ == NULL); // We can't stack them.
redirector_ = redirector;
}
private: private:
explicit ExternalReference(void* address) explicit ExternalReference(void* address)
: address_(reinterpret_cast<Address>(address)) {} : address_(address) {}
static ExternalReferenceRedirector* redirector_;
static void* Redirect(void* address, bool fp_return = false) {
if (redirector_ == NULL) return address;
return (*redirector_)(address, fp_return);
}
static void* Redirect(Address address_arg, bool fp_return = false) {
void* address = reinterpret_cast<void*>(address_arg);
return redirector_ == NULL ? address : (*redirector_)(address, fp_return);
}
Address address_; void* address_;
}; };

3
deps/v8/src/bootstrapper.cc

@ -1113,6 +1113,9 @@ bool Genesis::InstallNatives() {
} }
#ifdef V8_HOST_ARCH_64_BIT #ifdef V8_HOST_ARCH_64_BIT
// TODO(X64): Remove these tests when code generation works and is stable.
MacroAssembler::ConstructAndTestJSFunction();
CodeGenerator::TestCodeGenerator();
// TODO(X64): Reenable remaining initialization when code generation works. // TODO(X64): Reenable remaining initialization when code generation works.
return true; return true;
#endif // V8_HOST_ARCH_64_BIT #endif // V8_HOST_ARCH_64_BIT

3
deps/v8/src/builtins.cc

@ -720,7 +720,8 @@ void Builtins::Setup(bool create_heap_objects) {
// bootstrapper. // bootstrapper.
Bootstrapper::AddFixup(Code::cast(code), &masm); Bootstrapper::AddFixup(Code::cast(code), &masm);
// Log the event and add the code to the builtins array. // Log the event and add the code to the builtins array.
LOG(CodeCreateEvent("Builtin", Code::cast(code), functions[i].s_name)); LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), functions[i].s_name));
builtins_[i] = code; builtins_[i] = code;
#ifdef ENABLE_DISASSEMBLER #ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) { if (FLAG_print_builtin_code) {

2
deps/v8/src/code-stubs.cc

@ -66,7 +66,7 @@ Handle<Code> CodeStub::GetCode() {
// Add unresolved entries in the code to the fixup list. // Add unresolved entries in the code to the fixup list.
Bootstrapper::AddFixup(*code, &masm); Bootstrapper::AddFixup(*code, &masm);
LOG(CodeCreateEvent("Stub", *code, GetName())); LOG(CodeCreateEvent(Logger::STUB_TAG, *code, GetName()));
Counters::total_stubs_code_size.Increment(code->instruction_size()); Counters::total_stubs_code_size.Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER #ifdef ENABLE_DISASSEMBLER

2
deps/v8/src/code-stubs.h

@ -41,6 +41,8 @@ class CodeStub BASE_EMBEDDED {
SmiOp, SmiOp,
Compare, Compare,
RecordWrite, // Last stub that allows stub calls inside. RecordWrite, // Last stub that allows stub calls inside.
ConvertToDouble,
WriteInt32ToHeapNumber,
StackCheck, StackCheck,
UnarySub, UnarySub,
RevertToNumber, RevertToNumber,

11
deps/v8/src/codegen.cc

@ -302,12 +302,12 @@ Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
} }
// Function compilation complete. // Function compilation complete.
LOG(CodeCreateEvent("Function", *code, *node->name())); LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *node->name()));
#ifdef ENABLE_OPROFILE_AGENT #ifdef ENABLE_OPROFILE_AGENT
OProfileAgent::CreateNativeCodeRegion(*node->name(), OProfileAgent::CreateNativeCodeRegion(*node->name(),
code->address(), code->instruction_start(),
code->ExecutableSize()); code->instruction_size());
#endif #endif
} }
@ -422,7 +422,10 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateSetValueOf, "_SetValueOf"}, {&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
{&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"}, {&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"}, {&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
{&CodeGenerator::GenerateLog, "_Log"} {&CodeGenerator::GenerateLog, "_Log"},
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
{&CodeGenerator::GenerateMathSin, "_Math_sin"},
{&CodeGenerator::GenerateMathCos, "_Math_cos"}
}; };

18
deps/v8/src/codegen.h

@ -228,13 +228,27 @@ class StackCheckStub : public CodeStub {
}; };
class InstanceofStub: public CodeStub {
public:
InstanceofStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return Instanceof; }
int MinorKey() { return 0; }
};
class UnarySubStub : public CodeStub { class UnarySubStub : public CodeStub {
public: public:
UnarySubStub() { } explicit UnarySubStub(bool overwrite)
: overwrite_(overwrite) { }
private: private:
bool overwrite_;
Major MajorKey() { return UnarySub; } Major MajorKey() { return UnarySub; }
int MinorKey() { return 0; } int MinorKey() { return overwrite_ ? 1 : 0; }
void Generate(MacroAssembler* masm); void Generate(MacroAssembler* masm);
const char* GetName() { return "UnarySubStub"; } const char* GetName() { return "UnarySubStub"; }

28
deps/v8/src/compiler.cc

@ -179,13 +179,17 @@ static Handle<JSFunction> MakeFunction(bool is_global,
if (script->name()->IsString()) { if (script->name()->IsString()) {
SmartPointer<char> data = SmartPointer<char> data =
String::cast(script->name())->ToCString(DISALLOW_NULLS); String::cast(script->name())->ToCString(DISALLOW_NULLS);
LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, *data)); LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
OProfileAgent::CreateNativeCodeRegion(*data, code->address(), *code, *data));
code->ExecutableSize()); OProfileAgent::CreateNativeCodeRegion(*data,
code->instruction_start(),
code->instruction_size());
} else { } else {
LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, "")); LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
*code, ""));
OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script", OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
code->address(), code->ExecutableSize()); code->instruction_start(),
code->instruction_size());
} }
} }
#endif #endif
@ -380,16 +384,18 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
if (line_num > 0) { if (line_num > 0) {
line_num += script->line_offset()->value() + 1; line_num += script->line_offset()->value() + 1;
} }
LOG(CodeCreateEvent("LazyCompile", *code, *func_name, LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
String::cast(script->name()), line_num)); String::cast(script->name()), line_num));
OProfileAgent::CreateNativeCodeRegion(*func_name, OProfileAgent::CreateNativeCodeRegion(*func_name,
String::cast(script->name()), String::cast(script->name()),
line_num, code->address(), line_num,
code->ExecutableSize()); code->instruction_start(),
code->instruction_size());
} else { } else {
LOG(CodeCreateEvent("LazyCompile", *code, *func_name)); LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name));
OProfileAgent::CreateNativeCodeRegion(*func_name, code->address(), OProfileAgent::CreateNativeCodeRegion(*func_name,
code->ExecutableSize()); code->instruction_start(),
code->instruction_size());
} }
} }
#endif #endif

2
deps/v8/src/d8-debug.h

@ -41,7 +41,7 @@ void HandleDebugEvent(DebugEvent event,
Handle<Object> event_data, Handle<Object> event_data,
Handle<Value> data); Handle<Value> data);
// Start the remove debugger connecting to a V8 debugger agent on the specified // Start the remote debugger connecting to a V8 debugger agent on the specified
// port. // port.
void RunRemoteDebugger(int port); void RunRemoteDebugger(int port);

20
deps/v8/src/d8.cc

@ -460,6 +460,16 @@ void Shell::Initialize() {
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
// Set the security token of the debug context to allow access. // Set the security token of the debug context to allow access.
i::Debug::debug_context()->set_security_token(i::Heap::undefined_value()); i::Debug::debug_context()->set_security_token(i::Heap::undefined_value());
// Start the debugger agent if requested.
if (i::FLAG_debugger_agent) {
v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
}
// Start the in-process debugger if requested.
if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
v8::Debug::SetDebugEventListener(HandleDebugEvent);
}
#endif #endif
} }
@ -721,16 +731,6 @@ int Shell::Main(int argc, char* argv[]) {
RunRemoteDebugger(i::FLAG_debugger_port); RunRemoteDebugger(i::FLAG_debugger_port);
return 0; return 0;
} }
// Start the debugger agent if requested.
if (i::FLAG_debugger_agent) {
v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
}
// Start the in-process debugger if requested.
if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
v8::Debug::SetDebugEventListener(HandleDebugEvent);
}
#endif #endif
} }
if (run_shell) if (run_shell)

117
deps/v8/src/d8.js

@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// How crappy is it that I have to implement completely basic stuff
// like this myself? Answer: very.
String.prototype.startsWith = function (str) { String.prototype.startsWith = function (str) {
if (str.length > this.length) if (str.length > this.length)
return false; return false;
@ -100,6 +98,13 @@ Debug.ScriptCompilationType = { Host: 0,
JSON: 2 }; JSON: 2 };
// The different types of scopes matching constants runtime.cc.
Debug.ScopeType = { Global: 0,
Local: 1,
With: 2,
Closure: 3 };
// Current debug state. // Current debug state.
const kNoFrame = -1; const kNoFrame = -1;
Debug.State = { Debug.State = {
@ -124,7 +129,7 @@ function DebugMessageDetails(message) {
} }
function DebugEventDetails(response) { function DebugEventDetails(response) {
details = {text:'', running:false} details = {text:'', running:false};
// Get the running state. // Get the running state.
details.running = response.running(); details.running = response.running();
@ -297,6 +302,14 @@ function DebugRequest(cmd_line) {
this.request_ = this.frameCommandToJSONRequest_(args); this.request_ = this.frameCommandToJSONRequest_(args);
break; break;
case 'scopes':
this.request_ = this.scopesCommandToJSONRequest_(args);
break;
case 'scope':
this.request_ = this.scopeCommandToJSONRequest_(args);
break;
case 'print': case 'print':
case 'p': case 'p':
this.request_ = this.printCommandToJSONRequest_(args); this.request_ = this.printCommandToJSONRequest_(args);
@ -396,13 +409,17 @@ DebugRequest.prototype.createRequest = function(command) {
// Create a JSON request for the evaluation command. // Create a JSON request for the evaluation command.
DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) { DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
// Global varaible used to store whether a handle was requested.
lookup_handle = null;
// Check if the expression is a handle id in the form #<handle>#. // Check if the expression is a handle id in the form #<handle>#.
var handle_match = expression.match(/^#([0-9]*)#$/); var handle_match = expression.match(/^#([0-9]*)#$/);
if (handle_match) { if (handle_match) {
// Remember the handle requested in a global variable.
lookup_handle = parseInt(handle_match[1]);
// Build a lookup request. // Build a lookup request.
var request = this.createRequest('lookup'); var request = this.createRequest('lookup');
request.arguments = {}; request.arguments = {};
request.arguments.handle = parseInt(handle_match[1]); request.arguments.handles = [ lookup_handle ];
return request.toJSONProtocol(); return request.toJSONProtocol();
} else { } else {
// Build an evaluate request. // Build an evaluate request.
@ -561,6 +578,27 @@ DebugRequest.prototype.frameCommandToJSONRequest_ = function(args) {
}; };
// Create a JSON request for the scopes command.
DebugRequest.prototype.scopesCommandToJSONRequest_ = function(args) {
// Build a scopes request from the text command.
var request = this.createRequest('scopes');
return request.toJSONProtocol();
};
// Create a JSON request for the scope command.
DebugRequest.prototype.scopeCommandToJSONRequest_ = function(args) {
// Build a scope request from the text command.
var request = this.createRequest('scope');
args = args.split(/\s*[ ]+\s*/g);
if (args.length > 0 && args[0].length > 0) {
request.arguments = {};
request.arguments.number = args[0];
}
return request.toJSONProtocol();
};
// Create a JSON request for the print command. // Create a JSON request for the print command.
DebugRequest.prototype.printCommandToJSONRequest_ = function(args) { DebugRequest.prototype.printCommandToJSONRequest_ = function(args) {
// Build an evaluate request from the text command. // Build an evaluate request from the text command.
@ -785,8 +823,11 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('clear <breakpoint #>'); print('clear <breakpoint #>');
print('backtrace [n] | [-n] | [from to]'); print('backtrace [n] | [-n] | [from to]');
print('frame <frame #>'); print('frame <frame #>');
print('scopes');
print('scope <scope #>');
print('step [in | next | out| min [step count]]'); print('step [in | next | out| min [step count]]');
print('print <expression>'); print('print <expression>');
print('dir <expression>');
print('source [from line [num lines]]'); print('source [from line [num lines]]');
print('scripts'); print('scripts');
print('continue'); print('continue');
@ -796,7 +837,11 @@ DebugRequest.prototype.helpCommand_ = function(args) {
function formatHandleReference_(value) { function formatHandleReference_(value) {
if (value.handle() >= 0) {
return '#' + value.handle() + '#'; return '#' + value.handle() + '#';
} else {
return '#Transient#';
}
} }
@ -820,11 +865,15 @@ function formatObject_(value, include_properties) {
result += value.propertyName(i); result += value.propertyName(i);
result += ': '; result += ': ';
var property_value = value.propertyValue(i); var property_value = value.propertyValue(i);
if (property_value instanceof ProtocolReference) {
result += '<no type>';
} else {
if (property_value && property_value.type()) { if (property_value && property_value.type()) {
result += property_value.type(); result += property_value.type();
} else { } else {
result += '<no type>'; result += '<no type>';
} }
}
result += ' '; result += ' ';
result += formatHandleReference_(property_value); result += formatHandleReference_(property_value);
result += '\n'; result += '\n';
@ -834,6 +883,33 @@ function formatObject_(value, include_properties) {
} }
function formatScope_(scope) {
var result = '';
var index = scope.index;
result += '#' + (index <= 9 ? '0' : '') + index;
result += ' ';
switch (scope.type) {
case Debug.ScopeType.Global:
result += 'Global, ';
result += '#' + scope.object.ref + '#';
break;
case Debug.ScopeType.Local:
result += 'Local';
break;
case Debug.ScopeType.With:
result += 'With, ';
result += '#' + scope.object.ref + '#';
break;
case Debug.ScopeType.Closure:
result += 'Closure';
break;
default:
result += 'UNKNOWN';
}
return result;
}
// Convert a JSON response to text for display in a text based debugger. // Convert a JSON response to text for display in a text based debugger.
function DebugResponseDetails(response) { function DebugResponseDetails(response) {
details = {text:'', running:false} details = {text:'', running:false}
@ -883,12 +959,41 @@ function DebugResponseDetails(response) {
Debug.State.currentFrame = body.index; Debug.State.currentFrame = body.index;
break; break;
case 'scopes':
if (body.totalScopes == 0) {
result = '(no scopes)';
} else {
result = 'Scopes #' + body.fromScope + ' to #' +
(body.toScope - 1) + ' of ' + body.totalScopes + '\n';
for (i = 0; i < body.scopes.length; i++) {
if (i != 0) {
result += '\n';
}
result += formatScope_(body.scopes[i]);
}
}
details.text = result;
break;
case 'scope':
result += formatScope_(body);
result += '\n';
var scope_object_value = response.lookup(body.object.ref);
result += formatObject_(scope_object_value, true);
details.text = result;
break;
case 'evaluate': case 'evaluate':
case 'lookup': case 'lookup':
if (last_cmd == 'p' || last_cmd == 'print') { if (last_cmd == 'p' || last_cmd == 'print') {
result = body.text; result = body.text;
} else { } else {
var value = response.bodyValue(); var value;
if (lookup_handle) {
value = response.bodyValue(lookup_handle);
} else {
value = response.bodyValue();
}
if (value.isObject()) { if (value.isObject()) {
result += formatObject_(value, true); result += formatObject_(value, true);
} else { } else {
@ -1105,7 +1210,7 @@ ProtocolPackage.prototype.body = function() {
ProtocolPackage.prototype.bodyValue = function(index) { ProtocolPackage.prototype.bodyValue = function(index) {
if (index) { if (index != null) {
return new ProtocolValue(this.packet_.body[index], this); return new ProtocolValue(this.packet_.body[index], this);
} else { } else {
return new ProtocolValue(this.packet_.body, this); return new ProtocolValue(this.packet_.body, this);

73
deps/v8/src/date-delay.js

@ -129,23 +129,82 @@ function EquivalentTime(t) {
// (measured in whole seconds based on the 1970 epoch). // (measured in whole seconds based on the 1970 epoch).
// We solve this by mapping the time to a year with same leap-year-ness // We solve this by mapping the time to a year with same leap-year-ness
// and same starting day for the year. The ECMAscript specification says // and same starting day for the year. The ECMAscript specification says
// we must do this, but for compatability with other browsers, we use // we must do this, but for compatibility with other browsers, we use
// the actual year if it is in the range 1970..2037 // the actual year if it is in the range 1970..2037
if (t >= 0 && t <= 2.1e12) return t; if (t >= 0 && t <= 2.1e12) return t;
var day = MakeDay(EquivalentYear(YearFromTime(t)), MonthFromTime(t), DateFromTime(t)); var day = MakeDay(EquivalentYear(YearFromTime(t)), MonthFromTime(t), DateFromTime(t));
return TimeClip(MakeDate(day, TimeWithinDay(t))); return TimeClip(MakeDate(day, TimeWithinDay(t)));
} }
var daylight_cache_time = $NaN;
var daylight_cache_offset; // Because computing the DST offset is a pretty expensive operation
// we keep a cache of last computed offset along with a time interval
// where we know the cache is valid.
var DST_offset_cache = {
// Cached DST offset.
offset: 0,
// Time interval where the cached offset is valid.
start: 0, end: -1,
// Size of next interval expansion.
increment: 0
};
function DaylightSavingsOffset(t) { function DaylightSavingsOffset(t) {
if (t == daylight_cache_time) { // Load the cache object from the builtins object.
return daylight_cache_offset; var cache = DST_offset_cache;
// Cache the start and the end in local variables for fast access.
var start = cache.start;
var end = cache.end;
if (start <= t) {
// If the time fits in the cached interval, return the cached offset.
if (t <= end) return cache.offset;
// Compute a possible new interval end.
var new_end = end + cache.increment;
if (t <= new_end) {
var end_offset = %DateDaylightSavingsOffset(EquivalentTime(new_end));
if (cache.offset == end_offset) {
// If the offset at the end of the new interval still matches
// the offset in the cache, we grow the cached time interval
// and return the offset.
cache.end = new_end;
cache.increment = msPerMonth;
return end_offset;
} else {
var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
if (offset == end_offset) {
// The offset at the given time is equal to the offset at the
// new end of the interval, so that means that we've just skipped
// the point in time where the DST offset change occurred. Updated
// the interval to reflect this and reset the increment.
cache.start = t;
cache.end = new_end;
cache.increment = msPerMonth;
} else {
// The interval contains a DST offset change and the given time is
// before it. Adjust the increment to avoid a linear search for
// the offset change point and change the end of the interval.
cache.increment /= 3;
cache.end = t;
}
// Update the offset in the cache and return it.
cache.offset = offset;
return offset;
}
} }
}
// Compute the DST offset for the time and shrink the cache interval
// to only contain the time. This allows fast repeated DST offset
// computations for the same time.
var offset = %DateDaylightSavingsOffset(EquivalentTime(t)); var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
daylight_cache_time = t; cache.offset = offset;
daylight_cache_offset = offset; cache.start = cache.end = t;
cache.increment = msPerMonth;
return offset; return offset;
} }

67
deps/v8/src/debug-delay.js

@ -1208,6 +1208,10 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
this.backtraceRequest_(request, response); this.backtraceRequest_(request, response);
} else if (request.command == 'frame') { } else if (request.command == 'frame') {
this.frameRequest_(request, response); this.frameRequest_(request, response);
} else if (request.command == 'scopes') {
this.scopesRequest_(request, response);
} else if (request.command == 'scope') {
this.scopeRequest_(request, response);
} else if (request.command == 'evaluate') { } else if (request.command == 'evaluate') {
this.evaluateRequest_(request, response); this.evaluateRequest_(request, response);
} else if (request.command == 'lookup') { } else if (request.command == 'lookup') {
@ -1540,7 +1544,7 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
// With no arguments just keep the selected frame. // With no arguments just keep the selected frame.
if (request.arguments) { if (request.arguments) {
index = request.arguments.number; var index = request.arguments.number;
if (index < 0 || this.exec_state_.frameCount() <= index) { if (index < 0 || this.exec_state_.frameCount() <= index) {
return response.failed('Invalid frame number'); return response.failed('Invalid frame number');
} }
@ -1551,6 +1555,67 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
}; };
DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
// Get the frame for which the scope or scopes are requested. With no frameNumber
// argument use the currently selected frame.
if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
frame_index = request.arguments.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
return response.failed('Invalid frame number');
}
return this.exec_state_.frame(frame_index);
} else {
return this.exec_state_.frame();
}
}
DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
// No frames no scopes.
if (this.exec_state_.frameCount() == 0) {
return response.failed('No scopes');
}
// Get the frame for which the scopes are requested.
var frame = this.frameForScopeRequest_(request);
// Fill all scopes for this frame.
var total_scopes = frame.scopeCount();
var scopes = [];
for (var i = 0; i < total_scopes; i++) {
scopes.push(frame.scope(i));
}
response.body = {
fromScope: 0,
toScope: total_scopes,
totalScopes: total_scopes,
scopes: scopes
}
};
DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// No frames no scopes.
if (this.exec_state_.frameCount() == 0) {
return response.failed('No scopes');
}
// Get the frame for which the scope is requested.
var frame = this.frameForScopeRequest_(request);
// With no scope argument just return top scope.
var scope_index = 0;
if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
scope_index = %ToNumber(request.arguments.number);
if (scope_index < 0 || frame.scopeCount() <= scope_index) {
return response.failed('Invalid scope number');
}
}
response.body = frame.scope(scope_index);
};
DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) { DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (!request.arguments) { if (!request.arguments) {
return response.failed('Missing arguments'); return response.failed('Missing arguments');

14
deps/v8/src/debug.cc

@ -382,6 +382,7 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
// the code copy and will therefore have no effect on the running code // the code copy and will therefore have no effect on the running code
// keeping it from using the inlined code. // keeping it from using the inlined code.
if (code->is_keyed_load_stub()) KeyedLoadIC::ClearInlinedVersion(pc()); if (code->is_keyed_load_stub()) KeyedLoadIC::ClearInlinedVersion(pc());
if (code->is_keyed_store_stub()) KeyedStoreIC::ClearInlinedVersion(pc());
} }
} }
@ -389,6 +390,19 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
void BreakLocationIterator::ClearDebugBreakAtIC() { void BreakLocationIterator::ClearDebugBreakAtIC() {
// Patch the code to the original invoke. // Patch the code to the original invoke.
rinfo()->set_target_address(original_rinfo()->target_address()); rinfo()->set_target_address(original_rinfo()->target_address());
RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Address target = original_rinfo()->target_address();
Handle<Code> code(Code::GetCodeFromTargetAddress(target));
// Restore the inlined version of keyed stores to get back to the
// fast case. We need to patch back the keyed store because no
// patching happens when running normally. For keyed loads, the
// map check will get patched back when running normally after ICs
// have been cleared at GC.
if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
}
} }

2
deps/v8/src/flag-definitions.h

@ -332,6 +332,8 @@ DEFINE_bool(log_gc, false,
DEFINE_bool(log_handles, false, "Log global handle events.") DEFINE_bool(log_handles, false, "Log global handle events.")
DEFINE_bool(log_state_changes, false, "Log state changes.") DEFINE_bool(log_state_changes, false, "Log state changes.")
DEFINE_bool(log_suspect, false, "Log suspect operations.") DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(compress_log, false,
"Compress log to save space (makes log less human-readable).")
DEFINE_bool(prof, false, DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).") "Log statistical profiling information (implies --log-code).")
DEFINE_bool(prof_auto, true, DEFINE_bool(prof_auto, true,

25
deps/v8/src/frames-inl.h

@ -43,13 +43,7 @@ namespace internal {
inline Address StackHandler::address() const { inline Address StackHandler::address() const {
// NOTE: There's an obvious problem with the address of the NULL return reinterpret_cast<Address>(const_cast<StackHandler*>(this));
// stack handler. Right now, it benefits us that the subtraction
// leads to a very high address (above everything else on the
// stack), but maybe we should stop relying on it?
const int displacement = StackHandlerConstants::kAddressDisplacement;
Address address = reinterpret_cast<Address>(const_cast<StackHandler*>(this));
return address + displacement;
} }
@ -68,13 +62,7 @@ inline bool StackHandler::includes(Address address) const {
inline void StackHandler::Iterate(ObjectVisitor* v) const { inline void StackHandler::Iterate(ObjectVisitor* v) const {
// Stack handlers do not contain any pointers that need to be // Stack handlers do not contain any pointers that need to be
// traversed. The only field that have to worry about is the code // traversed.
// field which is unused and should always be uninitialized.
#ifdef DEBUG
const int offset = StackHandlerConstants::kCodeOffset;
Object* code = Memory::Object_at(address() + offset);
ASSERT(Smi::cast(code)->value() == StackHandler::kCodeNotPresent);
#endif
} }
@ -122,11 +110,6 @@ inline Object* StandardFrame::context() const {
} }
inline Address StandardFrame::caller_sp() const {
return pp();
}
inline Address StandardFrame::caller_fp() const { inline Address StandardFrame::caller_fp() const {
return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset); return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset);
} }
@ -157,13 +140,13 @@ inline bool StandardFrame::IsConstructFrame(Address fp) {
inline Object* JavaScriptFrame::receiver() const { inline Object* JavaScriptFrame::receiver() const {
const int offset = JavaScriptFrameConstants::kReceiverOffset; const int offset = JavaScriptFrameConstants::kReceiverOffset;
return Memory::Object_at(pp() + offset); return Memory::Object_at(caller_sp() + offset);
} }
inline void JavaScriptFrame::set_receiver(Object* value) { inline void JavaScriptFrame::set_receiver(Object* value) {
const int offset = JavaScriptFrameConstants::kReceiverOffset; const int offset = JavaScriptFrameConstants::kReceiverOffset;
Memory::Object_at(pp() + offset) = value; Memory::Object_at(caller_sp() + offset) = value;
} }

14
deps/v8/src/frames.cc

@ -49,7 +49,9 @@ class StackHandlerIterator BASE_EMBEDDED {
StackHandler* handler() const { return handler_; } StackHandler* handler() const { return handler_; }
bool done() { return handler_->address() > limit_; } bool done() {
return handler_ == NULL || handler_->address() > limit_;
}
void Advance() { void Advance() {
ASSERT(!done()); ASSERT(!done());
handler_ = handler_->next(); handler_ = handler_->next();
@ -398,7 +400,7 @@ Code* ExitFrame::code() const {
void ExitFrame::ComputeCallerState(State* state) const { void ExitFrame::ComputeCallerState(State* state) const {
// Setup the caller state. // Setup the caller state.
state->sp = pp(); state->sp = caller_sp();
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset); state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address state->pc_address
= reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset); = reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset);
@ -406,7 +408,7 @@ void ExitFrame::ComputeCallerState(State* state) const {
Address ExitFrame::GetCallerStackPointer() const { Address ExitFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kPPDisplacement; return fp() + ExitFrameConstants::kCallerSPDisplacement;
} }
@ -451,12 +453,12 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
Object* JavaScriptFrame::GetParameter(int index) const { Object* JavaScriptFrame::GetParameter(int index) const {
ASSERT(index >= 0 && index < ComputeParametersCount()); ASSERT(index >= 0 && index < ComputeParametersCount());
const int offset = JavaScriptFrameConstants::kParam0Offset; const int offset = JavaScriptFrameConstants::kParam0Offset;
return Memory::Object_at(pp() + offset - (index * kPointerSize)); return Memory::Object_at(caller_sp() + offset - (index * kPointerSize));
} }
int JavaScriptFrame::ComputeParametersCount() const { int JavaScriptFrame::ComputeParametersCount() const {
Address base = pp() + JavaScriptFrameConstants::kReceiverOffset; Address base = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset; Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
return (base - limit) / kPointerSize; return (base - limit) / kPointerSize;
} }
@ -681,7 +683,7 @@ void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset; const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset; const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
Object** base = &Memory::Object_at(fp() + kBaseOffset); Object** base = &Memory::Object_at(fp() + kBaseOffset);
Object** limit = &Memory::Object_at(pp() + kLimitOffset) + 1; Object** limit = &Memory::Object_at(caller_sp() + kLimitOffset) + 1;
v->VisitPointers(base, limit); v->VisitPointers(base, limit);
} }

8
deps/v8/src/frames.h

@ -78,9 +78,6 @@ class StackHandler BASE_EMBEDDED {
void Cook(Code* code); void Cook(Code* code);
void Uncook(Code* code); void Uncook(Code* code);
// TODO(1233780): Get rid of the code slot in stack handlers.
static const int kCodeNotPresent = 0;
private: private:
// Accessors. // Accessors.
inline State state() const; inline State state() const;
@ -132,7 +129,7 @@ class StackFrame BASE_EMBEDDED {
// Accessors. // Accessors.
Address sp() const { return state_.sp; } Address sp() const { return state_.sp; }
Address fp() const { return state_.fp; } Address fp() const { return state_.fp; }
Address pp() const { return GetCallerStackPointer(); } Address caller_sp() const { return GetCallerStackPointer(); }
Address pc() const { return *pc_address(); } Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; } void set_pc(Address pc) { *pc_address() = pc; }
@ -140,7 +137,7 @@ class StackFrame BASE_EMBEDDED {
Address* pc_address() const { return state_.pc_address; } Address* pc_address() const { return state_.pc_address; }
// Get the id of this stack frame. // Get the id of this stack frame.
Id id() const { return static_cast<Id>(OffsetFrom(pp())); } Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
// Checks if this frame includes any stack handlers. // Checks if this frame includes any stack handlers.
bool HasHandler() const; bool HasHandler() const;
@ -337,7 +334,6 @@ class StandardFrame: public StackFrame {
virtual void ComputeCallerState(State* state) const; virtual void ComputeCallerState(State* state) const;
// Accessors. // Accessors.
inline Address caller_sp() const;
inline Address caller_fp() const; inline Address caller_fp() const;
inline Address caller_pc() const; inline Address caller_pc() const;

35
deps/v8/src/heap.cc

@ -79,9 +79,15 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
// semispace_size_ should be a power of 2 and old_generation_size_ should be // semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize. // a multiple of Page::kPageSize.
int Heap::semispace_size_ = 2*MB; #if V8_HOST_ARCH_ARM
int Heap::semispace_size_ = 512*KB;
int Heap::old_generation_size_ = 128*MB;
int Heap::initial_semispace_size_ = 128*KB;
#else
int Heap::semispace_size_ = 8*MB;
int Heap::old_generation_size_ = 512*MB; int Heap::old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 256*KB; int Heap::initial_semispace_size_ = 512*KB;
#endif
GCCallback Heap::global_gc_prologue_callback_ = NULL; GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL; GCCallback Heap::global_gc_epilogue_callback_ = NULL;
@ -90,9 +96,8 @@ GCCallback Heap::global_gc_epilogue_callback_ = NULL;
// ConfigureHeap. // ConfigureHeap.
int Heap::young_generation_size_ = 0; // Will be 2 * semispace_size_. int Heap::young_generation_size_ = 0; // Will be 2 * semispace_size_.
// Double the new space after this many scavenge collections. int Heap::survived_since_last_expansion_ = 0;
int Heap::new_space_growth_limit_ = 8;
int Heap::scavenge_count_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC; Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
int Heap::mc_count_ = 0; int Heap::mc_count_ = 0;
@ -421,7 +426,7 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_promotion_limit_ = old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3); old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ = old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3); old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false; old_gen_exhausted_ = false;
// If we have used the mark-compact collector to collect the new // If we have used the mark-compact collector to collect the new
@ -624,16 +629,17 @@ void Heap::Scavenge() {
// Implements Cheney's copying algorithm // Implements Cheney's copying algorithm
LOG(ResourceEvent("scavenge", "begin")); LOG(ResourceEvent("scavenge", "begin"));
scavenge_count_++; // Used for updating survived_since_last_expansion_ at function end.
int survived_watermark = PromotedSpaceSize();
if (new_space_.Capacity() < new_space_.MaximumCapacity() && if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
scavenge_count_ > new_space_growth_limit_) { survived_since_last_expansion_ > new_space_.Capacity()) {
// Double the size of the new space, and double the limit. The next // Double the size of new space if there is room to grow and enough
// doubling attempt will occur after the current new_space_growth_limit_ // data has survived scavenge since the last expansion.
// more collections.
// TODO(1240712): NewSpace::Double has a return value which is // TODO(1240712): NewSpace::Double has a return value which is
// ignored here. // ignored here.
new_space_.Double(); new_space_.Double();
new_space_growth_limit_ *= 2; survived_since_last_expansion_ = 0;
} }
// Flip the semispaces. After flipping, to space is empty, from space has // Flip the semispaces. After flipping, to space is empty, from space has
@ -737,6 +743,10 @@ void Heap::Scavenge() {
// Set age mark. // Set age mark.
new_space_.set_age_mark(new_space_.top()); new_space_.set_age_mark(new_space_.top());
// Update how much has survived scavenge.
survived_since_last_expansion_ +=
(PromotedSpaceSize() - survived_watermark) + new_space_.Size();
LOG(ResourceEvent("scavenge", "end")); LOG(ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC; gc_state_ = NOT_IN_GC;
@ -1766,7 +1776,6 @@ Object* Heap::CreateCode(const CodeDesc& desc,
// through the self_reference parameter. // through the self_reference parameter.
code->CopyFrom(desc); code->CopyFrom(desc);
if (sinfo != NULL) sinfo->Serialize(code); // write scope info if (sinfo != NULL) sinfo->Serialize(code); // write scope info
LOG(CodeAllocateEvent(code, desc.origin));
#ifdef DEBUG #ifdef DEBUG
code->Verify(); code->Verify();

5
deps/v8/src/heap.h

@ -827,8 +827,9 @@ class Heap : public AllStatic {
static int young_generation_size_; static int young_generation_size_;
static int old_generation_size_; static int old_generation_size_;
static int new_space_growth_limit_; // For keeping track of how much data has survived
static int scavenge_count_; // scavenge since last new space expansion.
static int survived_since_last_expansion_;
static int always_allocate_scope_depth_; static int always_allocate_scope_depth_;
static bool context_disposed_pending_; static bool context_disposed_pending_;

19
deps/v8/src/ia32/assembler-ia32.cc

@ -117,7 +117,8 @@ void CpuFeatures::Probe() {
Object* code = Object* code =
Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL); Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
if (!code->IsCode()) return; if (!code->IsCode()) return;
LOG(CodeCreateEvent("Builtin", Code::cast(code), "CpuFeatures::Probe")); LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)(); typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry()); F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe(); supported_ = probe();
@ -1655,6 +1656,22 @@ void Assembler::fchs() {
} }
void Assembler::fcos() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xFF);
}
void Assembler::fsin() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD9);
EMIT(0xFE);
}
void Assembler::fadd(int i) { void Assembler::fadd(int i) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;

2
deps/v8/src/ia32/assembler-ia32.h

@ -658,6 +658,8 @@ class Assembler : public Malloced {
void fabs(); void fabs();
void fchs(); void fchs();
void fcos();
void fsin();
void fadd(int i); void fadd(int i);
void fsub(int i); void fsub(int i);

10
deps/v8/src/ia32/codegen-ia32-inl.h

@ -39,6 +39,16 @@ namespace internal {
void DeferredCode::Jump() { __ jmp(&entry_label_); } void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); } void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
GenerateFastMathOp(SIN, args);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
GenerateFastMathOp(COS, args);
}
#undef __ #undef __
} } // namespace v8::internal } } // namespace v8::internal

676
deps/v8/src/ia32/codegen-ia32.cc

File diff suppressed because it is too large

9
deps/v8/src/ia32/codegen-ia32.h

@ -518,6 +518,15 @@ class CodeGenerator: public AstVisitor {
void GenerateGetFramePointer(ZoneList<Expression*>* args); void GenerateGetFramePointer(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
enum MathOp { SIN, COS };
void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods and constants for fast case switch statement support. // Methods and constants for fast case switch statement support.
// //
// Only allow fast-case switch if the range of labels is at most // Only allow fast-case switch if the range of labels is at most

173
deps/v8/src/ia32/frames-ia32.h

@ -55,16 +55,10 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
class StackHandlerConstants : public AllStatic { class StackHandlerConstants : public AllStatic {
public: public:
static const int kNextOffset = 0 * kPointerSize; static const int kNextOffset = 0 * kPointerSize;
static const int kPPOffset = 1 * kPointerSize; static const int kFPOffset = 1 * kPointerSize;
static const int kFPOffset = 2 * kPointerSize; static const int kStateOffset = 2 * kPointerSize;
static const int kPCOffset = 3 * kPointerSize;
// TODO(1233780): Get rid of the code slot in stack handlers.
static const int kCodeOffset = 3 * kPointerSize;
static const int kStateOffset = 4 * kPointerSize;
static const int kPCOffset = 5 * kPointerSize;
static const int kAddressDisplacement = -1 * kPointerSize;
static const int kSize = kPCOffset + kPointerSize; static const int kSize = kPCOffset + kPointerSize;
}; };
@ -85,12 +79,12 @@ class ExitFrameConstants : public AllStatic {
static const int kDebugMarkOffset = -2 * kPointerSize; static const int kDebugMarkOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize; static const int kSPOffset = -1 * kPointerSize;
// Let the parameters pointer for exit frames point just below the
// frame structure on the stack (frame pointer and return address).
static const int kPPDisplacement = +2 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize; static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize; static const int kCallerPCOffset = +1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = +2 * kPointerSize;
}; };
@ -112,7 +106,7 @@ class JavaScriptFrameConstants : public AllStatic {
static const int kSavedRegistersOffset = +2 * kPointerSize; static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset; static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// CallerSP-relative (aka PP-relative) // Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize; static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize; static const int kReceiverOffset = -1 * kPointerSize;
}; };
@ -136,157 +130,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
} }
// ----------------------------------------------------
// C Entry frames:
// lower | Stack |
// addresses | ^ |
// | | |
// | |
// +-------------+
// | entry_pc |
// +-------------+ <--+ entry_sp
// . |
// . |
// . |
// +-------------+ |
// -3 | entry_sp --+----+
// e +-------------+
// n -2 | C function |
// t +-------------+
// r -1 | caller_pp |
// y +-------------+ <--- fp (frame pointer, ebp)
// 0 | caller_fp |
// f +-------------+
// r 1 | caller_pc |
// a +-------------+ <--- caller_sp (stack pointer, esp)
// m 2 | |
// e | arguments |
// | |
// +- - - - - - -+
// | argument0 |
// +=============+
// | |
// | caller |
// higher | expressions |
// addresses | |
// Proper JS frames:
// lower | Stack |
// addresses | ^ |
// | | |
// | |
// ----------- +=============+ <--- sp (stack pointer, esp)
// | function |
// +-------------+
// | |
// | expressions |
// | |
// +-------------+
// a | |
// c | locals |
// t | |
// i +- - - - - - -+ <---
// v -4 | local0 | ^
// a +-------------+ |
// t -3 | code | |
// i +-------------+ |
// o -2 | context | | kLocal0Offset
// n +-------------+ |
// -1 | caller_pp | v
// f +-------------+ <--- fp (frame pointer, ebp)
// r 0 | caller_fp |
// a +-------------+
// m 1 | caller_pc |
// e +-------------+ <--- caller_sp (incl. parameters)
// 2 | |
// | parameters |
// | |
// +- - - - - - -+ <---
// -2 | parameter0 | ^
// +-------------+ | kParam0Offset
// -1 | receiver | v
// ----------- +=============+ <--- pp (parameter pointer, edi)
// 0 | function |
// +-------------+
// | |
// | caller |
// higher | expressions |
// addresses | |
// JS entry frames: When calling from C to JS, we construct two extra
// frames: An entry frame (C) and a trampoline frame (JS). The
// following pictures shows the two frames:
// lower | Stack |
// addresses | ^ |
// | | |
// | |
// ----------- +=============+ <--- sp (stack pointer, esp)
// | |
// | parameters |
// t | |
// r +- - - - - - -+
// a | parameter0 |
// m +-------------+
// p | receiver |
// o +-------------+ <---
// l | function | ^
// i +-------------+ |
// n -3 | code | | kLocal0Offset
// e +-------------+
// -2 | NULL | context is always NULL
// +-------------+
// f -1 | NULL | caller pp is always NULL for entry frames
// r +-------------+ <--- fp (frame pointer, ebp)
// a 0 | caller fp |
// m +-------------+
// e 1 | caller pc |
// +-------------+ <--- caller_sp (incl. parameters)
// | 0 |
// ----------- +=============+ <--- pp (parameter pointer, edi)
// | 0 |
// +-------------+ <---
// . ^
// . | try-handler (HandlerOffsets::kSize)
// . v
// +-------------+ <---
// -5 | next top pp |
// +-------------+
// e -4 | next top fp |
// n +-------------+ <---
// t -3 | ebx | ^
// r +-------------+ |
// y -2 | esi | | callee-saved registers
// +-------------+ |
// -1 | edi | v
// f +-------------+ <--- fp
// r 0 | caller fp |
// a +-------------+ pp == NULL (parameter pointer)
// m 1 | caller pc |
// e +-------------+ <--- caller sp
// 2 | code entry | ^
// +-------------+ |
// 3 | function | |
// +-------------+ | arguments passed from C code
// 4 | receiver | |
// +-------------+ |
// 5 | argc | |
// +-------------+ |
// 6 | argv | v
// +-------------+ <---
// | |
// higher | |
// addresses | |
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_IA32_FRAMES_IA32_H_ #endif // V8_IA32_FRAMES_IA32_H_

27
deps/v8/src/ia32/ic-ia32.cc

@ -747,6 +747,21 @@ void KeyedLoadIC::ClearInlinedVersion(Address address) {
} }
void KeyedStoreIC::ClearInlinedVersion(Address address) {
// Insert null as the elements map to check for. This will make
// sure that the elements fast-case map check fails so that control
// flows to the IC instead of the inlined version.
PatchInlinedStore(address, Heap::null_value());
}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
// Restore the fast-case elements map check so that the inlined
// version can be used again.
PatchInlinedStore(address, Heap::fixed_array_map());
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) { bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call. // The address of the instruction following the call.
Address test_instruction_address = address + 4; Address test_instruction_address = address + 4;
@ -774,7 +789,7 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
} }
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { static bool PatchInlinedMapCheck(Address address, Object* map) {
Address test_instruction_address = address + 4; // 4 = stub address Address test_instruction_address = address + 4; // 4 = stub address
// The keyed load has a fast inlined case if the IC call instruction // The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction. // is immediately followed by a test instruction.
@ -795,6 +810,16 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
} }
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return PatchInlinedMapCheck(address, map);
}
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
return PatchInlinedMapCheck(address, map);
}
// Defined in ic.cc. // Defined in ic.cc.
Object* KeyedLoadIC_Miss(Arguments args); Object* KeyedLoadIC_Miss(Arguments args);

6
deps/v8/src/ia32/jump-target-ia32.cc

@ -164,7 +164,7 @@ void JumpTarget::Call() {
} }
void JumpTarget::DoBind(int mergable_elements) { void JumpTarget::DoBind() {
ASSERT(cgen() != NULL); ASSERT(cgen() != NULL);
ASSERT(!is_bound()); ASSERT(!is_bound());
@ -210,7 +210,7 @@ void JumpTarget::DoBind(int mergable_elements) {
// Fast case: no forward jumps, possible backward ones. Remove // Fast case: no forward jumps, possible backward ones. Remove
// constants and copies above the watermark on the fall-through // constants and copies above the watermark on the fall-through
// frame and use it as the entry frame. // frame and use it as the entry frame.
cgen()->frame()->MakeMergable(mergable_elements); cgen()->frame()->MakeMergable();
entry_frame_ = new VirtualFrame(cgen()->frame()); entry_frame_ = new VirtualFrame(cgen()->frame());
} }
__ bind(&entry_label_); __ bind(&entry_label_);
@ -252,7 +252,7 @@ void JumpTarget::DoBind(int mergable_elements) {
} }
// Compute the frame to use for entry to the block. // Compute the frame to use for entry to the block.
ComputeEntryFrame(mergable_elements); ComputeEntryFrame();
// Some moves required to merge to an expected frame require purely // Some moves required to merge to an expected frame require purely
// frame state changes, and do not require any code generation. // frame state changes, and do not require any code generation.

24
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -358,7 +358,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG); ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
// Setup the frame structure on the stack. // Setup the frame structure on the stack.
ASSERT(ExitFrameConstants::kPPDisplacement == +2 * kPointerSize); ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(ebp); push(ebp);
@ -448,7 +448,8 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
void MacroAssembler::PushTryHandler(CodeLocation try_location, void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) { HandlerType type) {
ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code // Adjust this code if not the case.
ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// The pc (return address) is already on TOS. // The pc (return address) is already on TOS.
if (try_location == IN_JAVASCRIPT) { if (try_location == IN_JAVASCRIPT) {
if (type == TRY_CATCH_HANDLER) { if (type == TRY_CATCH_HANDLER) {
@ -456,23 +457,18 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
} else { } else {
push(Immediate(StackHandler::TRY_FINALLY)); push(Immediate(StackHandler::TRY_FINALLY));
} }
push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
push(ebp); push(ebp);
push(edi);
} else { } else {
ASSERT(try_location == IN_JS_ENTRY); ASSERT(try_location == IN_JS_ENTRY);
// The parameter pointer is meaningless here and ebp does not // The frame pointer does not point to a JS frame so we save NULL
// point to a JS frame. So we save NULL for both pp and ebp. We // for ebp. We expect the code throwing an exception to check ebp
// expect the code throwing an exception to check ebp before // before dereferencing it to restore the context.
// dereferencing it to restore the context.
push(Immediate(StackHandler::ENTRY)); push(Immediate(StackHandler::ENTRY));
push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent))); push(Immediate(0)); // NULL frame pointer.
push(Immediate(0)); // NULL frame pointer
push(Immediate(0)); // NULL parameter pointer
} }
// Cached TOS. // Save the current handler as the next handler.
mov(eax, Operand::StaticVariable(ExternalReference(Top::k_handler_address))); push(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
// Link this handler. // Link this handler as the new current one.
mov(Operand::StaticVariable(ExternalReference(Top::k_handler_address)), esp); mov(Operand::StaticVariable(ExternalReference(Top::k_handler_address)), esp);
} }

7
deps/v8/src/ia32/macro-assembler-ia32.h

@ -154,9 +154,8 @@ class MacroAssembler: public Assembler {
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Exception handling // Exception handling
// Push a new try handler and link into try handler chain. // Push a new try handler and link into try handler chain. The return
// The return address must be pushed before calling this helper. // address must be pushed before calling this helper.
// On exit, eax contains TOS (next_sp).
void PushTryHandler(CodeLocation try_location, HandlerType type); void PushTryHandler(CodeLocation try_location, HandlerType type);
@ -286,7 +285,7 @@ class MacroAssembler: public Assembler {
List<Unresolved> unresolved_; List<Unresolved> unresolved_;
bool generating_stub_; bool generating_stub_;
bool allow_stub_calls_; bool allow_stub_calls_;
Handle<Object> code_object_; // This handle will be patched with the code Handle<Object> code_object_; // This handle will be patched with the
// code object on installation. // code object on installation.
// Helper functions for generating invokes. // Helper functions for generating invokes.

20
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -174,14 +174,8 @@ void VirtualFrame::SyncRange(int begin, int end) {
} }
void VirtualFrame::MakeMergable(int mergable_elements) { void VirtualFrame::MakeMergable() {
if (mergable_elements == JumpTarget::kAllElements) { for (int i = 0; i < element_count(); i++) {
mergable_elements = element_count();
}
ASSERT(mergable_elements <= element_count());
int start_index = element_count() - mergable_elements;
for (int i = start_index; i < element_count(); i++) {
FrameElement element = elements_[i]; FrameElement element = elements_[i];
if (element.is_constant() || element.is_copy()) { if (element.is_constant() || element.is_copy()) {
@ -775,14 +769,10 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
void VirtualFrame::PushTryHandler(HandlerType type) { void VirtualFrame::PushTryHandler(HandlerType type) {
ASSERT(cgen()->HasValidEntryRegisters()); ASSERT(cgen()->HasValidEntryRegisters());
// Grow the expression stack by handler size less two (the return address // Grow the expression stack by handler size less one (the return
// is already pushed by a call instruction, and PushTryHandler from the // address is already pushed by a call instruction).
// macro assembler will leave the top of stack in the eax register to be Adjust(kHandlerSize - 1);
// pushed separately).
Adjust(kHandlerSize - 2);
__ PushTryHandler(IN_JAVASCRIPT, type); __ PushTryHandler(IN_JAVASCRIPT, type);
// TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
EmitPush(eax);
} }

7
deps/v8/src/ia32/virtual-frame-ia32.h

@ -153,11 +153,8 @@ class VirtualFrame : public ZoneObject {
void SyncRange(int begin, int end); void SyncRange(int begin, int end);
// Make this frame so that an arbitrary frame of the same height can // Make this frame so that an arbitrary frame of the same height can
// be merged to it. Copies and constants are removed from the // be merged to it. Copies and constants are removed from the frame.
// topmost mergable_elements elements of the frame. A void MakeMergable();
// mergable_elements of JumpTarget::kAllElements indicates constants
// and copies are should be removed from the entire frame.
void MakeMergable(int mergable_elements);
// Prepare this virtual frame for merging to an expected frame by // Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating // performing some state changes that do not require generating

29
deps/v8/src/ic.cc

@ -849,6 +849,20 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
} }
static bool StoreICableLookup(LookupResult* lookup) {
// Bail out if we didn't find a result.
if (!lookup->IsValid() || !lookup->IsCacheable()) return false;
// If the property is read-only, we leave the IC in its current
// state.
if (lookup->IsReadOnly()) return false;
if (!lookup->IsLoaded()) return false;
return true;
}
Object* StoreIC::Store(State state, Object* StoreIC::Store(State state,
Handle<Object> object, Handle<Object> object,
Handle<String> name, Handle<String> name,
@ -873,13 +887,13 @@ Object* StoreIC::Store(State state,
} }
// Lookup the property locally in the receiver. // Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup; LookupResult lookup;
receiver->LocalLookup(*name, &lookup); receiver->LocalLookup(*name, &lookup);
if (StoreICableLookup(&lookup)) {
// Update inline cache and stub cache.
if (FLAG_use_ic && lookup.IsLoaded()) {
UpdateCaches(&lookup, state, receiver, name, value); UpdateCaches(&lookup, state, receiver, name, value);
} }
}
// Set the property. // Set the property.
return receiver->SetProperty(*name, *value, NONE); return receiver->SetProperty(*name, *value, NONE);
@ -893,14 +907,9 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Object> value) { Handle<Object> value) {
ASSERT(lookup->IsLoaded()); ASSERT(lookup->IsLoaded());
// Skip JSGlobalProxy. // Skip JSGlobalProxy.
if (receiver->IsJSGlobalProxy()) return; ASSERT(!receiver->IsJSGlobalProxy());
// Bail out if we didn't find a result.
if (!lookup->IsValid() || !lookup->IsCacheable()) return;
// If the property is read-only, we leave the IC in its current ASSERT(StoreICableLookup(lookup));
// state.
if (lookup->IsReadOnly()) return;
// If the property has a non-field type allowing map transitions // If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its // where there is extra room in the object, we leave the IC in its

11
deps/v8/src/ic.h

@ -356,6 +356,12 @@ class KeyedStoreIC: public IC {
static void GenerateGeneric(MacroAssembler* masm); static void GenerateGeneric(MacroAssembler* masm);
static void GenerateExtendStorage(MacroAssembler* masm); static void GenerateExtendStorage(MacroAssembler* masm);
// Clear the inlined version so the IC is always hit.
static void ClearInlinedVersion(Address address);
// Restore the inlined version so the fast case can get hit.
static void RestoreInlinedVersion(Address address);
private: private:
static void Generate(MacroAssembler* masm, const ExternalReference& f); static void Generate(MacroAssembler* masm, const ExternalReference& f);
@ -378,6 +384,11 @@ class KeyedStoreIC: public IC {
} }
static void Clear(Address address, Code* target); static void Clear(Address address, Code* target);
// Support for patching the map that is checked in an inlined
// version of keyed store.
static bool PatchInlinedStore(Address address, Object* map);
friend class IC; friend class IC;
}; };

173
deps/v8/src/jump-target.cc

@ -48,7 +48,7 @@ void JumpTarget::Unuse() {
} }
void JumpTarget::ComputeEntryFrame(int mergable_elements) { void JumpTarget::ComputeEntryFrame() {
// Given: a collection of frames reaching by forward CFG edges and // Given: a collection of frames reaching by forward CFG edges and
// the directionality of the block. Compute: an entry frame for the // the directionality of the block. Compute: an entry frame for the
// block. // block.
@ -77,14 +77,6 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
int length = initial_frame->element_count(); int length = initial_frame->element_count();
ZoneList<FrameElement*> elements(length); ZoneList<FrameElement*> elements(length);
// Convert the number of mergable elements (counted from the top
// down) to a frame high-water mark (counted from the bottom up).
// Elements strictly above the high-water index will be mergable in
// entry frames for bidirectional jump targets.
int high_water_mark = (mergable_elements == kAllElements)
? VirtualFrame::kIllegalIndex // All frame indices are above this.
: length - mergable_elements - 1; // Top index if m_e == 0.
// Initially populate the list of elements based on the initial // Initially populate the list of elements based on the initial
// frame. // frame.
for (int i = 0; i < length; i++) { for (int i = 0; i < length; i++) {
@ -92,7 +84,7 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
// We do not allow copies or constants in bidirectional frames. All // We do not allow copies or constants in bidirectional frames. All
// elements above the water mark on bidirectional frames have // elements above the water mark on bidirectional frames have
// unknown static types. // unknown static types.
if (direction_ == BIDIRECTIONAL && i > high_water_mark) { if (direction_ == BIDIRECTIONAL) {
if (element.is_constant() || element.is_copy()) { if (element.is_constant() || element.is_copy()) {
elements.Add(NULL); elements.Add(NULL);
continue; continue;
@ -158,7 +150,7 @@ void JumpTarget::ComputeEntryFrame(int mergable_elements) {
int best_reg_num = RegisterAllocator::kInvalidRegister; int best_reg_num = RegisterAllocator::kInvalidRegister;
StaticType type; // Initially invalid. StaticType type; // Initially invalid.
if (direction_ != BIDIRECTIONAL || i < high_water_mark) { if (direction_ != BIDIRECTIONAL) {
type = reaching_frames_[0]->elements_[i].static_type(); type = reaching_frames_[0]->elements_[i].static_type();
} }
@ -241,25 +233,6 @@ void JumpTarget::Jump(Result* arg) {
} }
void JumpTarget::Jump(Result* arg0, Result* arg1) {
ASSERT(cgen()->has_valid_frame());
cgen()->frame()->Push(arg0);
cgen()->frame()->Push(arg1);
DoJump();
}
void JumpTarget::Jump(Result* arg0, Result* arg1, Result* arg2) {
ASSERT(cgen()->has_valid_frame());
cgen()->frame()->Push(arg0);
cgen()->frame()->Push(arg1);
cgen()->frame()->Push(arg2);
DoJump();
}
void JumpTarget::Branch(Condition cc, Hint hint) { void JumpTarget::Branch(Condition cc, Hint hint) {
DoBranch(cc, hint); DoBranch(cc, hint);
} }
@ -295,84 +268,6 @@ void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
} }
void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
ASSERT(cgen()->frame() != NULL);
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg0);
DECLARE_ARGCHECK_VARS(arg1);
cgen()->frame()->Push(arg0);
cgen()->frame()->Push(arg1);
DoBranch(cc, hint);
*arg1 = cgen()->frame()->Pop();
*arg0 = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg0);
ASSERT_ARGCHECK(arg1);
}
void JumpTarget::Branch(Condition cc,
Result* arg0,
Result* arg1,
Result* arg2,
Hint hint) {
ASSERT(cgen()->frame() != NULL);
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg0);
DECLARE_ARGCHECK_VARS(arg1);
DECLARE_ARGCHECK_VARS(arg2);
cgen()->frame()->Push(arg0);
cgen()->frame()->Push(arg1);
cgen()->frame()->Push(arg2);
DoBranch(cc, hint);
*arg2 = cgen()->frame()->Pop();
*arg1 = cgen()->frame()->Pop();
*arg0 = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg0);
ASSERT_ARGCHECK(arg1);
ASSERT_ARGCHECK(arg2);
}
void JumpTarget::Branch(Condition cc,
Result* arg0,
Result* arg1,
Result* arg2,
Result* arg3,
Hint hint) {
ASSERT(cgen()->frame() != NULL);
// We want to check that non-frame registers at the call site stay in
// the same registers on the fall-through branch.
DECLARE_ARGCHECK_VARS(arg0);
DECLARE_ARGCHECK_VARS(arg1);
DECLARE_ARGCHECK_VARS(arg2);
DECLARE_ARGCHECK_VARS(arg3);
cgen()->frame()->Push(arg0);
cgen()->frame()->Push(arg1);
cgen()->frame()->Push(arg2);
cgen()->frame()->Push(arg3);
DoBranch(cc, hint);
*arg3 = cgen()->frame()->Pop();
*arg2 = cgen()->frame()->Pop();
*arg1 = cgen()->frame()->Pop();
*arg0 = cgen()->frame()->Pop();
ASSERT_ARGCHECK(arg0);
ASSERT_ARGCHECK(arg1);
ASSERT_ARGCHECK(arg2);
ASSERT_ARGCHECK(arg3);
}
void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) { void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
ASSERT(cgen()->has_valid_frame()); ASSERT(cgen()->has_valid_frame());
@ -400,66 +295,20 @@ void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
#undef ASSERT_ARGCHECK #undef ASSERT_ARGCHECK
void JumpTarget::Bind(int mergable_elements) { void JumpTarget::Bind() {
DoBind(mergable_elements); DoBind();
} }
void JumpTarget::Bind(Result* arg, int mergable_elements) { void JumpTarget::Bind(Result* arg) {
if (cgen()->has_valid_frame()) { if (cgen()->has_valid_frame()) {
cgen()->frame()->Push(arg); cgen()->frame()->Push(arg);
} }
DoBind(mergable_elements); DoBind();
*arg = cgen()->frame()->Pop(); *arg = cgen()->frame()->Pop();
} }
// Bind this target, passing two Result arguments through the entry
// frame.  If there is a fall-through frame at the binding site, the
// arguments are pushed onto it before binding so they merge with the
// frames of forward jumps; they are popped back out afterwards.
void JumpTarget::Bind(Result* arg0, Result* arg1, int mergable_elements) {
  if (cgen()->has_valid_frame()) {
    cgen()->frame()->Push(arg0);
    cgen()->frame()->Push(arg1);
  }
  DoBind(mergable_elements);
  // Pop in reverse order of the pushes.
  *arg1 = cgen()->frame()->Pop();
  *arg0 = cgen()->frame()->Pop();
}
// Bind this target, passing three Result arguments through the entry
// frame.  Same protocol as the two-argument overload: push onto the
// fall-through frame (if any) before binding, pop after.
void JumpTarget::Bind(Result* arg0,
                      Result* arg1,
                      Result* arg2,
                      int mergable_elements) {
  if (cgen()->has_valid_frame()) {
    cgen()->frame()->Push(arg0);
    cgen()->frame()->Push(arg1);
    cgen()->frame()->Push(arg2);
  }
  DoBind(mergable_elements);
  // Pop in reverse order of the pushes.
  *arg2 = cgen()->frame()->Pop();
  *arg1 = cgen()->frame()->Pop();
  *arg0 = cgen()->frame()->Pop();
}
// Bind this target, passing four Result arguments through the entry
// frame.  Same protocol as the two-argument overload: push onto the
// fall-through frame (if any) before binding, pop after.
void JumpTarget::Bind(Result* arg0,
                      Result* arg1,
                      Result* arg2,
                      Result* arg3,
                      int mergable_elements) {
  if (cgen()->has_valid_frame()) {
    cgen()->frame()->Push(arg0);
    cgen()->frame()->Push(arg1);
    cgen()->frame()->Push(arg2);
    cgen()->frame()->Push(arg3);
  }
  DoBind(mergable_elements);
  // Pop in reverse order of the pushes.
  *arg3 = cgen()->frame()->Pop();
  *arg2 = cgen()->frame()->Pop();
  *arg1 = cgen()->frame()->Pop();
  *arg0 = cgen()->frame()->Pop();
}
void JumpTarget::AddReachingFrame(VirtualFrame* frame) { void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
ASSERT(reaching_frames_.length() == merge_labels_.length()); ASSERT(reaching_frames_.length() == merge_labels_.length());
ASSERT(entry_frame_ == NULL); ASSERT(entry_frame_ == NULL);
@ -531,7 +380,7 @@ void BreakTarget::Branch(Condition cc, Hint hint) {
} }
void BreakTarget::Bind(int mergable_elements) { void BreakTarget::Bind() {
#ifdef DEBUG #ifdef DEBUG
// All the forward-reaching frames should have been adjusted at the // All the forward-reaching frames should have been adjusted at the
// jumps to this target. // jumps to this target.
@ -547,11 +396,11 @@ void BreakTarget::Bind(int mergable_elements) {
int count = cgen()->frame()->height() - expected_height_; int count = cgen()->frame()->height() - expected_height_;
cgen()->frame()->ForgetElements(count); cgen()->frame()->ForgetElements(count);
} }
DoBind(mergable_elements); DoBind();
} }
void BreakTarget::Bind(Result* arg, int mergable_elements) { void BreakTarget::Bind(Result* arg) {
#ifdef DEBUG #ifdef DEBUG
// All the forward-reaching frames should have been adjusted at the // All the forward-reaching frames should have been adjusted at the
// jumps to this target. // jumps to this target.
@ -568,7 +417,7 @@ void BreakTarget::Bind(Result* arg, int mergable_elements) {
cgen()->frame()->ForgetElements(count); cgen()->frame()->ForgetElements(count);
cgen()->frame()->Push(arg); cgen()->frame()->Push(arg);
} }
DoBind(mergable_elements); DoBind();
*arg = cgen()->frame()->Pop(); *arg = cgen()->frame()->Pop();
} }

51
deps/v8/src/jump-target.h

@ -107,52 +107,18 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// jump and there will be no current frame after the jump. // jump and there will be no current frame after the jump.
virtual void Jump(); virtual void Jump();
virtual void Jump(Result* arg); virtual void Jump(Result* arg);
void Jump(Result* arg0, Result* arg1);
void Jump(Result* arg0, Result* arg1, Result* arg2);
// Emit a conditional branch to the target. There must be a current // Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the // frame at the branch. The current frame will fall through to the
// code after the branch. // code after the branch.
virtual void Branch(Condition cc, Hint hint = no_hint); virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint); virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
void Branch(Condition cc, Result* arg0, Result* arg1, Hint hint = no_hint);
void Branch(Condition cc,
Result* arg0,
Result* arg1,
Result* arg2,
Hint hint = no_hint);
void Branch(Condition cc,
Result* arg0,
Result* arg1,
Result* arg2,
Result* arg3,
Hint hint = no_hint);
// Bind a jump target. If there is no current frame at the binding // Bind a jump target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward // site, there must be at least one frame reaching via a forward
// jump. // jump.
// virtual void Bind();
// The number of mergable elements is a number of frame elements virtual void Bind(Result* arg);
// counting from the top down which must be "mergable" (not
// constants or copies) in the entry frame at the jump target.
// Backward jumps to the target must contain the same constants and
// sharing as the entry frame, except for the mergable elements.
//
// A mergable elements argument of kAllElements indicates that all
// frame elements must be mergable. Mergable elements are ignored
// completely for forward-only jump targets.
virtual void Bind(int mergable_elements = kAllElements);
virtual void Bind(Result* arg, int mergable_elements = kAllElements);
void Bind(Result* arg0, Result* arg1, int mergable_elements = kAllElements);
void Bind(Result* arg0,
Result* arg1,
Result* arg2,
int mergable_elements = kAllElements);
void Bind(Result* arg0,
Result* arg1,
Result* arg2,
Result* arg3,
int mergable_elements = kAllElements);
// Emit a call to a jump target. There must be a current frame at // Emit a call to a jump target. There must be a current frame at
// the call. The frame at the target is the same as the current // the call. The frame at the target is the same as the current
@ -160,8 +126,6 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// after the call is the same as the frame before the call. // after the call is the same as the frame before the call.
void Call(); void Call();
static const int kAllElements = -1; // Not a valid number of elements.
static void set_compiling_deferred_code(bool flag) { static void set_compiling_deferred_code(bool flag) {
compiling_deferred_code_ = flag; compiling_deferred_code_ = flag;
} }
@ -188,7 +152,7 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// return values using the virtual frame. // return values using the virtual frame.
void DoJump(); void DoJump();
void DoBranch(Condition cc, Hint hint); void DoBranch(Condition cc, Hint hint);
void DoBind(int mergable_elements); void DoBind();
private: private:
static bool compiling_deferred_code_; static bool compiling_deferred_code_;
@ -202,9 +166,8 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// target. // target.
inline void InitializeEntryElement(int index, FrameElement* target); inline void InitializeEntryElement(int index, FrameElement* target);
// Compute a frame to use for entry to this block. Mergable // Compute a frame to use for entry to this block.
// elements is as described for the Bind function. void ComputeEntryFrame();
void ComputeEntryFrame(int mergable_elements);
DISALLOW_COPY_AND_ASSIGN(JumpTarget); DISALLOW_COPY_AND_ASSIGN(JumpTarget);
}; };
@ -251,8 +214,8 @@ class BreakTarget : public JumpTarget {
// Bind a break target. If there is no current frame at the binding // Bind a break target. If there is no current frame at the binding
// site, there must be at least one frame reaching via a forward // site, there must be at least one frame reaching via a forward
// jump. // jump.
virtual void Bind(int mergable_elements = kAllElements); virtual void Bind();
virtual void Bind(Result* arg, int mergable_elements = kAllElements); virtual void Bind(Result* arg);
// Setter for expected height. // Setter for expected height.
void set_expected_height(int expected) { expected_height_ = expected; } void set_expected_height(int expected) { expected_height_ = expected; }

187
deps/v8/src/log-utils.cc

@ -123,7 +123,7 @@ bool Log::is_stopped_ = false;
Log::WritePtr Log::Write = NULL; Log::WritePtr Log::Write = NULL;
FILE* Log::output_handle_ = NULL; FILE* Log::output_handle_ = NULL;
LogDynamicBuffer* Log::output_buffer_ = NULL; LogDynamicBuffer* Log::output_buffer_ = NULL;
// Must be the same message as in Logger::PauseProfiler // Must be the same message as in Logger::PauseProfiler.
const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n"; const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
Mutex* Log::mutex_ = NULL; Mutex* Log::mutex_ = NULL;
char* Log::message_buffer_ = NULL; char* Log::message_buffer_ = NULL;
@ -173,6 +173,9 @@ void Log::Close() {
} }
Write = NULL; Write = NULL;
DeleteArray(message_buffer_);
message_buffer_ = NULL;
delete mutex_; delete mutex_;
mutex_ = NULL; mutex_ = NULL;
@ -212,13 +215,13 @@ void LogMessageBuilder::Append(const char* format, ...) {
Log::kMessageBufferSize - pos_); Log::kMessageBufferSize - pos_);
va_list args; va_list args;
va_start(args, format); va_start(args, format);
Append(format, args); AppendVA(format, args);
va_end(args); va_end(args);
ASSERT(pos_ <= Log::kMessageBufferSize); ASSERT(pos_ <= Log::kMessageBufferSize);
} }
void LogMessageBuilder::Append(const char* format, va_list args) { void LogMessageBuilder::AppendVA(const char* format, va_list args) {
Vector<char> buf(Log::message_buffer_ + pos_, Vector<char> buf(Log::message_buffer_ + pos_,
Log::kMessageBufferSize - pos_); Log::kMessageBufferSize - pos_);
int result = v8::internal::OS::VSNPrintF(buf, format, args); int result = v8::internal::OS::VSNPrintF(buf, format, args);
@ -250,6 +253,27 @@ void LogMessageBuilder::Append(String* str) {
} }
// Appends an address to the message, compressed (when enabled) as an
// offset from the address appended by the previous call.
void LogMessageBuilder::AppendAddress(Address addr) {
  // NOTE(review): function-local static carries state across calls and
  // across LogMessageBuilder instances — relies on single-threaded
  // logging; confirm no concurrent writers.
  static Address last_address_ = NULL;
  AppendAddress(addr, last_address_);
  last_address_ = addr;
}
// Appends an address, printed either absolutely ("0x...") or, when log
// compression is on and a bias is available, as a signed offset from
// 'bias' ("+..."/"-...").
void LogMessageBuilder::AppendAddress(Address addr, Address bias) {
  if (!FLAG_compress_log || bias == NULL) {
    // No compression possible: emit the absolute address.
    Append("0x%" V8PRIxPTR, addr);
  } else {
    intptr_t delta = addr - bias;
    // To avoid printing negative offsets in an unsigned form,
    // we are printing an absolute value with a sign.
    const char sign = delta >= 0 ? '+' : '-';
    if (sign == '-') { delta = -delta; }
    Append("%c%" V8PRIxPTR, sign, delta);
  }
}
void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) { void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
AssertNoAllocation no_heap_allocation; // Ensure string stay valid. AssertNoAllocation no_heap_allocation; // Ensure string stay valid.
int len = str->length(); int len = str->length();
@ -280,6 +304,24 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
} }
// Hands the message accumulated so far to the compressor.  Returns true
// if it was stored, i.e. it differs from the previously stored record.
bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) {
  return compressor->Store(Vector<const char>(Log::message_buffer_, pos_));
}
// Replaces the current message with 'prefix' followed by a compressed
// version of the previously stored record.  Returns false (leaving only
// the prefix in the buffer) if the compressor has no previous record.
bool LogMessageBuilder::RetrieveCompressedPrevious(
    LogRecordCompressor* compressor, const char* prefix) {
  pos_ = 0;  // Discard whatever was accumulated so far.
  if (prefix[0] != '\0') Append(prefix);
  Vector<char> prev_record(Log::message_buffer_ + pos_,
                           Log::kMessageBufferSize - pos_);
  const bool has_prev = compressor->RetrievePreviousCompressed(&prev_record);
  if (!has_prev) return false;
  // The compressor truncated prev_record to the bytes actually written.
  pos_ += prev_record.length();
  return true;
}
void LogMessageBuilder::WriteToLogFile() { void LogMessageBuilder::WriteToLogFile() {
ASSERT(pos_ <= Log::kMessageBufferSize); ASSERT(pos_ <= Log::kMessageBufferSize);
const int written = Log::Write(Log::message_buffer_, pos_); const int written = Log::Write(Log::message_buffer_, pos_);
@ -297,6 +339,145 @@ void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
} }
} }
// Formatting string for back references to the whole line. E.g. "#2" means
// "the second line above".
const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";
// Formatting string for back references. E.g. "#2:10" means
// "the second line above, start from char 10 (0-based)".
const char* LogRecordCompressor::kBackwardReferenceFormat = "#%d:%d";
// Releases the backing storage of every record slot in the window.
LogRecordCompressor::~LogRecordCompressor() {
  int slot = 0;
  while (slot < buffer_.length()) {
    buffer_[slot].Dispose();
    ++slot;
  }
}
// Returns the number of decimal digits needed to print 'number'
// (restricted to the range [0, 10000) used by backward references).
static int GetNumberLength(int number) {
  ASSERT(number >= 0);
  ASSERT(number < 10000);
  int digits = 1;
  for (int rest = number / 10; rest > 0; rest /= 10) {
    ++digits;
  }
  return digits;
}
// Returns the printed size of a backward reference.
// See kLineBackwardReferenceFormat and kBackwardReferenceFormat.
int LogRecordCompressor::GetBackwardReferenceSize(int distance, int pos) {
  const int distance_digits = GetNumberLength(distance);
  if (pos == 0) {
    // "#<distance>": digits plus the '#'.
    return distance_digits + 1;
  }
  // "#<distance>:<pos>": digits plus '#' and ':'.
  return distance_digits + GetNumberLength(pos) + 2;
}
// Prints a backward reference into 'dest': "#<distance>" when the whole
// line is referenced (pos == 0), otherwise "#<distance>:<pos>".
void LogRecordCompressor::PrintBackwardReference(Vector<char> dest,
                                                 int distance,
                                                 int pos) {
  if (pos == 0) {
    OS::SNPrintF(dest, kLineBackwardReferenceFormat, distance);
  } else {
    OS::SNPrintF(dest, kBackwardReferenceFormat, distance, pos);
  }
}
// Stores a private copy of 'record' in the circular window, unless it
// is byte-identical to the most recently stored record.  Returns true
// if the record was stored.
bool LogRecordCompressor::Store(const Vector<const char>& record) {
  // Check if the record is the same as the last stored one.
  if (curr_ != -1) {
    Vector<const char>& curr = buffer_[curr_];
    if (record.length() == curr.length()
        && strncmp(record.start(), curr.start(), record.length()) == 0) {
      return false;
    }
  }
  // buffer_ is circular.
  prev_ = curr_++;
  curr_ %= buffer_.length();
  // Copy the record: the caller's buffer is reused for later messages.
  Vector<char> record_copy = Vector<char>::New(record.length());
  memcpy(record_copy.start(), record.start(), record.length());
  // Release the record being evicted from this slot before overwriting.
  buffer_[curr_].Dispose();
  buffer_[curr_] =
      Vector<const char>(record_copy.start(), record_copy.length());
  return true;
}
// Fills '*prev_record' with a compressed version of the previously
// stored record and truncates it to the written length.  Returns false
// if there is no previous record.  Compression replaces a tail of the
// previous record shared with an earlier record in the window by a
// backward reference, when the reference prints shorter than the tail.
bool LogRecordCompressor::RetrievePreviousCompressed(
    Vector<char>* prev_record) {
  if (prev_ == -1) return false;

  int index = prev_;
  // Distance from prev_.
  int distance = 0;
  // Best compression result among records in the buffer.
  struct {
    intptr_t truncated_len;  // Length of the shared tail to replace.
    int distance;            // Backward distance of the matching record.
    int copy_from_pos;       // Offset of the match within that record.
    int backref_size;        // Printed size of the backward reference.
  } best = {-1, 0, 0, 0};
  Vector<const char>& prev = buffer_[prev_];
  const char* const prev_start = prev.start();
  const char* const prev_end = prev.start() + prev.length();
  do {
    // We're moving backwards until we reach the current record.
    // Remember that buffer_ is circular.
    if (--index == -1) index = buffer_.length() - 1;
    ++distance;
    if (index == curr_) break;
    Vector<const char>& data = buffer_[index];
    // A slot that was never written terminates the scan.
    if (data.start() == NULL) break;
    const char* const data_end = data.start() + data.length();
    const char* prev_ptr = prev_end;
    const char* data_ptr = data_end;
    // Compare strings backwards, stop on the last matching character.
    while (prev_ptr != prev_start && data_ptr != data.start()
          && *(prev_ptr - 1) == *(data_ptr - 1)) {
      --prev_ptr;
      --data_ptr;
    }
    const intptr_t truncated_len = prev_end - prev_ptr;
    const int copy_from_pos = data_ptr - data.start();
    // Check if the length of compressed tail is enough.
    if (truncated_len <= kMaxBackwardReferenceSize
        && truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
      continue;
    }

    // Record compression results.
    if (truncated_len > best.truncated_len) {
      best.truncated_len = truncated_len;
      best.distance = distance;
      best.copy_from_pos = copy_from_pos;
      best.backref_size = GetBackwardReferenceSize(distance, copy_from_pos);
    }
  } while (true);

  if (best.distance == 0) {
    // Can't compress the previous record. Return as is.
    ASSERT(prev_record->length() >= prev.length());
    memcpy(prev_record->start(), prev.start(), prev.length());
    prev_record->Truncate(prev.length());
  } else {
    // Copy the uncompressible part unchanged.
    const intptr_t unchanged_len = prev.length() - best.truncated_len;
    // + 1 for '\0'.
    ASSERT(prev_record->length() >= unchanged_len + best.backref_size + 1);
    memcpy(prev_record->start(), prev.start(), unchanged_len);
    // Append the backward reference.
    Vector<char> backref(
        prev_record->start() + unchanged_len, best.backref_size + 1);
    PrintBackwardReference(backref, best.distance, best.copy_from_pos);
    // SNPrintF wrote exactly backref_size characters plus '\0'.
    ASSERT(strlen(backref.start()) - best.backref_size == 0);
    prev_record->Truncate(unchanged_len + best.backref_size);
  }
  return true;
}
#endif // ENABLE_LOGGING_AND_PROFILING #endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal } } // namespace v8::internal

67
deps/v8/src/log-utils.h

@ -170,6 +170,50 @@ class Log : public AllStatic {
static char* message_buffer_; static char* message_buffer_;
friend class LogMessageBuilder; friend class LogMessageBuilder;
friend class LogRecordCompressor;
};
// A utility class for performing backward reference compression
// of string ends. It operates using a window of previous strings.
class LogRecordCompressor {
public:
// 'window_size' is the size of backward lookup window.
explicit LogRecordCompressor(int window_size)
: buffer_(window_size + kNoCompressionWindowSize),
kMaxBackwardReferenceSize(
GetBackwardReferenceSize(window_size, Log::kMessageBufferSize)),
curr_(-1), prev_(-1) {
}
~LogRecordCompressor();
// Fills vector with a compressed version of the previous record.
// Returns false if there is no previous record.
bool RetrievePreviousCompressed(Vector<char>* prev_record);
// Stores a record if it differs from a previous one (or there's no previous).
// Returns true, if the record has been stored.
bool Store(const Vector<const char>& record);
private:
// The minimum size of a buffer: a place needed for the current and
// the previous record. Since there is no place for predecessors of a previous
// record, it can't be compressed at all.
static const int kNoCompressionWindowSize = 2;
// Formatting strings for back references.
static const char* kLineBackwardReferenceFormat;
static const char* kBackwardReferenceFormat;
static int GetBackwardReferenceSize(int distance, int pos);
static void PrintBackwardReference(Vector<char> dest, int distance, int pos);
ScopedVector< Vector<const char> > buffer_;
const int kMaxBackwardReferenceSize;
int curr_;
int prev_;
}; };
@ -186,7 +230,7 @@ class LogMessageBuilder BASE_EMBEDDED {
void Append(const char* format, ...); void Append(const char* format, ...);
// Append string data to the log message. // Append string data to the log message.
void Append(const char* format, va_list args); void AppendVA(const char* format, va_list args);
// Append a character to the log message. // Append a character to the log message.
void Append(const char c); void Append(const char c);
@ -194,8 +238,29 @@ class LogMessageBuilder BASE_EMBEDDED {
// Append a heap string. // Append a heap string.
void Append(String* str); void Append(String* str);
// Appends an address, compressing it if needed by offsetting
// from Logger::last_address_.
void AppendAddress(Address addr);
// Appends an address, compressing it if needed.
void AppendAddress(Address addr, Address bias);
void AppendDetailed(String* str, bool show_impl_info); void AppendDetailed(String* str, bool show_impl_info);
// Stores log message into compressor, returns true if the message
// was stored (i.e. doesn't repeat the previous one).
bool StoreInCompressor(LogRecordCompressor* compressor);
// Sets log message to a previous version of compressed message.
// Returns false, if there is no previous message.
bool RetrieveCompressedPrevious(LogRecordCompressor* compressor) {
return RetrieveCompressedPrevious(compressor, "");
}
// Does the same as the version without arguments, and sets a prefix.
bool RetrieveCompressedPrevious(LogRecordCompressor* compressor,
const char* prefix);
// Write the log message to the log file currently opened. // Write the log message to the log file currently opened.
void WriteToLogFile(); void WriteToLogFile();

169
deps/v8/src/log.cc

@ -262,6 +262,7 @@ void Profiler::Engage() {
Logger::ticker_->SetProfiler(this); Logger::ticker_->SetProfiler(this);
Logger::ProfilerBeginEvent(); Logger::ProfilerBeginEvent();
Logger::LogAliases();
} }
@ -301,6 +302,20 @@ Profiler* Logger::profiler_ = NULL;
VMState* Logger::current_state_ = NULL; VMState* Logger::current_state_ = NULL;
VMState Logger::bottom_state_(EXTERNAL); VMState Logger::bottom_state_(EXTERNAL);
SlidingStateWindow* Logger::sliding_state_window_ = NULL; SlidingStateWindow* Logger::sliding_state_window_ = NULL;
const char** Logger::log_events_ = NULL;
CompressionHelper* Logger::compression_helper_ = NULL;
#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_LONG_EVENT)
};
#undef DECLARE_LONG_EVENT
#define DECLARE_SHORT_EVENT(ignore1, ignore2, short_name) short_name,
const char* kCompressedLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_SHORT_EVENT)
};
#undef DECLARE_SHORT_EVENT
bool Logger::IsEnabled() { bool Logger::IsEnabled() {
@ -312,6 +327,20 @@ void Logger::ProfilerBeginEvent() {
if (!Log::IsEnabled()) return; if (!Log::IsEnabled()) return;
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs); msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
if (FLAG_compress_log) {
msg.Append("profiler,\"compression\",%d\n", kCompressionWindowSize);
}
msg.WriteToLogFile();
}
void Logger::LogAliases() {
if (!Log::IsEnabled() || !FLAG_compress_log) return;
LogMessageBuilder msg;
for (int i = 0; i < NUMBER_OF_LOG_EVENTS; ++i) {
msg.Append("alias,%s,%s\n",
kCompressedLogEventsNames[i], kLongLogEventsNames[i]);
}
msg.WriteToLogFile(); msg.WriteToLogFile();
} }
@ -373,7 +402,7 @@ void Logger::ApiEvent(const char* format, ...) {
LogMessageBuilder msg; LogMessageBuilder msg;
va_list ap; va_list ap;
va_start(ap, format); va_start(ap, format);
msg.Append(format, ap); msg.AppendVA(format, ap);
va_end(ap); va_end(ap);
msg.WriteToLogFile(); msg.WriteToLogFile();
} }
@ -594,12 +623,15 @@ void Logger::DeleteEvent(const char* name, void* object) {
} }
void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) { void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return; if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", tag, code->address(), msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
code->ExecutableSize()); msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize());
for (const char* p = comment; *p != '\0'; p++) { for (const char* p = comment; *p != '\0'; p++) {
if (*p == '"') { if (*p == '"') {
msg.Append('\\'); msg.Append('\\');
@ -613,20 +645,22 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
} }
void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) { void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return; if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg; LogMessageBuilder msg;
SmartPointer<char> str = SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"%s\"\n", msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
tag, code->address(), code->ExecutableSize(), *str); msg.AppendAddress(code->address());
msg.Append(",%d,\"%s\"\n", code->ExecutableSize(), *str);
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
void Logger::CodeCreateEvent(const char* tag, Code* code, String* name, void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code, String* name,
String* source, int line) { String* source, int line) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return; if (!Log::IsEnabled() || !FLAG_log_code) return;
@ -635,23 +669,22 @@ void Logger::CodeCreateEvent(const char* tag, Code* code, String* name,
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> sourcestr = SmartPointer<char> sourcestr =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"%s %s:%d\"\n", msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
tag, code->address(), msg.AppendAddress(code->address());
code->ExecutableSize(), msg.Append(",%d,\"%s %s:%d\"\n",
*str, *sourcestr, line); code->ExecutableSize(), *str, *sourcestr, line);
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) { void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return; if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"args_count: %d\"\n", tag, msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
code->address(), msg.AppendAddress(code->address());
code->ExecutableSize(), msg.Append(",%d,\"args_count: %d\"\n", code->ExecutableSize(), args_count);
args_count);
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
@ -661,9 +694,10 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return; if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", "RegExp", msg.Append("%s,%s,",
code->address(), log_events_[CODE_CREATION_EVENT], log_events_[REG_EXP_TAG]);
code->ExecutableSize()); msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(source, false); msg.AppendDetailed(source, false);
msg.Append("\"\n"); msg.Append("\"\n");
msg.WriteToLogFile(); msg.WriteToLogFile();
@ -671,23 +705,57 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
} }
void Logger::CodeAllocateEvent(Code* code, Assembler* assem) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg; // A class that contains all common code dealing with record compression.
msg.Append("code-allocate,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n", class CompressionHelper {
code->address(), public:
assem); explicit CompressionHelper(int window_size)
msg.WriteToLogFile(); : compressor_(window_size), repeat_count_(0) { }
#endif
// Handles storing message in compressor, retrieving the previous one and
// prefixing it with repeat count, if needed.
// Returns true if message needs to be written to log.
bool HandleMessage(LogMessageBuilder* msg) {
if (!msg->StoreInCompressor(&compressor_)) {
// Current message repeats the previous one, don't write it.
++repeat_count_;
return false;
}
if (repeat_count_ == 0) {
return msg->RetrieveCompressedPrevious(&compressor_);
}
OS::SNPrintF(prefix_, "%s,%d,",
Logger::log_events_[Logger::REPEAT_META_EVENT],
repeat_count_ + 1);
repeat_count_ = 0;
return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
} }
private:
LogRecordCompressor compressor_;
int repeat_count_;
EmbeddedVector<char, 20> prefix_;
};
#endif // ENABLE_LOGGING_AND_PROFILING
void Logger::CodeMoveEvent(Address from, Address to) { void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
static Address prev_to_ = NULL;
if (!Log::IsEnabled() || !FLAG_log_code) return; if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("code-move,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n", from, to); msg.Append("%s,", log_events_[CODE_MOVE_EVENT]);
msg.AppendAddress(from);
msg.Append(',');
msg.AppendAddress(to, prev_to_);
prev_to_ = to;
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
@ -697,7 +765,13 @@ void Logger::CodeDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return; if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("code-delete,0x%" V8PRIxPTR "\n", from); msg.Append("%s,", log_events_[CODE_DELETE_EVENT]);
msg.AppendAddress(from);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
@ -802,14 +876,26 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
void Logger::TickEvent(TickSample* sample, bool overflow) { void Logger::TickEvent(TickSample* sample, bool overflow) {
if (!Log::IsEnabled() || !FLAG_prof) return; if (!Log::IsEnabled() || !FLAG_prof) return;
static Address prev_sp = NULL;
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("tick,0x%" V8PRIxPTR ",0x%" V8PRIxPTR ",%d", msg.Append("%s,", log_events_[TICK_EVENT]);
sample->pc, sample->sp, static_cast<int>(sample->state)); Address prev_addr = reinterpret_cast<Address>(sample->pc);
msg.AppendAddress(prev_addr);
msg.Append(',');
msg.AppendAddress(reinterpret_cast<Address>(sample->sp), prev_sp);
prev_sp = reinterpret_cast<Address>(sample->sp);
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) { if (overflow) {
msg.Append(",overflow"); msg.Append(",overflow");
} }
for (int i = 0; i < sample->frames_count; ++i) { for (int i = 0; i < sample->frames_count; ++i) {
msg.Append(",0x%" V8PRIxPTR, sample->stack[i]); msg.Append(',');
msg.AppendAddress(sample->stack[i], prev_addr);
prev_addr = sample->stack[i];
}
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
} }
msg.Append('\n'); msg.Append('\n');
msg.WriteToLogFile(); msg.WriteToLogFile();
@ -913,17 +999,19 @@ void Logger::LogCompiledFunctions() {
int line_num = GetScriptLineNumber(script, shared->start_position()); int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) { if (line_num > 0) {
line_num += script->line_offset()->value() + 1; line_num += script->line_offset()->value() + 1;
LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name, LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
shared->code(), *func_name,
*script_name, line_num)); *script_name, line_num));
} else { } else {
// Can't distinguish enum and script here, so always use Script. // Can't distinguish enum and script here, so always use Script.
LOG(CodeCreateEvent("Script", shared->code(), *script_name)); LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
shared->code(), *script_name));
} }
continue; continue;
} }
} }
// If no script or script has no name. // If no script or script has no name.
LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name)); LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
} }
DeleteArray(sfis); DeleteArray(sfis);
@ -1013,6 +1101,12 @@ bool Logger::Setup() {
sliding_state_window_ = new SlidingStateWindow(); sliding_state_window_ = new SlidingStateWindow();
} }
log_events_ = FLAG_compress_log ?
kCompressedLogEventsNames : kLongLogEventsNames;
if (FLAG_compress_log) {
compression_helper_ = new CompressionHelper(kCompressionWindowSize);
}
if (FLAG_prof) { if (FLAG_prof) {
profiler_ = new Profiler(); profiler_ = new Profiler();
if (!FLAG_prof_auto) if (!FLAG_prof_auto)
@ -1041,6 +1135,9 @@ void Logger::TearDown() {
profiler_ = NULL; profiler_ = NULL;
} }
delete compression_helper_;
compression_helper_ = NULL;
delete sliding_state_window_; delete sliding_state_window_;
sliding_state_window_ = NULL; sliding_state_window_ = NULL;

57
deps/v8/src/log.h

@ -71,6 +71,7 @@ class Profiler;
class Semaphore; class Semaphore;
class SlidingStateWindow; class SlidingStateWindow;
class LogMessageBuilder; class LogMessageBuilder;
class CompressionHelper;
#undef LOG #undef LOG
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
@ -102,8 +103,41 @@ class VMState BASE_EMBEDDED {
}; };
#define LOG_EVENTS_AND_TAGS_LIST(V) \
V(CODE_CREATION_EVENT, "code-creation", "cc") \
V(CODE_MOVE_EVENT, "code-move", "cm") \
V(CODE_DELETE_EVENT, "code-delete", "cd") \
V(TICK_EVENT, "tick", "t") \
V(REPEAT_META_EVENT, "repeat", "r") \
V(BUILTIN_TAG, "Builtin", "bi") \
V(CALL_DEBUG_BREAK_TAG, "CallDebugBreak", "cdb") \
V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn", "cdbsi") \
V(CALL_IC_TAG, "CallIC", "cic") \
V(CALL_INITIALIZE_TAG, "CallInitialize", "ci") \
V(CALL_MEGAMORPHIC_TAG, "CallMegamorphic", "cmm") \
V(CALL_MISS_TAG, "CallMiss", "cm") \
V(CALL_NORMAL_TAG, "CallNormal", "cn") \
V(CALL_PRE_MONOMORPHIC_TAG, "CallPreMonomorphic", "cpm") \
V(EVAL_TAG, "Eval", "e") \
V(FUNCTION_TAG, "Function", "f") \
V(KEYED_LOAD_IC_TAG, "KeyedLoadIC", "klic") \
V(KEYED_STORE_IC_TAG, "KeyedStoreIC", "ksic") \
V(LAZY_COMPILE_TAG, "LazyCompile", "lc") \
V(LOAD_IC_TAG, "LoadIC", "lic") \
V(REG_EXP_TAG, "RegExp", "re") \
V(SCRIPT_TAG, "Script", "sc") \
V(STORE_IC_TAG, "StoreIC", "sic") \
V(STUB_TAG, "Stub", "s")
class Logger { class Logger {
public: public:
#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
enum LogEventsAndTags {
LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
NUMBER_OF_LOG_EVENTS
};
#undef DECLARE_ENUM
// Acquires resources for logging if the right flags are set. // Acquires resources for logging if the right flags are set.
static bool Setup(); static bool Setup();
@ -163,14 +197,14 @@ class Logger {
// ==== Events logged by --log-code. ==== // ==== Events logged by --log-code. ====
// Emits a code create event. // Emits a code create event.
static void CodeCreateEvent(const char* tag, Code* code, const char* source); static void CodeCreateEvent(LogEventsAndTags tag,
static void CodeCreateEvent(const char* tag, Code* code, String* name); Code* code, const char* source);
static void CodeCreateEvent(const char* tag, Code* code, String* name, static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name);
static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name,
String* source, int line); String* source, int line);
static void CodeCreateEvent(const char* tag, Code* code, int args_count); static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
// Emits a code create event for a RegExp. // Emits a code create event for a RegExp.
static void RegExpCodeCreateEvent(Code* code, String* source); static void RegExpCodeCreateEvent(Code* code, String* source);
static void CodeAllocateEvent(Code* code, Assembler* assem);
// Emits a code move event. // Emits a code move event.
static void CodeMoveEvent(Address from, Address to); static void CodeMoveEvent(Address from, Address to);
// Emits a code delete event. // Emits a code delete event.
@ -223,9 +257,15 @@ class Logger {
// Profiler's sampling interval (in milliseconds). // Profiler's sampling interval (in milliseconds).
static const int kSamplingIntervalMs = 1; static const int kSamplingIntervalMs = 1;
// Size of window used for log records compression.
static const int kCompressionWindowSize = 4;
// Emits the profiler's first message. // Emits the profiler's first message.
static void ProfilerBeginEvent(); static void ProfilerBeginEvent();
// Emits aliases for compressed messages.
static void LogAliases();
// Emits the source code of a regexp. Used by regexp events. // Emits the source code of a regexp. Used by regexp events.
static void LogRegExpSource(Handle<JSRegExp> regexp); static void LogRegExpSource(Handle<JSRegExp> regexp);
@ -261,8 +301,15 @@ class Logger {
// recent VM states. // recent VM states.
static SlidingStateWindow* sliding_state_window_; static SlidingStateWindow* sliding_state_window_;
// An array of log events names.
static const char** log_events_;
// An instance of helper created if log compression is enabled.
static CompressionHelper* compression_helper_;
// Internal implementation classes with access to // Internal implementation classes with access to
// private members. // private members.
friend class CompressionHelper;
friend class EventLog; friend class EventLog;
friend class TimeLog; friend class TimeLog;
friend class Profiler; friend class Profiler;

1
deps/v8/src/macros.py

@ -60,6 +60,7 @@ const msPerSecond = 1000;
const msPerMinute = 60000; const msPerMinute = 60000;
const msPerHour = 3600000; const msPerHour = 3600000;
const msPerDay = 86400000; const msPerDay = 86400000;
const msPerMonth = 2592000000;
# For apinatives.js # For apinatives.js
const kUninitialized = -1; const kUninitialized = -1;

92
deps/v8/src/math.js

@ -44,39 +44,73 @@ $Math.__proto__ = global.Object.prototype;
// ECMA 262 - 15.8.2.1 // ECMA 262 - 15.8.2.1
function MathAbs(x) { function MathAbs(x) {
if (%_IsSmi(x)) { if (%_IsSmi(x)) return x >= 0 ? x : -x;
return x >= 0 ? x : -x; if (!IS_NUMBER(x)) x = ToNumber(x);
} else { return %Math_abs(x);
return %Math_abs(ToNumber(x));
}
} }
// ECMA 262 - 15.8.2.2 // ECMA 262 - 15.8.2.2
function MathAcos(x) { return %Math_acos(ToNumber(x)); } function MathAcos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_acos(x);
}
// ECMA 262 - 15.8.2.3 // ECMA 262 - 15.8.2.3
function MathAsin(x) { return %Math_asin(ToNumber(x)); } function MathAsin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_asin(x);
}
// ECMA 262 - 15.8.2.4 // ECMA 262 - 15.8.2.4
function MathAtan(x) { return %Math_atan(ToNumber(x)); } function MathAtan(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_atan(x);
}
// ECMA 262 - 15.8.2.5 // ECMA 262 - 15.8.2.5
function MathAtan2(x, y) { return %Math_atan2(ToNumber(x), ToNumber(y)); } function MathAtan2(x, y) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(y)) y = ToNumber(y);
return %Math_atan2(x, y);
}
// ECMA 262 - 15.8.2.6 // ECMA 262 - 15.8.2.6
function MathCeil(x) { return %Math_ceil(ToNumber(x)); } function MathCeil(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_ceil(x);
}
// ECMA 262 - 15.8.2.7 // ECMA 262 - 15.8.2.7
function MathCos(x) { return %Math_cos(ToNumber(x)); } function MathCos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %_Math_cos(x);
}
// ECMA 262 - 15.8.2.8 // ECMA 262 - 15.8.2.8
function MathExp(x) { return %Math_exp(ToNumber(x)); } function MathExp(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_exp(x);
}
// ECMA 262 - 15.8.2.9 // ECMA 262 - 15.8.2.9
function MathFloor(x) { return %Math_floor(ToNumber(x)); } function MathFloor(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (0 < x && x <= 0x7FFFFFFF) {
// Numbers in the range [0, 2^31) can be floored by converting
// them to an unsigned 32-bit value using the shift operator.
// We avoid doing so for -0, because the result of Math.floor(-0)
// has to be -0, which wouldn't be the case with the shift.
return x << 0;
} else {
return %Math_floor(x);
}
}
// ECMA 262 - 15.8.2.10 // ECMA 262 - 15.8.2.10
function MathLog(x) { return %Math_log(ToNumber(x)); } function MathLog(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_log(x);
}
// ECMA 262 - 15.8.2.11 // ECMA 262 - 15.8.2.11
function MathMax(arg1, arg2) { // length == 2 function MathMax(arg1, arg2) { // length == 2
@ -103,22 +137,40 @@ function MathMin(arg1, arg2) { // length == 2
} }
// ECMA 262 - 15.8.2.13 // ECMA 262 - 15.8.2.13
function MathPow(x, y) { return %Math_pow(ToNumber(x), ToNumber(y)); } function MathPow(x, y) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(y)) y = ToNumber(y);
return %Math_pow(x, y);
}
// ECMA 262 - 15.8.2.14 // ECMA 262 - 15.8.2.14
function MathRandom() { return %Math_random(); } function MathRandom() {
return %_RandomPositiveSmi() / 0x40000000;
}
// ECMA 262 - 15.8.2.15 // ECMA 262 - 15.8.2.15
function MathRound(x) { return %Math_round(ToNumber(x)); } function MathRound(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_round(x);
}
// ECMA 262 - 15.8.2.16 // ECMA 262 - 15.8.2.16
function MathSin(x) { return %Math_sin(ToNumber(x)); } function MathSin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %_Math_sin(x);
}
// ECMA 262 - 15.8.2.17 // ECMA 262 - 15.8.2.17
function MathSqrt(x) { return %Math_sqrt(ToNumber(x)); } function MathSqrt(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_sqrt(x);
}
// ECMA 262 - 15.8.2.18 // ECMA 262 - 15.8.2.18
function MathTan(x) { return %Math_tan(ToNumber(x)); } function MathTan(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %Math_tan(x);
}
// ------------------------------------------------------------------- // -------------------------------------------------------------------

63
deps/v8/src/messages.js

@ -230,6 +230,40 @@ function MakeError(type, args) {
return MakeGenericError($Error, type, args); return MakeGenericError($Error, type, args);
} }
/**
* Find a line number given a specific source position.
* @param {number} position The source position.
* @return {number} 0 if input too small, -1 if input too large,
else the line number.
*/
Script.prototype.lineFromPosition = function(position) {
var lower = 0;
var upper = this.lineCount() - 1;
// We'll never find invalid positions so bail right away.
if (position > this.line_ends[upper]) {
return -1;
}
// This means we don't have to safe-guard indexing line_ends[i - 1].
if (position <= this.line_ends[0]) {
return 0;
}
// Binary search to find line # from position range.
while (upper >= 1) {
var i = (lower + upper) >> 1;
if (position > this.line_ends[i]) {
lower = i + 1;
} else if (position <= this.line_ends[i - 1]) {
upper = i - 1;
} else {
return i;
}
}
return -1;
}
/** /**
* Get information on a specific source position. * Get information on a specific source position.
@ -241,19 +275,7 @@ function MakeError(type, args) {
*/ */
Script.prototype.locationFromPosition = function (position, Script.prototype.locationFromPosition = function (position,
include_resource_offset) { include_resource_offset) {
var lineCount = this.lineCount(); var line = this.lineFromPosition(position);
var line = -1;
if (position <= this.line_ends[0]) {
line = 0;
} else {
for (var i = 1; i < lineCount; i++) {
if (this.line_ends[i - 1] < position && position <= this.line_ends[i]) {
line = i;
break;
}
}
}
if (line == -1) return null; if (line == -1) return null;
// Determine start, end and column. // Determine start, end and column.
@ -308,16 +330,13 @@ Script.prototype.locationFromLine = function (opt_line, opt_column, opt_offset_p
if (line == 0) { if (line == 0) {
return this.locationFromPosition(offset_position + column, false); return this.locationFromPosition(offset_position + column, false);
} else { } else {
// Find the line where the offset position is located // Find the line where the offset position is located.
var lineCount = this.lineCount(); var offset_line = this.lineFromPosition(offset_position);
var offset_line;
for (var i = 0; i < lineCount; i++) { if (offset_line == -1 || offset_line + line >= this.lineCount()) {
if (offset_position <= this.line_ends[i]) { return null;
offset_line = i;
break;
}
} }
if (offset_line + line >= lineCount) return null;
return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column); // line > 0 here. return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column); // line > 0 here.
} }
} }

154
deps/v8/src/mirror-delay.js

@ -34,9 +34,14 @@ RegExp;
Date; Date;
// Handle id counters.
var next_handle_ = 0; var next_handle_ = 0;
var next_transient_handle_ = -1;
// Mirror cache.
var mirror_cache_ = []; var mirror_cache_ = [];
/** /**
* Clear the mirror handle cache. * Clear the mirror handle cache.
*/ */
@ -50,10 +55,15 @@ function ClearMirrorCache() {
* Returns the mirror for a specified value or object. * Returns the mirror for a specified value or object.
* *
* @param {value or Object} value the value or object to retreive the mirror for * @param {value or Object} value the value or object to retreive the mirror for
* @param {boolean} transient indicate whether this object is transient and
* should not be added to the mirror cache. The default is not transient.
* @returns {Mirror} the mirror reflects the passed value or object * @returns {Mirror} the mirror reflects the passed value or object
*/ */
function MakeMirror(value) { function MakeMirror(value, opt_transient) {
var mirror; var mirror;
// Look for non transient mirrors in the mirror cache.
if (!opt_transient) {
for (id in mirror_cache_) { for (id in mirror_cache_) {
mirror = mirror_cache_[id]; mirror = mirror_cache_[id];
if (mirror.value() === value) { if (mirror.value() === value) {
@ -65,6 +75,7 @@ function MakeMirror(value) {
return mirror; return mirror;
} }
} }
}
if (IS_UNDEFINED(value)) { if (IS_UNDEFINED(value)) {
mirror = new UndefinedMirror(); mirror = new UndefinedMirror();
@ -89,7 +100,7 @@ function MakeMirror(value) {
} else if (IS_SCRIPT(value)) { } else if (IS_SCRIPT(value)) {
mirror = new ScriptMirror(value); mirror = new ScriptMirror(value);
} else { } else {
mirror = new ObjectMirror(value); mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
} }
mirror_cache_[mirror.handle()] = mirror; mirror_cache_[mirror.handle()] = mirror;
@ -155,6 +166,7 @@ const PROPERTY_TYPE = 'property';
const FRAME_TYPE = 'frame'; const FRAME_TYPE = 'frame';
const SCRIPT_TYPE = 'script'; const SCRIPT_TYPE = 'script';
const CONTEXT_TYPE = 'context'; const CONTEXT_TYPE = 'context';
const SCOPE_TYPE = 'scope';
// Maximum length when sending strings through the JSON protocol. // Maximum length when sending strings through the JSON protocol.
const kMaxProtocolStringLength = 80; const kMaxProtocolStringLength = 80;
@ -185,6 +197,13 @@ PropertyAttribute.DontEnum = DONT_ENUM;
PropertyAttribute.DontDelete = DONT_DELETE; PropertyAttribute.DontDelete = DONT_DELETE;
// A copy of the scope types from runtime.cc.
ScopeType = { Global: 0,
Local: 1,
With: 2,
Closure: 3 };
// Mirror hierarchy: // Mirror hierarchy:
// - Mirror // - Mirror
// - ValueMirror // - ValueMirror
@ -372,6 +391,15 @@ Mirror.prototype.isContext = function() {
} }
/**
* Check whether the mirror reflects a scope.
* @returns {boolean} True if the mirror reflects a scope
*/
Mirror.prototype.isScope = function() {
return this instanceof ScopeMirror;
}
/** /**
* Allocate a handle id for this object. * Allocate a handle id for this object.
*/ */
@ -380,6 +408,15 @@ Mirror.prototype.allocateHandle_ = function() {
} }
/**
* Allocate a transient handle id for this object. Transient handles are
* negative.
*/
Mirror.prototype.allocateTransientHandle_ = function() {
this.handle_ = next_transient_handle_--;
}
Mirror.prototype.toText = function() { Mirror.prototype.toText = function() {
// Simpel to text which is used when on specialization in subclass. // Simpel to text which is used when on specialization in subclass.
return "#<" + builtins.GetInstanceName(this.constructor.name) + ">"; return "#<" + builtins.GetInstanceName(this.constructor.name) + ">";
@ -390,13 +427,19 @@ Mirror.prototype.toText = function() {
* Base class for all value mirror objects. * Base class for all value mirror objects.
* @param {string} type The type of the mirror * @param {string} type The type of the mirror
* @param {value} value The value reflected by this mirror * @param {value} value The value reflected by this mirror
* @param {boolean} transient indicate whether this object is transient with a
* transient handle
* @constructor * @constructor
* @extends Mirror * @extends Mirror
*/ */
function ValueMirror(type, value) { function ValueMirror(type, value, transient) {
Mirror.call(this, type); Mirror.call(this, type);
this.value_ = value; this.value_ = value;
if (!transient) {
this.allocateHandle_(); this.allocateHandle_();
} else {
this.allocateTransientHandle_();
}
} }
inherits(ValueMirror, Mirror); inherits(ValueMirror, Mirror);
@ -525,11 +568,13 @@ StringMirror.prototype.toText = function() {
/** /**
* Mirror object for objects. * Mirror object for objects.
* @param {object} value The object reflected by this mirror * @param {object} value The object reflected by this mirror
* @param {boolean} transient indicate whether this object is transient with a
* transient handle
* @constructor * @constructor
* @extends ValueMirror * @extends ValueMirror
*/ */
function ObjectMirror(value, type) { function ObjectMirror(value, type, transient) {
ValueMirror.call(this, type || OBJECT_TYPE, value); ValueMirror.call(this, type || OBJECT_TYPE, value, transient);
} }
inherits(ObjectMirror, ValueMirror); inherits(ObjectMirror, ValueMirror);
@ -1080,7 +1125,7 @@ PropertyMirror.prototype.isIndexed = function() {
PropertyMirror.prototype.value = function() { PropertyMirror.prototype.value = function() {
return MakeMirror(this.value_); return MakeMirror(this.value_, false);
} }
@ -1135,7 +1180,7 @@ PropertyMirror.prototype.getter = function() {
if (this.hasGetter()) { if (this.hasGetter()) {
return MakeMirror(this.getter_); return MakeMirror(this.getter_);
} else { } else {
return new UndefinedMirror(); return GetUndefinedMirror();
} }
} }
@ -1149,7 +1194,7 @@ PropertyMirror.prototype.setter = function() {
if (this.hasSetter()) { if (this.hasSetter()) {
return MakeMirror(this.setter_); return MakeMirror(this.setter_);
} else { } else {
return new UndefinedMirror(); return GetUndefinedMirror();
} }
} }
@ -1294,6 +1339,11 @@ FrameDetails.prototype.localValue = function(index) {
} }
FrameDetails.prototype.scopeCount = function() {
return %GetScopeCount(this.break_id_, this.frameId());
}
/** /**
* Mirror object for stack frames. * Mirror object for stack frames.
* @param {number} break_id The break id in the VM for which this frame is * @param {number} break_id The break id in the VM for which this frame is
@ -1419,6 +1469,16 @@ FrameMirror.prototype.sourceLineText = function() {
}; };
FrameMirror.prototype.scopeCount = function() {
return this.details_.scopeCount();
};
FrameMirror.prototype.scope = function(index) {
return new ScopeMirror(this, index);
};
FrameMirror.prototype.evaluate = function(source, disable_break) { FrameMirror.prototype.evaluate = function(source, disable_break) {
var result = %DebugEvaluate(this.break_id_, this.details_.frameId(), var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
source, Boolean(disable_break)); source, Boolean(disable_break));
@ -1562,6 +1622,70 @@ FrameMirror.prototype.toText = function(opt_locals) {
} }
const kScopeDetailsTypeIndex = 0;
const kScopeDetailsObjectIndex = 1;
function ScopeDetails(frame, index) {
this.break_id_ = frame.break_id_;
this.details_ = %GetScopeDetails(frame.break_id_,
frame.details_.frameId(),
index);
}
ScopeDetails.prototype.type = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kScopeDetailsTypeIndex];
}
ScopeDetails.prototype.object = function() {
%CheckExecutionState(this.break_id_);
return this.details_[kScopeDetailsObjectIndex];
}
/**
* Mirror object for scope.
* @param {FrameMirror} frame The frame this scope is a part of
* @param {number} index The scope index in the frame
* @constructor
* @extends Mirror
*/
function ScopeMirror(frame, index) {
Mirror.call(this, SCOPE_TYPE);
this.frame_index_ = frame.index_;
this.scope_index_ = index;
this.details_ = new ScopeDetails(frame, index);
}
inherits(ScopeMirror, Mirror);
ScopeMirror.prototype.frameIndex = function() {
return this.frame_index_;
};
ScopeMirror.prototype.scopeIndex = function() {
return this.scope_index_;
};
ScopeMirror.prototype.scopeType = function() {
return this.details_.type();
};
ScopeMirror.prototype.scopeObject = function() {
// For local and closure scopes create a transient mirror as these objects are
// created on the fly materializing the local or closure scopes and
// therefore will not preserve identity.
var transient = this.scopeType() == ScopeType.Local ||
this.scopeType() == ScopeType.Closure;
return MakeMirror(this.details_.object(), transient);
};
/** /**
* Mirror object for script source. * Mirror object for script source.
* @param {Script} script The script object * @param {Script} script The script object
@ -1829,6 +1953,7 @@ JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
return o; return o;
}; };
JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference, JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
details) { details) {
// If serializing a reference to a mirror just return the reference and add // If serializing a reference to a mirror just return the reference and add
@ -1900,6 +2025,11 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
this.serializeFrame_(mirror, content); this.serializeFrame_(mirror, content);
break; break;
case SCOPE_TYPE:
// Add object representation.
this.serializeScope_(mirror, content);
break;
case SCRIPT_TYPE: case SCRIPT_TYPE:
// Script is represented by id, name and source attributes. // Script is represented by id, name and source attributes.
if (mirror.name()) { if (mirror.name()) {
@ -2102,6 +2232,14 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
} }
JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
content.index = mirror.scopeIndex();
content.frameIndex = mirror.frameIndex();
content.type = mirror.scopeType();
content.object = this.serializeReference(mirror.scopeObject());
}
/** /**
* Convert a number to a protocol value. For all finite numbers the number * Convert a number to a protocol value. For all finite numbers the number
* itself is returned. For non finite numbers NaN, Infinite and * itself is returned. For non finite numbers NaN, Infinite and

58
deps/v8/src/objects.cc

@ -5203,27 +5203,6 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
} }
Object* JSObject::SetElementPostInterceptor(uint32_t index, Object* value) {
if (HasFastElements()) return SetFastElement(index, value);
// Dictionary case.
ASSERT(!HasFastElements());
FixedArray* elms = FixedArray::cast(elements());
Object* result = Dictionary::cast(elms)->AtNumberPut(index, value);
if (result->IsFailure()) return result;
if (elms != FixedArray::cast(result)) {
set_elements(FixedArray::cast(result));
}
if (IsJSArray()) {
return JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
}
return value;
}
Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) { Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
// Make sure that the top context does not change when doing // Make sure that the top context does not change when doing
// callbacks or interceptor calls. // callbacks or interceptor calls.
@ -5250,7 +5229,7 @@ Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
if (!result.IsEmpty()) return *value_handle; if (!result.IsEmpty()) return *value_handle;
} }
Object* raw_result = Object* raw_result =
this_handle->SetElementPostInterceptor(index, *value_handle); this_handle->SetElementWithoutInterceptor(index, *value_handle);
RETURN_IF_SCHEDULED_EXCEPTION(); RETURN_IF_SCHEDULED_EXCEPTION();
return raw_result; return raw_result;
} }
@ -5332,6 +5311,11 @@ Object* JSObject::SetElement(uint32_t index, Object* value) {
return SetElementWithInterceptor(index, value); return SetElementWithInterceptor(index, value);
} }
return SetElementWithoutInterceptor(index, value);
}
Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
// Fast case. // Fast case.
if (HasFastElements()) return SetFastElement(index, value); if (HasFastElements()) return SetFastElement(index, value);
@ -5438,7 +5422,21 @@ Object* JSObject::GetElementPostInterceptor(JSObject* receiver,
Dictionary* dictionary = element_dictionary(); Dictionary* dictionary = element_dictionary();
int entry = dictionary->FindNumberEntry(index); int entry = dictionary->FindNumberEntry(index);
if (entry != -1) { if (entry != -1) {
return dictionary->ValueAt(entry); Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
// Only accessors allowed as elements.
FixedArray* structure = FixedArray::cast(element);
Object* getter = structure->get(kGetterIndex);
if (getter->IsJSFunction()) {
return GetPropertyWithDefinedGetter(receiver,
JSFunction::cast(getter));
} else {
// Getter is not a function.
return Heap::undefined_value();
}
}
return element;
} }
} }
@ -6436,10 +6434,6 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
AssertNoAllocation no_alloc; AssertNoAllocation no_alloc;
// Loose all details on properties when moving them around.
// Elements do not have special details like properties.
PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
uint32_t pos = 0; uint32_t pos = 0;
uint32_t undefs = 0; uint32_t undefs = 0;
for (int i = 0; i < capacity; i++) { for (int i = 0; i < capacity; i++) {
@ -6450,21 +6444,27 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0); ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32); ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
Object* value = dict->ValueAt(i); Object* value = dict->ValueAt(i);
PropertyDetails details = dict->DetailsAt(i);
if (details.type() == CALLBACKS) {
// Bail out and do the sorting of undefineds and array holes in JS.
return Smi::FromInt(-1);
}
uint32_t key = NumberToUint32(k); uint32_t key = NumberToUint32(k);
if (key < limit) { if (key < limit) {
if (value->IsUndefined()) { if (value->IsUndefined()) {
undefs++; undefs++;
} else { } else {
new_dict->AddNumberEntry(pos, value, no_details); new_dict->AddNumberEntry(pos, value, details);
pos++; pos++;
} }
} else { } else {
new_dict->AddNumberEntry(key, value, no_details); new_dict->AddNumberEntry(key, value, details);
} }
} }
} }
uint32_t result = pos; uint32_t result = pos;
PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
while (undefs > 0) { while (undefs > 0) {
new_dict->AddNumberEntry(pos, Heap::undefined_value(), no_details); new_dict->AddNumberEntry(pos, Heap::undefined_value(), no_details);
pos++; pos++;

33
deps/v8/src/objects.h

@ -1162,8 +1162,28 @@ class HeapNumber: public HeapObject {
// Layout description. // Layout description.
static const int kValueOffset = HeapObject::kHeaderSize; static const int kValueOffset = HeapObject::kHeaderSize;
// IEEE doubles are two 32 bit words. The first is just mantissa, the second
// is a mixture of sign, exponent and mantissa. Our current platforms are all
// little endian apart from non-EABI arm which is little endian with big
// endian floating point word ordering!
#if !defined(V8_HOST_ARCH_ARM) || __ARM_EABI__
static const int kMantissaOffset = kValueOffset;
static const int kExponentOffset = kValueOffset + 4;
#else
static const int kMantissaOffset = kValueOffset + 4;
static const int kExponentOffset = kValueOffset;
# define BIG_ENDIAN_FLOATING_POINT 1
#endif
static const int kSize = kValueOffset + kDoubleSize; static const int kSize = kValueOffset + kDoubleSize;
static const uint32_t kSignMask = 0x80000000u;
static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;
static const int kMantissaBitsInTopWord = 20;
static const int kNonMantissaBitsInTopWord = 12;
private: private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber); DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
}; };
@ -1518,7 +1538,7 @@ class JSObject: public HeapObject {
private: private:
Object* SetElementWithInterceptor(uint32_t index, Object* value); Object* SetElementWithInterceptor(uint32_t index, Object* value);
Object* SetElementPostInterceptor(uint32_t index, Object* value); Object* SetElementWithoutInterceptor(uint32_t index, Object* value);
Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index); Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
@ -2470,7 +2490,7 @@ class Map: public HeapObject {
return ((1 << kIsHiddenPrototype) & bit_field()) != 0; return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
} }
// Tells whether the instance has a named interceptor. // Records and queries whether the instance has a named interceptor.
inline void set_has_named_interceptor() { inline void set_has_named_interceptor() {
set_bit_field(bit_field() | (1 << kHasNamedInterceptor)); set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
} }
@ -2479,7 +2499,7 @@ class Map: public HeapObject {
return ((1 << kHasNamedInterceptor) & bit_field()) != 0; return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
} }
// Tells whether the instance has a named interceptor. // Records and queries whether the instance has an indexed interceptor.
inline void set_has_indexed_interceptor() { inline void set_has_indexed_interceptor() {
set_bit_field(bit_field() | (1 << kHasIndexedInterceptor)); set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
} }
@ -4008,10 +4028,9 @@ class JSArray: public JSObject {
// If an accessor was found and it does not have a setter, // If an accessor was found and it does not have a setter,
// the request is ignored. // the request is ignored.
// //
// To allow shadow an accessor property, the accessor can // If the accessor in the prototype has the READ_ONLY property attribute, then
// have READ_ONLY property attribute so that a new value // a new value is added to the local object when the property is set.
// is added to the local object to shadow the accessor // This shadows the accessor in the prototype.
// in prototypes.
class AccessorInfo: public Struct { class AccessorInfo: public Struct {
public: public:
DECL_ACCESSORS(getter, Object) DECL_ACCESSORS(getter, Object)

15
deps/v8/src/parser.cc

@ -2647,6 +2647,21 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
} }
} }
// Convert constant divisions to multiplications for speed.
if (op == Token::DIV &&
y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
double y_val = y->AsLiteral()->handle()->Number();
int64_t y_int = static_cast<int64_t>(y_val);
// There are rounding issues with this optimization, but they don't
// apply if the number to be divided with has a reciprocal that can
// be precisely represented as a floating point number. This is
// the case if the number is an integer power of 2.
if (static_cast<double>(y_int) == y_val && IsPowerOf2(y_int)) {
y = NewNumberLiteral(1 / y_val);
op = Token::MUL;
}
}
// For now we distinguish between comparisons and other binary // For now we distinguish between comparisons and other binary
// operations. (We could combine the two and get rid of this // operations. (We could combine the two and get rid of this
// code an AST node eventually.) // code an AST node eventually.)

1
deps/v8/src/regexp-macro-assembler-irregexp.cc

@ -47,6 +47,7 @@ RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() { RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
if (backtrack_.is_linked()) backtrack_.Unuse(); if (backtrack_.is_linked()) backtrack_.Unuse();
if (own_buffer_) buffer_.Dispose();
} }

488
deps/v8/src/runtime.cc

@ -2416,6 +2416,19 @@ static Object* Runtime_NumberToRadixString(Arguments args) {
NoHandleAllocation ha; NoHandleAllocation ha;
ASSERT(args.length() == 2); ASSERT(args.length() == 2);
// Fast case where the result is a one character string.
if (args[0]->IsSmi() && args[1]->IsSmi()) {
int value = Smi::cast(args[0])->value();
int radix = Smi::cast(args[1])->value();
if (value >= 0 && value < radix) {
RUNTIME_ASSERT(radix <= 36);
// Character array used for conversion.
static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
return Heap::LookupSingleCharacterStringFromCode(kCharTable[value]);
}
}
// Slow case.
CONVERT_DOUBLE_CHECKED(value, args[0]); CONVERT_DOUBLE_CHECKED(value, args[0]);
if (isnan(value)) { if (isnan(value)) {
return Heap::AllocateStringFromAscii(CStrVector("NaN")); return Heap::AllocateStringFromAscii(CStrVector("NaN"));
@ -4168,24 +4181,6 @@ static Object* Runtime_Math_pow(Arguments args) {
} }
} }
// Returns a number value with positive sign, greater than or equal to
// 0 but less than 1, chosen randomly.
static Object* Runtime_Math_random(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 0);
// To get much better precision, we combine the results of two
// invocations of random(). The result is computed by normalizing a
// double in the range [0, RAND_MAX + 1) obtained by adding the
// high-order bits in the range [0, RAND_MAX] with the low-order
// bits in the range [0, 1).
double lo = static_cast<double>(random()) * (1.0 / (RAND_MAX + 1.0));
double hi = static_cast<double>(random());
double result = (hi + lo) * (1.0 / (RAND_MAX + 1.0));
ASSERT(result >= 0 && result < 1);
return Heap::AllocateHeapNumber(result);
}
static Object* Runtime_Math_round(Arguments args) { static Object* Runtime_Math_round(Arguments args) {
NoHandleAllocation ha; NoHandleAllocation ha;
@ -4821,8 +4816,8 @@ static Object* Runtime_DebugPrint(Arguments args) {
// and print some interesting cpu debugging info. // and print some interesting cpu debugging info.
JavaScriptFrameIterator it; JavaScriptFrameIterator it;
JavaScriptFrame* frame = it.frame(); JavaScriptFrame* frame = it.frame();
PrintF("fp = %p, sp = %p, pp = %p: ", PrintF("fp = %p, sp = %p, caller_sp = %p: ",
frame->fp(), frame->sp(), frame->pp()); frame->fp(), frame->sp(), frame->caller_sp());
} else { } else {
PrintF("DebugPrint: "); PrintF("DebugPrint: ");
} }
@ -6106,6 +6101,405 @@ static Object* Runtime_GetFrameDetails(Arguments args) {
} }
// Copy all the context locals into an object used to materialize a scope.
static void CopyContextLocalsToScopeObject(Handle<Code> code,
ScopeInfo<>& scope_info,
Handle<Context> context,
Handle<JSObject> scope_object) {
// Fill all context locals to the context extension.
for (int i = Context::MIN_CONTEXT_SLOTS;
i < scope_info.number_of_context_slots();
i++) {
int context_index =
ScopeInfo<>::ContextSlotIndex(*code,
*scope_info.context_slot_name(i),
NULL);
// Don't include the arguments shadow (.arguments) context variable.
if (*scope_info.context_slot_name(i) != Heap::arguments_shadow_symbol()) {
SetProperty(scope_object,
scope_info.context_slot_name(i),
Handle<Object>(context->get(context_index)), NONE);
}
}
}
// Create a plain JSObject which materializes the local scope for the specified
// frame.
static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
Handle<JSFunction> function(JSFunction::cast(frame->function()));
Handle<Code> code(function->code());
ScopeInfo<> scope_info(*code);
// Allocate and initialize a JSObject with all the arguments, stack locals
// heap locals and extension properties of the debugged function.
Handle<JSObject> local_scope = Factory::NewJSObject(Top::object_function());
// First fill all parameters.
for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
SetProperty(local_scope,
scope_info.parameter_name(i),
Handle<Object>(frame->GetParameter(i)), NONE);
}
// Second fill all stack locals.
for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
SetProperty(local_scope,
scope_info.stack_slot_name(i),
Handle<Object>(frame->GetExpression(i)), NONE);
}
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->fcontext());
CopyContextLocalsToScopeObject(code, scope_info,
function_context, local_scope);
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
if (function_context->closure() == *function) {
if (function_context->has_extension() &&
!function_context->IsGlobalContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
SetProperty(local_scope, key, GetProperty(ext, key), NONE);
}
}
}
return local_scope;
}
// Create a plain JSObject which materializes the closure content for the
// context.
static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
ASSERT(context->is_function_context());
Handle<Code> code(context->closure()->code());
ScopeInfo<> scope_info(*code);
// Allocate and initialize a JSObject with all the content of theis function
// closure.
Handle<JSObject> closure_scope = Factory::NewJSObject(Top::object_function());
// Check whether the arguments shadow object exists.
int arguments_shadow_index =
ScopeInfo<>::ContextSlotIndex(*code,
Heap::arguments_shadow_symbol(),
NULL);
if (arguments_shadow_index >= 0) {
// In this case all the arguments are available in the arguments shadow
// object.
Handle<JSObject> arguments_shadow(
JSObject::cast(context->get(arguments_shadow_index)));
for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
SetProperty(closure_scope,
scope_info.parameter_name(i),
Handle<Object>(arguments_shadow->GetElement(i)), NONE);
}
}
// Fill all context locals to the context extension.
CopyContextLocalsToScopeObject(code, scope_info, context, closure_scope);
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
SetProperty(closure_scope, key, GetProperty(ext, key), NONE);
}
}
return closure_scope;
}
// Iterate over the actual scopes visible from a stack frame. All scopes are
// backed by an actual context except the local scope, which is inserted
// "artifically" in the context chain.
class ScopeIterator {
public:
enum ScopeType {
ScopeTypeGlobal = 0,
ScopeTypeLocal,
ScopeTypeWith,
ScopeTypeClosure
};
explicit ScopeIterator(JavaScriptFrame* frame)
: frame_(frame),
function_(JSFunction::cast(frame->function())),
context_(Context::cast(frame->context())),
local_done_(false),
at_local_(false) {
// Check whether the first scope is actually a local scope.
if (context_->IsGlobalContext()) {
// If there is a stack slot for .result then this local scope has been
// created for evaluating top level code and it is not a real local scope.
// Checking for the existence of .result seems fragile, but the scope info
// saved with the code object does not otherwise have that information.
Handle<Code> code(function_->code());
int index = ScopeInfo<>::StackSlotIndex(*code, Heap::result_symbol());
at_local_ = index < 0;
} else if (context_->is_function_context()) {
at_local_ = true;
}
}
// More scopes?
bool Done() { return context_.is_null(); }
// Move to the next scope.
void Next() {
// If at a local scope mark the local scope as passed.
if (at_local_) {
at_local_ = false;
local_done_ = true;
// If the current context is not associated with the local scope the
// current context is the next real scope, so don't move to the next
// context in this case.
if (context_->closure() != *function_) {
return;
}
}
// The global scope is always the last in the chain.
if (context_->IsGlobalContext()) {
context_ = Handle<Context>();
return;
}
// Move to the next context.
if (context_->is_function_context()) {
context_ = Handle<Context>(Context::cast(context_->closure()->context()));
} else {
context_ = Handle<Context>(context_->previous());
}
// If passing the local scope indicate that the current scope is now the
// local scope.
if (!local_done_ &&
(context_->IsGlobalContext() || (context_->is_function_context()))) {
at_local_ = true;
}
}
// Return the type of the current scope.
int Type() {
if (at_local_) {
return ScopeTypeLocal;
}
if (context_->IsGlobalContext()) {
ASSERT(context_->global()->IsGlobalObject());
return ScopeTypeGlobal;
}
if (context_->is_function_context()) {
return ScopeTypeClosure;
}
ASSERT(context_->has_extension());
ASSERT(!context_->extension()->IsJSContextExtensionObject());
return ScopeTypeWith;
}
// Return the JavaScript object with the content of the current scope.
Handle<JSObject> ScopeObject() {
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
return Handle<JSObject>(CurrentContext()->global());
break;
case ScopeIterator::ScopeTypeLocal:
// Materialize the content of the local scope into a JSObject.
return MaterializeLocalScope(frame_);
break;
case ScopeIterator::ScopeTypeWith:
// Return the with object.
return Handle<JSObject>(CurrentContext()->extension());
break;
case ScopeIterator::ScopeTypeClosure:
// Materialize the content of the closure scope into a JSObject.
return MaterializeClosure(CurrentContext());
break;
}
UNREACHABLE();
return Handle<JSObject>();
}
// Return the context for this scope. For the local context there might not
// be an actual context.
Handle<Context> CurrentContext() {
if (at_local_ && context_->closure() != *function_) {
return Handle<Context>();
}
return context_;
}
#ifdef DEBUG
// Debug print of the content of the current scope.
void DebugPrint() {
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
PrintF("Global:\n");
CurrentContext()->Print();
break;
case ScopeIterator::ScopeTypeLocal: {
PrintF("Local:\n");
Handle<Code> code(function_->code());
ScopeInfo<> scope_info(*code);
scope_info.Print();
if (!CurrentContext().is_null()) {
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
Handle<JSObject> extension =
Handle<JSObject>(CurrentContext()->extension());
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
}
}
break;
}
case ScopeIterator::ScopeTypeWith: {
PrintF("With:\n");
Handle<JSObject> extension =
Handle<JSObject>(CurrentContext()->extension());
extension->Print();
break;
}
case ScopeIterator::ScopeTypeClosure: {
PrintF("Closure:\n");
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
Handle<JSObject> extension =
Handle<JSObject>(CurrentContext()->extension());
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
}
break;
}
default:
UNREACHABLE();
}
PrintF("\n");
}
#endif
private:
JavaScriptFrame* frame_;
Handle<JSFunction> function_;
Handle<Context> context_;
bool local_done_;
bool at_local_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
};
static Object* Runtime_GetScopeCount(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
// Check arguments.
Object* check = Runtime_CheckExecutionState(args);
if (check->IsFailure()) return check;
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator it(id);
JavaScriptFrame* frame = it.frame();
// Count the visible scopes.
int n = 0;
for (ScopeIterator it(frame); !it.Done(); it.Next()) {
n++;
}
return Smi::FromInt(n);
}
static const int kScopeDetailsTypeIndex = 0;
static const int kScopeDetailsObjectIndex = 1;
static const int kScopeDetailsSize = 2;
// Return an array with scope details
// args[0]: number: break id
// args[1]: number: frame index
// args[2]: number: scope index
//
// The array returned contains the following information:
// 0: Scope type
// 1: Scope object
static Object* Runtime_GetScopeDetails(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
// Check arguments.
Object* check = Runtime_CheckExecutionState(args);
if (check->IsFailure()) return check;
CONVERT_CHECKED(Smi, wrapped_id, args[1]);
CONVERT_NUMBER_CHECKED(int, index, Int32, args[2]);
// Get the frame where the debugging is performed.
StackFrame::Id id = UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator frame_it(id);
JavaScriptFrame* frame = frame_it.frame();
// Find the requested scope.
int n = 0;
ScopeIterator it(frame);
for (; !it.Done() && n < index; it.Next()) {
n++;
}
if (it.Done()) {
return Heap::undefined_value();
}
// Calculate the size of the result.
int details_size = kScopeDetailsSize;
Handle<FixedArray> details = Factory::NewFixedArray(details_size);
// Fill in scope details.
details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
details->set(kScopeDetailsObjectIndex, *it.ScopeObject());
return *Factory::NewJSArrayWithElements(details);
}
static Object* Runtime_DebugPrintScopes(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 0);
#ifdef DEBUG
// Print the scopes for the top frame.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
for (ScopeIterator it(frame); !it.Done(); it.Next()) {
it.DebugPrint();
}
#endif
return Heap::undefined_value();
}
static Object* Runtime_GetCFrames(Arguments args) { static Object* Runtime_GetCFrames(Arguments args) {
HandleScope scope; HandleScope scope;
ASSERT(args.length() == 1); ASSERT(args.length() == 1);
@ -6568,54 +6962,17 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
ASSERT(go_between_sinfo.number_of_context_slots() == 0); ASSERT(go_between_sinfo.number_of_context_slots() == 0);
#endif #endif
// Allocate and initialize a context extension object with all the // Materialize the content of the local scope into a JSObject.
// arguments, stack locals heap locals and extension properties of the Handle<JSObject> local_scope = MaterializeLocalScope(frame);
// debugged function.
Handle<JSObject> context_ext = Factory::NewJSObject(Top::object_function());
// First fill all parameters to the context extension.
for (int i = 0; i < sinfo.number_of_parameters(); ++i) {
SetProperty(context_ext,
sinfo.parameter_name(i),
Handle<Object>(frame->GetParameter(i)), NONE);
}
// Second fill all stack locals to the context extension.
for (int i = 0; i < sinfo.number_of_stack_slots(); i++) {
SetProperty(context_ext,
sinfo.stack_slot_name(i),
Handle<Object>(frame->GetExpression(i)), NONE);
}
// Third fill all context locals to the context extension.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->fcontext());
for (int i = Context::MIN_CONTEXT_SLOTS;
i < sinfo.number_of_context_slots();
++i) {
int context_index =
ScopeInfo<>::ContextSlotIndex(*code, *sinfo.context_slot_name(i), NULL);
SetProperty(context_ext,
sinfo.context_slot_name(i),
Handle<Object>(function_context->get(context_index)), NONE);
}
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
if (function_context->has_extension() &&
!function_context->IsGlobalContext()) {
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
ASSERT(keys->get(i)->IsString());
Handle<String> key(String::cast(keys->get(i)));
SetProperty(context_ext, key, GetProperty(ext, key), NONE);
}
}
// Allocate a new context for the debug evaluation and set the extension // Allocate a new context for the debug evaluation and set the extension
// object build. // object build.
Handle<Context> context = Handle<Context> context =
Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between); Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
context->set_extension(*context_ext); context->set_extension(*local_scope);
// Copy any with contexts present and chain them in front of this context. // Copy any with contexts present and chain them in front of this context.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->fcontext());
context = CopyWithContextChain(frame_context, context); context = CopyWithContextChain(frame_context, context);
// Wrap the evaluation statement in a new function compiled in the newly // Wrap the evaluation statement in a new function compiled in the newly
@ -6657,6 +7014,13 @@ static Object* Runtime_DebugEvaluate(Arguments args) {
Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver, Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
argc, argv, &has_pending_exception); argc, argv, &has_pending_exception);
if (has_pending_exception) return Failure::Exception(); if (has_pending_exception) return Failure::Exception();
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
if (result->IsJSGlobalProxy()) {
result = Handle<JSObject>(JSObject::cast(result->GetPrototype()));
}
return *result; return *result;
} }

4
deps/v8/src/runtime.h

@ -135,7 +135,6 @@ namespace internal {
F(Math_floor, 1) \ F(Math_floor, 1) \
F(Math_log, 1) \ F(Math_log, 1) \
F(Math_pow, 2) \ F(Math_pow, 2) \
F(Math_random, 0) \
F(Math_round, 1) \ F(Math_round, 1) \
F(Math_sin, 1) \ F(Math_sin, 1) \
F(Math_sqrt, 1) \ F(Math_sqrt, 1) \
@ -288,6 +287,9 @@ namespace internal {
F(CheckExecutionState, 1) \ F(CheckExecutionState, 1) \
F(GetFrameCount, 1) \ F(GetFrameCount, 1) \
F(GetFrameDetails, 2) \ F(GetFrameDetails, 2) \
F(GetScopeCount, 2) \
F(GetScopeDetails, 3) \
F(DebugPrintScopes, 0) \
F(GetCFrames, 1) \ F(GetCFrames, 1) \
F(GetThreadCount, 1) \ F(GetThreadCount, 1) \
F(GetThreadDetails, 2) \ F(GetThreadDetails, 2) \

108
deps/v8/src/runtime.js

@ -160,40 +160,48 @@ function ADD(x) {
// Left operand (this) is already a string. // Left operand (this) is already a string.
function STRING_ADD_LEFT(x) { function STRING_ADD_LEFT(y) {
x = %ToString(%ToPrimitive(x, NO_HINT)); if (!IS_STRING(y)) y = %ToString(%ToPrimitive(y, NO_HINT));
return %StringAdd(this, x); return %StringAdd(this, y);
} }
// Right operand (x) is already a string. // Right operand (y) is already a string.
function STRING_ADD_RIGHT(x) { function STRING_ADD_RIGHT(y) {
var a = %ToString(%ToPrimitive(this, NO_HINT)); var x = IS_STRING(this) ? this : %ToString(%ToPrimitive(this, NO_HINT));
return %StringAdd(a, x); return %StringAdd(x, y);
} }
// ECMA-262, section 11.6.2, page 50. // ECMA-262, section 11.6.2, page 50.
function SUB(x) { function SUB(y) {
return %NumberSub(%ToNumber(this), %ToNumber(x)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
if (!IS_NUMBER(y)) y = %ToNumber(y);
return %NumberSub(x, y);
} }
// ECMA-262, section 11.5.1, page 48. // ECMA-262, section 11.5.1, page 48.
function MUL(x) { function MUL(y) {
return %NumberMul(%ToNumber(this), %ToNumber(x)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
if (!IS_NUMBER(y)) y = %ToNumber(y);
return %NumberMul(x, y);
} }
// ECMA-262, section 11.5.2, page 49. // ECMA-262, section 11.5.2, page 49.
function DIV(x) { function DIV(y) {
return %NumberDiv(%ToNumber(this), %ToNumber(x)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
if (!IS_NUMBER(y)) y = %ToNumber(y);
return %NumberDiv(x, y);
} }
// ECMA-262, section 11.5.3, page 49. // ECMA-262, section 11.5.3, page 49.
function MOD(x) { function MOD(y) {
return %NumberMod(%ToNumber(this), %ToNumber(x)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
if (!IS_NUMBER(y)) y = %ToNumber(y);
return %NumberMod(x, y);
} }
@ -204,50 +212,92 @@ function MOD(x) {
*/ */
// ECMA-262, section 11.10, page 57. // ECMA-262, section 11.10, page 57.
function BIT_OR(x) { function BIT_OR(y) {
return %NumberOr(%ToNumber(this), %ToNumber(x)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
if (!IS_NUMBER(y)) y = %ToNumber(y);
return %NumberOr(x, y);
} }
// ECMA-262, section 11.10, page 57. // ECMA-262, section 11.10, page 57.
function BIT_AND(x) { function BIT_AND(y) {
return %NumberAnd(%ToNumber(this), %ToNumber(x)); var x;
if (IS_NUMBER(this)) {
x = this;
if (!IS_NUMBER(y)) y = %ToNumber(y);
} else {
x = %ToNumber(this);
// Make sure to convert the right operand to a number before
// bailing out in the fast case, but after converting the
// left operand. This ensures that valueOf methods on the right
// operand are always executed.
if (!IS_NUMBER(y)) y = %ToNumber(y);
// Optimize for the case where we end up AND'ing a value
// that doesn't convert to a number. This is common in
// certain benchmarks.
if (NUMBER_IS_NAN(x)) return 0;
}
return %NumberAnd(x, y);
} }
// ECMA-262, section 11.10, page 57. // ECMA-262, section 11.10, page 57.
function BIT_XOR(x) { function BIT_XOR(y) {
return %NumberXor(%ToNumber(this), %ToNumber(x)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
if (!IS_NUMBER(y)) y = %ToNumber(y);
return %NumberXor(x, y);
} }
// ECMA-262, section 11.4.7, page 47. // ECMA-262, section 11.4.7, page 47.
function UNARY_MINUS() { function UNARY_MINUS() {
return %NumberUnaryMinus(%ToNumber(this)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
return %NumberUnaryMinus(x);
} }
// ECMA-262, section 11.4.8, page 48. // ECMA-262, section 11.4.8, page 48.
function BIT_NOT() { function BIT_NOT() {
return %NumberNot(%ToNumber(this)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
return %NumberNot(x);
} }
// ECMA-262, section 11.7.1, page 51. // ECMA-262, section 11.7.1, page 51.
function SHL(x) { function SHL(y) {
return %NumberShl(%ToNumber(this), %ToNumber(x)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
if (!IS_NUMBER(y)) y = %ToNumber(y);
return %NumberShl(x, y);
} }
// ECMA-262, section 11.7.2, page 51. // ECMA-262, section 11.7.2, page 51.
function SAR(x) { function SAR(y) {
return %NumberSar(%ToNumber(this), %ToNumber(x)); var x;
if (IS_NUMBER(this)) {
x = this;
if (!IS_NUMBER(y)) y = %ToNumber(y);
} else {
x = %ToNumber(this);
// Make sure to convert the right operand to a number before
// bailing out in the fast case, but after converting the
// left operand. This ensures that valueOf methods on the right
// operand are always executed.
if (!IS_NUMBER(y)) y = %ToNumber(y);
// Optimize for the case where we end up shifting a value
// that doesn't convert to a number. This is common in
// certain benchmarks.
if (NUMBER_IS_NAN(x)) return 0;
}
return %NumberSar(x, y);
} }
// ECMA-262, section 11.7.3, page 52. // ECMA-262, section 11.7.3, page 52.
function SHR(x) { function SHR(y) {
return %NumberShr(%ToNumber(this), %ToNumber(x)); var x = IS_NUMBER(this) ? this : %ToNumber(this);
if (!IS_NUMBER(y)) y = %ToNumber(y);
return %NumberShr(x, y);
} }

32
deps/v8/src/serialize.cc

@ -450,20 +450,26 @@ void ExternalReferenceTable::AddFromId(TypeCode type,
const char* name) { const char* name) {
Address address; Address address;
switch (type) { switch (type) {
case C_BUILTIN: case C_BUILTIN: {
address = Builtins::c_function_address( ExternalReference ref(static_cast<Builtins::CFunctionId>(id));
static_cast<Builtins::CFunctionId>(id)); address = ref.address();
break; break;
case BUILTIN: }
address = Builtins::builtin_address(static_cast<Builtins::Name>(id)); case BUILTIN: {
ExternalReference ref(static_cast<Builtins::Name>(id));
address = ref.address();
break; break;
case RUNTIME_FUNCTION: }
address = Runtime::FunctionForId( case RUNTIME_FUNCTION: {
static_cast<Runtime::FunctionId>(id))->entry; ExternalReference ref(static_cast<Runtime::FunctionId>(id));
address = ref.address();
break; break;
case IC_UTILITY: }
address = IC::AddressFromUtilityId(static_cast<IC::UtilityId>(id)); case IC_UTILITY: {
ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)));
address = ref.address();
break; break;
}
default: default:
UNREACHABLE(); UNREACHABLE();
return; return;
@ -642,10 +648,14 @@ void ExternalReferenceTable::PopulateTable() {
"StubCache::secondary_->value"); "StubCache::secondary_->value");
// Runtime entries // Runtime entries
Add(FUNCTION_ADDR(Runtime::PerformGC), Add(ExternalReference::perform_gc_function().address(),
RUNTIME_ENTRY, RUNTIME_ENTRY,
1, 1,
"Runtime::PerformGC"); "Runtime::PerformGC");
Add(ExternalReference::random_positive_smi_function().address(),
RUNTIME_ENTRY,
2,
"V8::RandomPositiveSmi");
// Miscellaneous // Miscellaneous
Add(ExternalReference::builtin_passed_function().address(), Add(ExternalReference::builtin_passed_function().address(),

60
deps/v8/src/stub-cache.cc

@ -103,7 +103,7 @@ Object* StubCache::ComputeLoadField(String* name,
LoadStubCompiler compiler; LoadStubCompiler compiler;
code = compiler.CompileLoadField(receiver, holder, field_index, name); code = compiler.CompileLoadField(receiver, holder, field_index, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("LoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code; if (result->IsFailure()) return code;
} }
@ -122,7 +122,7 @@ Object* StubCache::ComputeLoadCallback(String* name,
LoadStubCompiler compiler; LoadStubCompiler compiler;
code = compiler.CompileLoadCallback(receiver, holder, callback, name); code = compiler.CompileLoadCallback(receiver, holder, callback, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("LoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code; if (result->IsFailure()) return code;
} }
@ -141,7 +141,7 @@ Object* StubCache::ComputeLoadConstant(String* name,
LoadStubCompiler compiler; LoadStubCompiler compiler;
code = compiler.CompileLoadConstant(receiver, holder, value, name); code = compiler.CompileLoadConstant(receiver, holder, value, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("LoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code; if (result->IsFailure()) return code;
} }
@ -158,7 +158,7 @@ Object* StubCache::ComputeLoadInterceptor(String* name,
LoadStubCompiler compiler; LoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name); code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("LoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return code; if (result->IsFailure()) return code;
} }
@ -182,7 +182,7 @@ Object* StubCache::ComputeKeyedLoadField(String* name,
KeyedLoadStubCompiler compiler; KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadField(name, receiver, holder, field_index); code = compiler.CompileLoadField(name, receiver, holder, field_index);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -201,7 +201,7 @@ Object* StubCache::ComputeKeyedLoadConstant(String* name,
KeyedLoadStubCompiler compiler; KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadConstant(name, receiver, holder, value); code = compiler.CompileLoadConstant(name, receiver, holder, value);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -219,7 +219,7 @@ Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
KeyedLoadStubCompiler compiler; KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadInterceptor(receiver, holder, name); code = compiler.CompileLoadInterceptor(receiver, holder, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -238,7 +238,7 @@ Object* StubCache::ComputeKeyedLoadCallback(String* name,
KeyedLoadStubCompiler compiler; KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadCallback(name, receiver, holder, callback); code = compiler.CompileLoadCallback(name, receiver, holder, callback);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -256,7 +256,7 @@ Object* StubCache::ComputeKeyedLoadArrayLength(String* name,
KeyedLoadStubCompiler compiler; KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadArrayLength(name); code = compiler.CompileLoadArrayLength(name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -273,7 +273,7 @@ Object* StubCache::ComputeKeyedLoadStringLength(String* name,
KeyedLoadStubCompiler compiler; KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadStringLength(name); code = compiler.CompileLoadStringLength(name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -290,7 +290,7 @@ Object* StubCache::ComputeKeyedLoadFunctionPrototype(String* name,
KeyedLoadStubCompiler compiler; KeyedLoadStubCompiler compiler;
code = compiler.CompileLoadFunctionPrototype(name); code = compiler.CompileLoadFunctionPrototype(name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -309,7 +309,7 @@ Object* StubCache::ComputeStoreField(String* name,
StoreStubCompiler compiler; StoreStubCompiler compiler;
code = compiler.CompileStoreField(receiver, field_index, transition, name); code = compiler.CompileStoreField(receiver, field_index, transition, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("StoreIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -327,7 +327,7 @@ Object* StubCache::ComputeStoreCallback(String* name,
StoreStubCompiler compiler; StoreStubCompiler compiler;
code = compiler.CompileStoreCallback(receiver, callback, name); code = compiler.CompileStoreCallback(receiver, callback, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("StoreIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -344,7 +344,7 @@ Object* StubCache::ComputeStoreInterceptor(String* name,
StoreStubCompiler compiler; StoreStubCompiler compiler;
code = compiler.CompileStoreInterceptor(receiver, name); code = compiler.CompileStoreInterceptor(receiver, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("StoreIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -361,7 +361,7 @@ Object* StubCache::ComputeKeyedStoreField(String* name, JSObject* receiver,
KeyedStoreStubCompiler compiler; KeyedStoreStubCompiler compiler;
code = compiler.CompileStoreField(receiver, field_index, transition, name); code = compiler.CompileStoreField(receiver, field_index, transition, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("KeyedStoreIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -412,7 +412,7 @@ Object* StubCache::ComputeCallConstant(int argc,
CallStubCompiler compiler(argc); CallStubCompiler compiler(argc);
code = compiler.CompileCallConstant(object, holder, function, check, flags); code = compiler.CompileCallConstant(object, holder, function, check, flags);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("CallIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code)); Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -445,7 +445,7 @@ Object* StubCache::ComputeCallField(int argc,
CallStubCompiler compiler(argc); CallStubCompiler compiler(argc);
code = compiler.CompileCallField(object, holder, index, name, flags); code = compiler.CompileCallField(object, holder, index, name, flags);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("CallIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code)); Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -478,7 +478,7 @@ Object* StubCache::ComputeCallInterceptor(int argc,
CallStubCompiler compiler(argc); CallStubCompiler compiler(argc);
code = compiler.CompileCallInterceptor(object, holder, name); code = compiler.CompileCallInterceptor(object, holder, name);
if (code->IsFailure()) return code; if (code->IsFailure()) return code;
LOG(CodeCreateEvent("CallIC", Code::cast(code), name)); LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
Object* result = map->UpdateCodeCache(name, Code::cast(code)); Object* result = map->UpdateCodeCache(name, Code::cast(code));
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
} }
@ -632,7 +632,8 @@ Object* StubCache::ComputeLazyCompile(int argc) {
if (result->IsCode()) { if (result->IsCode()) {
Code* code = Code::cast(result); Code* code = Code::cast(result);
USE(code); USE(code);
LOG(CodeCreateEvent("LazyCompile", code, code->arguments_count())); LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
code, code->arguments_count()));
} }
return result; return result;
} }
@ -780,7 +781,8 @@ Object* StubCompiler::CompileCallInitialize(Code::Flags flags) {
Counters::call_initialize_stubs.Increment(); Counters::call_initialize_stubs.Increment();
Code* code = Code::cast(result); Code* code = Code::cast(result);
USE(code); USE(code);
LOG(CodeCreateEvent("CallInitialize", code, code->arguments_count())); LOG(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
code, code->arguments_count()));
} }
return result; return result;
} }
@ -795,7 +797,8 @@ Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
Counters::call_premonomorphic_stubs.Increment(); Counters::call_premonomorphic_stubs.Increment();
Code* code = Code::cast(result); Code* code = Code::cast(result);
USE(code); USE(code);
LOG(CodeCreateEvent("CallPreMonomorphic", code, code->arguments_count())); LOG(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
code, code->arguments_count()));
} }
return result; return result;
} }
@ -810,7 +813,8 @@ Object* StubCompiler::CompileCallNormal(Code::Flags flags) {
Counters::call_normal_stubs.Increment(); Counters::call_normal_stubs.Increment();
Code* code = Code::cast(result); Code* code = Code::cast(result);
USE(code); USE(code);
LOG(CodeCreateEvent("CallNormal", code, code->arguments_count())); LOG(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
code, code->arguments_count()));
} }
return result; return result;
} }
@ -825,7 +829,8 @@ Object* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
Counters::call_megamorphic_stubs.Increment(); Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result); Code* code = Code::cast(result);
USE(code); USE(code);
LOG(CodeCreateEvent("CallMegamorphic", code, code->arguments_count())); LOG(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
code, code->arguments_count()));
} }
return result; return result;
} }
@ -840,7 +845,7 @@ Object* StubCompiler::CompileCallMiss(Code::Flags flags) {
Counters::call_megamorphic_stubs.Increment(); Counters::call_megamorphic_stubs.Increment();
Code* code = Code::cast(result); Code* code = Code::cast(result);
USE(code); USE(code);
LOG(CodeCreateEvent("CallMiss", code, code->arguments_count())); LOG(CodeCreateEvent(Logger::CALL_MISS_TAG, code, code->arguments_count()));
} }
return result; return result;
} }
@ -854,7 +859,8 @@ Object* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
if (!result->IsFailure()) { if (!result->IsFailure()) {
Code* code = Code::cast(result); Code* code = Code::cast(result);
USE(code); USE(code);
LOG(CodeCreateEvent("CallDebugBreak", code, code->arguments_count())); LOG(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
code, code->arguments_count()));
} }
return result; return result;
} }
@ -870,8 +876,8 @@ Object* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
if (!result->IsFailure()) { if (!result->IsFailure()) {
Code* code = Code::cast(result); Code* code = Code::cast(result);
USE(code); USE(code);
LOG(CodeCreateEvent("CallDebugPrepareStepIn", code, LOG(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
code->arguments_count())); code, code->arguments_count()));
} }
return result; return result;
} }

5
deps/v8/src/utils.h

@ -362,6 +362,11 @@ class Vector {
Sort(PointerValueCompare<T>); Sort(PointerValueCompare<T>);
} }
void Truncate(int length) {
ASSERT(length <= length_);
length_ = length;
}
// Releases the array underlying this vector. Once disposed the // Releases the array underlying this vector. Once disposed the
// vector is empty. // vector is empty.
void Dispose() { void Dispose() {

2
deps/v8/src/v8-counters.h

@ -131,6 +131,8 @@ namespace internal {
SC(named_load_inline, V8.NamedLoadInline) \ SC(named_load_inline, V8.NamedLoadInline) \
SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \ SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \
SC(keyed_store_field, V8.KeyedStoreField) \ SC(keyed_store_field, V8.KeyedStoreField) \
SC(keyed_store_inline, V8.KeyedStoreInline) \
SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \
SC(for_in, V8.ForIn) \ SC(for_in, V8.ForIn) \
SC(enum_cache_hits, V8.EnumCacheHits) \ SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \ SC(enum_cache_misses, V8.EnumCacheMisses) \

37
deps/v8/src/v8.cc

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved. // Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -33,6 +33,10 @@
#include "stub-cache.h" #include "stub-cache.h"
#include "oprofile-agent.h" #include "oprofile-agent.h"
#if V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#endif
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -62,6 +66,11 @@ bool V8::Initialize(Deserializer *des) {
// Setup the platform OS support. // Setup the platform OS support.
OS::Setup(); OS::Setup();
// Initialize other runtime facilities
#if !V8_HOST_ARCH_ARM && V8_TARGET_ARCH_ARM
::assembler::arm::Simulator::Initialize();
#endif
// Setup the object heap // Setup the object heap
ASSERT(!Heap::HasBeenSetup()); ASSERT(!Heap::HasBeenSetup());
if (!Heap::Setup(create_heap_objects)) { if (!Heap::Setup(create_heap_objects)) {
@ -69,7 +78,6 @@ bool V8::Initialize(Deserializer *des) {
return false; return false;
} }
// Initialize other runtime facilities
Bootstrapper::Initialize(create_heap_objects); Bootstrapper::Initialize(create_heap_objects);
Builtins::Setup(create_heap_objects); Builtins::Setup(create_heap_objects);
Top::Initialize(); Top::Initialize();
@ -130,4 +138,29 @@ void V8::TearDown() {
} }
uint32_t V8::Random() {
// Random number generator using George Marsaglia's MWC algorithm.
static uint32_t hi = 0;
static uint32_t lo = 0;
// Initialize seed using the system random(). If one of the seeds
// should ever become zero again, or if random() returns zero, we
// avoid getting stuck with zero bits in hi or lo by re-initializing
// them on demand.
if (hi == 0) hi = random();
if (lo == 0) lo = random();
// Mix the bits.
hi = 36969 * (hi & 0xFFFF) + (hi >> 16);
lo = 18273 * (lo & 0xFFFF) + (lo >> 16);
return (hi << 16) + (lo & 0xFFFF);
}
Smi* V8::RandomPositiveSmi() {
uint32_t random = Random();
ASSERT(IsPowerOf2(Smi::kMaxValue + 1));
return Smi::FromInt(random & Smi::kMaxValue);
}
} } // namespace v8::internal } } // namespace v8::internal

13
deps/v8/src/v8.h

@ -80,10 +80,10 @@ class V8 : public AllStatic {
public: public:
// Global actions. // Global actions.
// If Initialize is called with des == NULL, the // If Initialize is called with des == NULL, the initial state is
// initial state is created from scratch. If a non-null Deserializer // created from scratch. If a non-null Deserializer is given, the
// is given, the initial state is created by reading the // initial state is created by reading the deserialized data into an
// deserialized data into an empty heap. // empty heap.
static bool Initialize(Deserializer* des); static bool Initialize(Deserializer* des);
static void TearDown(); static void TearDown();
static bool IsRunning() { return is_running_; } static bool IsRunning() { return is_running_; }
@ -93,6 +93,11 @@ class V8 : public AllStatic {
// Report process out of memory. Implementation found in api.cc. // Report process out of memory. Implementation found in api.cc.
static void FatalProcessOutOfMemory(const char* location); static void FatalProcessOutOfMemory(const char* location);
// Random number generation support. Not cryptographically safe.
static uint32_t Random();
static Smi* RandomPositiveSmi();
private: private:
// True if engine is currently running // True if engine is currently running
static bool is_running_; static bool is_running_;

4
deps/v8/src/version.cc

@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script. // cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1 #define MAJOR_VERSION 1
#define MINOR_VERSION 2 #define MINOR_VERSION 2
#define BUILD_NUMBER 7 #define BUILD_NUMBER 8
#define PATCH_LEVEL 0 #define PATCH_LEVEL 1
#define CANDIDATE_VERSION false #define CANDIDATE_VERSION false
// Define SONAME to have the SCons build the put a specific SONAME into the // Define SONAME to have the SCons build the put a specific SONAME into the

28
deps/v8/src/x64/assembler-x64-inl.h

@ -151,11 +151,6 @@ void RelocInfo::apply(int delta) {
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) { if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
intptr_t* p = reinterpret_cast<intptr_t*>(pc_); intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
*p -= delta; // relocate entry *p -= delta; // relocate entry
} else if (rmode_ == JS_RETURN && IsCallInstruction()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
intptr_t* p = reinterpret_cast<intptr_t*>(pc_ + 1);
*p -= delta; // relocate entry
} else if (IsInternalReference(rmode_)) { } else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object. // absolute code pointer inside code object moves with the code object.
intptr_t* p = reinterpret_cast<intptr_t*>(pc_); intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
@ -249,27 +244,9 @@ Object** RelocInfo::call_object_address() {
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// Implementation of Operand // Implementation of Operand
Operand::Operand(Register base, int32_t disp) {
len_ = 1;
if (base.is(rsp) || base.is(r12)) {
// SIB byte is needed to encode (rsp + offset) or (r12 + offset).
set_sib(kTimes1, rsp, base);
}
if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
set_modrm(0, rsp);
} else if (is_int8(disp)) {
set_modrm(1, base);
set_disp8(disp);
} else {
set_modrm(2, base);
set_disp32(disp);
}
}
void Operand::set_modrm(int mod, Register rm) { void Operand::set_modrm(int mod, Register rm) {
ASSERT((mod & -4) == 0); ASSERT((mod & -4) == 0);
buf_[0] = mod << 6 | (rm.code() & 0x7); buf_[0] = (mod << 6) | (rm.code() & 0x7);
// Set REX.B to the high bit of rm.code(). // Set REX.B to the high bit of rm.code().
rex_ |= (rm.code() >> 3); rex_ |= (rm.code() >> 3);
} }
@ -278,7 +255,8 @@ void Operand::set_modrm(int mod, Register rm) {
void Operand::set_sib(ScaleFactor scale, Register index, Register base) { void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
ASSERT(len_ == 1); ASSERT(len_ == 1);
ASSERT(is_uint2(scale)); ASSERT(is_uint2(scale));
// Use SIB with no index register only for base rsp or r12. // Use SIB with no index register only for base rsp or r12. Otherwise we
// would skip the SIB byte entirely.
ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12)); ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
buf_[1] = scale << 6 | (index.code() & 0x7) << 3 | (base.code() & 0x7); buf_[1] = scale << 6 | (index.code() & 0x7) << 3 | (base.code() & 0x7);
rex_ |= (index.code() >> 3) << 1 | base.code() >> 3; rex_ |= (index.code() >> 3) << 1 | base.code() >> 3;

314
deps/v8/src/x64/assembler-x64.cc

@ -72,7 +72,49 @@ XMMRegister xmm13 = { 13 };
XMMRegister xmm14 = { 14 }; XMMRegister xmm14 = { 14 };
XMMRegister xmm15 = { 15 }; XMMRegister xmm15 = { 15 };
Operand::Operand(Register base, int32_t disp): rex_(0) {
len_ = 1;
if (base.is(rsp) || base.is(r12)) {
// SIB byte is needed to encode (rsp + offset) or (r12 + offset).
set_sib(kTimes1, rsp, base);
}
if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
set_modrm(0, base);
} else if (is_int8(disp)) {
set_modrm(1, base);
set_disp8(disp);
} else {
set_modrm(2, base);
set_disp32(disp);
}
}
Operand::Operand(Register base,
Register index,
ScaleFactor scale,
int32_t disp): rex_(0) {
ASSERT(!index.is(rsp));
len_ = 1;
set_sib(scale, index, base);
if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
// This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
// possibly set by set_sib.
set_modrm(0, rsp);
} else if (is_int8(disp)) {
set_modrm(1, rsp);
set_disp8(disp);
} else {
set_modrm(2, rsp);
set_disp32(disp);
}
}
// Safe default is no features. // Safe default is no features.
// TODO(X64): Safe defaults include SSE2 for X64.
uint64_t CpuFeatures::supported_ = 0; uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::enabled_ = 0; uint64_t CpuFeatures::enabled_ = 0;
@ -140,7 +182,8 @@ void CpuFeatures::Probe() {
Object* code = Object* code =
Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL); Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
if (!code->IsCode()) return; if (!code->IsCode()) return;
LOG(CodeCreateEvent("Builtin", Code::cast(code), "CpuFeatures::Probe")); LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)(); typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry()); F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe(); supported_ = probe();
@ -398,16 +441,47 @@ void Assembler::immediate_arithmetic_op(byte subcode,
emit_rex_64(dst); emit_rex_64(dst);
if (is_int8(src.value_)) { if (is_int8(src.value_)) {
emit(0x83); emit(0x83);
emit_operand(Register::toRegister(subcode), dst); emit_operand(subcode, dst);
emit(src.value_);
} else {
emit(0x81);
emit_operand(subcode, dst);
emitl(src.value_);
}
}
void Assembler::immediate_arithmetic_op_32(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
if (is_int8(src.value_)) {
emit(0x83);
emit_operand(subcode, dst);
emit(src.value_); emit(src.value_);
} else { } else {
emit(0x81); emit(0x81);
emit_operand(Register::toRegister(subcode), dst); emit_operand(subcode, dst);
emitl(src.value_); emitl(src.value_);
} }
} }
void Assembler::immediate_arithmetic_op_8(byte subcode,
const Operand& dst,
Immediate src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
ASSERT(is_int8(src.value_));
emit(0x80);
emit_operand(subcode, dst);
emit(src.value_);
}
void Assembler::shift(Register dst, Immediate shift_amount, int subcode) { void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -486,14 +560,6 @@ void Assembler::call(Register adr) {
emit_modrm(0x2, adr); emit_modrm(0x2, adr);
} }
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x0F);
emit(0xA2);
}
void Assembler::call(const Operand& op) { void Assembler::call(const Operand& op) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
@ -505,6 +571,15 @@ void Assembler::call(const Operand& op) {
} }
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x0F);
emit(0xA2);
}
void Assembler::cqo() { void Assembler::cqo() {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -513,7 +588,7 @@ void Assembler::cqo() {
} }
void Assembler::dec(Register dst) { void Assembler::decq(Register dst) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
emit_rex_64(dst); emit_rex_64(dst);
@ -522,7 +597,7 @@ void Assembler::dec(Register dst) {
} }
void Assembler::dec(const Operand& dst) { void Assembler::decq(const Operand& dst) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
emit_rex_64(dst); emit_rex_64(dst);
@ -531,6 +606,15 @@ void Assembler::dec(const Operand& dst) {
} }
void Assembler::decl(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_operand(1, dst);
}
void Assembler::enter(Immediate size) { void Assembler::enter(Immediate size) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -582,7 +666,7 @@ void Assembler::imul(Register dst, Register src, Immediate imm) {
} }
void Assembler::inc(Register dst) { void Assembler::incq(Register dst) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
emit_rex_64(dst); emit_rex_64(dst);
@ -591,7 +675,7 @@ void Assembler::inc(Register dst) {
} }
void Assembler::inc(const Operand& dst) { void Assembler::incq(const Operand& dst) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
emit_rex_64(dst); emit_rex_64(dst);
@ -600,6 +684,15 @@ void Assembler::inc(const Operand& dst) {
} }
void Assembler::incl(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xFF);
emit_operand(0, dst);
}
void Assembler::int3() { void Assembler::int3() {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -768,6 +861,16 @@ void Assembler::movl(const Operand& dst, Register src) {
} }
void Assembler::movl(const Operand& dst, Immediate value) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
emit(0xC7);
emit_operand(0x0, dst);
emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
}
void Assembler::movl(Register dst, Immediate value) { void Assembler::movl(Register dst, Immediate value) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -843,6 +946,31 @@ void Assembler::movq(Register dst, ExternalReference ref) {
} }
void Assembler::movq(const Operand& dst, Immediate value) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
emit(0xC7);
emit_operand(0, dst);
emit(value);
}
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(!Heap::InNewSpace(*value));
emit_rex_64(dst);
emit(0xB8 | dst.code() & 0x7);
if (value->IsHeapObject()) {
emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
} else {
ASSERT_EQ(RelocInfo::NONE, mode);
emitq(reinterpret_cast<uintptr_t>(*value), RelocInfo::NONE);
}
}
void Assembler::mul(Register src) { void Assembler::mul(Register src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -1063,6 +1191,13 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
} }
} }
void Assembler::rdtsc() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit(0x0F);
emit(0x31);
}
void Assembler::ret(int imm16) { void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
@ -1078,6 +1213,19 @@ void Assembler::ret(int imm16) {
} }
void Assembler::setcc(Condition cc, Register reg) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(0 <= cc && cc < 16);
if (reg.code() > 3) { // Use x64 byte registers, where different.
emit_rex_32(reg);
}
emit(0x0F);
emit(0x90 | cc);
emit_modrm(0x0, reg);
}
void Assembler::shld(Register dst, Register src) { void Assembler::shld(Register dst, Register src) {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -1128,6 +1276,7 @@ void Assembler::store_rax(ExternalReference ref) {
void Assembler::testb(Register reg, Immediate mask) { void Assembler::testb(Register reg, Immediate mask) {
ASSERT(is_int8(mask.value_));
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
if (reg.is(rax)) { if (reg.is(rax)) {
@ -1146,6 +1295,7 @@ void Assembler::testb(Register reg, Immediate mask) {
void Assembler::testb(const Operand& op, Immediate mask) { void Assembler::testb(const Operand& op, Immediate mask) {
ASSERT(is_int8(mask.value_));
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
emit_optional_rex_32(rax, op); emit_optional_rex_32(rax, op);
@ -1198,6 +1348,22 @@ void Assembler::testq(Register dst, Register src) {
} }
void Assembler::testq(Register dst, Immediate mask) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (dst.is(rax)) {
emit_rex_64();
emit(0xA9);
emit(mask);
} else {
emit_rex_64(dst);
emit(0xF7);
emit_modrm(0, dst);
emit(mask);
}
}
// Relocation information implementations // Relocation information implementations
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
@ -1360,19 +1526,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* a,
return NULL; return NULL;
} }
void JumpTarget::DoBind() {
StackFrame::Type ExitFrame::GetStateForFramePointer(unsigned char* a,
StackFrame::State* b) {
// TODO(X64): UNIMPLEMENTED
return NONE;
}
int JavaScriptFrame::GetProvidedParametersCount() const {
UNIMPLEMENTED();
return 0;
}
void JumpTarget::DoBind(int a) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
@ -1384,7 +1538,6 @@ void JumpTarget::DoJump() {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
Object* LoadStubCompiler::CompileLoadCallback(JSObject* a, Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
JSObject* b, JSObject* b,
AccessorInfo* c, AccessorInfo* c,
@ -1416,11 +1569,6 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
return NULL; return NULL;
} }
StackFrame::Type StackFrame::ComputeType(StackFrame::State* a) {
UNIMPLEMENTED();
return NONE;
}
Object* StoreStubCompiler::CompileStoreCallback(JSObject* a, Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
AccessorInfo* b, AccessorInfo* b,
String* c) { String* c) {
@ -1446,102 +1594,4 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags a) {
return NULL; return NULL;
} }
void VirtualFrame::Drop(int a) {
UNIMPLEMENTED();
}
int VirtualFrame::InvalidateFrameSlotAt(int a) {
UNIMPLEMENTED();
return -1;
}
void VirtualFrame::MergeTo(VirtualFrame* a) {
UNIMPLEMENTED();
}
Result VirtualFrame::Pop() {
UNIMPLEMENTED();
return Result(NULL);
}
Result VirtualFrame::RawCallStub(CodeStub* a) {
UNIMPLEMENTED();
return Result(NULL);
}
void VirtualFrame::SyncElementBelowStackPointer(int a) {
UNIMPLEMENTED();
}
void VirtualFrame::SyncElementByPushing(int a) {
UNIMPLEMENTED();
}
void VirtualFrame::SyncRange(int a, int b) {
UNIMPLEMENTED();
}
VirtualFrame::VirtualFrame() : elements_(0) {
UNIMPLEMENTED();
}
byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
UNIMPLEMENTED();
return NULL;
}
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateLog(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void ExitFrame::Iterate(ObjectVisitor* a) const {
UNIMPLEMENTED();
}
byte* InternalFrame::GetCallerStackPointer() const {
UNIMPLEMENTED();
return NULL;
}
byte* JavaScriptFrame::GetCallerStackPointer() const {
UNIMPLEMENTED();
return NULL;
}
} } // namespace v8::internal } } // namespace v8::internal

127
deps/v8/src/x64/assembler-x64.h

@ -89,11 +89,11 @@ struct Register {
return code_; return code_;
} }
int bit() const { int bit() const {
UNIMPLEMENTED(); return 1 << code_;
return 0;
} }
// (unfortunately we can't make this private in a struct) // (unfortunately we can't make this private in a struct when initializing
// by assignment.)
int code_; int code_;
}; };
@ -250,7 +250,7 @@ enum ScaleFactor {
class Operand BASE_EMBEDDED { class Operand BASE_EMBEDDED {
public: public:
// [base + disp/r] // [base + disp/r]
INLINE(Operand(Register base, int32_t disp)); Operand(Register base, int32_t disp);
// [base + index*scale + disp/r] // [base + index*scale + disp/r]
Operand(Register base, Operand(Register base,
@ -385,7 +385,8 @@ class Assembler : public Malloced {
// //
// If we need versions of an assembly instruction that operate on different // If we need versions of an assembly instruction that operate on different
// width arguments, we add a single-letter suffix specifying the width. // width arguments, we add a single-letter suffix specifying the width.
// This is done for the following instructions: mov, cmp. // This is done for the following instructions: mov, cmp, inc, dec,
// add, sub, and test.
// There are no versions of these instructions without the suffix. // There are no versions of these instructions without the suffix.
// - Instructions on 8-bit (byte) operands/registers have a trailing 'b'. // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
// - Instructions on 16-bit (word) operands/registers have a trailing 'w'. // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
@ -423,10 +424,10 @@ class Assembler : public Malloced {
void movl(Register dst, Register src); void movl(Register dst, Register src);
void movl(Register dst, const Operand& src); void movl(Register dst, const Operand& src);
void movl(const Operand& dst, Register src); void movl(const Operand& dst, Register src);
void movl(const Operand& dst, Immediate imm);
// Load a 32-bit immediate value, zero-extended to 64 bits. // Load a 32-bit immediate value, zero-extended to 64 bits.
void movl(Register dst, Immediate imm32); void movl(Register dst, Immediate imm32);
void movq(Register dst, int32_t imm32);
void movq(Register dst, const Operand& src); void movq(Register dst, const Operand& src);
// Sign extends immediate 32-bit value to 64 bits. // Sign extends immediate 32-bit value to 64 bits.
void movq(Register dst, Immediate x); void movq(Register dst, Immediate x);
@ -434,7 +435,8 @@ class Assembler : public Malloced {
// Move 64 bit register value to 64-bit memory location. // Move 64 bit register value to 64-bit memory location.
void movq(const Operand& dst, Register src); void movq(const Operand& dst, Register src);
// Move sign extended immediate to memory location.
void movq(const Operand& dst, Immediate value);
// New x64 instructions to load a 64-bit immediate into a register. // New x64 instructions to load a 64-bit immediate into a register.
// All 64-bit immediates must have a relocation mode. // All 64-bit immediates must have a relocation mode.
void movq(Register dst, void* ptr, RelocInfo::Mode rmode); void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
@ -444,66 +446,63 @@ class Assembler : public Malloced {
void movq(Register dst, ExternalReference ext); void movq(Register dst, ExternalReference ext);
void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode); void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
// New x64 instruction to load from an immediate 64-bit pointer into RAX. // New x64 instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode); void load_rax(void* ptr, RelocInfo::Mode rmode);
void load_rax(ExternalReference ext); void load_rax(ExternalReference ext);
void movsx_b(Register dst, const Operand& src);
void movsx_w(Register dst, const Operand& src);
void movzx_b(Register dst, const Operand& src);
void movzx_w(Register dst, const Operand& src);
// Conditional moves // Conditional moves
void cmov(Condition cc, Register dst, int32_t imm32); // Implement conditional moves here.
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, const Operand& src);
// Exchange two registers // Exchange two registers
void xchg(Register dst, Register src); void xchg(Register dst, Register src);
// Arithmetics // Arithmetics
void add(Register dst, Register src) { void addq(Register dst, Register src) {
arithmetic_op(0x03, dst, src); arithmetic_op(0x03, dst, src);
} }
void add(Register dst, const Operand& src) { void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src); arithmetic_op(0x03, dst, src);
} }
void add(const Operand& dst, Register src) { void addq(const Operand& dst, Register src) {
arithmetic_op(0x01, src, dst); arithmetic_op(0x01, src, dst);
} }
void add(Register dst, Immediate src) { void addq(Register dst, Immediate src) {
immediate_arithmetic_op(0x0, dst, src); immediate_arithmetic_op(0x0, dst, src);
} }
void add(const Operand& dst, Immediate src) { void addq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x0, dst, src); immediate_arithmetic_op(0x0, dst, src);
} }
void cmp(Register dst, Register src) { void addl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x0, dst, src);
}
void cmpb(const Operand& dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
void cmpq(Register dst, Register src) {
arithmetic_op(0x3B, dst, src); arithmetic_op(0x3B, dst, src);
} }
void cmp(Register dst, const Operand& src) { void cmpq(Register dst, const Operand& src) {
arithmetic_op(0x3B, dst, src); arithmetic_op(0x3B, dst, src);
} }
void cmp(const Operand& dst, Register src) { void cmpq(const Operand& dst, Register src) {
arithmetic_op(0x39, src, dst); arithmetic_op(0x39, src, dst);
} }
void cmp(Register dst, Immediate src) { void cmpq(Register dst, Immediate src) {
immediate_arithmetic_op(0x7, dst, src); immediate_arithmetic_op(0x7, dst, src);
} }
void cmp(const Operand& dst, Immediate src) { void cmpq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x7, dst, src); immediate_arithmetic_op(0x7, dst, src);
} }
@ -527,15 +526,9 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x4, dst, src); immediate_arithmetic_op(0x4, dst, src);
} }
void cmpb(const Operand& op, int8_t imm8); void decq(Register dst);
void cmpb_al(const Operand& op); void decq(const Operand& dst);
void cmpw_ax(const Operand& op); void decl(const Operand& dst);
void cmpw(const Operand& op, Immediate imm16);
void dec_b(Register dst);
void dec(Register dst);
void dec(const Operand& dst);
// Sign-extends rax into rdx:rax. // Sign-extends rax into rdx:rax.
void cqo(); void cqo();
@ -548,8 +541,9 @@ class Assembler : public Malloced {
// Performs the operation dst = src * imm. // Performs the operation dst = src * imm.
void imul(Register dst, Register src, Immediate imm); void imul(Register dst, Register src, Immediate imm);
void inc(Register dst); void incq(Register dst);
void inc(const Operand& dst); void incq(const Operand& dst);
void incl(const Operand& dst);
void lea(Register dst, const Operand& src); void lea(Register dst, const Operand& src);
@ -621,32 +615,37 @@ class Assembler : public Malloced {
void store_rax(void* dst, RelocInfo::Mode mode); void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref); void store_rax(ExternalReference ref);
void sub(Register dst, Register src) { void subq(Register dst, Register src) {
arithmetic_op(0x2B, dst, src); arithmetic_op(0x2B, dst, src);
} }
void sub(Register dst, const Operand& src) { void subq(Register dst, const Operand& src) {
arithmetic_op(0x2B, dst, src); arithmetic_op(0x2B, dst, src);
} }
void sub(const Operand& dst, Register src) { void subq(const Operand& dst, Register src) {
arithmetic_op(0x29, src, dst); arithmetic_op(0x29, src, dst);
} }
void sub(Register dst, Immediate src) { void subq(Register dst, Immediate src) {
immediate_arithmetic_op(0x5, dst, src); immediate_arithmetic_op(0x5, dst, src);
} }
void sub(const Operand& dst, Immediate src) { void subq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x5, dst, src); immediate_arithmetic_op(0x5, dst, src);
} }
void subl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x5, dst, src);
}
void testb(Register reg, Immediate mask); void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask); void testb(const Operand& op, Immediate mask);
void testl(Register reg, Immediate mask); void testl(Register reg, Immediate mask);
void testl(const Operand& op, Immediate mask); void testl(const Operand& op, Immediate mask);
void testq(const Operand& op, Register reg); void testq(const Operand& op, Register reg);
void testq(Register dst, Register src); void testq(Register dst, Register src);
void testq(Register dst, Immediate mask);
void xor_(Register dst, Register src) { void xor_(Register dst, Register src) {
arithmetic_op(0x33, dst, src); arithmetic_op(0x33, dst, src);
@ -668,18 +667,19 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x6, dst, src); immediate_arithmetic_op(0x6, dst, src);
} }
// Bit operations. // Bit operations.
void bt(const Operand& dst, Register src); void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src); void bts(const Operand& dst, Register src);
// Miscellaneous // Miscellaneous
void cpuid();
void hlt(); void hlt();
void int3(); void int3();
void nop(); void nop();
void nop(int n); void nop(int n);
void rdtsc(); void rdtsc();
void ret(int imm16); void ret(int imm16);
void setcc(Condition cc, Register reg);
// Label operations & relative jumps (PPUM Appendix D) // Label operations & relative jumps (PPUM Appendix D)
// //
@ -717,8 +717,6 @@ class Assembler : public Malloced {
// Conditional jumps // Conditional jumps
void j(Condition cc, Label* L); void j(Condition cc, Label* L);
void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<Code> code);
// Floating-point operations // Floating-point operations
void fld(int i); void fld(int i);
@ -774,11 +772,6 @@ class Assembler : public Malloced {
void frndint(); void frndint();
void sahf();
void setcc(Condition cc, Register reg);
void cpuid();
// SSE2 instructions // SSE2 instructions
void cvttss2si(Register dst, const Operand& src); void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src); void cvttsd2si(Register dst, const Operand& src);
@ -791,8 +784,8 @@ class Assembler : public Malloced {
void divsd(XMMRegister dst, XMMRegister src); void divsd(XMMRegister dst, XMMRegister src);
// Use either movsd or movlpd. // Use either movsd or movlpd.
void movdbl(XMMRegister dst, const Operand& src); // void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src); // void movdbl(const Operand& dst, XMMRegister src);
// Debugging // Debugging
void Print(); void Print();
@ -813,11 +806,11 @@ class Assembler : public Malloced {
// Writes a doubleword of data in the code stream. // Writes a doubleword of data in the code stream.
// Used for inline tables, e.g., jump-tables. // Used for inline tables, e.g., jump-tables.
void dd(uint32_t data); // void dd(uint32_t data);
// Writes a quadword of data in the code stream. // Writes a quadword of data in the code stream.
// Used for inline tables, e.g., jump-tables. // Used for inline tables, e.g., jump-tables.
void dd(uint64_t data, RelocInfo::Mode reloc_info); // void dd(uint64_t data, RelocInfo::Mode reloc_info);
// Writes the absolute address of a bound label at the given position in // Writes the absolute address of a bound label at the given position in
// the generated code. That positions should have the relocation mode // the generated code. That positions should have the relocation mode
@ -841,11 +834,11 @@ class Assembler : public Malloced {
static const int kMinimalBufferSize = 4*KB; static const int kMinimalBufferSize = 4*KB;
protected: protected:
void movsd(XMMRegister dst, const Operand& src); // void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src); // void movsd(const Operand& dst, XMMRegister src);
void emit_sse_operand(XMMRegister reg, const Operand& adr); // void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src); // void emit_sse_operand(XMMRegister dst, XMMRegister src);
private: private:
@ -969,15 +962,23 @@ class Assembler : public Malloced {
void arithmetic_op(byte opcode, Register reg, const Operand& op); void arithmetic_op(byte opcode, Register reg, const Operand& op);
void immediate_arithmetic_op(byte subcode, Register dst, Immediate src); void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src); void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
// Operate on a 32-bit word in memory.
void immediate_arithmetic_op_32(byte subcode,
const Operand& dst,
Immediate src);
// Operate on a byte in memory.
void immediate_arithmetic_op_8(byte subcode,
const Operand& dst,
Immediate src);
// Emit machine code for a shift operation. // Emit machine code for a shift operation.
void shift(Register dst, Immediate shift_amount, int subcode); void shift(Register dst, Immediate shift_amount, int subcode);
// Shift dst by cl % 64 bits. // Shift dst by cl % 64 bits.
void shift(Register dst, int subcode); void shift(Register dst, int subcode);
void emit_farith(int b1, int b2, int i); // void emit_farith(int b1, int b2, int i);
// labels // labels
void print(Label* L); // void print(Label* L);
void bind_to(Label* L, int pos); void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix); void link_to(Label* L, Label* appendix);

240
deps/v8/src/x64/builtins-x64.cc

@ -27,19 +27,138 @@
#include "v8.h" #include "v8.h"
#include "codegen-inl.h" #include "codegen-inl.h"
#include "macro-assembler.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
#define __ ACCESS_MASM(masm)
void Builtins::Generate_Adaptor(MacroAssembler* masm, void Builtins::Generate_Adaptor(MacroAssembler* masm,
Builtins::CFunctionId id) { Builtins::CFunctionId id) {
masm->int3(); // UNIMPLEMENTED. masm->int3(); // UNIMPLEMENTED.
} }
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(rbp);
__ movq(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ push(Immediate(ArgumentsAdaptorFrame::SENTINEL));
// Push the function on the stack.
__ push(rdi);
// Preserve the number of arguments on the stack. Must preserve both
// eax and ebx because these registers are used when copying the
// arguments and the receiver.
ASSERT(kSmiTagSize == 1);
__ lea(rcx, Operand(rax, rax, kTimes1, kSmiTag));
__ push(rcx);
}
static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack. Number is a Smi.
__ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
// Leave the frame.
__ movq(rsp, rbp);
__ pop(rbp);
// Remove caller arguments from the stack.
// rbx holds a Smi, so we convery to dword offset by multiplying by 4.
ASSERT_EQ(kSmiTagSize, 1 && kSmiTag == 0);
ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
__ pop(rcx);
__ lea(rsp, Operand(rsp, rbx, kTimes4, 1 * kPointerSize)); // 1 ~ receiver
__ push(rcx);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED. // ----------- S t a t e -------------
// -- rax : actual number of arguments
// -- rbx : expected number of arguments
// -- rdx : code entry to call
// -----------------------------------
Label invoke, dont_adapt_arguments;
__ IncrementCounter(&Counters::arguments_adaptors, 1);
Label enough, too_few;
__ cmpq(rax, rbx);
__ j(less, &too_few);
__ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(rax, Operand(rbp, rax, kTimesPointerSize, offset));
__ movq(rcx, Immediate(-1)); // account for receiver
Label copy;
__ bind(&copy);
__ incq(rcx);
__ push(Operand(rax, 0));
__ subq(rax, Immediate(kPointerSize));
__ cmpq(rcx, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
{ // Too few parameters: Actual < expected.
__ bind(&too_few);
EnterArgumentsAdaptorFrame(masm);
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(rdi, Operand(rbp, rax, kTimesPointerSize, offset));
__ movq(rcx, Immediate(-1)); // account for receiver
Label copy;
__ bind(&copy);
__ incq(rcx);
__ push(Operand(rdi, 0));
__ subq(rdi, Immediate(kPointerSize));
__ cmpq(rcx, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ movq(kScratchRegister,
Factory::undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
__ bind(&fill);
__ incq(rcx);
__ push(kScratchRegister);
__ cmpq(rcx, rbx);
__ j(less, &fill);
// Restore function pointer.
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// Call the entry point.
__ bind(&invoke);
__ call(rdx);
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ ret(0);
// -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ jmp(rdx);
} }
void Builtins::Generate_FunctionApply(MacroAssembler* masm) { void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED. masm->int3(); // UNIMPLEMENTED.
} }
@ -52,14 +171,125 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED. masm->int3(); // UNIMPLEMENTED.
} }
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
masm->int3(); // UNIMPLEMENTED. bool is_construct) {
// Expects five C++ function parameters.
// - Address entry (ignored)
// - JSFunction* function (
// - Object* receiver
// - int argc
// - Object*** argv
// (see Handle::Invoke in execution.cc).
// Platform specific argument handling. After this, the stack contains
// an internal frame and the pushed function and receiver, and
// register rax and rbx holds the argument count and argument array,
// while rdi holds the function pointer and rsi the context.
#ifdef __MSVC__
// MSVC parameters in:
// rcx : entry (ignored)
// rdx : function
// r8 : receiver
// r9 : argc
// [rsp+0x20] : argv
// Clear the context before we push it when entering the JS frame.
__ xor_(rsi, rsi);
// Enter an internal frame.
__ EnterInternalFrame();
// Load the function context into rsi.
__ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
__ push(rdx);
__ push(r8);
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, r9);
// Load the previous frame pointer to access C argument on stack
__ movq(kScratchRegister, Operand(rbp, 0));
__ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
__ movq(rdi, rdx);
#else // !defined(__MSVC__)
// GCC parameters in:
// rdi : entry (ignored)
// rsi : function
// rdx : receiver
// rcx : argc
// r8 : argv
__ movq(rdi, rsi);
// rdi : function
// Clear the context before we push it when entering the JS frame.
__ xor_(rsi, rsi);
// Enter an internal frame.
__ EnterInternalFrame();
// Push the function and receiver and setup the context.
__ push(rdi);
__ push(rdx);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, rcx);
__ movq(rbx, r8);
#endif // __MSVC__
// Current stack contents:
// [rsp + 2 * kPointerSize ... ]: Internal frame
// [rsp + kPointerSize] : function
// [rsp] : receiver
// Current register contents:
// rax : argc
// rbx : argv
// rsi : context
// rdi : function
// Copy arguments to the stack in a loop.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
Label loop, entry;
__ xor_(rcx, rcx); // Set loop variable to 0.
__ jmp(&entry);
__ bind(&loop);
__ movq(kScratchRegister, Operand(rbx, rcx, kTimesPointerSize, 0));
__ push(Operand(kScratchRegister, 0)); // dereference handle
__ addq(rcx, Immediate(1));
__ bind(&entry);
__ cmpq(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
if (is_construct) {
// Expects rdi to hold function pointer.
__ movq(kScratchRegister,
Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
RelocInfo::CODE_TARGET);
__ call(kScratchRegister);
} else {
ParameterCount actual(rax);
// Function must be in rdi.
__ InvokeFunction(rdi, actual, CALL_FUNCTION);
}
// Exit the JS frame. Notice that this also removes the empty
// context and the function left on the stack by the code
// invocation.
__ LeaveInternalFrame();
// TODO(X64): Is argument correct? Is there a receiver to remove?
__ ret(1 * kPointerSize); // remove receiver
} }
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
masm->int3(); // UNIMPLEMENTED. Generate_JSEntryTrampolineHelper(masm, false);
} }
} } // namespace v8::internal
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
} } // namespace v8::internal

11
deps/v8/src/x64/codegen-x64-inl.h

@ -37,6 +37,17 @@ namespace internal {
void DeferredCode::Jump() { UNIMPLEMENTED(); } void DeferredCode::Jump() { UNIMPLEMENTED(); }
void DeferredCode::Branch(Condition cc) { UNIMPLEMENTED(); } void DeferredCode::Branch(Condition cc) { UNIMPLEMENTED(); }
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
GenerateFastMathOp(SIN, args);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
GenerateFastMathOp(COS, args);
}
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_INL_H_ #endif // V8_X64_CODEGEN_X64_INL_H_

466
deps/v8/src/x64/codegen-x64.cc

@ -30,6 +30,8 @@
#include "macro-assembler.h" #include "macro-assembler.h"
#include "register-allocator-inl.h" #include "register-allocator-inl.h"
#include "codegen.h" #include "codegen.h"
// TEST
#include "compiler.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -41,6 +43,37 @@ void DeferredCode::SaveRegisters() { UNIMPLEMENTED(); }
void DeferredCode::RestoreRegisters() { UNIMPLEMENTED(); } void DeferredCode::RestoreRegisters() { UNIMPLEMENTED(); }
// -------------------------------------------------------------------------
// CodeGenState implementation.
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
typeof_state_(NOT_INSIDE_TYPEOF),
destination_(NULL),
previous_(NULL) {
owner_->set_state(this);
}
CodeGenState::CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
ControlDestination* destination)
: owner_(owner),
typeof_state_(typeof_state),
destination_(destination),
previous_(owner->state()) {
owner_->set_state(this);
}
CodeGenState::~CodeGenState() {
ASSERT(owner_->state() == this);
owner_->set_state(previous_);
}
// -----------------------------------------------------------------------------
// CodeGenerator implementation.
CodeGenerator::CodeGenerator(int buffer_size, CodeGenerator::CodeGenerator(int buffer_size,
Handle<Script> script, Handle<Script> script,
@ -58,17 +91,127 @@ CodeGenerator::CodeGenerator(int buffer_size,
in_spilled_code_(false) { in_spilled_code_(false) {
} }
#define __ masm-> #define __ ACCESS_MASM(masm_)
void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) { void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
void CodeGenerator::GenCode(FunctionLiteral* a) { void CodeGenerator::TestCodeGenerator() {
masm_->int3(); // UNIMPLEMENTED // Compile a function from a string, and run it.
Handle<JSFunction> test_function = Compiler::Compile(
Factory::NewStringFromAscii(CStrVector("42")),
Factory::NewStringFromAscii(CStrVector("CodeGeneratorTestScript")),
0,
0,
NULL,
NULL);
Code* code_object = test_function->code(); // Local for debugging ease.
USE(code_object);
// Create a dummy function and context.
Handle<JSFunction> bridge =
Factory::NewFunction(Factory::empty_symbol(), Factory::undefined_value());
Handle<Context> context =
Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
test_function = Factory::NewFunctionFromBoilerplate(
test_function,
context);
bool pending_exceptions;
Handle<Object> result =
Execution::Call(test_function,
Handle<Object>::cast(test_function),
0,
NULL,
&pending_exceptions);
CHECK(result->IsSmi());
CHECK_EQ(42, Smi::cast(*result)->value());
} }
void CodeGenerator::GenCode(FunctionLiteral* function) {
// Record the position for debugging purposes.
CodeForFunctionPosition(function);
// ZoneList<Statement*>* body = fun->body();
// Initialize state.
ASSERT(scope_ == NULL);
scope_ = function->scope();
ASSERT(allocator_ == NULL);
RegisterAllocator register_allocator(this);
allocator_ = &register_allocator;
ASSERT(frame_ == NULL);
frame_ = new VirtualFrame();
set_in_spilled_code(false);
// Adjust for function-level loop nesting.
loop_nesting_ += function->loop_nesting();
JumpTarget::set_compiling_deferred_code(false);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
// fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
false) {
frame_->SpillAll();
__ int3();
}
#endif
// New scope to get automatic timing calculation.
{ // NOLINT
HistogramTimerScope codegen_timer(&Counters::code_generation);
CodeGenState state(this);
// Entry:
// Stack: receiver, arguments, return address.
// ebp: caller's frame pointer
// esp: stack pointer
// edi: called JS function
// esi: callee's context
allocator_->Initialize();
frame_->Enter();
Result return_register = allocator_->Allocate(rax);
__ movq(return_register.reg(), Immediate(0x54)); // Smi 42
GenerateReturnSequence(&return_register);
}
}
void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// The return value is a live (but not currently reference counted)
// reference to rax. This is safe because the current frame does not
// contain a reference to rax (it is prepared for the return by spilling
// all registers).
if (FLAG_trace) {
frame_->Push(return_value);
// *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
}
return_value->ToRegister(rax);
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
// Leave the frame and return popping the arguments and the
// receiver.
frame_->Exit();
masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
DeleteFrame();
// Check that the size of the code used for returning matches what is
// expected by the debugger.
// ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
// masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
}
void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a, void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
int b, int b,
int c, int c,
@ -235,9 +378,316 @@ void CodeGenerator::VisitThisFunction(ThisFunction* a) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateLog(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
UNIMPLEMENTED();
}
#undef __
// End of CodeGenerator implementation.
// -----------------------------------------------------------------------------
// Implementation of stubs.
// Stub classes have public member named masm, not masm_.
#define __ ACCESS_MASM(masm)
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Check that stack should contain frame pointer, code pointer, state and
// return address in that order.
ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
StackHandlerConstants::kStateOffset);
ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
StackHandlerConstants::kPCOffset);
ExternalReference handler_address(Top::k_handler_address);
__ movq(kScratchRegister, handler_address);
__ movq(rdx, Operand(kScratchRegister, 0));
// get next in chain
__ movq(rcx, Operand(rdx, 0));
__ movq(Operand(kScratchRegister, 0), rcx);
__ movq(rsp, rdx);
__ pop(rbp); // pop frame pointer
__ pop(rdx); // remove code pointer
__ pop(rdx); // remove state
// Before returning we restore the context from the frame pointer if not NULL.
// The frame pointer is NULL in the exception handler of a JS entry frame.
__ xor_(rsi, rsi); // tentatively set context pointer to NULL
Label skip;
__ cmpq(rbp, Immediate(0));
__ j(equal, &skip);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
__ ret(0);
}
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_out_of_memory_exception,
StackFrame::Type frame_type,
bool do_gc,
bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
// rbx: pointer to C function (C callee-saved).
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
// rdi: number of arguments including receiver.
// r15: pointer to the first argument (C callee-saved).
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
if (do_gc) {
__ movq(Operand(rsp, 0), rax); // Result.
__ movq(kScratchRegister,
FUNCTION_ADDR(Runtime::PerformGC),
RelocInfo::RUNTIME_ENTRY);
__ call(kScratchRegister);
}
ExternalReference scope_depth =
ExternalReference::heap_always_allocate_scope_depth();
if (always_allocate_scope) {
__ movq(kScratchRegister, scope_depth);
__ incl(Operand(kScratchRegister, 0));
}
// Call C function.
#ifdef __MSVC__
// MSVC passes arguments in rcx, rdx, r8, r9
__ movq(rcx, rdi); // argc.
__ movq(rdx, r15); // argv.
#else // ! defined(__MSVC__)
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
// First argument is already in rdi.
__ movq(rsi, r15); // argv.
#endif
__ call(rbx);
// Result is in rax - do not destroy this register!
if (always_allocate_scope) {
__ movq(kScratchRegister, scope_depth);
__ decl(Operand(kScratchRegister, 0));
}
// Check for failure result.
Label failure_returned;
ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
__ lea(rcx, Operand(rax, 1));
// Lower 2 bits of rcx are 0 iff rax has failure tag.
__ testl(rcx, Immediate(kFailureTagMask));
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
__ LeaveExitFrame(frame_type);
__ ret(0);
// Handling of failure.
__ bind(&failure_returned);
Label retry;
// If the returned exception is RETRY_AFTER_GC continue at retry label
ASSERT(Failure::RETRY_AFTER_GC == 0);
__ testq(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry);
Label continue_exception;
// If the returned failure is EXCEPTION then promote Top::pending_exception().
__ movq(kScratchRegister, Failure::Exception(), RelocInfo::NONE);
__ cmpq(rax, kScratchRegister);
__ j(not_equal, &continue_exception);
// Retrieve the pending exception and clear the variable.
ExternalReference pending_exception_address(Top::k_pending_exception_address);
__ movq(kScratchRegister, pending_exception_address);
__ movq(rax, Operand(kScratchRegister, 0));
__ movq(rdx, ExternalReference::the_hole_value_location());
__ movq(rdx, Operand(rdx, 0));
__ movq(Operand(kScratchRegister, 0), rdx);
__ bind(&continue_exception);
// Special handling of out of memory exception.
__ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
__ cmpq(rax, kScratchRegister);
__ j(equal, throw_out_of_memory_exception);
// Handle normal exception.
__ jmp(throw_normal_exception);
// Retry.
__ bind(&retry);
}
void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
// Fetch top stack handler.
ExternalReference handler_address(Top::k_handler_address);
__ movq(kScratchRegister, handler_address);
__ movq(rdx, Operand(kScratchRegister, 0));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
__ cmpq(Operand(rdx, StackHandlerConstants::kStateOffset),
Immediate(StackHandler::ENTRY));
__ j(equal, &done);
// Fetch the next handler in the list.
__ movq(rdx, Operand(rdx, StackHandlerConstants::kNextOffset));
__ jmp(&loop);
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
__ movq(rax, Operand(rdx, StackHandlerConstants::kNextOffset));
__ store_rax(handler_address);
// Set external caught exception to false.
__ movq(rax, Immediate(false));
ExternalReference external_caught(Top::k_external_caught_exception_address);
__ store_rax(external_caught);
// Set pending exception and rax to out of memory exception.
__ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
ExternalReference pending_exception(Top::k_pending_exception_address);
__ store_rax(pending_exception);
// Restore the stack to the address of the ENTRY handler
__ movq(rsp, rdx);
// Clear the context pointer;
__ xor_(rsi, rsi);
// Restore registers from handler.
__ pop(rbp); // FP
ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
StackHandlerConstants::kStateOffset);
__ pop(rdx); // State
ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
StackHandlerConstants::kPCOffset);
__ ret(0);
}
void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
masm->int3(); // TODO(X64): UNIMPLEMENTED. // rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
// rbp: frame pointer (restored after C call)
// rsp: stack pointer (restored after C call)
// rsi: current context (C callee-saved)
// rdi: caller's parameter pointer pp (C callee-saved)
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
// this by performing a garbage collection and retrying the
// builtin once.
StackFrame::Type frame_type = is_debug_break ?
StackFrame::EXIT_DEBUG :
StackFrame::EXIT;
// Enter the exit frame that transitions from JavaScript to C++.
__ EnterExitFrame(frame_type);
// rax: result parameter for PerformGC, if any (setup below).
// Holds the result of a previous call to GenerateCore that
// returned a failure. On next call, it's used as parameter
// to Runtime::PerformGC.
// rbx: pointer to builtin function (C callee-saved).
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
// rdi: number of arguments including receiver (destroyed by C call).
// The rdi register is not callee-save in Unix 64-bit ABI, so
// we must treat it as volatile.
// r15: argv pointer (C callee-saved).
Label throw_out_of_memory_exception;
Label throw_normal_exception;
// Call into the runtime system. Collect garbage before the call if
// running with --gc-greedy set.
if (FLAG_gc_greedy) {
Failure* failure = Failure::RetryAfterGC(0);
__ movq(rax, failure, RelocInfo::NONE);
}
GenerateCore(masm,
&throw_normal_exception,
&throw_out_of_memory_exception,
frame_type,
FLAG_gc_greedy,
false);
// Do space-specific GC and retry runtime call.
GenerateCore(masm,
&throw_normal_exception,
&throw_out_of_memory_exception,
frame_type,
true,
false);
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
__ movq(rax, failure, RelocInfo::NONE);
GenerateCore(masm,
&throw_normal_exception,
&throw_out_of_memory_exception,
frame_type,
true,
true);
__ bind(&throw_out_of_memory_exception);
GenerateThrowOutOfMemory(masm);
// control flow for generated will not return.
__ bind(&throw_normal_exception);
GenerateThrowTOS(masm);
} }
@ -281,7 +731,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Invoke: Link this frame into the handler chain. // Invoke: Link this frame into the handler chain.
__ bind(&invoke); __ bind(&invoke);
__ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
__ push(rax); // flush TOS
// Clear any pending exceptions. // Clear any pending exceptions.
__ load_rax(ExternalReference::the_hole_value_location()); __ load_rax(ExternalReference::the_hole_value_location());
@ -302,13 +751,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference entry(Builtins::JSEntryTrampoline); ExternalReference entry(Builtins::JSEntryTrampoline);
__ load_rax(entry); __ load_rax(entry);
} }
__ call(FieldOperand(rax, Code::kHeaderSize)); __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
__ call(kScratchRegister);
// Unlink this frame from the handler chain. // Unlink this frame from the handler chain.
__ movq(kScratchRegister, ExternalReference(Top::k_handler_address)); __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
__ pop(Operand(kScratchRegister, 0)); __ pop(Operand(kScratchRegister, 0));
// Pop next_sp. // Pop next_sp.
__ add(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
// Restore the top frame descriptor from the stack. // Restore the top frame descriptor from the stack.
__ bind(&exit); __ bind(&exit);
@ -323,7 +773,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ pop(r14); __ pop(r14);
__ pop(r13); __ pop(r13);
__ pop(r12); __ pop(r12);
__ add(rsp, Immediate(2 * kPointerSize)); // remove markers __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return. // Restore frame pointer and return.
__ pop(rbp); __ pop(rbp);

17
deps/v8/src/x64/codegen-x64.h

@ -286,6 +286,15 @@ class CodeGenerator: public AstVisitor {
Handle<Script> script, Handle<Script> script,
bool is_eval); bool is_eval);
// During implementation of CodeGenerator, this call creates a
// CodeGenerator instance, and calls GenCode on it with a null
// function literal. CodeGenerator will then construct and return
// a simple dummy function. Call this during bootstrapping before
// trying to compile any real functions, to get CodeGenerator up
// and running.
// TODO(X64): Remove once we can get through the bootstrapping process.
static void TestCodeGenerator();
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type); static bool ShouldGenerateLog(Expression* type);
#endif #endif
@ -515,6 +524,14 @@ class CodeGenerator: public AstVisitor {
void GenerateLog(ZoneList<Expression*>* args); void GenerateLog(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
enum MathOp { SIN, COS };
void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods and constants for fast case switch statement support. // Methods and constants for fast case switch statement support.
// //

45
deps/v8/src/x64/frames-x64.cc

@ -25,3 +25,48 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "frames-inl.h"
namespace v8 {
namespace internal {
StackFrame::Type ExitFrame::GetStateForFramePointer(unsigned char* a,
StackFrame::State* b) {
// TODO(X64): UNIMPLEMENTED
return NONE;
}
int JavaScriptFrame::GetProvidedParametersCount() const {
UNIMPLEMENTED();
return 0;
}
StackFrame::Type StackFrame::ComputeType(StackFrame::State* a) {
UNIMPLEMENTED();
return NONE;
}
byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
UNIMPLEMENTED();
return NULL;
}
void ExitFrame::Iterate(ObjectVisitor* a) const {
UNIMPLEMENTED();
}
byte* InternalFrame::GetCallerStackPointer() const {
UNIMPLEMENTED();
return NULL;
}
byte* JavaScriptFrame::GetCallerStackPointer() const {
UNIMPLEMENTED();
return NULL;
}
} } // namespace v8::internal

58
deps/v8/src/x64/frames-x64.h

@ -32,70 +32,74 @@ namespace v8 {
namespace internal { namespace internal {
// TODO(x64): This is a stub, mostly just a copy of the ia32 bit version. // TODO(x64): This is a stub, mostly just a copy of the ia32 bit version.
// This will all need to change to be correct for x64. // This might all need to change to be correct for x64.
static const int kNumRegs = 8; static const int kNumRegs = 8;
static const RegList kJSCallerSaved = 0; static const RegList kJSCallerSaved =
1 << 0 | // rax
1 << 1 | // rcx
1 << 2 | // rdx
1 << 3 | // rbx - used as a caller-saved register in JavaScript code
1 << 7; // rdi - callee function
static const int kNumJSCallerSaved = 5; static const int kNumJSCallerSaved = 5;
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved]; typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
class StackHandlerConstants : public AllStatic { class StackHandlerConstants : public AllStatic {
public: public:
static const int kNextOffset = 0 * kPointerSize; static const int kNextOffset = 0 * kPointerSize;
static const int kPPOffset = 1 * kPointerSize; static const int kFPOffset = 1 * kPointerSize;
static const int kFPOffset = 2 * kPointerSize; static const int kStateOffset = 2 * kPointerSize;
static const int kPCOffset = 3 * kPointerSize;
static const int kCodeOffset = 3 * kPointerSize;
static const int kStateOffset = 4 * kPointerSize;
static const int kPCOffset = 5 * kPointerSize;
static const int kAddressDisplacement = -1 * kPointerSize; static const int kSize = 4 * kPointerSize;
static const int kSize = 6 * kPointerSize;
}; };
class EntryFrameConstants : public AllStatic { class EntryFrameConstants : public AllStatic {
public: public:
static const int kCallerFPOffset = -1 * kPointerSize; static const int kCallerFPOffset = 0 * kPointerSize;
static const int kFunctionArgOffset = -1 * kPointerSize; static const int kFunctionArgOffset = 1 * kPointerSize;
static const int kReceiverArgOffset = -1 * kPointerSize; static const int kReceiverArgOffset = 2 * kPointerSize;
static const int kArgcOffset = -1 * kPointerSize; static const int kArgcOffset = 3 * kPointerSize;
static const int kArgvOffset = -1 * kPointerSize; static const int kArgvOffset = 4 * kPointerSize;
}; };
class ExitFrameConstants : public AllStatic { class ExitFrameConstants : public AllStatic {
public: public:
static const int kDebugMarkOffset = -1 * kPointerSize; static const int kDebugMarkOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize; static const int kSPOffset = -1 * kPointerSize;
static const int kPPDisplacement = -1 * kPointerSize; static const int kCallerFPOffset = +0 * kPointerSize;
static const int kCallerPCOffset = +1 * kPointerSize;
static const int kCallerFPOffset = -1 * kPointerSize; // FP-relative displacement of the caller's SP. It points just
static const int kCallerPCOffset = -1 * kPointerSize; // below the saved PC.
static const int kCallerSPDisplacement = +2 * kPointerSize;
}; };
class StandardFrameConstants : public AllStatic { class StandardFrameConstants : public AllStatic {
public: public:
static const int kExpressionsOffset = -1 * kPointerSize; static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -1 * kPointerSize; static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize; static const int kContextOffset = -1 * kPointerSize;
static const int kCallerFPOffset = -1 * kPointerSize; static const int kCallerFPOffset = 0 * kPointerSize;
static const int kCallerPCOffset = -1 * kPointerSize; static const int kCallerPCOffset = +1 * kPointerSize;
static const int kCallerSPOffset = -1 * kPointerSize; static const int kCallerSPOffset = +2 * kPointerSize;
}; };
class JavaScriptFrameConstants : public AllStatic { class JavaScriptFrameConstants : public AllStatic {
public: public:
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset; static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kSavedRegistersOffset = -1 * kPointerSize; static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset; static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
static const int kParam0Offset = -1 * kPointerSize; static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize; static const int kReceiverOffset = -1 * kPointerSize;
}; };

13
deps/v8/src/x64/ic-x64.cc

@ -40,6 +40,14 @@ void KeyedLoadIC::ClearInlinedVersion(Address address) {
UNIMPLEMENTED(); UNIMPLEMENTED();
} }
void KeyedStoreIC::ClearInlinedVersion(Address address) {
UNIMPLEMENTED();
}
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
UNIMPLEMENTED();
}
void KeyedLoadIC::Generate(MacroAssembler* masm, void KeyedLoadIC::Generate(MacroAssembler* masm,
ExternalReference const& f) { ExternalReference const& f) {
masm->int3(); // UNIMPLEMENTED. masm->int3(); // UNIMPLEMENTED.
@ -58,6 +66,11 @@ bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false; return false;
} }
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
UNIMPLEMENTED();
return false;
}
Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
UNIMPLEMENTED(); UNIMPLEMENTED();
return NULL; return NULL;

599
deps/v8/src/x64/macro-assembler-x64.cc

@ -29,7 +29,9 @@
#include "bootstrapper.h" #include "bootstrapper.h"
#include "codegen-inl.h" #include "codegen-inl.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h" #include "macro-assembler-x64.h"
#include "debug.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -43,8 +45,152 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
} }
void MacroAssembler::TailCallRuntime(ExternalReference const& a, int b) { void MacroAssembler::Assert(Condition cc, const char* msg) {
UNIMPLEMENTED(); if (FLAG_debug_code) Check(cc, msg);
}
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
j(cc, &L);
Abort(msg);
// will not return here
bind(&L);
}
void MacroAssembler::ConstructAndTestJSFunction() {
const int initial_buffer_size = 4 * KB;
char* buffer = new char[initial_buffer_size];
MacroAssembler masm(buffer, initial_buffer_size);
const uint64_t secret = V8_INT64_C(0xdeadbeefcafebabe);
Handle<String> constant =
Factory::NewStringFromAscii(Vector<const char>("451", 3), TENURED);
#define __ ACCESS_MASM((&masm))
// Construct a simple JSfunction here, using Assembler and MacroAssembler
// commands.
__ movq(rax, constant, RelocInfo::EMBEDDED_OBJECT);
__ push(rax);
__ CallRuntime(Runtime::kStringParseFloat, 1);
__ movq(kScratchRegister, secret, RelocInfo::NONE);
__ addq(rax, kScratchRegister);
__ ret(0);
#undef __
CodeDesc desc;
masm.GetCode(&desc);
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Object* code = Heap::CreateCode(desc, NULL, flags, Handle<Object>::null());
if (!code->IsFailure()) {
Handle<Code> code_handle(Code::cast(code));
Handle<String> name =
Factory::NewStringFromAscii(Vector<const char>("foo", 3), NOT_TENURED);
Handle<JSFunction> function =
Factory::NewFunction(name,
JS_FUNCTION_TYPE,
JSObject::kHeaderSize,
code_handle,
true);
bool pending_exceptions;
Handle<Object> result =
Execution::Call(function,
Handle<Object>::cast(function),
0,
NULL,
&pending_exceptions);
CHECK(result->IsSmi());
CHECK(secret + (451 << kSmiTagSize) == reinterpret_cast<uint64_t>(*result));
}
}
void MacroAssembler::Abort(const char* msg) {
// We want to pass the msg string like a smi to avoid GC
// problems, however msg is not guaranteed to be aligned
// properly. Instead, we pass an aligned pointer that is
// a proper v8 smi, but also pass the alignment difference
// from the real pointer as a smi.
intptr_t p1 = reinterpret_cast<intptr_t>(msg);
intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
// Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
}
#endif
push(rax);
movq(kScratchRegister, p0, RelocInfo::NONE);
push(kScratchRegister);
movq(kScratchRegister,
reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)),
RelocInfo::NONE);
push(kScratchRegister);
CallRuntime(Runtime::kAbort, 2);
// will not return here
}
void MacroAssembler::CallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
movq(kScratchRegister, stub->GetCode(), RelocInfo::CODE_TARGET);
call(kScratchRegister);
}
void MacroAssembler::StubReturn(int argc) {
ASSERT(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
}
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
addq(rsp, Immediate(num_arguments * kPointerSize));
}
movq(rax, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT);
}
void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
CallRuntime(Runtime::FunctionForId(id), num_arguments);
}
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
if (f->nargs >= 0 && f->nargs != num_arguments) {
IllegalOperation(num_arguments);
return;
}
Runtime::FunctionId function_id =
static_cast<Runtime::FunctionId>(f->stub_id);
RuntimeStub stub(function_id, num_arguments);
CallStub(&stub);
}
void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
int num_arguments) {
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
movq(rax, Immediate(num_arguments));
JumpToBuiltin(ext);
}
void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
movq(rbx, ext);
CEntryStub ces;
movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
jmp(kScratchRegister);
} }
@ -71,18 +217,43 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
} }
void MacroAssembler::Jump(ExternalReference ext) {
movq(kScratchRegister, ext);
jmp(kScratchRegister);
}
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
movq(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
}
void MacroAssembler::Call(ExternalReference ext) {
movq(kScratchRegister, ext);
call(kScratchRegister);
}
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
movq(kScratchRegister, destination, rmode);
call(kScratchRegister);
}
void MacroAssembler::PushTryHandler(CodeLocation try_location, void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) { HandlerType type) {
// The pc (return address) is already on TOS. // Adjust this code if not the case.
// This code pushes state, code, frame pointer and parameter pointer. ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Check that they are expected next on the stack, int that order.
// The pc (return address) is already on TOS. This code pushes state,
// frame pointer and current handler. Check that they are expected
// next on the stack, in that order.
ASSERT_EQ(StackHandlerConstants::kStateOffset, ASSERT_EQ(StackHandlerConstants::kStateOffset,
StackHandlerConstants::kPCOffset - kPointerSize); StackHandlerConstants::kPCOffset - kPointerSize);
ASSERT_EQ(StackHandlerConstants::kCodeOffset,
StackHandlerConstants::kStateOffset - kPointerSize);
ASSERT_EQ(StackHandlerConstants::kFPOffset, ASSERT_EQ(StackHandlerConstants::kFPOffset,
StackHandlerConstants::kCodeOffset - kPointerSize); StackHandlerConstants::kStateOffset - kPointerSize);
ASSERT_EQ(StackHandlerConstants::kPPOffset, ASSERT_EQ(StackHandlerConstants::kNextOffset,
StackHandlerConstants::kFPOffset - kPointerSize); StackHandlerConstants::kFPOffset - kPointerSize);
if (try_location == IN_JAVASCRIPT) { if (try_location == IN_JAVASCRIPT) {
@ -91,26 +262,414 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
} else { } else {
push(Immediate(StackHandler::TRY_FINALLY)); push(Immediate(StackHandler::TRY_FINALLY));
} }
push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
push(rbp); push(rbp);
push(rdi);
} else { } else {
ASSERT(try_location == IN_JS_ENTRY); ASSERT(try_location == IN_JS_ENTRY);
// The parameter pointer is meaningless here and ebp does not // The frame pointer does not point to a JS frame so we save NULL
// point to a JS frame. So we save NULL for both pp and ebp. We // for rbp. We expect the code throwing an exception to check rbp
// expect the code throwing an exception to check ebp before // before dereferencing it to restore the context.
// dereferencing it to restore the context.
push(Immediate(StackHandler::ENTRY)); push(Immediate(StackHandler::ENTRY));
push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent))); push(Immediate(0)); // NULL frame pointer.
push(Immediate(0)); // NULL frame pointer
push(Immediate(0)); // NULL parameter pointer
} }
// Save the current handler.
movq(kScratchRegister, ExternalReference(Top::k_handler_address)); movq(kScratchRegister, ExternalReference(Top::k_handler_address));
// Cached TOS. push(Operand(kScratchRegister, 0));
movq(rax, Operand(kScratchRegister, 0));
// Link this handler. // Link this handler.
movq(Operand(kScratchRegister, 0), rsp); movq(Operand(kScratchRegister, 0), rsp);
} }
void MacroAssembler::Ret() {
ret(0);
}
void MacroAssembler::CmpObjectType(Register heap_object,
InstanceType type,
Register map) {
movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(type)));
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
movq(kScratchRegister, ExternalReference(counter));
movl(Operand(kScratchRegister, 0), Immediate(value));
}
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
ASSERT(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
movq(kScratchRegister, ExternalReference(counter));
Operand operand(kScratchRegister, 0);
if (value == 1) {
incl(operand);
} else {
addl(operand, Immediate(value));
}
}
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
ASSERT(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
movq(kScratchRegister, ExternalReference(counter));
Operand operand(kScratchRegister, 0);
if (value == 1) {
decl(operand);
} else {
subl(operand, Immediate(value));
}
}
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::PushRegistersFromMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
// Push the content of the memory location to the stack.
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
if ((regs & (1 << r)) != 0) {
ExternalReference reg_addr =
ExternalReference(Debug_Address::Register(i));
movq(kScratchRegister, reg_addr);
push(Operand(kScratchRegister, 0));
}
}
}
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
// Copy the content of registers to memory location.
for (int i = 0; i < kNumJSCallerSaved; i++) {
int r = JSCallerSavedCode(i);
if ((regs & (1 << r)) != 0) {
Register reg = { r };
ExternalReference reg_addr =
ExternalReference(Debug_Address::Register(i));
movq(kScratchRegister, reg_addr);
movq(Operand(kScratchRegister, 0), reg);
}
}
}
void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
// Copy the content of memory location to registers.
for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
int r = JSCallerSavedCode(i);
if ((regs & (1 << r)) != 0) {
Register reg = { r };
ExternalReference reg_addr =
ExternalReference(Debug_Address::Register(i));
movq(kScratchRegister, reg_addr);
movq(reg, Operand(kScratchRegister, 0));
}
}
}
void MacroAssembler::PopRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
// Pop the content from the stack to the memory location.
for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
int r = JSCallerSavedCode(i);
if ((regs & (1 << r)) != 0) {
ExternalReference reg_addr =
ExternalReference(Debug_Address::Register(i));
movq(kScratchRegister, reg_addr);
pop(Operand(kScratchRegister, 0));
}
}
}
void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
Register scratch,
RegList regs) {
ASSERT(!scratch.is(kScratchRegister));
ASSERT(!base.is(kScratchRegister));
ASSERT(!base.is(scratch));
ASSERT((regs & ~kJSCallerSaved) == 0);
// Copy the content of the stack to the memory location and adjust base.
for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
int r = JSCallerSavedCode(i);
if ((regs & (1 << r)) != 0) {
movq(scratch, Operand(base, 0));
ExternalReference reg_addr =
ExternalReference(Debug_Address::Register(i));
movq(kScratchRegister, reg_addr);
movq(Operand(kScratchRegister, 0), scratch);
lea(base, Operand(base, kPointerSize));
}
}
}
#endif // ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_register,
Label* done,
InvokeFlag flag) {
bool definitely_matches = false;
Label invoke;
if (expected.is_immediate()) {
ASSERT(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
movq(rax, Immediate(actual.immediate()));
if (expected.immediate() ==
SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
// Don't worry about adapting arguments for built-ins that
// don't want that done. Skip adaption code by making it look
// like we have a match between expected and actual number of
// arguments.
definitely_matches = true;
} else {
movq(rbx, Immediate(expected.immediate()));
}
}
} else {
if (actual.is_immediate()) {
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
cmpq(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke);
ASSERT(expected.reg().is(rbx));
movq(rax, Immediate(actual.immediate()));
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
cmpq(expected.reg(), actual.reg());
j(equal, &invoke);
ASSERT(actual.reg().is(rax));
ASSERT(expected.reg().is(rbx));
}
}
if (!definitely_matches) {
Handle<Code> adaptor =
Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
if (!code_constant.is_null()) {
movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
movq(rdx, code_register);
}
movq(kScratchRegister, adaptor, RelocInfo::CODE_TARGET);
if (flag == CALL_FUNCTION) {
call(kScratchRegister);
jmp(done);
} else {
jmp(kScratchRegister);
}
bind(&invoke);
}
}
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
if (flag == CALL_FUNCTION) {
call(code);
} else {
ASSERT(flag == JUMP_FUNCTION);
jmp(code);
}
bind(&done);
}
void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag) {
Label done;
Register dummy = rax;
InvokePrologue(expected, actual, code, dummy, &done, flag);
movq(kScratchRegister, code, rmode);
if (flag == CALL_FUNCTION) {
call(kScratchRegister);
} else {
ASSERT(flag == JUMP_FUNCTION);
jmp(kScratchRegister);
}
bind(&done);
}
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag) {
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
movl(rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
// Advances rdx to the end of the Code object headers, to the start of
// the executable code.
lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
ParameterCount expected(rbx);
InvokeCode(rdx, expected, actual, flag);
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(rbp);
movq(rbp, rsp);
push(rsi); // Context.
push(Immediate(Smi::FromInt(type)));
movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
push(kScratchRegister);
if (FLAG_debug_code) {
movq(kScratchRegister,
Factory::undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
cmpq(Operand(rsp, 0), kScratchRegister);
Check(not_equal, "code object not properly patched");
}
}
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (FLAG_debug_code) {
movq(kScratchRegister, Immediate(Smi::FromInt(type)));
cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, "stack frame types must match");
}
movq(rsp, rbp);
pop(rbp);
}
void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
// Setup the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(rbp);
movq(rbp, rsp);
// Reserve room for entry stack pointer and push the debug marker.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // saved entry sp, patched before call
push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
ExternalReference context_address(Top::k_context_address);
movq(rdi, rax); // Backup rax before we use it.
movq(rax, rbp);
store_rax(c_entry_fp_address);
movq(rax, rsi);
store_rax(context_address);
// Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r15, Operand(rbp, rdi, kTimesPointerSize, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
// TODO(1243899): This should be symmetric to
// CopyRegistersFromStackToMemory() but it isn't! esp is assumed
// correct here, but computed for the other call. Very error
// prone! FIX THIS. Actually there are deeper problems with
// register saving than this asymmetry (see the bug report
// associated with this issue).
PushRegistersFromMemory(kJSCallerSaved);
}
#endif
// Reserve space for two arguments: argc and argv
subq(rsp, Immediate(2 * kPointerSize));
// Get the required frame alignment for the OS.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
movq(kScratchRegister, Immediate(-kFrameAlignment));
and_(rsp, kScratchRegister);
}
// Patch the saved entry sp.
movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
// Registers:
// r15 : argv
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
// It's okay to clobber register ebx below because we don't need
// the function pointer after this.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
lea(rbx, Operand(rbp, kOffset));
CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
}
#endif
// Get the return address from the stack and restore the frame pointer.
movq(rcx, Operand(rbp, 1 * kPointerSize));
movq(rbp, Operand(rbp, 0 * kPointerSize));
// Pop the arguments and the receiver from the caller stack.
lea(rsp, Operand(r15, 1 * kPointerSize));
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Top::k_context_address);
movq(kScratchRegister, context_address);
movq(rsi, Operand(kScratchRegister, 0));
#ifdef DEBUG
movq(Operand(kScratchRegister, 0), Immediate(0));
#endif
// Push the return address to get ready to return.
push(rcx);
// Clear the top frame.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
movq(kScratchRegister, c_entry_fp_address);
movq(Operand(kScratchRegister, 0), Immediate(0));
}
} } // namespace v8::internal } } // namespace v8::internal

30
deps/v8/src/x64/macro-assembler-x64.h

@ -66,6 +66,16 @@ class MacroAssembler: public Assembler {
public: public:
MacroAssembler(void* buffer, int size); MacroAssembler(void* buffer, int size);
// ---------------------------------------------------------------------------
// x64 Implementation Support
// Test the MacroAssembler by constructing and calling a simple JSFunction.
// Cannot be done using API because this must be done in the middle of the
// bootstrapping process.
// TODO(X64): Remove once we can get through the bootstrapping process.
static void ConstructAndTestJSFunction();
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// GC Support // GC Support
@ -117,7 +127,7 @@ class MacroAssembler: public Assembler {
// JavaScript invokes // JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping. // Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(const Operand& code, void InvokeCode(Register code,
const ParameterCount& expected, const ParameterCount& expected,
const ParameterCount& actual, const ParameterCount& actual,
InvokeFlag flag); InvokeFlag flag);
@ -141,10 +151,19 @@ class MacroAssembler: public Assembler {
// Store the code object for the given builtin in the target register. // Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id); void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// ---------------------------------------------------------------------------
// Macro instructions
// Expression support // Expression support
void Set(Register dst, int64_t x); void Set(Register dst, int64_t x);
void Set(const Operand& dst, int64_t x); void Set(const Operand& dst, int64_t x);
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
// Compare object type for heap object. // Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map. // Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map); void CmpObjectType(Register heap_object, InstanceType type, Register map);
@ -159,9 +178,8 @@ class MacroAssembler: public Assembler {
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Exception handling // Exception handling
// Push a new try handler and link into try handler chain. // Push a new try handler and link into try handler chain. The return
// The return address must be pushed before calling this helper. // address must be pushed before calling this helper.
// On exit, rax contains TOS (next_sp).
void PushTryHandler(CodeLocation try_location, HandlerType type); void PushTryHandler(CodeLocation try_location, HandlerType type);
@ -292,13 +310,13 @@ class MacroAssembler: public Assembler {
bool generating_stub_; bool generating_stub_;
bool allow_stub_calls_; bool allow_stub_calls_;
Handle<Object> code_object_; // This handle will be patched with the code Handle<Object> code_object_; // This handle will be patched with the code
// code object on installation. // object on installation.
// Helper functions for generating invokes. // Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected, void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual, const ParameterCount& actual,
Handle<Code> code_constant, Handle<Code> code_constant,
const Operand& code_operand, Register code_register,
Label* done, Label* done,
InvokeFlag flag); InvokeFlag flag);

37
deps/v8/src/x64/register-allocator-x64-inl.h

@ -37,33 +37,50 @@ namespace internal {
// RegisterAllocator implementation. // RegisterAllocator implementation.
bool RegisterAllocator::IsReserved(Register reg) { bool RegisterAllocator::IsReserved(Register reg) {
// All registers are reserved for now. return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
return true; reg.is(kScratchRegister);
} }
// The register allocator uses small integers to represent the // The register allocator uses small integers to represent the
// non-reserved assembler registers. // non-reserved assembler registers.
int RegisterAllocator::ToNumber(Register reg) { int RegisterAllocator::ToNumber(Register reg) {
ASSERT(reg.is_valid() && !IsReserved(reg)); ASSERT(reg.is_valid() && !IsReserved(reg));
UNIMPLEMENTED(); static const int numbers[] = {
return -1; 0, // rax
2, // rcx
3, // rdx
1, // rbx
-1, // rsp
-1, // rbp
-1, // rsi
4, // rdi
5, // r8
6, // r9
-1, // r10
7, // r11
11, // r12
10, // r13
8, // r14
9 // r15
};
return numbers[reg.code()];
} }
Register RegisterAllocator::ToRegister(int num) { Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters); ASSERT(num >= 0 && num < kNumRegisters);
UNIMPLEMENTED(); static Register registers[] =
return no_reg; { rax, rbx, rcx, rdx, rdi, r8, r9, r11, r14, r15, r13, r12 };
return registers[num];
} }
void RegisterAllocator::Initialize() { void RegisterAllocator::Initialize() {
UNIMPLEMENTED(); Reset();
// The non-reserved rdi register is live on JS function entry.
Use(rdi); // JS function.
} }
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_ #endif // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_

63
deps/v8/src/x64/register-allocator-x64.cc

@ -25,3 +25,66 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "register-allocator-inl.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
void Result::ToRegister() {
ASSERT(is_valid());
if (is_constant()) {
// TODO(X64): Handle constant results.
/*
Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
ASSERT(fresh.is_valid());
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
} else {
CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
Immediate(handle()));
}
// This result becomes a copy of the fresh one.
*this = fresh;
*/
}
ASSERT(is_register());
}
void Result::ToRegister(Register target) {
ASSERT(is_valid());
if (!is_register() || !reg().is(target)) {
Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
ASSERT(fresh.is_valid());
if (is_register()) {
CodeGeneratorScope::Current()->masm()->movq(fresh.reg(), reg());
} else {
ASSERT(is_constant());
/*
TODO(X64): Handle constant results.
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
} else {
CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
Immediate(handle()));
}
*/
}
*this = fresh;
} else if (is_register() && reg().is(target)) {
ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
CodeGeneratorScope::Current()->frame()->Spill(target);
ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
}
ASSERT(is_register());
ASSERT(reg().is(target));
}
} } // namespace v8::internal

2
deps/v8/src/x64/register-allocator-x64.h

@ -35,7 +35,7 @@ class RegisterAllocatorConstants : public AllStatic {
public: public:
// Register allocation is not yet implemented on x64, but C++ // Register allocation is not yet implemented on x64, but C++
// forbids 0-length arrays so we use 1 as the number of registers. // forbids 0-length arrays so we use 1 as the number of registers.
static const int kNumRegisters = 1; static const int kNumRegisters = 12;
static const int kInvalidRegister = -1; static const int kInvalidRegister = -1;
}; };

1
deps/v8/src/x64/simulator-x64.h

@ -31,6 +31,7 @@
// Since there is no simulator for the ia32 architecture the only thing we can // Since there is no simulator for the ia32 architecture the only thing we can
// do is to call the entry directly. // do is to call the entry directly.
// TODO(X64): Don't pass p0, since it isn't used?
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4); entry(p0, p1, p2, p3, p4);

169
deps/v8/src/x64/virtual-frame-x64.cc

@ -25,3 +25,172 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm())
// -------------------------------------------------------------------------
// VirtualFrame implementation.
// On entry to a function, the virtual frame already contains the receiver,
// the parameters, and a return address. All frame elements are in memory.
VirtualFrame::VirtualFrame()
: elements_(parameter_count() + local_count() + kPreallocatedElements),
stack_pointer_(parameter_count() + 1) { // 0-based index of TOS.
for (int i = 0; i <= stack_pointer_; i++) {
elements_.Add(FrameElement::MemoryElement());
}
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
register_locations_[i] = kIllegalIndex;
}
}
void VirtualFrame::Enter() {
// Registers live on entry to a JS frame:
// rsp: stack pointer, points to return address from this function.
// rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
// Trampoline frame.
// rsi: context of this function call.
// rdi: pointer to this function object.
Comment cmnt(masm(), "[ Enter JS frame");
#ifdef DEBUG
// Verify that rdi contains a JS function. The following code
// relies on rax being available for use.
__ testq(rdi, Immediate(kSmiTagMask));
__ Check(not_zero,
"VirtualFrame::Enter - rdi is not a function (smi check).");
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
__ Check(equal,
"VirtualFrame::Enter - rdi is not a function (map check).");
#endif
EmitPush(rbp);
__ movq(rbp, rsp);
// Store the context in the frame. The context is kept in rsi and a
// copy is stored in the frame. The external reference to rsi
// remains.
EmitPush(rsi);
// Store the function in the frame. The frame owns the register
// reference now (ie, it can keep it in rdi or spill it later).
Push(rdi);
// SyncElementAt(element_count() - 1);
cgen()->allocator()->Unuse(rdi);
}
void VirtualFrame::Exit() {
Comment cmnt(masm(), "[ Exit JS frame");
// Record the location of the JS exit code for patching when setting
// break point.
__ RecordJSReturn();
// Avoid using the leave instruction here, because it is too
// short. We need the return sequence to be a least the size of a
// call instruction to support patching the exit code in the
// debugger. See VisitReturnStatement for the full return sequence.
// TODO(X64): A patched call will be very long now. Make sure we
// have enough room.
__ movq(rsp, rbp);
stack_pointer_ = frame_pointer();
for (int i = element_count() - 1; i > stack_pointer_; i--) {
FrameElement last = elements_.RemoveLast();
if (last.is_register()) {
Unuse(last.reg());
}
}
EmitPop(rbp);
}
void VirtualFrame::EmitPop(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(reg);
}
void VirtualFrame::EmitPop(const Operand& operand) {
ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
elements_.RemoveLast();
__ pop(operand);
}
void VirtualFrame::EmitPush(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(reg);
}
void VirtualFrame::EmitPush(const Operand& operand) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(operand);
}
void VirtualFrame::EmitPush(Immediate immediate) {
ASSERT(stack_pointer_ == element_count() - 1);
elements_.Add(FrameElement::MemoryElement());
stack_pointer_++;
__ push(immediate);
}
void VirtualFrame::Drop(int a) {
UNIMPLEMENTED();
}
int VirtualFrame::InvalidateFrameSlotAt(int a) {
UNIMPLEMENTED();
return -1;
}
void VirtualFrame::MergeTo(VirtualFrame* a) {
UNIMPLEMENTED();
}
Result VirtualFrame::Pop() {
UNIMPLEMENTED();
return Result(NULL);
}
Result VirtualFrame::RawCallStub(CodeStub* a) {
UNIMPLEMENTED();
return Result(NULL);
}
void VirtualFrame::SyncElementBelowStackPointer(int a) {
UNIMPLEMENTED();
}
void VirtualFrame::SyncElementByPushing(int a) {
UNIMPLEMENTED();
}
void VirtualFrame::SyncRange(int a, int b) {
UNIMPLEMENTED();
}
#undef __
} } // namespace v8::internal

4
deps/v8/src/x64/virtual-frame-x64.h

@ -372,12 +372,12 @@ class VirtualFrame : public ZoneObject {
// Pop and save an element from the top of the expression stack and // Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction. // emit a corresponding pop instruction.
void EmitPop(Register reg); void EmitPop(Register reg);
void EmitPop(Operand operand); void EmitPop(const Operand& operand);
// Push an element on top of the expression stack and emit a // Push an element on top of the expression stack and emit a
// corresponding push instruction. // corresponding push instruction.
void EmitPush(Register reg); void EmitPush(Register reg);
void EmitPush(Operand operand); void EmitPush(const Operand& operand);
void EmitPush(Immediate immediate); void EmitPush(Immediate immediate);
// Push an element on the virtual frame. // Push an element on the virtual frame.

75
deps/v8/test/cctest/test-api.cc

@ -551,6 +551,7 @@ THREADED_TEST(UsingExternalString) {
CHECK(isymbol->IsSymbol()); CHECK(isymbol->IsSymbol());
} }
i::Heap::CollectAllGarbage(); i::Heap::CollectAllGarbage();
i::Heap::CollectAllGarbage();
} }
@ -568,6 +569,7 @@ THREADED_TEST(UsingExternalAsciiString) {
CHECK(isymbol->IsSymbol()); CHECK(isymbol->IsSymbol());
} }
i::Heap::CollectAllGarbage(); i::Heap::CollectAllGarbage();
i::Heap::CollectAllGarbage();
} }
@ -2281,7 +2283,7 @@ static v8::Handle<Value> XPropertyGetter(Local<String> property,
} }
THREADED_TEST(NamedInterceporPropertyRead) { THREADED_TEST(NamedInterceptorPropertyRead) {
v8::HandleScope scope; v8::HandleScope scope;
Local<ObjectTemplate> templ = ObjectTemplate::New(); Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(XPropertyGetter); templ->SetNamedPropertyHandler(XPropertyGetter);
@ -2294,6 +2296,58 @@ THREADED_TEST(NamedInterceporPropertyRead) {
} }
} }
static v8::Handle<Value> IndexedPropertyGetter(uint32_t index,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
if (index == 37) {
return v8::Handle<Value>(v8_num(625));
}
return v8::Handle<Value>();
}
static v8::Handle<Value> IndexedPropertySetter(uint32_t index,
Local<Value> value,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
if (index == 39) {
return value;
}
return v8::Handle<Value>();
}
THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
v8::HandleScope scope;
Local<ObjectTemplate> templ = ObjectTemplate::New();
templ->SetIndexedPropertyHandler(IndexedPropertyGetter,
IndexedPropertySetter);
LocalContext context;
context->Global()->Set(v8_str("obj"), templ->NewInstance());
Local<Script> getter_script = Script::Compile(v8_str(
"obj.__defineGetter__(\"3\", function(){return 5;});obj[3];"));
Local<Script> setter_script = Script::Compile(v8_str(
"obj.__defineSetter__(\"17\", function(val){this.foo = val;});"
"obj[17] = 23;"
"obj.foo;"));
Local<Script> interceptor_setter_script = Script::Compile(v8_str(
"obj.__defineSetter__(\"39\", function(val){this.foo = \"hit\";});"
"obj[39] = 47;"
"obj.foo;")); // This setter should not run, due to the interceptor.
Local<Script> interceptor_getter_script = Script::Compile(v8_str(
"obj[37];"));
Local<Value> result = getter_script->Run();
CHECK_EQ(v8_num(5), result);
result = setter_script->Run();
CHECK_EQ(v8_num(23), result);
result = interceptor_setter_script->Run();
CHECK_EQ(v8_num(23), result);
result = interceptor_getter_script->Run();
CHECK_EQ(v8_num(625), result);
}
THREADED_TEST(MultiContexts) { THREADED_TEST(MultiContexts) {
v8::HandleScope scope; v8::HandleScope scope;
v8::Handle<ObjectTemplate> templ = ObjectTemplate::New(); v8::Handle<ObjectTemplate> templ = ObjectTemplate::New();
@ -2742,14 +2796,17 @@ static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message,
CHECK_EQ(v8::Undefined(), message->GetScriptResourceName()); CHECK_EQ(v8::Undefined(), message->GetScriptResourceName());
message->GetLineNumber(); message->GetLineNumber();
message->GetSourceLine(); message->GetSourceLine();
message_received = true;
} }
THREADED_TEST(ErrorWithMissingScriptInfo) { THREADED_TEST(ErrorWithMissingScriptInfo) {
message_received = false;
v8::HandleScope scope; v8::HandleScope scope;
LocalContext context; LocalContext context;
v8::V8::AddMessageListener(MissingScriptInfoMessageListener); v8::V8::AddMessageListener(MissingScriptInfoMessageListener);
Script::Compile(v8_str("throw Error()"))->Run(); Script::Compile(v8_str("throw Error()"))->Run();
CHECK(message_received);
v8::V8::RemoveMessageListeners(MissingScriptInfoMessageListener); v8::V8::RemoveMessageListeners(MissingScriptInfoMessageListener);
} }
@ -5008,6 +5065,22 @@ THREADED_TEST(InterceptorStoreIC) {
} }
THREADED_TEST(InterceptorStoreICWithNoSetter) {
v8::HandleScope scope;
v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
LocalContext context;
context->Global()->Set(v8_str("o"), templ->NewInstance());
v8::Handle<Value> value = CompileRun(
"for (var i = 0; i < 1000; i++) {"
" o.y = 239;"
"}"
"42 + o.y");
CHECK_EQ(239 + 42, value->Int32Value());
}
v8::Handle<Value> call_ic_function; v8::Handle<Value> call_ic_function;
v8::Handle<Value> call_ic_function2; v8::Handle<Value> call_ic_function2;

15
deps/v8/test/cctest/test-assembler-x64.cc

@ -132,7 +132,7 @@ TEST(AssemblerX64ArithmeticOperations) {
// Assemble a simple function that copies argument 2 and returns it. // Assemble a simple function that copies argument 2 and returns it.
__ movq(rax, rsi); __ movq(rax, rsi);
__ add(rax, rdi); __ addq(rax, rdi);
__ ret(0); __ ret(0);
CodeDesc desc; CodeDesc desc;
@ -215,12 +215,12 @@ TEST(AssemblerX64LoopImmediates) {
Label Loop1_body; Label Loop1_body;
__ jmp(&Loop1_test); __ jmp(&Loop1_test);
__ bind(&Loop1_body); __ bind(&Loop1_body);
__ add(rax, Immediate(7)); __ addq(rax, Immediate(7));
__ bind(&Loop1_test); __ bind(&Loop1_test);
__ cmp(rax, Immediate(20)); __ cmpq(rax, Immediate(20));
__ j(less_equal, &Loop1_body); __ j(less_equal, &Loop1_body);
// Did the loop terminate with the expected value? // Did the loop terminate with the expected value?
__ cmp(rax, Immediate(25)); __ cmpq(rax, Immediate(25));
__ j(not_equal, &Fail); __ j(not_equal, &Fail);
Label Loop2_test; Label Loop2_test;
@ -228,12 +228,12 @@ TEST(AssemblerX64LoopImmediates) {
__ movq(rax, Immediate(0x11FEED00)); __ movq(rax, Immediate(0x11FEED00));
__ jmp(&Loop2_test); __ jmp(&Loop2_test);
__ bind(&Loop2_body); __ bind(&Loop2_body);
__ add(rax, Immediate(-0x1100)); __ addq(rax, Immediate(-0x1100));
__ bind(&Loop2_test); __ bind(&Loop2_test);
__ cmp(rax, Immediate(0x11FE8000)); __ cmpq(rax, Immediate(0x11FE8000));
__ j(greater, &Loop2_body); __ j(greater, &Loop2_body);
// Did the loop terminate with the expected value? // Did the loop terminate with the expected value?
__ cmp(rax, Immediate(0x11FE7600)); __ cmpq(rax, Immediate(0x11FE7600));
__ j(not_equal, &Fail); __ j(not_equal, &Fail);
__ movq(rax, Immediate(1)); __ movq(rax, Immediate(1));
@ -248,4 +248,5 @@ TEST(AssemblerX64LoopImmediates) {
int result = FUNCTION_CAST<F0>(buffer)(); int result = FUNCTION_CAST<F0>(buffer)();
CHECK_EQ(1, result); CHECK_EQ(1, result);
} }
#undef __ #undef __

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save