
Upgrade v8 to 2.0

(With just one change: remove -Werror)
v0.7.4-release
Ryan Dahl
commit 728d8a37f4
Changed files (lines changed in parentheses):
  1. deps/v8/AUTHORS (1)
  2. deps/v8/ChangeLog (17)
  3. deps/v8/SConstruct (2)
  4. deps/v8/include/v8-debug.h (2)
  5. deps/v8/include/v8.h (26)
  6. deps/v8/src/SConscript (1)
  7. deps/v8/src/accessors.cc (9)
  8. deps/v8/src/allocation.cc (6)
  9. deps/v8/src/api.cc (54)
  10. deps/v8/src/api.h (9)
  11. deps/v8/src/arguments.h (6)
  12. deps/v8/src/arm/assembler-arm-inl.h (2)
  13. deps/v8/src/arm/assembler-arm.cc (294)
  14. deps/v8/src/arm/assembler-arm.h (175)
  15. deps/v8/src/arm/builtins-arm.cc (40)
  16. deps/v8/src/arm/codegen-arm-inl.h (13)
  17. deps/v8/src/arm/codegen-arm.cc (495)
  18. deps/v8/src/arm/codegen-arm.h (47)
  19. deps/v8/src/arm/constants-arm.cc (20)
  20. deps/v8/src/arm/constants-arm.h (24)
  21. deps/v8/src/arm/cpu-arm.cc (3)
  22. deps/v8/src/arm/debug-arm.cc (2)
  23. deps/v8/src/arm/disasm-arm.cc (199)
  24. deps/v8/src/arm/fast-codegen-arm.cc (1243)
  25. deps/v8/src/arm/frames-arm.cc (15)
  26. deps/v8/src/arm/frames-arm.h (2)
  27. deps/v8/src/arm/macro-assembler-arm.cc (30)
  28. deps/v8/src/arm/macro-assembler-arm.h (13)
  29. deps/v8/src/arm/regexp-macro-assembler-arm.cc (12)
  30. deps/v8/src/arm/simulator-arm.cc (345)
  31. deps/v8/src/arm/simulator-arm.h (79)
  32. deps/v8/src/assembler.cc (44)
  33. deps/v8/src/assembler.h (21)
  34. deps/v8/src/ast.h (54)
  35. deps/v8/src/bootstrapper.cc (42)
  36. deps/v8/src/bootstrapper.h (18)
  37. deps/v8/src/checks.cc (2)
  38. deps/v8/src/code-stubs.cc (50)
  39. deps/v8/src/code-stubs.h (45)
  40. deps/v8/src/codegen.cc (115)
  41. deps/v8/src/codegen.h (31)
  42. deps/v8/src/compiler.cc (464)
  43. deps/v8/src/compiler.h (13)
  44. deps/v8/src/conversions.cc (31)
  45. deps/v8/src/debug-agent.cc (10)
  46. deps/v8/src/debug.cc (9)
  47. deps/v8/src/debug.h (6)
  48. deps/v8/src/disassembler.cc (4)
  49. deps/v8/src/execution.cc (39)
  50. deps/v8/src/execution.h (41)
  51. deps/v8/src/factory.cc (11)
  52. deps/v8/src/factory.h (9)
  53. deps/v8/src/fast-codegen.cc (396)
  54. deps/v8/src/fast-codegen.h (57)
  55. deps/v8/src/flag-definitions.h (3)
  56. deps/v8/src/flags.cc (4)
  57. deps/v8/src/frames.cc (22)
  58. deps/v8/src/frames.h (25)
  59. deps/v8/src/global-handles.cc (18)
  60. deps/v8/src/global-handles.h (8)
  61. deps/v8/src/globals.h (18)
  62. deps/v8/src/handles.cc (61)
  63. deps/v8/src/handles.h (17)
  64. deps/v8/src/heap-profiler.cc (2)
  65. deps/v8/src/heap.cc (158)
  66. deps/v8/src/heap.h (35)
  67. deps/v8/src/ia32/assembler-ia32-inl.h (2)
  68. deps/v8/src/ia32/assembler-ia32.cc (80)
  69. deps/v8/src/ia32/assembler-ia32.h (33)
  70. deps/v8/src/ia32/builtins-ia32.cc (41)
  71. deps/v8/src/ia32/codegen-ia32.cc (338)
  72. deps/v8/src/ia32/codegen-ia32.h (62)
  73. deps/v8/src/ia32/disasm-ia32.cc (40)
  74. deps/v8/src/ia32/fast-codegen-ia32.cc (1251)
  75. deps/v8/src/ia32/frames-ia32.cc (13)
  76. deps/v8/src/ia32/frames-ia32.h (2)
  77. deps/v8/src/ia32/macro-assembler-ia32.cc (102)
  78. deps/v8/src/ia32/macro-assembler-ia32.h (19)
  79. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (12)
  80. deps/v8/src/ia32/register-allocator-ia32.cc (4)
  81. deps/v8/src/ia32/simulator-ia32.h (9)
  82. deps/v8/src/ia32/stub-cache-ia32.cc (42)
  83. deps/v8/src/ia32/virtual-frame-ia32.cc (18)
  84. deps/v8/src/ic.cc (3)
  85. deps/v8/src/interpreter-irregexp.cc (23)
  86. deps/v8/src/jsregexp.cc (157)
  87. deps/v8/src/jsregexp.h (11)
  88. deps/v8/src/list.h (4)
  89. deps/v8/src/log-utils.cc (10)
  90. deps/v8/src/log-utils.h (7)
  91. deps/v8/src/log.cc (47)
  92. deps/v8/src/log.h (2)
  93. deps/v8/src/macros.py (5)
  94. deps/v8/src/mark-compact.cc (19)
  95. deps/v8/src/mirror-delay.js (65)
  96. deps/v8/src/mksnapshot.cc (106)
  97. deps/v8/src/objects-debug.cc (18)
  98. deps/v8/src/objects-inl.h (113)
  99. deps/v8/src/objects.cc (201)
  100. deps/v8/src/objects.h (229)

deps/v8/AUTHORS (1)

@@ -19,3 +19,4 @@ Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
Ryan Dahl <coldredlemur@gmail.com>
Patrick Gansterer <paroga@paroga.com>
John Jozwiak <jjozwiak@codeaurora.org>

deps/v8/ChangeLog (17)

@@ -1,3 +1,20 @@
2009-11-18: Version 2.0.0
Added support for VFP on ARM.
Added TryCatch::ReThrow method to the API.
Reduced the size of snapshots and improved the snapshot load time.
Improved heap profiler support.
64-bit version now supported on Windows.
Fixed a number of debugger issues.
Fixed bugs.
2009-10-29: Version 1.3.18
Reverted a change which caused crashes in RegExp replace.

deps/v8/SConstruct (2)

@@ -272,7 +272,7 @@ V8_EXTRA_FLAGS = {
'WARNINGFLAGS': ['/W3']
},
'arch:x64': {
'WARNINGFLAGS': ['/W2']
'WARNINGFLAGS': ['/W3']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],

deps/v8/include/v8-debug.h (2)

@@ -218,7 +218,7 @@ class EXPORT Debug {
/**
* Register a callback function to be called when a debug message has been
* received and is ready to be precessed. For the debug messages to be
* received and is ready to be processed. For the debug messages to be
* processed V8 needs to be entered, and in certain embedding scenarios this
* callback can be used to make sure V8 is entered for the debug message to
* be processed. Note that debug messages will only be processed if there is

deps/v8/include/v8.h (26)

@@ -129,8 +129,9 @@ class Data;
namespace internal {
class Object;
class Arguments;
class Object;
class Top;
}
@@ -2472,6 +2473,15 @@ class V8EXPORT TryCatch {
*/
bool CanContinue() const;
/**
* Throws the exception caught by this TryCatch in a way that avoids
* it being caught again by this same TryCatch. As with ThrowException
* it is illegal to execute any JavaScript operations after calling
* ReThrow; the caller must return immediately to where the exception
* is caught.
*/
Handle<Value> ReThrow();
/**
* Returns the exception caught by this try/catch block. If no exception has
* been caught an empty handle is returned.
@@ -2523,14 +2533,16 @@ class V8EXPORT TryCatch {
*/
void SetCaptureMessage(bool value);
public:
TryCatch* next_;
private:
void* next_;
void* exception_;
void* message_;
bool is_verbose_;
bool can_continue_;
bool capture_message_;
void* js_handler_;
bool is_verbose_ : 1;
bool can_continue_ : 1;
bool capture_message_ : 1;
bool rethrow_ : 1;
friend class v8::internal::Top;
};
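
The new ReThrow pairs with the rethrow_ bit and the destructor logic in api.cc below: ReThrow only marks the handler, and the actual throw happens when the TryCatch goes out of scope. A minimal embedder-side usage sketch (the function and its names are illustrative, not part of this commit; it assumes only the v8.h API shown here):

v8::Handle<v8::Value> CallAndPropagate(v8::Handle<v8::Function> fn,
                                       v8::Handle<v8::Object> recv) {
  v8::TryCatch try_catch;
  v8::Handle<v8::Value> result = fn->Call(recv, 0, NULL);
  if (try_catch.HasCaught()) {
    // Mark the exception for rethrow and return immediately; the throw
    // itself runs in ~TryCatch, past this handler, so an outer TryCatch
    // (or the uncaught-exception machinery) sees it.
    return try_catch.ReThrow();
  }
  return result;
}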

deps/v8/src/SConscript (1)

@@ -264,7 +264,6 @@ def ConfigureObjectFiles():
else:
snapshot_cc = Command('snapshot.cc', [], [])
snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
libraries_obj = context.ConfigureObject(env, libraries_empty_src, CPPPATH=['.'])
else:
snapshot_obj = empty_snapshot_obj
library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]

deps/v8/src/accessors.cc (9)

@@ -315,7 +315,14 @@ Object* Accessors::ScriptGetLineEnds(Object* object, void*) {
HandleScope scope;
Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
InitScriptLineEnds(script);
return script->line_ends();
if (script->line_ends_js_array()->IsUndefined()) {
Handle<FixedArray> line_ends_fixed_array(
FixedArray::cast(script->line_ends_fixed_array()));
Handle<FixedArray> copy = Factory::CopyFixedArray(line_ends_fixed_array);
Handle<JSArray> js_array = Factory::NewJSArrayWithElements(copy);
script->set_line_ends_js_array(*js_array);
}
return script->line_ends_js_array();
}

deps/v8/src/allocation.cc (6)

@@ -80,7 +80,7 @@ void AllStatic::operator delete(void* p) {
char* StrDup(const char* str) {
int length = strlen(str);
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
memcpy(result, str, length * kCharSize);
result[length] = '\0';
@@ -88,8 +88,8 @@ char* StrDup(const char* str) {
}
char* StrNDup(const char* str, size_t n) {
size_t length = strlen(str);
char* StrNDup(const char* str, int n) {
int length = StrLength(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
memcpy(result, str, length * kCharSize);
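
The strlen-to-StrLength moves here (and throughout api.cc below) swap size_t lengths for V8's int-based convention. A plausible shape for the helper, assuming it lives in src/utils.h (which the api.cc hunk newly includes) and uses V8's ASSERT macro; a sketch, not the commit's verbatim definition:

#include <cstring>

// Checked narrowing wrapper around strlen: returns int so callers avoid
// signed/unsigned mismatches against V8's int-typed lengths.
inline int StrLength(const char* string) {
  size_t length = strlen(string);
  ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
  return static_cast<int>(length);
}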

deps/v8/src/api.cc (54)

@@ -37,6 +37,7 @@
#include "platform.h"
#include "serialize.h"
#include "snapshot.h"
#include "utils.h"
#include "v8threads.h"
#include "version.h"
@@ -1191,19 +1192,26 @@ void Script::SetData(v8::Handle<Value> data) {
v8::TryCatch::TryCatch()
: next_(i::Top::try_catch_handler()),
: next_(i::Top::try_catch_handler_address()),
exception_(i::Heap::the_hole_value()),
message_(i::Smi::FromInt(0)),
is_verbose_(false),
can_continue_(true),
capture_message_(true),
js_handler_(NULL) {
rethrow_(false) {
i::Top::RegisterTryCatchHandler(this);
}
v8::TryCatch::~TryCatch() {
i::Top::UnregisterTryCatchHandler(this);
if (rethrow_) {
v8::HandleScope scope;
v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception());
i::Top::UnregisterTryCatchHandler(this);
v8::ThrowException(exc);
} else {
i::Top::UnregisterTryCatchHandler(this);
}
}
@@ -1217,6 +1225,13 @@ bool v8::TryCatch::CanContinue() const {
}
v8::Handle<v8::Value> v8::TryCatch::ReThrow() {
if (!HasCaught()) return v8::Local<v8::Value>();
rethrow_ = true;
return v8::Undefined();
}
v8::Local<Value> v8::TryCatch::Exception() const {
if (HasCaught()) {
// Check for out of memory exception.
@@ -2032,11 +2047,11 @@ Local<String> v8::Object::ObjectProtoToString() {
Local<String> str = Utils::ToLocal(class_name);
const char* postfix = "]";
size_t prefix_len = strlen(prefix);
size_t str_len = str->Length();
size_t postfix_len = strlen(postfix);
int prefix_len = i::StrLength(prefix);
int str_len = str->Length();
int postfix_len = i::StrLength(postfix);
size_t buf_len = prefix_len + str_len + postfix_len;
int buf_len = prefix_len + str_len + postfix_len;
char* buf = i::NewArray<char>(buf_len);
// Write prefix.
@@ -2621,11 +2636,8 @@ bool v8::V8::Initialize() {
if (i::V8::IsRunning()) return true;
ENTER_V8;
HandleScope scope;
if (i::Snapshot::Initialize()) {
return true;
} else {
return i::V8::Initialize(NULL);
}
if (i::Snapshot::Initialize()) return true;
return i::V8::Initialize(NULL);
}
@@ -2950,7 +2962,7 @@ Local<String> v8::String::New(const char* data, int length) {
LOG_API("String::New(char)");
if (length == 0) return Empty();
ENTER_V8;
if (length == -1) length = strlen(data);
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
return Utils::ToLocal(result);
@@ -2973,7 +2985,7 @@ Local<String> v8::String::NewUndetectable(const char* data, int length) {
EnsureInitialized("v8::String::NewUndetectable()");
LOG_API("String::NewUndetectable(char)");
ENTER_V8;
if (length == -1) length = strlen(data);
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length));
result->MarkAsUndetectable();
@@ -3041,7 +3053,8 @@ static void DisposeExternalString(v8::Persistent<v8::Value> obj,
v8::String::ExternalStringResource* resource =
reinterpret_cast<v8::String::ExternalStringResource*>(parameter);
if (resource != NULL) {
const size_t total_size = resource->length() * sizeof(*resource->data());
const int total_size =
static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Decrement(total_size);
// The object will continue to live in the JavaScript heap until the
@@ -3071,7 +3084,8 @@ static void DisposeExternalAsciiString(v8::Persistent<v8::Value> obj,
v8::String::ExternalAsciiStringResource* resource =
reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter);
if (resource != NULL) {
const size_t total_size = resource->length() * sizeof(*resource->data());
const int total_size =
static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Decrement(total_size);
// The object will continue to live in the JavaScript heap until the
@@ -3093,7 +3107,8 @@ Local<String> v8::String::NewExternal(
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
const size_t total_size = resource->length() * sizeof(*resource->data());
const int total_size =
static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalStringHandle(resource);
i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
@@ -3128,7 +3143,8 @@ Local<String> v8::String::NewExternal(
EnsureInitialized("v8::String::NewExternal()");
LOG_API("String::NewExternal");
ENTER_V8;
const size_t total_size = resource->length() * sizeof(*resource->data());
const int total_size =
static_cast<int>(resource->length() * sizeof(*resource->data()));
i::Counters::total_external_string_memory.Increment(total_size);
i::Handle<i::String> result = NewExternalAsciiStringHandle(resource);
i::Handle<i::Object> handle = i::GlobalHandles::Create(*result);
@@ -3250,7 +3266,7 @@ Local<String> v8::String::NewSymbol(const char* data, int length) {
EnsureInitialized("v8::String::NewSymbol()");
LOG_API("String::NewSymbol(char)");
ENTER_V8;
if (length == -1) length = strlen(data);
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> result =
i::Factory::LookupSymbol(i::Vector<const char>(data, length));
return Utils::ToLocal(result);

deps/v8/src/api.h (9)

@@ -125,6 +125,15 @@ static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
}
class ApiFunction {
public:
explicit ApiFunction(v8::internal::Address addr) : addr_(addr) { }
v8::internal::Address address() { return addr_; }
private:
v8::internal::Address addr_;
};
v8::Arguments::Arguments(v8::Local<v8::Value> data,
v8::Local<v8::Object> holder,
v8::Local<v8::Function> callee,

deps/v8/src/arguments.h (6)

@@ -77,9 +77,9 @@ class Arguments BASE_EMBEDDED {
// can.
class CustomArguments : public Relocatable {
public:
inline CustomArguments(Object *data,
JSObject *self,
JSObject *holder) {
inline CustomArguments(Object* data,
JSObject* self,
JSObject* holder) {
values_[3] = self;
values_[2] = holder;
values_[1] = Smi::FromInt(0);

deps/v8/src/arm/assembler-arm-inl.h (2)

@@ -85,7 +85,7 @@ Object* RelocInfo::target_object() {
}
Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}

deps/v8/src/arm/assembler-arm.cc (294)

@@ -42,6 +42,34 @@
namespace v8 {
namespace internal {
// Safe default is no features.
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::enabled_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
void CpuFeatures::Probe() {
// If the compiler is allowed to use vfp then we can use vfp too in our
// code generation.
#if !defined(__arm__)
// For the simulator=arm build, always use VFP since the arm simulator has
// VFP support.
supported_ |= 1u << VFP3;
#else
if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
if (OS::ArmCpuHasFeature(VFP3)) {
// This implementation also sets the VFP flags if
// runtime detection of VFP returns true.
supported_ |= 1u << VFP3;
found_by_runtime_probing_ |= 1u << VFP3;
}
#endif
}
// -----------------------------------------------------------------------------
// Implementation of Register and CRegister
@@ -84,6 +112,57 @@ CRegister cr13 = { 13 };
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2".
Register s0 = { 0 };
Register s1 = { 1 };
Register s2 = { 2 };
Register s3 = { 3 };
Register s4 = { 4 };
Register s5 = { 5 };
Register s6 = { 6 };
Register s7 = { 7 };
Register s8 = { 8 };
Register s9 = { 9 };
Register s10 = { 10 };
Register s11 = { 11 };
Register s12 = { 12 };
Register s13 = { 13 };
Register s14 = { 14 };
Register s15 = { 15 };
Register s16 = { 16 };
Register s17 = { 17 };
Register s18 = { 18 };
Register s19 = { 19 };
Register s20 = { 20 };
Register s21 = { 21 };
Register s22 = { 22 };
Register s23 = { 23 };
Register s24 = { 24 };
Register s25 = { 25 };
Register s26 = { 26 };
Register s27 = { 27 };
Register s28 = { 28 };
Register s29 = { 29 };
Register s30 = { 30 };
Register s31 = { 31 };
Register d0 = { 0 };
Register d1 = { 1 };
Register d2 = { 2 };
Register d3 = { 3 };
Register d4 = { 4 };
Register d5 = { 5 };
Register d6 = { 6 };
Register d7 = { 7 };
Register d8 = { 8 };
Register d9 = { 9 };
Register d10 = { 10 };
Register d11 = { 11 };
Register d12 = { 12 };
Register d13 = { 13 };
Register d14 = { 14 };
Register d15 = { 15 };
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -203,10 +282,14 @@ enum {
B4 = 1 << 4,
B5 = 1 << 5,
B6 = 1 << 6,
B7 = 1 << 7,
B8 = 1 << 8,
B9 = 1 << 9,
B12 = 1 << 12,
B16 = 1 << 16,
B18 = 1 << 18,
B19 = 1 << 19,
B20 = 1 << 20,
B21 = 1 << 21,
B22 = 1 << 22,
@@ -523,6 +606,11 @@ static bool fits_shifter(uint32_t imm32,
// encoded.
static bool MustUseIp(RelocInfo::Mode rmode) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
}
#endif
return Serializer::enabled();
} else if (rmode == RelocInfo::NONE) {
return false;
@@ -1282,6 +1370,187 @@ void Assembler::stc2(Coprocessor coproc,
}
// Support for VFP.
void Assembler::fmdrr(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dm = <Rt,Rt2>.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!src1.is(pc) && !src2.is(pc));
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}
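
As a worked example of the bit arithmetic (derived from the emit expression above; the B constants are the 1 << n masks from the enum earlier in this file), the word for fmdrr(d6, r0, r1) under the default al condition expands to:

#include <cstdint>

constexpr uint32_t kCondAl = 0xEu << 28;  // al condition, bits 31-28
constexpr uint32_t kFmdrrD6R0R1 =
    kCondAl
    | (0xCu << 24)   // 1100, coprocessor transfer
    | (1u << 22)     // B22
    | (1u << 16)     // src2 = r1 (code 1) in Rt2, bits 19-16
    | (0u << 12)     // src1 = r0 (code 0) in Rt, bits 15-12
    | (0xBu << 8)    // 1011, double-precision coprocessor
    | (1u << 4)      // B4
    | 6u;            // dst = d6 (code 6) in Vm, bits 3-0
static_assert(kFmdrrD6R0R1 == 0xEC410B16u,
              "VMOV d6, r0, r1 per ARM DDI 0406A");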
void Assembler::fmrrd(const Register dst1,
const Register dst2,
const Register src,
const SBit s,
const Condition cond) {
// <Rt,Rt2> = Dm.
// Instruction details available in ARM DDI 0406A, A8-646.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}
void Assembler::fmsr(const Register dst,
const Register src,
const SBit s,
const Condition cond) {
// Sn = Rt.
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!src.is(pc));
emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
}
void Assembler::fmrs(const Register dst,
const Register src,
const SBit s,
const Condition cond) {
// Rt = Sn.
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!dst.is(pc));
emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
}
void Assembler::fsitod(const Register dst,
const Register src,
const SBit s,
const Condition cond) {
// Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
(0x1 & src.code())*B5 | (src.code() >> 1));
}
void Assembler::ftosid(const Register dst,
const Register src,
const SBit s,
const Condition cond) {
// Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
// Instruction details available in ARM DDI 0406A, A8-576.
// cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
// Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
0x5*B9 | B8 | B7 | B6 | src.code());
}
void Assembler::faddd(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dd = faddd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-536.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
void Assembler::fsubd(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dd = fsubd(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
void Assembler::fmuld(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dd = fmuld(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
void Assembler::fdivd(const Register dst,
const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// Dd = fdivd(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406A, A8-584.
// cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
void Assembler::fcmp(const Register src1,
const Register src2,
const SBit s,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
// Pseudo instructions
void Assembler::lea(Register dst,
const MemOperand& x,
@@ -1311,6 +1580,18 @@ void Assembler::lea(Register dst,
}
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
uint32_t dummy2;
return fits_shifter(imm32, &dummy1, &dummy2, NULL);
}
void Assembler::BlockConstPoolFor(int instructions) {
BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
}
// Debugging
void Assembler::RecordJSReturn() {
WriteRecordedPositions();
@@ -1429,10 +1710,15 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
}
if (rinfo.rmode() != RelocInfo::NONE) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
!Serializer::enabled() &&
!FLAG_debug_code) {
return;
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
}
#endif
if (!Serializer::enabled() && !FLAG_debug_code) {
return;
}
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);

deps/v8/src/arm/assembler-arm.h (175)

@@ -41,6 +41,7 @@
#define V8_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include "assembler.h"
#include "serialize.h"
namespace v8 {
namespace internal {
@@ -102,6 +103,57 @@ extern Register sp;
extern Register lr;
extern Register pc;
// Support for VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2".
extern Register s0;
extern Register s1;
extern Register s2;
extern Register s3;
extern Register s4;
extern Register s5;
extern Register s6;
extern Register s7;
extern Register s8;
extern Register s9;
extern Register s10;
extern Register s11;
extern Register s12;
extern Register s13;
extern Register s14;
extern Register s15;
extern Register s16;
extern Register s17;
extern Register s18;
extern Register s19;
extern Register s20;
extern Register s21;
extern Register s22;
extern Register s23;
extern Register s24;
extern Register s25;
extern Register s26;
extern Register s27;
extern Register s28;
extern Register s29;
extern Register s30;
extern Register s31;
extern Register d0;
extern Register d1;
extern Register d2;
extern Register d3;
extern Register d4;
extern Register d5;
extern Register d6;
extern Register d7;
extern Register d8;
extern Register d9;
extern Register d10;
extern Register d11;
extern Register d12;
extern Register d13;
extern Register d14;
extern Register d15;
// Coprocessor register
struct CRegister {
@@ -372,6 +424,51 @@ class MemOperand BASE_EMBEDDED {
friend class Assembler;
};
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
if (f == VFP3 && !FLAG_enable_vfp3) return false;
return (supported_ & (1u << f)) != 0;
}
// Check whether a feature is currently enabled.
static bool IsEnabled(CpuFeature f) {
return (enabled_ & (1u << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(CpuFeature f) {
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
(found_by_runtime_probing_ & (1u << f)) == 0);
old_enabled_ = CpuFeatures::enabled_;
CpuFeatures::enabled_ |= 1u << f;
}
~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
unsigned old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
private:
static unsigned supported_;
static unsigned enabled_;
static unsigned found_by_runtime_probing_;
};
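
This probe/scope pattern recurs throughout the codegen hunks below (CompareStub::Generate, HandleBinaryOpSlowCases); a condensed sketch of the idiom, using the __ ACCESS_MASM shorthand from those files:

if (CpuFeatures::IsSupported(VFP3)) {
  // Probe() found VFP3 (or the simulator build implies it); enable the
  // encodings for this scope only. The Scope destructor restores enabled_.
  CpuFeatures::Scope scope(VFP3);
  __ fmdrr(d6, r0, r1);  // move the two halves of a double into d6
  __ fmdrr(d7, r2, r3);
  __ faddd(d5, d6, d7);  // one-instruction double-precision add
  __ fmrrd(r0, r1, d5);  // move the result back to core registers
} else {
  // Pre-VFP fallback: call stubs such as ConvertToDoubleStub.
}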
typedef int32_t Instr;
@@ -437,13 +534,22 @@ class Assembler : public Malloced {
INLINE(static Address target_address_at(Address pc));
INLINE(static void set_target_address_at(Address pc, Address target));
// Modify the code target address in a constant pool entry.
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void set_target_at(Address constant_pool_entry, Address target);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches to runtime code.
inline static void set_external_target_at(Address constant_pool_entry,
Address target) {
set_target_at(constant_pool_entry, target);
}
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
static const int kCallTargetSize = kPointerSize;
static const int kExternalTargetSize = kPointerSize;
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
@@ -646,6 +752,66 @@ class Assembler : public Malloced {
void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short); // v5 and above
// Support for VFP.
// All these APIs support S0 to S31 and D0 to D15.
// Currently these APIs do not support extended D registers, i.e., D16 to D31.
// However, some simple modifications can allow
// these APIs to support D16 to D31.
void fmdrr(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fmrrd(const Register dst1,
const Register dst2,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void fmsr(const Register dst,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void fmrs(const Register dst,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void fsitod(const Register dst,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void ftosid(const Register dst,
const Register src,
const SBit s = LeaveCC,
const Condition cond = al);
void faddd(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fsubd(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fmuld(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fdivd(const Register dst,
const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void fcmp(const Register src1,
const Register src2,
const SBit s = LeaveCC,
const Condition cond = al);
void vmrs(const Register dst,
const Condition cond = al);
// Pseudo instructions
void nop() { mov(r0, Operand(r0)); }
@@ -673,6 +839,13 @@ class Assembler : public Malloced {
return (pc_offset() - l->pos()) / kInstrSize;
}
// Check whether an immediate fits an addressing mode 1 instruction.
bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
// Postpone the generation of the constant pool for the specified number of
// instructions.
void BlockConstPoolFor(int instructions);
// Debugging
// Mark address of the ExitJSFrame code.

deps/v8/src/arm/builtins-arm.cc (40)

@@ -284,7 +284,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// Both registers are preserved by this code so no need to differentiate between
// construct call and normal call.
static void ArrayNativeCode(MacroAssembler* masm,
Label *call_generic_code) {
Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
@@ -1029,44 +1029,24 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
Label no_preemption, retry_preemption;
__ bind(&retry_preemption);
ExternalReference stack_guard_limit_address =
ExternalReference::address_of_stack_guard_limit();
__ mov(r2, Operand(stack_guard_limit_address));
__ ldr(r2, MemOperand(r2));
__ cmp(sp, r2);
__ b(hi, &no_preemption);
// We have encountered a preemption or stack overflow already before we push
// the array contents. Save r0 which is the Smi-tagged length of the array.
__ push(r0);
// Runtime routines expect at least one argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
__ CallRuntime(Runtime::kStackGuard, 1);
// Since we returned, it wasn't a stack overflow. Restore r0 and try again.
__ pop(r0);
__ b(&retry_preemption);
__ bind(&no_preemption);
// Eagerly check for stack-overflow before starting to push the arguments.
// r0: number of arguments.
// r2: stack limit.
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
// limit" is checked.
Label okay;
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
// Make r2 the space we have left. The stack might already be overflowed
// here which will cause r2 to become negative.
__ sub(r2, sp, r2);
// Check if the arguments will overflow the stack.
__ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ b(hi, &okay);
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ push(r1);
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
// End of stack check.
// Push current limit and index.
__ bind(&okay);
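
The rewritten check relies on the subtraction being allowed to go negative: if sp is already below the real limit, r2 becomes negative and the signed gt test fails, where the old unsigned hi test would have wrongly passed. A plain C++ model of the arithmetic (illustrative only; kPointerSize is 4 on ARM, and the Smi tag shift cancels against the pointer-size shift exactly as in the cmp above):

#include <cstdint>

bool ArgsFitOnStack(uintptr_t sp, uintptr_t real_limit, int32_t argc) {
  // May wrap to a negative value when the stack is already overflowed.
  int32_t space_left = static_cast<int32_t>(sp - real_limit);
  int32_t args_bytes = argc * 4;   // argc << kPointerSizeLog2 on 32-bit ARM
  return space_left > args_bytes;  // signed comparison, like b(gt) above
}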

deps/v8/src/arm/codegen-arm-inl.h (13)

@@ -35,18 +35,15 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
void CodeGenerator::LoadConditionAndSpill(Expression* expression,
TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control) {
LoadCondition(expression, typeof_state, true_target, false_target,
force_control);
LoadCondition(expression, true_target, false_target, force_control);
}
void CodeGenerator::LoadAndSpill(Expression* expression,
TypeofState typeof_state) {
Load(expression, typeof_state);
void CodeGenerator::LoadAndSpill(Expression* expression) {
Load(expression);
}
@@ -60,8 +57,8 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
}
void Reference::GetValueAndSpill(TypeofState typeof_state) {
GetValue(typeof_state);
void Reference::GetValueAndSpill() {
GetValue();
}

deps/v8/src/arm/codegen-arm.cc (495)

@@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "parser.h"
#include "register-allocator-inl.h"
@@ -92,7 +93,6 @@ void DeferredCode::RestoreRegisters() {
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
typeof_state_(NOT_INSIDE_TYPEOF),
true_target_(NULL),
false_target_(NULL),
previous_(NULL) {
@@ -101,11 +101,9 @@ CodeGenState::CodeGenState(CodeGenerator* owner)
CodeGenState::CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target)
: owner_(owner),
typeof_state_(typeof_state),
true_target_(true_target),
false_target_(false_target),
previous_(owner->state()) {
@@ -144,6 +142,9 @@ CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
// cp: callee's context
void CodeGenerator::GenCode(FunctionLiteral* fun) {
// Record the position for debugging purposes.
CodeForFunctionPosition(fun);
ZoneList<Statement*>* body = fun->body();
// Initialize state.
@@ -322,18 +323,32 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
Label check_exit_codesize;
masm_->bind(&check_exit_codesize);
// Calculate the exact length of the return sequence and make sure that
// the constant pool is not emitted inside of the return sequence.
int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize;
int return_sequence_length = Debug::kARMJSReturnSequenceLength;
if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) {
// Additional mov instruction generated.
return_sequence_length++;
}
masm_->BlockConstPoolFor(return_sequence_length);
// Tear down the frame which will restore the caller's frame pointer and
// the link register.
frame_->Exit();
// Here we use masm_-> instead of the __ macro to avoid the code coverage
// tool from instrumenting as we rely on the code size here.
masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
masm_->add(sp, sp, Operand(sp_delta));
masm_->Jump(lr);
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(kJSReturnSequenceLength,
// expected by the debugger. The add instruction above is an addressing
// mode 1 instruction where there are restrictions on which immediate values
// can be encoded in the instruction and which immediate values require
// use of an additional instruction for moving the immediate to a temporary
// register.
ASSERT_EQ(return_sequence_length,
masm_->InstructionsGeneratedSince(&check_exit_codesize));
}
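
Whether the add above needs the extra mov depends on the addressing-mode-1 immediate encoding: an 8-bit value rotated right by an even amount. A self-contained model of the rule that ImmediateFitsAddrMode1Instruction wraps (a sketch of the ARM encoding, not V8's fits_shifter itself):

#include <cstdint>

// A data-processing immediate fits iff some even rotation brings the
// value into 8 bits.
bool FitsAddrMode1Immediate(uint32_t imm) {
  for (int rot = 0; rot < 32; rot += 2) {
    // Rotating the value left by rot undoes an encoding of imm8 ROR rot.
    uint32_t v = (rot == 0) ? imm : (imm << rot) | (imm >> (32 - rot));
    if (v <= 0xFF) return true;
  }
  return false;
}
// Example: sp_delta = 260 fits (0x104 rotated right by 2 is 0x41), but
// sp_delta = 1028 (256 parameters plus the receiver) does not, so the
// return sequence grows by the extra mov counted above.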
@@ -442,14 +457,13 @@ MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
// register was set, has_cc() is true and cc_reg_ contains the condition to
// test for 'true'.
void CodeGenerator::LoadCondition(Expression* x,
TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc) {
ASSERT(!has_cc());
int original_height = frame_->height();
{ CodeGenState new_state(this, typeof_state, true_target, false_target);
{ CodeGenState new_state(this, true_target, false_target);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@@ -479,13 +493,13 @@ void CodeGenerator::LoadCondition(Expression* x,
}
void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
JumpTarget true_target;
JumpTarget false_target;
LoadCondition(x, typeof_state, &true_target, &false_target, false);
LoadCondition(expr, &true_target, &false_target, false);
if (has_cc()) {
// Convert cc_reg_ into a boolean value.
@@ -552,24 +566,27 @@ void CodeGenerator::LoadGlobalReceiver(Register scratch) {
}
// TODO(1241834): Get rid of this function in favor of just using Load, now
// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
// variables w/o reference errors elsewhere.
void CodeGenerator::LoadTypeofExpression(Expression* x) {
void CodeGenerator::LoadTypeofExpression(Expression* expr) {
// Special handling of identifiers as subexpressions of typeof.
VirtualFrame::SpilledScope spilled_scope;
Variable* variable = x->AsVariableProxy()->AsVariable();
Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// NOTE: This is somewhat nasty. We force the compiler to load
// the variable as if through '<global>.<variable>' to make sure we
// do not get reference errors.
// For a global variable we build the property reference
// <global>.<variable> and perform a (regular non-contextual) property
// load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
// TODO(1241834): Fetch the position from the variable instead of using
// no position.
Property property(&global, &key, RelocInfo::kNoPosition);
LoadAndSpill(&property);
Reference ref(this, &property);
ref.GetValueAndSpill();
} else if (variable != NULL && variable->slot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
LoadFromSlot(variable->slot(), INSIDE_TYPEOF);
frame_->SpillAll();
} else {
LoadAndSpill(x, INSIDE_TYPEOF);
// Anything else can be handled normally.
LoadAndSpill(expr);
}
}
@@ -1066,27 +1083,6 @@ void CodeGenerator::Comparison(Condition cc,
}
class CallFunctionStub: public CodeStub {
public:
CallFunctionStub(int argc, InLoopFlag in_loop)
: argc_(argc), in_loop_(in_loop) {}
void Generate(MacroAssembler* masm);
private:
int argc_;
InLoopFlag in_loop_;
#if defined(DEBUG)
void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
#endif // defined(DEBUG)
Major MajorKey() { return CallFunction; }
int MinorKey() { return argc_; }
InLoopFlag InLoop() { return in_loop_; }
};
// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
@@ -1297,8 +1293,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
JumpTarget then;
JumpTarget else_;
// if (cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &else_, true);
LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (frame_ != NULL) {
Branch(false, &else_);
}
@@ -1321,8 +1316,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_else_stm);
JumpTarget then;
// if (cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &exit, true);
LoadConditionAndSpill(node->condition(), &then, &exit, true);
if (frame_ != NULL) {
Branch(false, &exit);
}
@@ -1337,8 +1331,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_then_stm);
JumpTarget else_;
// if (!cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&exit, &else_, true);
LoadConditionAndSpill(node->condition(), &exit, &else_, true);
if (frame_ != NULL) {
Branch(true, &exit);
}
@@ -1352,8 +1345,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
Comment cmnt(masm_, "[ If");
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&exit, &exit, false);
LoadConditionAndSpill(node->condition(), &exit, &exit, false);
if (frame_ != NULL) {
if (has_cc()) {
cc_reg_ = al;
@@ -1591,8 +1583,9 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
node->continue_target()->Bind();
}
if (has_valid_frame()) {
LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
&body, node->break_target(), true);
Comment cmnt(masm_, "[ DoWhileCondition");
CodeForDoWhileConditionPosition(node);
LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// An invalid frame here indicates that control did not
// fall out of the test expression.
@@ -1631,8 +1624,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
if (info == DONT_KNOW) {
JumpTarget body;
LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
&body, node->break_target(), true);
LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the
// test expression.
@@ -1691,8 +1683,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// If the test is always true, there is no need to compile it.
if (info == DONT_KNOW) {
JumpTarget body;
LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
&body, node->break_target(), true);
LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
Branch(false, node->break_target());
}
@@ -2270,7 +2261,8 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate = BuildBoilerplate(node);
Handle<JSFunction> boilerplate =
Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) {
ASSERT(frame_->height() == original_height);
@@ -2301,20 +2293,19 @@ void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
&then, &else_, true);
LoadConditionAndSpill(node->condition(), &then, &else_, true);
if (has_valid_frame()) {
Branch(false, &else_);
}
if (has_valid_frame() || then.is_linked()) {
then.Bind();
LoadAndSpill(node->then_expression(), typeof_state());
LoadAndSpill(node->then_expression());
}
if (else_.is_linked()) {
JumpTarget exit;
if (has_valid_frame()) exit.Jump();
else_.Bind();
LoadAndSpill(node->else_expression(), typeof_state());
LoadAndSpill(node->else_expression());
if (exit.is_linked()) exit.Bind();
}
ASSERT(frame_->height() == original_height + 1);
@@ -2381,10 +2372,6 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
frame_->EmitPush(r0);
} else {
// Note: We would like to keep the assert below, but it fires because of
// some nasty code in LoadTypeofExpression() which should be removed...
// ASSERT(!slot->var()->is_dynamic());
// Special handling for locals allocated in registers.
__ ldr(r0, SlotOperand(slot, r2));
frame_->EmitPush(r0);
@@ -2479,7 +2466,7 @@ void CodeGenerator::VisitSlot(Slot* node) {
#endif
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Slot");
LoadFromSlot(node, typeof_state());
LoadFromSlot(node, NOT_INSIDE_TYPEOF);
ASSERT(frame_->height() == original_height + 1);
}
@@ -2498,7 +2485,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
} else {
ASSERT(var->is_global());
Reference ref(this, node);
ref.GetValueAndSpill(typeof_state());
ref.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -2834,7 +2821,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
} else {
// +=, *= and similar binary assignments.
// Get the old value of the lhs.
target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
target.GetValueAndSpill();
Literal* literal = node->value()->AsLiteral();
bool overwrite =
(node->value()->AsBinaryOperation() != NULL &&
@@ -2899,7 +2886,7 @@ void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
{ Reference property(this, node);
property.GetValueAndSpill(typeof_state());
property.GetValueAndSpill();
}
ASSERT(frame_->height() == original_height + 1);
}
@@ -3069,7 +3056,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the function to call from the property through a reference.
Reference ref(this, property);
ref.GetValueAndSpill(NOT_INSIDE_TYPEOF); // receiver
ref.GetValueAndSpill(); // receiver
// Pass receiver to called function.
if (property->is_synthetic()) {
@@ -3299,7 +3286,82 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(args->length() == 2);
Comment(masm_, "[ GenerateFastCharCodeAt");
LoadAndSpill(args->at(0));
LoadAndSpill(args->at(1));
frame_->EmitPop(r0); // Index.
frame_->EmitPop(r1); // String.
Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string;
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &slow); // The 'string' was a Smi.
ASSERT(kSmiTag == 0);
__ tst(r0, Operand(kSmiTagMask | 0x80000000u));
__ b(ne, &slow); // The index was negative or not a Smi.
__ bind(&try_again_with_new_string);
__ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &slow);
// Now r2 has the string type.
__ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
__ and_(r4, r2, Operand(kStringSizeMask));
__ add(r4, r4, Operand(String::kLongLengthShift));
__ mov(r3, Operand(r3, LSR, r4));
// Now r3 has the length of the string. Compare with the index.
__ cmp(r3, Operand(r0, LSR, kSmiTagSize));
__ b(le, &slow);
// Here we know the index is in range. Check that string is sequential.
ASSERT_EQ(0, kSeqStringTag);
__ tst(r2, Operand(kStringRepresentationMask));
__ b(ne, &not_a_flat_string);
// Check whether it is an ASCII string.
ASSERT_EQ(0, kTwoByteStringTag);
__ tst(r2, Operand(kStringEncodingMask));
__ b(ne, &ascii_string);
// 2-byte string. We can add without shifting since the Smi tag size is the
// log2 of the number of bytes in a two-byte character.
ASSERT_EQ(1, kSmiTagSize);
ASSERT_EQ(0, kSmiShiftSize);
__ add(r1, r1, Operand(r0));
__ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ jmp(&end);
__ bind(&ascii_string);
__ add(r1, r1, Operand(r0, LSR, kSmiTagSize));
__ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
__ jmp(&end);
__ bind(&not_a_flat_string);
__ and_(r2, r2, Operand(kStringRepresentationMask));
__ cmp(r2, Operand(kConsStringTag));
__ b(ne, &slow);
// ConsString.
// Check that the right hand side is the empty string (ie if this is really a
// flat string in a cons string). If that is not the case we would rather go
// to the runtime system now, to flatten the string.
__ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset));
__ LoadRoot(r3, Heap::kEmptyStringRootIndex);
__ cmp(r2, Operand(r3));
__ b(ne, &slow);
// Get the first of the two strings.
__ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset));
__ jmp(&try_again_with_new_string);
__ bind(&slow);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ bind(&end);
frame_->EmitPush(r0);
}
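
A compact model of the control flow this stub implements. Everything here is an illustrative stand-in for V8's tagged heap objects (a toy string type, not V8's), with -1 playing the role of the bailout where the generated code loads undefined and the caller falls back to the runtime:

#include <cstdint>
#include <string>

struct ToyString {
  bool is_cons = false;             // cons pair vs. flat sequential payload
  const ToyString* first = nullptr;
  const ToyString* second = nullptr;
  std::u16string chars;             // sequential contents when !is_cons
};

int FastCharCodeAt(const ToyString* s, int64_t index) {
  if (index < 0) return -1;  // negative (or non-Smi) index: bail to runtime
  for (;;) {
    if (s->is_cons) {
      // Only a cons whose second part is empty is handled inline; anything
      // else is left for the runtime to flatten first.
      if (s->second->is_cons || !s->second->chars.empty()) return -1;
      s = s->first;  // the try_again_with_new_string loop
      continue;
    }
    if (index >= static_cast<int64_t>(s->chars.size())) return -1;
    return s->chars[static_cast<size_t>(index)];  // the ldrh/ldrb load
  }
}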
@@ -3474,7 +3536,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (op == Token::NOT) {
LoadConditionAndSpill(node->expression(),
NOT_INSIDE_TYPEOF,
false_target(),
true_target(),
true);
@@ -3635,7 +3696,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
ASSERT(frame_->height() == original_height + 1);
return;
}
target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
target.GetValueAndSpill();
frame_->EmitPop(r0);
JumpTarget slow;
@@ -3729,7 +3790,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
if (op == Token::AND) {
JumpTarget is_true;
LoadConditionAndSpill(node->left(),
NOT_INSIDE_TYPEOF,
&is_true,
false_target(),
false);
@@ -3765,7 +3825,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
is_true.Bind();
LoadConditionAndSpill(node->right(),
NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -3777,7 +3836,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (op == Token::OR) {
JumpTarget is_false;
LoadConditionAndSpill(node->left(),
NOT_INSIDE_TYPEOF,
true_target(),
&is_false,
false);
@@ -3813,7 +3871,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
}
is_false.Bind();
LoadConditionAndSpill(node->right(),
NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -3998,28 +4055,35 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
} else if (check->Equals(Heap::function_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
__ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE);
Register map_reg = r2;
__ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE);
true_target()->Branch(eq);
// Regular expressions are callable so typeof == 'function'.
__ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE);
cc_reg_ = eq;
} else if (check->Equals(Heap::object_symbol())) {
__ tst(r1, Operand(kSmiTagMask));
false_target()->Branch(eq);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(r1, ip);
true_target()->Branch(eq);
Register map_reg = r2;
__ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE);
false_target()->Branch(eq);
// It can be an undetectable object.
__ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
__ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
__ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
__ cmp(r1, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(eq);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
__ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
__ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
false_target()->Branch(lt);
__ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
__ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
cc_reg_ = le;
} else {
@@ -4112,7 +4176,7 @@ Handle<String> Reference::GetName() {
}
void Reference::GetValue(TypeofState typeof_state) {
void Reference::GetValue() {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
ASSERT(!cgen_->has_cc());
@@ -4127,16 +4191,11 @@ void Reference::GetValue(TypeofState typeof_state) {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->LoadFromSlot(slot, typeof_state);
cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
// TODO(1241834): Make sure that this it is safe to ignore the
// distinction between expressions in a typeof and not in a typeof. If
// there is a chance that reference errors can be thrown below, we
// must distinguish between the two kinds of loads (typeof expression
// loads must not throw a reference error).
VirtualFrame* frame = cgen_->frame();
Comment cmnt(masm, "[ Load from named Property");
Handle<String> name(GetName());
@@ -4155,9 +4214,6 @@ void Reference::GetValue(TypeofState typeof_state) {
}
case KEYED: {
// TODO(1241834): Make sure that this it is safe to ignore the
// distinction between expressions in a typeof and not in a typeof.
// TODO(181): Implement inlined version of array indexing once
// loop nesting is properly tracked on ARM.
VirtualFrame* frame = cgen_->frame();
@@ -4493,7 +4549,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
// the_int_ has the answer which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test
@@ -4566,6 +4622,22 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
if (cc != eq) {
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but (undefined <= undefined)
// == false! See ECMAScript 11.8.5.
if (cc == le || cc == ge) {
__ cmp(r4, Operand(ODDBALL_TYPE));
__ b(ne, &return_equal);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ cmp(r0, Operand(r2));
__ b(ne, &return_equal);
if (cc == le) {
__ mov(r0, Operand(GREATER)); // undefined <= undefined should fail.
} else {
__ mov(r0, Operand(LESS)); // undefined >= undefined should fail.
}
__ mov(pc, Operand(lr)); // Return.
}
}
}
__ bind(&return_equal);
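
The asymmetry being encoded: identical operands normally compare EQUAL, but ECMAScript 11.8.5 makes a relational comparison involving undefined come out false even against itself, so the stub forces a failing answer for le and ge. A table-sized model (LESS, EQUAL, GREATER mirror the constants the stub leaves in r0; the enum names are illustrative):

enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };
enum RelOp { kEq, kLe, kGe };

CompareResult IdenticalCompare(bool is_undefined, RelOp op) {
  if (is_undefined && op == kLe) return GREATER;  // undefined <= undefined -> false
  if (is_undefined && op == kGe) return LESS;     // undefined >= undefined -> false
  return EQUAL;  // identical operands are equal in every other case
}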
@@ -4643,9 +4715,17 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Rhs is a smi, lhs is a number.
__ push(lr);
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
} else {
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
}
// r3 and r2 are rhs as double.
__ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
__ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
@@ -4673,9 +4753,16 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ push(lr);
__ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
} else {
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
}
__ pop(lr);
// Fall through to both_loaded_as_doubles.
}
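
IntegerToDoubleConversionWithVFP3 is a new macro-assembler helper whose definition is not shown in this excerpt; a plausible expansion given the VFP instructions added in assembler-arm.cc (an assumption about macro-assembler-arm.cc, reusing r7 as scratch like the stub path it replaces):

void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
                                                       Register outHighReg,
                                                       Register outLowReg) {
  mov(r7, Operand(inReg, ASR, kSmiTagSize));  // untag the Smi
  fmsr(s15, r7);                              // core register -> VFP single
  fsitod(d7, s15);                            // signed int -> double
  fmrrd(outLowReg, outHighReg, d7);           // split the double across two
                                              // core registers
}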
@@ -4878,9 +4965,23 @@ void CompareStub::Generate(MacroAssembler* masm) {
// fall through if neither is a NaN. Also binds rhs_not_nan.
EmitNanCheck(masm, &rhs_not_nan, cc_);
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
EmitTwoNonNanDoubleComparison(masm, cc_);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement double precision comparison.
__ fmdrr(d6, r0, r1);
__ fmdrr(d7, r2, r3);
__ fcmp(d6, d7);
__ vmrs(pc);
__ mov(r0, Operand(0), LeaveCC, eq);
__ mov(r0, Operand(1), LeaveCC, lt);
__ mvn(r0, Operand(0), LeaveCC, gt);
__ mov(pc, Operand(lr));
} else {
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
EmitTwoNonNanDoubleComparison(masm, cc_);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
@@ -4980,16 +5081,24 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
// Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
__ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
} else {
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
// Write Smi from r1 to r1 and r0 in double format. r6 is scratch.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
__ jmp(&do_the_call); // Tail call. No return.
// We jump to here if something goes wrong (one param is not a number of any
@ -5025,12 +5134,20 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub3(r3, r2, r7, r6);
__ push(lr);
__ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub3(r3, r2, r7, r6);
__ push(lr);
__ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
__ bind(&finished_loading_r0);
// Move r1 to a double in r0-r1.
@ -5050,12 +5167,19 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// We can't overwrite a Smi so get address of new heap number into r5.
AllocateHeapNumber(masm, &slow, r5, r6, r7);
}
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub4(r1, r0, r7, r6);
__ push(lr);
__ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub4(r1, r0, r7, r6);
__ push(lr);
__ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
__ bind(&finished_loading_r1);
__ bind(&do_the_call);
@ -5064,6 +5188,38 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
// r5: Address of heap number for result.
if (CpuFeatures::IsSupported(VFP3) &&
((Token::MUL == operation) ||
(Token::DIV == operation) ||
(Token::ADD == operation) ||
(Token::SUB == operation))) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions to implement
// double precision add, subtract, multiply, and divide.
__ fmdrr(d6, r0, r1);
__ fmdrr(d7, r2, r3);
if (Token::MUL == operation) {
__ fmuld(d5, d6, d7);
} else if (Token::DIV == operation) {
__ fdivd(d5, d6, d7);
} else if (Token::ADD == operation) {
__ faddd(d5, d6, d7);
} else if (Token::SUB == operation) {
__ fsubd(d5, d6, d7);
} else {
UNREACHABLE();
}
__ fmrrd(r0, r1, d5);
__ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
__ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
__ mov(r0, Operand(r5));
__ mov(pc, lr);
return;
}
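// Aside (not part of this diff): fmrrd moves the 64-bit result in d5
// into the core pair (r0 = low word, r1 = high word), and the two str
// instructions store it as consecutive 32-bit words of the HeapNumber
// payload. A sketch of the equivalent store, assuming a little-endian
// word split:
//
//   void StoreDoubleAsTwoWords(double value, uint32_t* payload) {
//     uint32_t words[2];
//     std::memcpy(words, &value, sizeof(words));
//     payload[0] = words[0];  // HeapNumber::kValueOffset
//     payload[1] = words[1];  // HeapNumber::kValueOffset + 4
//   }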
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
__ AlignStack(0);
@ -5132,38 +5288,49 @@ static void GetInt32(MacroAssembler* masm,
__ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
// We have a shifted exponent between 0 and 30 in scratch2.
__ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
// We now have the exponent in dest. Subtract from 30 to get
// how much to shift down.
__ rsb(dest, dest, Operand(30));
if (!CpuFeatures::IsSupported(VFP3)) {
// We have a shifted exponent between 0 and 30 in scratch2.
__ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
// We now have the exponent in dest. Subtract from 30 to get
// how much to shift down.
__ rsb(dest, dest, Operand(30));
}
__ bind(&right_exponent);
// Get the top bits of the mantissa.
__ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
// Put back the implicit 1.
__ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
// Shift up the mantissa bits to take up the space the exponent used to take.
// We just orred in the implicit bit so that took care of one and we want to
// leave the sign bit 0 so we subtract 2 bits from the shift distance.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
__ mov(scratch2, Operand(scratch2, LSL, shift_distance));
// Put sign in zero flag.
__ tst(scratch, Operand(HeapNumber::kSignMask));
// Get the second half of the double. For some exponents we don't actually
// need this because the bits get shifted out again, but it's probably slower
// to test than just to do it.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
// Shift down 22 bits to get the last 10 bits.
__ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
// Move down according to the exponent.
__ mov(dest, Operand(scratch, LSR, dest));
// Fix sign if sign bit was set.
__ rsb(dest, dest, Operand(0), LeaveCC, ne);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// ARMv7 VFP3 instructions implementing double precision to integer
// conversion using round-towards-zero.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
__ fmdrr(d7, scratch2, scratch);
__ ftosid(s15, d7);
__ fmrs(dest, s15);
} else {
// Get the top bits of the mantissa.
__ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
// Put back the implicit 1.
__ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
// Shift up the mantissa bits to take up the space the exponent used to
// take. We just orred in the implicit bit so that took care of one and
// we want to leave the sign bit 0 so we subtract 2 bits from the shift
// distance.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
__ mov(scratch2, Operand(scratch2, LSL, shift_distance));
// Put sign in zero flag.
__ tst(scratch, Operand(HeapNumber::kSignMask));
// Get the second half of the double. For some exponents we don't
// actually need this because the bits get shifted out again, but
// it's probably slower to test than just to do it.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
// Shift down 22 bits to get the last 10 bits.
__ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
// Move down according to the exponent.
__ mov(dest, Operand(scratch, LSR, dest));
// Fix sign if sign bit was set.
__ rsb(dest, dest, Operand(0), LeaveCC, ne);
}
__ bind(&done);
}
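For reference (not part of the commit): the fallback path above is a pure
integer-unit truncation. A hedged C++ sketch of the same computation,
assuming, as the code above has already checked, a finite input whose
unbiased exponent lies in [0, 30]:

#include <cstdint>
#include <cstring>

int32_t TruncateSmallDouble(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // unbias
  // Restore the implicit leading 1, then shift the 53-bit mantissa down
  // so that only the integer part remains.
  uint64_t mantissa = (bits & 0x000FFFFFFFFFFFFFull) | (1ull << 52);
  int32_t result = static_cast<int32_t>(mantissa >> (52 - exponent));
  // Negate if the sign bit was set, as the conditional rsb above does.
  return (bits >> 63) ? -result : result;
}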
// For bitwise ops where the inputs are not both Smis we here try to determine
// whether both inputs are either Smis or at least heap numbers that can be
// represented by a 32 bit signed value. We truncate towards zero as required
@ -5180,7 +5347,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
GetInt32(masm, r1, r3, r4, r5, &slow);
GetInt32(masm, r1, r3, r5, r4, &slow);
__ jmp(&done_checking_r1);
__ bind(&r1_is_smi);
__ mov(r3, Operand(r1, ASR, 1));
@ -5190,7 +5357,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow);
GetInt32(masm, r0, r2, r4, r5, &slow);
GetInt32(masm, r0, r2, r5, r4, &slow);
__ jmp(&done_checking_r0);
__ bind(&r0_is_smi);
__ mov(r2, Operand(r0, ASR, 1));
@ -5795,7 +5962,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
StackFrame::Type frame_type,
ExitFrame::Mode mode,
bool do_gc,
bool always_allocate) {
// r0: result parameter for PerformGC, if any
@ -5855,7 +6022,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
__ LeaveExitFrame(frame_type);
__ LeaveExitFrame(mode);
// check if we should retry or throw exception
Label retry;
@ -5901,12 +6068,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// this by performing a garbage collection and retrying the
// builtin once.
StackFrame::Type frame_type = is_debug_break
? StackFrame::EXIT_DEBUG
: StackFrame::EXIT;
ExitFrame::Mode mode = is_debug_break
? ExitFrame::MODE_DEBUG
: ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
__ EnterExitFrame(frame_type);
__ EnterExitFrame(mode);
// r4: number of arguments (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@ -5921,7 +6088,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
frame_type,
mode,
false,
false);
@ -5930,7 +6097,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
frame_type,
mode,
true,
false);
@ -5941,7 +6108,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
frame_type,
mode,
true,
true);

47
deps/v8/src/arm/codegen-arm.h

@ -77,12 +77,12 @@ class Reference BASE_EMBEDDED {
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
void GetValue(TypeofState typeof_state);
void GetValue();
// Generate code to push the value of a reference on top of the expression
// stack and then spill the stack frame. This function is used temporarily
// while the code generator is being transformed.
inline void GetValueAndSpill(TypeofState typeof_state);
inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
@ -112,10 +112,8 @@ class CodeGenState BASE_EMBEDDED {
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state has its own typeof state and pair of branch
// labels.
// state. The new state has its own pair of branch labels.
CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target);
@ -123,13 +121,11 @@ class CodeGenState BASE_EMBEDDED {
// previous state.
~CodeGenState();
TypeofState typeof_state() const { return typeof_state_; }
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
private:
CodeGenerator* owner_;
TypeofState typeof_state_;
JumpTarget* true_target_;
JumpTarget* false_target_;
CodeGenState* previous_;
@ -169,8 +165,8 @@ class CodeGenerator: public AstVisitor {
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
Handle<Script> script() { return script_; }
bool has_valid_frame() const { return frame_ != NULL; }
@ -191,10 +187,6 @@ class CodeGenerator: public AstVisitor {
static const int kUnknownIntValue = -1;
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceLength = 4;
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
@ -210,7 +202,6 @@ class CodeGenerator: public AstVisitor {
// State
bool has_cc() const { return cc_reg_ != al; }
TypeofState typeof_state() const { return state_->typeof_state(); }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
@ -259,25 +250,22 @@ class CodeGenerator: public AstVisitor {
}
void LoadCondition(Expression* x,
TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc);
void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
inline void LoadAndSpill(Expression* expression,
TypeofState typeof_state = NOT_INSIDE_TYPEOF);
inline void LoadAndSpill(Expression* expression);
// Call LoadCondition and then spill the virtual frame unless control flow
// cannot reach the end of the expression (ie, by emitting only
// unconditional jumps to the control targets).
inline void LoadConditionAndSpill(Expression* expression,
TypeofState typeof_state,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_control);
@ -331,7 +319,6 @@ class CodeGenerator: public AstVisitor {
InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@ -391,6 +378,7 @@ class CodeGenerator: public AstVisitor {
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* node);
void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@ -433,6 +421,27 @@ class CodeGenerator: public AstVisitor {
};
class CallFunctionStub: public CodeStub {
public:
CallFunctionStub(int argc, InLoopFlag in_loop)
: argc_(argc), in_loop_(in_loop) {}
void Generate(MacroAssembler* masm);
private:
int argc_;
InLoopFlag in_loop_;
#if defined(DEBUG)
void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
#endif // defined(DEBUG)
Major MajorKey() { return CallFunction; }
int MinorKey() { return argc_; }
InLoopFlag InLoop() { return in_loop_; }
};
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,

20
deps/v8/src/arm/constants-arm.cc

@ -67,6 +67,26 @@ const char* Registers::Name(int reg) {
}
// Support for VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2"
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* VFPRegisters::names_[kNumVFPRegisters] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
};
const char* VFPRegisters::Name(int reg) {
ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
return names_[reg];
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {

24
deps/v8/src/arm/constants-arm.h

@ -75,6 +75,9 @@ namespace arm {
// Number of registers in normal ARM mode.
static const int kNumRegisters = 16;
// VFP support.
static const int kNumVFPRegisters = 48;
// PC is register 15.
static const int kPCRegister = 15;
static const int kNoRegister = -1;
@ -231,6 +234,16 @@ class Instr {
inline int RnField() const { return Bits(19, 16); }
inline int RdField() const { return Bits(15, 12); }
// Support for VFP.
// Vn(19-16) | Vd(15-12) | Vm(3-0)
inline int VnField() const { return Bits(19, 16); }
inline int VmField() const { return Bits(3, 0); }
inline int VdField() const { return Bits(15, 12); }
inline int NField() const { return Bit(7); }
inline int MField() const { return Bit(5); }
inline int DField() const { return Bit(22); }
inline int RtField() const { return Bits(15, 12); }
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
return static_cast<Opcode>(Bits(24, 21));
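// Aside (not in the commit): these accessors are thin wrappers over
// masked shifts. Assuming the usual Bit/Bits helpers on the raw
// instruction word (signatures inferred from use):
//
//   inline int Bit(uint32_t instr, int nr) { return (instr >> nr) & 1; }
//   inline int Bits(uint32_t instr, int hi, int lo) {
//     return (instr >> lo) & ((1 << (hi - lo + 1)) - 1);
//   }
//
// e.g. VnField() extracts bits 19-16 and DField() extracts bit 22.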
@ -307,7 +320,7 @@ class Registers {
struct RegisterAlias {
int reg;
const char *name;
const char* name;
};
private:
@ -315,6 +328,15 @@ class Registers {
static const RegisterAlias aliases_[];
};
// Helper functions for converting between VFP register numbers and names.
class VFPRegisters {
public:
// Return the name of the register.
static const char* Name(int reg);
private:
static const char* names_[kNumVFPRegisters];
};
} } // namespace assembler::arm

3
deps/v8/src/arm/cpu-arm.cc

@ -33,12 +33,13 @@
#include "v8.h"
#include "cpu.h"
#include "macro-assembler.h"
namespace v8 {
namespace internal {
void CPU::Setup() {
// Nothing to do.
CpuFeatures::Probe();
}

2
deps/v8/src/arm/debug-arm.cc

@ -61,7 +61,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// Restore the JS frame exit code.
void BreakLocationIterator::ClearDebugBreakAtReturn() {
rinfo()->PatchCode(original_rinfo()->pc(),
CodeGenerator::kJSReturnSequenceLength);
Debug::kARMJSReturnSequenceLength);
}

199
deps/v8/src/arm/disasm-arm.cc

@ -97,6 +97,10 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
void PrintSRegister(int reg);
void PrintDRegister(int reg);
int FormatVFPRegister(Instr* instr, const char* format);
int FormatVFPinstruction(Instr* instr, const char* format);
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
void PrintShiftImm(Instr* instr);
@ -121,6 +125,10 @@ class Decoder {
void DecodeType6(Instr* instr);
void DecodeType7(Instr* instr);
void DecodeUnconditional(Instr* instr);
// For VFP support.
void DecodeTypeVFP(Instr* instr);
void DecodeType6CoprocessorIns(Instr* instr);
const disasm::NameConverter& converter_;
v8::internal::Vector<char> out_buffer_;
@ -171,6 +179,16 @@ void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
// Print the VFP S register name according to the active name converter.
void Decoder::PrintSRegister(int reg) {
Print(assembler::arm::VFPRegisters::Name(reg));
}
// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(assembler::arm::VFPRegisters::Name(reg + 32));
}
// These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
@ -290,6 +308,10 @@ int Decoder::FormatRegister(Instr* instr, const char* format) {
int reg = instr->RmField();
PrintRegister(reg);
return 2;
} else if (format[1] == 't') { // 'rt: Rt register
int reg = instr->RtField();
PrintRegister(reg);
return 2;
} else if (format[1] == 'l') {
// 'rlist: register list for load and store multiple instructions
ASSERT(STRING_STARTS_WITH(format, "rlist"));
@ -315,6 +337,39 @@ int Decoder::FormatRegister(Instr* instr, const char* format) {
}
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D'));
if (format[1] == 'n') {
int reg = instr->VnField();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
} else if (format[1] == 'm') {
int reg = instr->VmField();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
} else if (format[1] == 'd') {
int reg = instr->VdField();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
}
UNREACHABLE();
return -1;
}
int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
Print(format);
return 0;
}
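Illustrative aside (not in the commit): the (reg << 1) | bit compositions
above reflect the VFP encoding, where a single-precision register number
is five bits: the high four travel in Vn/Vm/Vd and the low bit in N/M/D.
A minimal sketch (function names are illustrative):

inline int SRegisterNumber(int v_field, int low_bit) {
  return (v_field << 1) | low_bit;  // s0..s31
}
inline int DRegisterNameIndex(int v_field) {
  return v_field + 32;  // d0..d15 follow s0..s31 in VFPRegisters::names_
}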
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
@ -459,6 +514,13 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
}
return 1;
}
case 'v': {
return FormatVFPinstruction(instr, format);
}
case 'S':
case 'D': {
return FormatVFPRegister(instr, format);
}
case 'w': { // 'w: W field of load and store instructions
if (instr->HasW()) {
Print("!");
@ -761,8 +823,7 @@ void Decoder::DecodeType5(Instr* instr) {
void Decoder::DecodeType6(Instr* instr) {
// Coprocessor instructions currently not supported.
Unknown(instr);
DecodeType6CoprocessorIns(instr);
}
@ -770,12 +831,10 @@ void Decoder::DecodeType7(Instr* instr) {
if (instr->Bit(24) == 1) {
Format(instr, "swi'cond 'swi");
} else {
// Coprocessor instructions currently not supported.
Unknown(instr);
DecodeTypeVFP(instr);
}
}
void Decoder::DecodeUnconditional(Instr* instr) {
if (instr->Bits(7, 4) == 0xB && instr->Bits(27, 25) == 0 && instr->HasL()) {
Format(instr, "'memop'h'pu 'rd, ");
@ -837,6 +896,136 @@ void Decoder::DecodeUnconditional(Instr* instr) {
}
// void Decoder::DecodeTypeVFP(Instr* instr)
// Implements the following VFP instructions:
// fmsr: Sn = Rt
// fmrs: Rt = Sn
// fsitod: Dd = Sm
// ftosid: Sd = Dm
// Dd = faddd(Dn, Dm)
// Dd = fsubd(Dn, Dm)
// Dd = fmuld(Dn, Dm)
// Dd = fdivd(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Decoder::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0));
if (instr->Bit(23) == 1) {
if ((instr->Bits(21, 19) == 0x7) &&
(instr->Bits(18, 16) == 0x5) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 1) &&
(instr->Bit(6) == 1) &&
(instr->Bit(4) == 0)) {
Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
} else if ((instr->Bits(21, 19) == 0x7) &&
(instr->Bits(18, 16) == 0x0) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 1) &&
(instr->Bit(7) == 1) &&
(instr->Bit(6) == 1) &&
(instr->Bit(4) == 0)) {
Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
} else if ((instr->Bit(21) == 0x0) &&
(instr->Bit(20) == 0x0) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 1) &&
(instr->Bit(6) == 0) &&
(instr->Bit(4) == 0)) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
} else if ((instr->Bits(21, 20) == 0x3) &&
(instr->Bits(19, 16) == 0x4) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 0x1) &&
(instr->Bit(6) == 0x1) &&
(instr->Bit(4) == 0x0)) {
Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
} else if ((instr->Bits(23, 20) == 0xF) &&
(instr->Bits(19, 16) == 0x1) &&
(instr->Bits(11, 8) == 0xA) &&
(instr->Bits(7, 5) == 0x0) &&
(instr->Bit(4) == 0x1) &&
(instr->Bits(3, 0) == 0x0)) {
if (instr->Bits(15, 12) == 0xF)
Format(instr, "vmrs'cond APSR, FPSCR");
else
Unknown(instr); // Not used by V8.
} else {
Unknown(instr); // Not used by V8.
}
} else if (instr->Bit(21) == 1) {
if ((instr->Bit(20) == 0x1) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 0x1) &&
(instr->Bit(6) == 0) &&
(instr->Bit(4) == 0)) {
Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
} else if ((instr->Bit(20) == 0x1) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 0x1) &&
(instr->Bit(6) == 1) &&
(instr->Bit(4) == 0)) {
Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
} else if ((instr->Bit(20) == 0x0) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 0x1) &&
(instr->Bit(6) == 0) &&
(instr->Bit(4) == 0)) {
Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else {
if ((instr->Bit(20) == 0x0) &&
(instr->Bits(11, 8) == 0xA) &&
(instr->Bits(6, 5) == 0x0) &&
(instr->Bit(4) == 1) &&
(instr->Bits(3, 0) == 0x0)) {
Format(instr, "vmov'cond 'Sn, 'rt");
} else if ((instr->Bit(20) == 0x1) &&
(instr->Bits(11, 8) == 0xA) &&
(instr->Bits(6, 5) == 0x0) &&
(instr->Bit(4) == 1) &&
(instr->Bits(3, 0) == 0x0)) {
Format(instr, "vmov'cond 'rt, 'Sn");
} else {
Unknown(instr); // Not used by V8.
}
}
}
// Decode Type 6 coprocessor instructions.
// Dm = fmdrr(Rt, Rt2)
// <Rt, Rt2> = fmrrd(Dm)
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
if (instr->Bit(23) == 1) {
Unknown(instr); // Not used by V8.
} else if (instr->Bit(22) == 1) {
if ((instr->Bits(27, 24) == 0xC) &&
(instr->Bit(22) == 1) &&
(instr->Bits(11, 8) == 0xB) &&
(instr->Bits(7, 6) == 0x0) &&
(instr->Bit(4) == 1)) {
if (instr->Bit(20) == 0) {
Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
} else if (instr->Bit(20) == 1) {
Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
}
} else {
Unknown(instr); // Not used by V8.
}
} else if (instr->Bit(21) == 1) {
Unknown(instr); // Not used by V8.
} else {
Unknown(instr); // Not used by V8.
}
}
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instr* instr = Instr::At(instr_ptr);

1243
deps/v8/src/arm/fast-codegen-arm.cc

File diff suppressed because it is too large

15
deps/v8/src/arm/frames-arm.cc

@ -54,23 +54,24 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
Address sp = fp + ExitFrameConstants::kSPDisplacement;
Type type;
if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
type = EXIT_DEBUG;
const int offset = ExitFrameConstants::kCodeOffset;
Object* code = Memory::Object_at(fp + offset);
bool is_debug_exit = code->IsSmi();
if (is_debug_exit) {
sp -= kNumJSCallerSaved * kPointerSize;
} else {
type = EXIT;
}
// Fill in the state.
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
return type;
return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
// Do nothing
v->VisitPointer(&code_slot());
// The arguments are traversed as part of the expression stack of
// the calling frame.
}

2
deps/v8/src/arm/frames-arm.h

@ -100,7 +100,7 @@ class ExitFrameConstants : public AllStatic {
static const int kSPDisplacement = -1 * kPointerSize;
// The debug marker is just above the frame pointer.
static const int kDebugMarkOffset = -1 * kPointerSize;
static const int kCodeOffset = -1 * kPointerSize;
static const int kSavedRegistersOffset = 0 * kPointerSize;

30
deps/v8/src/arm/macro-assembler-arm.cc

@ -274,9 +274,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
}
void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
// Compute the argv pointer and keep it in a callee-saved register.
// r0 is argc.
add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
@ -298,8 +296,11 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
mov(fp, Operand(sp)); // setup new frame pointer
// Push debug marker.
mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0));
if (mode == ExitFrame::MODE_DEBUG) {
mov(ip, Operand(Smi::FromInt(0)));
} else {
mov(ip, Operand(CodeObject()));
}
push(ip);
// Save the frame pointer and the context in top.
@ -316,7 +317,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
if (mode == ExitFrame::MODE_DEBUG) {
// Use sp as base to push.
CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
}
@ -348,14 +349,14 @@ void MacroAssembler::AlignStack(int offset) {
}
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
if (mode == ExitFrame::MODE_DEBUG) {
// This code intentionally clobbers r2 and r3.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
add(r3, fp, Operand(kOffset));
CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
}
@ -975,6 +976,17 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
}
void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
Register outHighReg,
Register outLowReg) {
// ARMv7 VFP3 instructions to implement integer to double conversion.
mov(r7, Operand(inReg, ASR, kSmiTagSize));
fmsr(s15, r7);
fsitod(d7, s15);
fmrrd(outLowReg, outHighReg, d7);
}
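For reference (not part of the commit): the helper untags the Smi,
converts it via s15/d7, and splits the result back into two core
registers; note that it clobbers r7, which callers treat as scratch.
What it computes, as a hedged C++ sketch (kSmiTagSize == 1 and a
little-endian word split assumed):

#include <cstdint>
#include <cstring>

void SmiToDoubleWords(int32_t smi, uint32_t* out_hi, uint32_t* out_lo) {
  double d = static_cast<double>(smi >> 1);  // untag via arithmetic shift
  uint32_t words[2];
  std::memcpy(words, &d, sizeof(words));
  *out_lo = words[0];  // what fmrrd writes to outLowReg
  *out_hi = words[1];  // what fmrrd writes to outHighReg
}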
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.

13
deps/v8/src/arm/macro-assembler-arm.h

@ -87,14 +87,14 @@ class MacroAssembler: public Assembler {
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter specific kind of exit frame; either EXIT or
// EXIT_DEBUG. Expects the number of arguments in register r0 and
// Enter specific kind of exit frame; either normal or debug mode.
// Expects the number of arguments in register r0 and
// the builtin function to call in register r1. Exits with argc in
// r4, argv in r6, and the builtin function to call in r5.
void EnterExitFrame(StackFrame::Type type);
void EnterExitFrame(ExitFrame::Mode mode);
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(StackFrame::Type type);
void LeaveExitFrame(ExitFrame::Mode mode);
// Align the stack by optionally pushing a Smi zero.
void AlignStack(int offset);
@ -240,6 +240,11 @@ class MacroAssembler: public Assembler {
// occurred.
void IllegalOperation(int num_arguments);
// Uses VFP instructions to convert a Smi to a double.
void IntegerToDoubleConversionWithVFP3(Register inReg,
Register outHighReg,
Register outLowReg);
// ---------------------------------------------------------------------------
// Runtime calls

12
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -588,9 +588,9 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label stack_limit_hit;
Label stack_ok;
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ mov(r0, Operand(stack_guard_limit));
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ sub(r0, sp, r0, SetCC);
// Handle it if the stack pointer is already below the stack limit.
@ -1090,9 +1090,9 @@ void RegExpMacroAssemblerARM::Pop(Register target) {
void RegExpMacroAssemblerARM::CheckPreemption() {
// Check for preemption.
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ mov(r0, Operand(stack_guard_limit));
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(sp, r0);
SafeCall(&check_preempt_label_, ls);

345
deps/v8/src/arm/simulator-arm.cc

@ -342,6 +342,11 @@ void Debugger::Debug() {
PrintF("Z flag: %d; ", sim_->z_flag_);
PrintF("C flag: %d; ", sim_->c_flag_);
PrintF("V flag: %d\n", sim_->v_flag_);
PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
PrintF("INEXACT flag: %d; ", sim_->inexact_vfp_flag_);
} else if (strcmp(cmd, "unstop") == 0) {
intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize;
Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
@ -429,6 +434,24 @@ Simulator::Simulator() {
c_flag_ = false;
v_flag_ = false;
// Initializing VFP registers.
// All registers are initialized to zero to start with, even though
// the S and D registers alias the same physical registers on the
// target (both views are backed by the single vfp_register array here).
for (int i = 0; i < num_s_registers; i++) {
vfp_register[i] = 0;
}
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = false;
v_flag_FPSCR_ = false;
inv_op_vfp_flag_ = false;
div_zero_vfp_flag_ = false;
overflow_vfp_flag_ = false;
underflow_vfp_flag_ = false;
inexact_vfp_flag_ = false;
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
// some buffer below.
@ -545,6 +568,99 @@ int32_t Simulator::get_pc() const {
}
// Getting from and setting into VFP registers.
void Simulator::set_s_register(int sreg, unsigned int value) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
vfp_register[sreg] = value;
}
unsigned int Simulator::get_s_register(int sreg) const {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
return vfp_register[sreg];
}
void Simulator::set_s_register_from_float(int sreg, const float flt) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
// Read the bits from the single precision floating point value
// into the unsigned integer element of vfp_register[] given by index=sreg.
char buffer[sizeof(vfp_register[0])];
memcpy(buffer, &flt, sizeof(vfp_register[0]));
memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
}
void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
// Read the bits from the integer value into the unsigned integer element of
// vfp_register[] given by index=sreg.
char buffer[sizeof(vfp_register[0])];
memcpy(buffer, &sint, sizeof(vfp_register[0]));
memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
}
void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
// Read the bits from the double precision floating point value into the two
// consecutive unsigned integer elements of vfp_register[] given by index
// 2*dreg and 2*dreg+1.
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
#ifndef BIG_ENDIAN_FLOATING_POINT
memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
#else
memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
#endif
}
float Simulator::get_float_from_s_register(int sreg) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
float sm_val = 0.0;
// Read the bits from the unsigned integer vfp_register[] array
// into the single precision floating point value and return it.
char buffer[sizeof(vfp_register[0])];
memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
return(sm_val);
}
int Simulator::get_sinteger_from_s_register(int sreg) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
int sm_val = 0;
// Read the bits from the unsigned integer vfp_register[] array
// into the signed integer value and return it.
char buffer[sizeof(vfp_register[0])];
memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0]));
memcpy(&sm_val, buffer, sizeof(vfp_register[0]));
return(sm_val);
}
double Simulator::get_double_from_d_register(int dreg) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
double dm_val = 0.0;
// Read the bits from the unsigned integer vfp_register[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_register[0])];
#ifdef BIG_ENDIAN_FLOATING_POINT
memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
#else
memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
#endif
memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
return(dm_val);
}
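Aside (not part of this diff): all of these accessors move raw bits with
memcpy rather than pointer casts, which keeps the type punning well
defined in C++. A standalone sketch of the same idiom:

#include <cstdint>
#include <cstring>

uint32_t FloatBits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));  // defined, unlike *(uint32_t*)&f
  return bits;
}

double DoubleFromWords(uint32_t lo, uint32_t hi) {  // little-endian case
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}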
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void Simulator::GetFpArgs(double* x, double* y) {
@ -772,6 +888,37 @@ bool Simulator::OverflowFrom(int32_t alu_out,
}
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
// All non-NaN cases.
if (val1 == val2) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = true;
c_flag_FPSCR_ = true;
v_flag_FPSCR_ = false;
} else if (val1 < val2) {
n_flag_FPSCR_ = true;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = false;
v_flag_FPSCR_ = false;
} else {
// Case when (val1 > val2).
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = true;
v_flag_FPSCR_ = false;
}
}
void Simulator::Copy_FPSCR_to_APSR() {
n_flag_ = n_flag_FPSCR_;
z_flag_ = z_flag_FPSCR_;
c_flag_ = c_flag_FPSCR_;
v_flag_ = v_flag_FPSCR_;
}
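For reference (not in the commit): for ordered (non-NaN) compares the
flag settings above match the ARM FCMP definitions, which is what vmrs
then copies into the APSR. A table-style sketch for checking them:

struct VfpFlags { bool n, z, c, v; };

VfpFlags ExpectedFcmpFlags(double a, double b) {  // non-NaN inputs only
  if (a == b) return {false, true,  true,  false};  // "eq" holds
  if (a < b)  return {true,  false, false, false};  // "lt" holds
  return             {false, false, true,  false};  // "gt" holds
}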
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with register.
int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
@ -1154,7 +1301,7 @@ void Simulator::DecodeType01(Instr* instr) {
}
}
} else {
UNIMPLEMENTED(); // not used by V8
UNIMPLEMENTED(); // Not used by V8.
}
} else {
// extra load/store instructions
@ -1664,16 +1811,15 @@ void Simulator::DecodeType5(Instr* instr) {
void Simulator::DecodeType6(Instr* instr) {
UNIMPLEMENTED();
DecodeType6CoprocessorIns(instr);
}
void Simulator::DecodeType7(Instr* instr) {
if (instr->Bit(24) == 1) {
// Format(instr, "swi 'swi");
SoftwareInterrupt(instr);
} else {
UNIMPLEMENTED();
DecodeTypeVFP(instr);
}
}
@ -1745,6 +1891,177 @@ void Simulator::DecodeUnconditional(Instr* instr) {
}
// void Simulator::DecodeTypeVFP(Instr* instr)
// The following ARMv7 VFP3 instructions are currently supported.
// fmsr: Sn = Rt
// fmrs: Rt = Sn
// fsitod: Dd = Sm
// ftosid: Sd = Dm
// Dd = faddd(Dn, Dm)
// Dd = fsubd(Dn, Dm)
// Dd = fmuld(Dn, Dm)
// Dd = fdivd(Dn, Dm)
// vcmp(Dd, Dm)
// VMRS
void Simulator::DecodeTypeVFP(Instr* instr) {
ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0));
int rt = instr->RtField();
int vm = instr->VmField();
int vn = instr->VnField();
int vd = instr->VdField();
if (instr->Bit(23) == 1) {
if ((instr->Bits(21, 19) == 0x7) &&
(instr->Bits(18, 16) == 0x5) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 1) &&
(instr->Bit(6) == 1) &&
(instr->Bit(4) == 0)) {
double dm_val = get_double_from_d_register(vm);
int32_t int_value = static_cast<int32_t>(dm_val);
set_s_register_from_sinteger(((vd<<1) | instr->DField()), int_value);
} else if ((instr->Bits(21, 19) == 0x7) &&
(instr->Bits(18, 16) == 0x0) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 1) &&
(instr->Bit(7) == 1) &&
(instr->Bit(6) == 1) &&
(instr->Bit(4) == 0)) {
int32_t int_value = get_sinteger_from_s_register(((vm<<1) |
instr->MField()));
double dbl_value = static_cast<double>(int_value);
set_d_register_from_double(vd, dbl_value);
} else if ((instr->Bit(21) == 0x0) &&
(instr->Bit(20) == 0x0) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 1) &&
(instr->Bit(6) == 0) &&
(instr->Bit(4) == 0)) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value / dm_value;
set_d_register_from_double(vd, dd_value);
} else if ((instr->Bits(21, 20) == 0x3) &&
(instr->Bits(19, 16) == 0x4) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 0x1) &&
(instr->Bit(6) == 0x1) &&
(instr->Bit(4) == 0x0)) {
double dd_value = get_double_from_d_register(vd);
double dm_value = get_double_from_d_register(vm);
Compute_FPSCR_Flags(dd_value, dm_value);
} else if ((instr->Bits(23, 20) == 0xF) &&
(instr->Bits(19, 16) == 0x1) &&
(instr->Bits(11, 8) == 0xA) &&
(instr->Bits(7, 5) == 0x0) &&
(instr->Bit(4) == 0x1) &&
(instr->Bits(3, 0) == 0x0)) {
if (instr->Bits(15, 12) == 0xF)
Copy_FPSCR_to_APSR();
else
UNIMPLEMENTED(); // Not used by V8.
} else {
UNIMPLEMENTED(); // Not used by V8.
}
} else if (instr->Bit(21) == 1) {
if ((instr->Bit(20) == 0x1) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 0x1) &&
(instr->Bit(6) == 0) &&
(instr->Bit(4) == 0)) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value + dm_value;
set_d_register_from_double(vd, dd_value);
} else if ((instr->Bit(20) == 0x1) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 0x1) &&
(instr->Bit(6) == 1) &&
(instr->Bit(4) == 0)) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value - dm_value;
set_d_register_from_double(vd, dd_value);
} else if ((instr->Bit(20) == 0x0) &&
(instr->Bits(11, 9) == 0x5) &&
(instr->Bit(8) == 0x1) &&
(instr->Bit(6) == 0) &&
(instr->Bit(4) == 0)) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
}
} else {
if ((instr->Bit(20) == 0x0) &&
(instr->Bits(11, 8) == 0xA) &&
(instr->Bits(6, 5) == 0x0) &&
(instr->Bit(4) == 1) &&
(instr->Bits(3, 0) == 0x0)) {
int32_t rs_val = get_register(rt);
set_s_register_from_sinteger(((vn<<1) | instr->NField()), rs_val);
} else if ((instr->Bit(20) == 0x1) &&
(instr->Bits(11, 8) == 0xA) &&
(instr->Bits(6, 5) == 0x0) &&
(instr->Bit(4) == 1) &&
(instr->Bits(3, 0) == 0x0)) {
int32_t int_value = get_sinteger_from_s_register(((vn<<1) |
instr->NField()));
set_register(rt, int_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
}
}
}
// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
// Decode Type 6 coprocessor instructions.
// Dm = fmdrr(Rt, Rt2)
// <Rt, Rt2> = fmrrd(Dm)
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
int rt = instr->RtField();
int rn = instr->RnField();
int vm = instr->VmField();
if (instr->Bit(23) == 1) {
UNIMPLEMENTED();
} else if (instr->Bit(22) == 1) {
if ((instr->Bits(27, 24) == 0xC) &&
(instr->Bit(22) == 1) &&
(instr->Bits(11, 8) == 0xB) &&
(instr->Bits(7, 6) == 0x0) &&
(instr->Bit(4) == 1)) {
if (instr->Bit(20) == 0) {
int32_t rs_val = get_register(rt);
int32_t rn_val = get_register(rn);
set_s_register_from_sinteger(2*vm, rs_val);
set_s_register_from_sinteger((2*vm+1), rn_val);
} else if (instr->Bit(20) == 1) {
int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
set_register(rt, rt_int_value);
set_register(rn, rn_int_value);
}
} else {
UNIMPLEMENTED();
}
} else if (instr->Bit(21) == 1) {
UNIMPLEMENTED();
} else {
UNIMPLEMENTED();
}
}
// Executes the current instruction.
void Simulator::InstructionDecode(Instr* instr) {
pc_modified_ = false;
@ -1802,7 +2119,6 @@ void Simulator::InstructionDecode(Instr* instr) {
}
//
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.
@ -1924,6 +2240,25 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
return result;
}
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
*stack_slot = address;
set_register(sp, new_sp);
return new_sp;
}
uintptr_t Simulator::PopAddress() {
int current_sp = get_register(sp);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
uintptr_t address = *stack_slot;
set_register(sp, current_sp + sizeof(uintptr_t));
return address;
}
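Aside (not part of this diff): these two keep a stack of C++ TryCatch
addresses inside the simulated address space, so under the simulator a
registered address needs one extra indirection; that is exactly what the
TRY_CATCH_FROM_ADDRESS macro in simulator-arm.h undoes. Usage sketch:

// uintptr_t slot = Simulator::current()->PushAddress(try_catch_address);
// ... run simulated code ...
// Simulator::current()->PopAddress();  // must balance the PushAddress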
} } // namespace assembler::arm
#endif // !defined(__arm__)

79
deps/v8/src/arm/simulator-arm.h

@ -52,6 +52,12 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
return try_catch_address;
}
static inline void UnregisterCTryCatch() { }
};
@ -60,6 +66,10 @@ class SimulatorStack : public v8::internal::AllStatic {
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
#else // defined(__arm__)
// When running with the simulator transition into simulated execution at this
@ -73,6 +83,11 @@ class SimulatorStack : public v8::internal::AllStatic {
assembler::arm::Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
#include "constants-arm.h"
@ -82,7 +97,6 @@ namespace arm {
class Simulator {
public:
friend class Debugger;
enum Register {
no_reg = -1,
r0 = 0, r1, r2, r3, r4, r5, r6, r7,
@ -90,7 +104,15 @@ class Simulator {
num_registers,
sp = 13,
lr = 14,
pc = 15
pc = 15,
s0 = 0, s1, s2, s3, s4, s5, s6, s7,
s8, s9, s10, s11, s12, s13, s14, s15,
s16, s17, s18, s19, s20, s21, s22, s23,
s24, s25, s26, s27, s28, s29, s30, s31,
num_s_registers = 32,
d0 = 0, d1, d2, d3, d4, d5, d6, d7,
d8, d9, d10, d11, d12, d13, d14, d15,
num_d_registers = 16
};
Simulator();
@ -106,6 +128,16 @@ class Simulator {
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
// Support for VFP.
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
void set_d_register_from_double(int dreg, const double& dbl);
double get_double_from_d_register(int dreg);
void set_s_register_from_float(int sreg, const float flt);
float get_float_from_s_register(int sreg);
void set_s_register_from_sinteger(int reg, const int value);
int get_sinteger_from_s_register(int reg);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
int32_t get_pc() const;
@ -124,6 +156,12 @@ class Simulator {
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
// Pop an address from the JS stack.
uintptr_t PopAddress();
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@ -154,6 +192,10 @@ class Simulator {
int32_t right,
bool addition);
// Support for VFP.
void Compute_FPSCR_Flags(double val1, double val2);
void Copy_FPSCR_to_APSR();
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instr* instr, bool* carry_out);
int32_t GetImm(Instr* instr, bool* carry_out);
@ -185,6 +227,10 @@ class Simulator {
void DecodeType7(Instr* instr);
void DecodeUnconditional(Instr* instr);
// Support for VFP.
void DecodeTypeVFP(Instr* instr);
void DecodeType6CoprocessorIns(Instr* instr);
// Executes one instruction.
void InstructionDecode(Instr* instr);
@ -198,20 +244,34 @@ class Simulator {
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
// architecture state
// Architecture state.
int32_t registers_[16];
bool n_flag_;
bool z_flag_;
bool c_flag_;
bool v_flag_;
// simulator support
// VFP architecture state.
unsigned int vfp_register[num_s_registers];
bool n_flag_FPSCR_;
bool z_flag_FPSCR_;
bool c_flag_FPSCR_;
bool v_flag_FPSCR_;
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;
bool div_zero_vfp_flag_;
bool overflow_vfp_flag_;
bool underflow_vfp_flag_;
bool inexact_vfp_flag_;
// Simulator support.
char* stack_;
bool pc_modified_;
int icount_;
static bool initialized_;
// registered breakpoints
// Registered breakpoints.
Instr* break_pc_;
instr_t break_instr_;
};
@ -229,6 +289,15 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return assembler::arm::Simulator::current()->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
assembler::arm::Simulator* sim = assembler::arm::Simulator::current();
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
assembler::arm::Simulator::current()->PopAddress();
}
};

44
deps/v8/src/assembler.cc

@ -174,14 +174,14 @@ void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
*--pos_ = data_delta << kPositionTypeTagBits | tag;
*--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag);
}
void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) {
*--pos_ = top_tag << (kTagBits + kExtraTagBits) |
extra_tag << kTagBits |
kDefaultTag;
*--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) |
extra_tag << kTagBits |
kDefaultTag);
}
@ -196,7 +196,7 @@ void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
*--pos_ = data_delta;
*--pos_ = static_cast<byte>(data_delta);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
data_delta = data_delta >> kBitsPerByte;
}
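Aside (not in the commit): the loop above emits the delta one byte at a
time, least significant byte first, relying on arithmetic right shift to
sign-extend. A hedged sketch of recovering such a value from its n
stored bytes (taken here in least-significant-first order):

#include <cstddef>
#include <cstdint>

intptr_t DecodeSignedBytes(const uint8_t* bytes, size_t n) {
  uintptr_t v = 0;
  for (size_t i = n; i-- > 0;) {
    v = (v << 8) | bytes[i];  // bytes[0] holds the least significant byte
  }
  // Sign-extend from the top stored byte; the arithmetic right shift
  // mirrors the property the writer relies on (tested in test-utils.cc).
  const int shift = static_cast<int>((sizeof(uintptr_t) - n) * 8);
  return static_cast<intptr_t>(v << shift) >> shift;
}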
@ -211,7 +211,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::NUMBER_OF_MODES < kMaxRelocModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = rinfo->pc() - last_pc_;
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
RelocInfo::Mode rmode = rinfo->rmode();
// The two most common modes are given small tags, and usually fit in a byte.
@ -522,6 +522,10 @@ ExternalReference::ExternalReference(Builtins::CFunctionId id)
: address_(Redirect(Builtins::c_function_address(id))) {}
ExternalReference::ExternalReference(ApiFunction* fun)
: address_(Redirect(fun->address())) {}
ExternalReference::ExternalReference(Builtins::Name name)
: address_(Builtins::builtin_address(name)) {}
@ -579,11 +583,16 @@ ExternalReference ExternalReference::roots_address() {
}
ExternalReference ExternalReference::address_of_stack_guard_limit() {
ExternalReference ExternalReference::address_of_stack_limit() {
return ExternalReference(StackGuard::address_of_jslimit());
}
ExternalReference ExternalReference::address_of_real_stack_limit() {
return ExternalReference(StackGuard::address_of_real_jslimit());
}
ExternalReference ExternalReference::address_of_regexp_stack_limit() {
return ExternalReference(RegExpStack::limit_address());
}
@ -608,6 +617,27 @@ ExternalReference ExternalReference::new_space_allocation_limit_address() {
return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
}
ExternalReference ExternalReference::handle_scope_extensions_address() {
return ExternalReference(HandleScope::current_extensions_address());
}
ExternalReference ExternalReference::handle_scope_next_address() {
return ExternalReference(HandleScope::current_next_address());
}
ExternalReference ExternalReference::handle_scope_limit_address() {
return ExternalReference(HandleScope::current_limit_address());
}
ExternalReference ExternalReference::scheduled_exception_address() {
return ExternalReference(Top::scheduled_exception_address());
}
#ifdef V8_NATIVE_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state() {

21
deps/v8/src/assembler.h

@ -373,6 +373,8 @@ class ExternalReference BASE_EMBEDDED {
public:
explicit ExternalReference(Builtins::CFunctionId id);
explicit ExternalReference(ApiFunction* ptr);
explicit ExternalReference(Builtins::Name name);
explicit ExternalReference(Runtime::FunctionId id);
@ -406,7 +408,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference roots_address();
// Static variable StackGuard::address_of_jslimit()
static ExternalReference address_of_stack_guard_limit();
static ExternalReference address_of_stack_limit();
// Static variable StackGuard::address_of_real_jslimit()
static ExternalReference address_of_real_stack_limit();
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit();
@ -422,6 +427,12 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference double_fp_operation(Token::Value operation);
static ExternalReference compare_doubles();
static ExternalReference handle_scope_extensions_address();
static ExternalReference handle_scope_next_address();
static ExternalReference handle_scope_limit_address();
static ExternalReference scheduled_exception_address();
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -460,12 +471,16 @@ class ExternalReference BASE_EMBEDDED {
static void* Redirect(void* address, bool fp_return = false) {
if (redirector_ == NULL) return address;
return (*redirector_)(address, fp_return);
void* answer = (*redirector_)(address, fp_return);
return answer;
}
static void* Redirect(Address address_arg, bool fp_return = false) {
void* address = reinterpret_cast<void*>(address_arg);
return redirector_ == NULL ? address : (*redirector_)(address, fp_return);
void* answer = (redirector_ == NULL) ?
address :
(*redirector_)(address, fp_return);
return answer;
}
void* address_;

54
deps/v8/src/ast.h

@ -28,7 +28,6 @@
#ifndef V8_AST_H_
#define V8_AST_H_
#include "location.h"
#include "execution.h"
#include "factory.h"
#include "jsregexp.h"
@ -162,7 +161,25 @@ class Statement: public AstNode {
class Expression: public AstNode {
public:
Expression() : location_(Location::Uninitialized()) {}
enum Context {
// Not assigned a context yet, or else will not be visited during
// code generation.
kUninitialized,
// Evaluated for its side effects.
kEffect,
// Evaluated for its value (and side effects).
kValue,
// Evaluated for control flow (and side effects).
kTest,
// Evaluated for control flow and side effects. Value is also
// needed if true.
kValueTest,
// Evaluated for control flow and side effects. Value is also
// needed if false.
kTestValue
};
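// Aside (not part of this diff), an illustrative mapping using made-up
// JavaScript snippets:
//   f();            // f() is in kEffect: only side effects matter.
//   x = f();        // f() is in kValue: its value is needed.
//   if (f()) {}     // f() is in kTest: it only steers control flow.
//   x = a || f();   // a is in kValueTest: value kept when a is true.
//   x = a && f();   // a is in kTestValue: value kept when a is false.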
Expression() : context_(kUninitialized) {}
virtual Expression* AsExpression() { return this; }
@ -177,12 +194,12 @@ class Expression: public AstNode {
// Static type information for this expression.
SmiAnalysis* type() { return &type_; }
Location location() { return location_; }
void set_location(Location loc) { location_ = loc; }
Context context() { return context_; }
void set_context(Context context) { context_ = context; }
private:
SmiAnalysis type_;
Location location_;
Context context_;
};
@ -305,7 +322,7 @@ class IterationStatement: public BreakableStatement {
class DoWhileStatement: public IterationStatement {
public:
explicit DoWhileStatement(ZoneStringList* labels)
: IterationStatement(labels), cond_(NULL) {
: IterationStatement(labels), cond_(NULL), condition_position_(-1) {
}
void Initialize(Expression* cond, Statement* body) {
@ -317,8 +334,14 @@ class DoWhileStatement: public IterationStatement {
Expression* cond() const { return cond_; }
// Position where condition expression starts. We need it to make
// the loop's condition a breakable location.
int condition_position() { return condition_position_; }
void set_condition_position(int pos) { condition_position_ = pos; }
private:
Expression* cond_;
int condition_position_;
};
@ -935,11 +958,7 @@ class Slot: public Expression {
// variable name in the context object on the heap,
// with lookup starting at the current context. index()
// is invalid.
LOOKUP,
// A property in the global object. var()->name() is
// the property name.
GLOBAL
LOOKUP
};
Slot(Variable* var, Type type, int index)
@ -1263,7 +1282,6 @@ class FunctionLiteral: public Expression {
ZoneList<Statement*>* body,
int materialized_literal_count,
int expected_property_count,
bool has_only_this_property_assignments,
bool has_only_simple_this_property_assignments,
Handle<FixedArray> this_property_assignments,
int num_parameters,
@ -1275,7 +1293,6 @@ class FunctionLiteral: public Expression {
body_(body),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
has_only_this_property_assignments_(has_only_this_property_assignments),
has_only_simple_this_property_assignments_(
has_only_simple_this_property_assignments),
this_property_assignments_(this_property_assignments),
@ -1285,7 +1302,8 @@ class FunctionLiteral: public Expression {
is_expression_(is_expression),
loop_nesting_(0),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(Heap::empty_string()) {
inferred_name_(Heap::empty_string()),
try_fast_codegen_(false) {
#ifdef DEBUG
already_compiled_ = false;
#endif
@ -1307,9 +1325,6 @@ class FunctionLiteral: public Expression {
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
bool has_only_this_property_assignments() {
return has_only_this_property_assignments_;
}
bool has_only_simple_this_property_assignments() {
return has_only_simple_this_property_assignments_;
}
@ -1328,6 +1343,9 @@ class FunctionLiteral: public Expression {
inferred_name_ = inferred_name;
}
bool try_fast_codegen() { return try_fast_codegen_; }
void set_try_fast_codegen(bool flag) { try_fast_codegen_ = flag; }
#ifdef DEBUG
void mark_as_compiled() {
ASSERT(!already_compiled_);
@ -1341,7 +1359,6 @@ class FunctionLiteral: public Expression {
ZoneList<Statement*>* body_;
int materialized_literal_count_;
int expected_property_count_;
bool has_only_this_property_assignments_;
bool has_only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
int num_parameters_;
@ -1351,6 +1368,7 @@ class FunctionLiteral: public Expression {
int loop_nesting_;
int function_token_position_;
Handle<String> inferred_name_;
bool try_fast_codegen_;
#ifdef DEBUG
bool already_compiled_;
#endif

42
deps/v8/src/bootstrapper.cc

@ -36,6 +36,7 @@
#include "global-handles.h"
#include "macro-assembler.h"
#include "natives.h"
#include "snapshot.h"
namespace v8 {
namespace internal {
@ -92,14 +93,39 @@ class SourceCodeCache BASE_EMBEDDED {
static SourceCodeCache natives_cache(Script::TYPE_NATIVE);
static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION);
// This is for delete, not delete[].
static List<char*>* delete_these_non_arrays_on_tear_down = NULL;
NativesExternalStringResource::NativesExternalStringResource(const char* source)
: data_(source), length_(StrLength(source)) {
if (delete_these_non_arrays_on_tear_down == NULL) {
delete_these_non_arrays_on_tear_down = new List<char*>(2);
}
// The resources are small objects and we only make a fixed number of
// them, but let's clean them up on exit for neatness.
delete_these_non_arrays_on_tear_down->
Add(reinterpret_cast<char*>(this));
}
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
if (Heap::natives_source_cache()->get(index)->IsUndefined()) {
Handle<String> source_code =
Factory::NewStringFromAscii(Natives::GetScriptSource(index));
Heap::natives_source_cache()->set(index, *source_code);
if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
// We can use external strings for the natives.
NativesExternalStringResource* resource =
new NativesExternalStringResource(
Natives::GetScriptSource(index).start());
Handle<String> source_code =
Factory::NewExternalStringFromAscii(resource);
Heap::natives_source_cache()->set(index, *source_code);
} else {
// Old snapshot code can't cope with external strings at all.
Handle<String> source_code =
Factory::NewStringFromAscii(Natives::GetScriptSource(index));
Heap::natives_source_cache()->set(index, *source_code);
}
}
Handle<Object> cached_source(Heap::natives_source_cache()->get(index));
return Handle<String>::cast(cached_source);
@ -125,6 +151,16 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
void Bootstrapper::TearDown() {
if (delete_these_non_arrays_on_tear_down != NULL) {
int len = delete_these_non_arrays_on_tear_down->length();
ASSERT(len < 20); // Don't use this mechanism for unbounded allocations.
for (int i = 0; i < len; i++) {
delete delete_these_non_arrays_on_tear_down->at(i);
}
delete delete_these_non_arrays_on_tear_down;
delete_these_non_arrays_on_tear_down = NULL;
}
natives_cache.Initialize(false); // Yes, symmetrical
extensions_cache.Initialize(false);
}

18
deps/v8/src/bootstrapper.h

@ -76,6 +76,24 @@ class Bootstrapper : public AllStatic {
static void FreeThreadResources();
};
class NativesExternalStringResource
: public v8::String::ExternalAsciiStringResource {
public:
explicit NativesExternalStringResource(const char* source);
const char* data() const {
return data_;
}
size_t length() const {
return length_;
}
private:
const char* data_;
size_t length_;
};
}} // namespace v8::internal
#endif // V8_BOOTSTRAPPER_H_
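As a side note on the class above: a minimal, hypothetical sketch of how an ExternalAsciiStringResource subclass is consumed through the public v8 API (String::NewExternal); the StaticAsciiResource name is illustrative and not part of this patch.

#include <cstring>
#include <v8.h>

// Minimal resource over a static C string. V8 reads the characters in
// place instead of copying them onto its own heap.
class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
 public:
  explicit StaticAsciiResource(const char* data)
      : data_(data), length_(strlen(data)) {}
  const char* data() const { return data_; }
  size_t length() const { return length_; }
 private:
  const char* data_;
  size_t length_;
};

// The resource must outlive the string, which is why the patch keeps
// every NativesExternalStringResource on a list and frees the entries
// in Bootstrapper::TearDown(). (Callers need an active HandleScope.)
v8::Local<v8::String> WrapSource(const char* source) {
  return v8::String::NewExternal(new StaticAsciiResource(source));
}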

2
deps/v8/src/checks.cc

@ -36,6 +36,8 @@ static int fatal_error_handler_nesting_depth = 0;
// Contains protection against recursive calls (faults while handling faults).
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
fflush(stdout);
fflush(stderr);
fatal_error_handler_nesting_depth++;
// First time we try to print an error message
if (fatal_error_handler_nesting_depth < 2) {

50
deps/v8/src/code-stubs.cc

@ -36,10 +36,27 @@ namespace v8 {
namespace internal {
Handle<Code> CodeStub::GetCode() {
uint32_t key = GetKey();
int index = Heap::code_stubs()->FindEntry(key);
if (index == NumberDictionary::kNotFound) {
HandleScope scope;
bool custom_cache = has_custom_cache();
int index = 0;
uint32_t key = 0;
if (custom_cache) {
Code* cached;
if (GetCustomCache(&cached)) {
return Handle<Code>(cached);
} else {
index = NumberDictionary::kNotFound;
}
} else {
key = GetKey();
index = Heap::code_stubs()->FindEntry(key);
if (index != NumberDictionary::kNotFound)
return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
}
Code* result;
{
v8::HandleScope scope;
// Update the static counter each time a new code stub is generated.
Counters::code_stubs.Increment();
@ -79,25 +96,28 @@ Handle<Code> CodeStub::GetCode() {
}
#endif
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
Factory::DictionaryAtNumberPut(
Handle<NumberDictionary>(Heap::code_stubs()),
key,
code);
Heap::public_set_code_stubs(*dict);
index = Heap::code_stubs()->FindEntry(key);
if (custom_cache) {
SetCustomCache(*code);
} else {
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
Factory::DictionaryAtNumberPut(
Handle<NumberDictionary>(Heap::code_stubs()),
key,
code);
Heap::public_set_code_stubs(*dict);
}
result = *code;
}
ASSERT(index != NumberDictionary::kNotFound);
return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
return Handle<Code>(result);
}
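GetCode() now has two paths: the keyed dictionary in Heap::code_stubs() and a per-stub custom cache. A hedged sketch of the minimal protocol a custom-caching stub implements; the one-slot static member below is illustrative only, while the real ApiGetterEntryStub (further down in this commit) stores its code on the AccessorInfo so the GC can see it.

class OneSlotCachedStub : public CodeStub {
 public:
  virtual bool has_custom_cache() { return true; }
  virtual bool GetCustomCache(Code** code_out) {
    if (cache_ == NULL) return false;  // Miss: GetCode() will generate.
    *code_out = cache_;                // Hit: reuse, no dictionary probe.
    return true;
  }
  virtual void SetCustomCache(Code* value) { cache_ = value; }
 private:
  // Illustrative only: a raw static Code* is not safe across GC, which
  // is exactly why the real stub caches on a heap object instead.
  // (Out-of-line definition of cache_ omitted.)
  static Code* cache_;
  Major MajorKey() { return NoCache; }  // Keeps it out of the keyed cache.
  int MinorKey() { return 0; }
  void Generate(MacroAssembler* masm);  // Platform-specific, elsewhere.
};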
const char* CodeStub::MajorName(CodeStub::Major major_key) {
switch (major_key) {
#define DEF_CASE(name) case name: return #name;
CODE_STUB_LIST_ALL(DEF_CASE)
CODE_STUB_LIST(DEF_CASE)
#undef DEF_CASE
default:
UNREACHABLE();

45
deps/v8/src/code-stubs.h

@ -33,23 +33,23 @@ namespace internal {
// List of code stubs used on all platforms. The order in this list is important
// as only the stubs up to and including RecordWrite allow nested stub calls.
#define CODE_STUB_LIST_ALL(V) \
V(CallFunction) \
V(GenericBinaryOp) \
V(SmiOp) \
V(Compare) \
V(RecordWrite) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
V(UnarySub) \
V(RevertToNumber) \
V(ToBoolean) \
V(Instanceof) \
V(CounterOp) \
V(ArgumentsAccess) \
V(Runtime) \
V(CEntry) \
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
V(GenericBinaryOp) \
V(SmiOp) \
V(Compare) \
V(RecordWrite) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
V(UnarySub) \
V(RevertToNumber) \
V(ToBoolean) \
V(Instanceof) \
V(CounterOp) \
V(ArgumentsAccess) \
V(Runtime) \
V(CEntry) \
V(JSEntry)
// List of code stubs only used on ARM platforms.
@ -64,8 +64,8 @@ namespace internal {
#endif
// Combined list of code stubs.
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL(V) \
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
// CodeStub is the base class of all stubs.
@ -75,6 +75,7 @@ class CodeStub BASE_EMBEDDED {
#define DEF_ENUM(name) name,
CODE_STUB_LIST(DEF_ENUM)
#undef DEF_ENUM
NoCache, // marker for stubs that do custom caching
NUMBER_OF_IDS
};
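The V(...) lists above are ordinary X-macros: one parameterized list expands into both the enum and the name table. A self-contained sketch of the pattern, with the list trimmed to three entries:

#include <cstdio>

#define STUB_LIST(V) \
  V(CallFunction)    \
  V(Compare)         \
  V(CEntry)

enum Major {
// Expands to: CallFunction, Compare, CEntry,
#define DEF_ENUM(name) name,
  STUB_LIST(DEF_ENUM)
#undef DEF_ENUM
  NoCache,  // Marker for stubs that do custom caching, as above.
  NUMBER_OF_IDS
};

const char* MajorName(Major key) {
  switch (key) {
// Expands to: case CallFunction: return "CallFunction"; ...
#define DEF_CASE(name) case name: return #name;
    STUB_LIST(DEF_CASE)
#undef DEF_CASE
    default: return "Unknown";
  }
}

int main() {
  printf("%s\n", MajorName(Compare));  // Prints "Compare".
  return 0;
}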
@ -91,6 +92,12 @@ class CodeStub BASE_EMBEDDED {
virtual ~CodeStub() {}
// Override these methods to provide a custom caching mechanism for
// an individual type of code stub.
virtual bool GetCustomCache(Code** code_out) { return false; }
virtual void SetCustomCache(Code* value) { }
virtual bool has_custom_cache() { return false; }
protected:
static const int kMajorBits = 5;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;

115
deps/v8/src/codegen.cc

@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "oprofile-agent.h"
#include "prettyprinter.h"
@ -250,98 +251,6 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) {
#endif
// Sets the function info on a function.
// The start_position points to the first '(' character after the function name
// in the full script source. When counting characters in the script source,
// the first character is number 0 (not 1).
void CodeGenerator::SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script) {
fun->shared()->set_length(lit->num_parameters());
fun->shared()->set_formal_parameter_count(lit->num_parameters());
fun->shared()->set_script(*script);
fun->shared()->set_function_token_position(lit->function_token_position());
fun->shared()->set_start_position(lit->start_position());
fun->shared()->set_end_position(lit->end_position());
fun->shared()->set_is_expression(lit->is_expression());
fun->shared()->set_is_toplevel(is_toplevel);
fun->shared()->set_inferred_name(*lit->inferred_name());
fun->shared()->SetThisPropertyAssignmentsInfo(
lit->has_only_this_property_assignments(),
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
}
Handle<Code> CodeGenerator::ComputeLazyCompile(int argc) {
CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
}
Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
#ifdef DEBUG
// We should not try to compile the same function literal more than
// once.
node->mark_as_compiled();
#endif
// Determine if the function can be lazily compiled. This is
// necessary to allow some of our builtin JS files to be lazily
// compiled. These builtins cannot be handled lazily by the parser,
// since we have to know if a function uses the special natives
// syntax, which is something the parser records.
bool allow_lazy = node->AllowsLazyCompilation();
// Generate code
Handle<Code> code;
if (FLAG_lazy && allow_lazy) {
code = ComputeLazyCompile(node->num_parameters());
} else {
// The bodies of function literals have not yet been visited by
// the AST optimizer/analyzer.
if (!Rewriter::Optimize(node)) {
return Handle<JSFunction>::null();
}
code = MakeCode(node, script_, false);
// Check for stack-overflow exception.
if (code.is_null()) {
SetStackOverflow();
return Handle<JSFunction>::null();
}
// Function compilation complete.
LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *node->name()));
#ifdef ENABLE_OPROFILE_AGENT
OProfileAgent::CreateNativeCodeRegion(*node->name(),
code->instruction_start(),
code->instruction_size());
#endif
}
// Create a boilerplate function.
Handle<JSFunction> function =
Factory::NewFunctionBoilerplate(node->name(),
node->materialized_literal_count(),
code);
CodeGenerator::SetFunctionInfo(function, node, false, script_);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger that a new function has been added.
Debugger::OnNewFunction(function);
#endif
// Set the expected number of properties for instances and return
// the resulting function.
SetExpectedNofPropertiesFromEstimate(function,
node->expected_property_count());
return function;
}
Handle<Code> CodeGenerator::ComputeCallInitialize(
int argc,
InLoopFlag in_loop) {
@ -398,7 +307,8 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
array->set_undefined(j++);
}
} else {
Handle<JSFunction> function = BuildBoilerplate(node->fun());
Handle<JSFunction> function =
Compiler::BuildBoilerplate(node->fun(), script(), this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
array->set(j++, *function);
@ -521,6 +431,9 @@ void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos());
}
void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
if (FLAG_debug_info) RecordPositions(masm(), stmt->condition_position());
}
void CodeGenerator::CodeForSourcePosition(int pos) {
if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
@ -551,4 +464,20 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
}
bool ApiGetterEntryStub::GetCustomCache(Code** code_out) {
Object* cache = info()->load_stub_cache();
if (cache->IsUndefined()) {
return false;
} else {
*code_out = Code::cast(cache);
return true;
}
}
void ApiGetterEntryStub::SetCustomCache(Code* value) {
info()->set_load_stub_cache(value);
}
} } // namespace v8::internal

31
deps/v8/src/codegen.h

@ -38,9 +38,9 @@
// MakeCode
// MakeCodePrologue
// MakeCodeEpilogue
// SetFunctionInfo
// masm
// frame
// script
// has_valid_frame
// SetFrame
// DeleteFrame
@ -69,6 +69,7 @@
// CodeForFunctionPosition
// CodeForReturnPosition
// CodeForStatementPosition
// CodeForDoWhileConditionPosition
// CodeForSourcePosition
@ -301,7 +302,7 @@ class CEntryStub : public CodeStub {
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
StackFrame::Type frame_type,
ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope);
void GenerateThrowTOS(MacroAssembler* masm);
@ -320,6 +321,32 @@ class CEntryStub : public CodeStub {
};
class ApiGetterEntryStub : public CodeStub {
public:
ApiGetterEntryStub(Handle<AccessorInfo> info,
ApiFunction* fun)
: info_(info),
fun_(fun) { }
void Generate(MacroAssembler* masm);
virtual bool has_custom_cache() { return true; }
virtual bool GetCustomCache(Code** code_out);
virtual void SetCustomCache(Code* value);
static const int kStackSpace = 6;
static const int kArgc = 4;
private:
Handle<AccessorInfo> info() { return info_; }
ApiFunction* fun() { return fun_; }
Major MajorKey() { return NoCache; }
int MinorKey() { return 0; }
const char* GetName() { return "ApiGetterEntryStub"; }
// The accessor info associated with the function.
Handle<AccessorInfo> info_;
// The function to be called.
ApiFunction* fun_;
};
class CEntryDebugBreakStub : public CEntryStub {
public:
CEntryDebugBreakStub() : CEntryStub(1) { }

464
deps/v8/src/compiler.cc

@ -48,23 +48,24 @@ class CodeGenSelector: public AstVisitor {
CodeGenSelector()
: has_supported_syntax_(true),
location_(Location::Uninitialized()) {
context_(Expression::kUninitialized) {
}
CodeGenTag Select(FunctionLiteral* fun);
private:
// Visit an expression in a given expression context.
void ProcessExpression(Expression* expr, Expression::Context context) {
Expression::Context saved = context_;
context_ = context;
Visit(expr);
expr->set_context(context);
context_ = saved;
}
void VisitDeclarations(ZoneList<Declaration*>* decls);
void VisitStatements(ZoneList<Statement*>* stmts);
// Visit an expression in effect context with a desired location of
// nowhere.
void VisitAsEffect(Expression* expr);
// Visit an expression in value context with a desired location of
// temporary.
void VisitAsValue(Expression* expr);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
@ -72,8 +73,8 @@ class CodeGenSelector: public AstVisitor {
bool has_supported_syntax_;
// The desired location of the currently visited expression.
Location location_;
// The desired expression context of the currently visited expression.
Expression::Context context_;
DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
};
@ -82,7 +83,8 @@ class CodeGenSelector: public AstVisitor {
static Handle<Code> MakeCode(FunctionLiteral* literal,
Handle<Script> script,
Handle<Context> context,
bool is_eval) {
bool is_eval,
Handle<SharedFunctionInfo> shared) {
ASSERT(literal != NULL);
// Rewrite the AST by introducing .result assignments where needed.
@ -119,12 +121,21 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
// Generate code and return it.
if (FLAG_fast_compiler) {
CodeGenSelector selector;
CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
if (code_gen == CodeGenSelector::FAST) {
return FastCodeGenerator::MakeCode(literal, script, is_eval);
// If there is no shared function info, try the fast code
// generator for code in the global scope. Otherwise obey the
// explicit hint in the shared function info.
if (shared.is_null() && !literal->scope()->is_global_scope()) {
if (FLAG_trace_bailout) PrintF("Non-global scope\n");
} else if (!shared.is_null() && !shared->try_fast_codegen()) {
if (FLAG_trace_bailout) PrintF("No hint to try fast\n");
} else {
CodeGenSelector selector;
CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
if (code_gen == CodeGenSelector::FAST) {
return FastCodeGenerator::MakeCode(literal, script, is_eval);
}
ASSERT(code_gen == CodeGenSelector::NORMAL);
}
ASSERT(code_gen == CodeGenSelector::NORMAL);
}
return CodeGenerator::MakeCode(literal, script, is_eval);
}
@ -166,7 +177,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
if (is_eval) {
JavaScriptFrameIterator it;
script->set_eval_from_function(it.frame()->function());
int offset = it.frame()->pc() - it.frame()->code()->instruction_start();
int offset = static_cast<int>(
it.frame()->pc() - it.frame()->code()->instruction_start());
script->set_eval_from_instructions_offset(Smi::FromInt(offset));
}
}
@ -209,7 +221,8 @@ static Handle<JSFunction> MakeFunction(bool is_global,
HistogramTimerScope timer(rate);
// Compile the code.
Handle<Code> code = MakeCode(lit, script, context, is_eval);
Handle<Code> code = MakeCode(lit, script, context, is_eval,
Handle<SharedFunctionInfo>::null());
// Check for stack-overflow exceptions.
if (code.is_null()) {
@ -246,7 +259,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
code);
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
CodeGenerator::SetFunctionInfo(fun, lit, true, script);
Compiler::SetFunctionInfo(fun, lit, true, script);
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
@ -410,7 +423,8 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
HistogramTimerScope timer(&Counters::compile_lazy);
// Compile the code.
Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false);
Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false,
shared);
// Check for stack-overflow exception.
if (code.is_null()) {
@ -452,7 +466,6 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
// Set the optimization hints after performing lazy compilation, as these are
// not set when the function is set up as a lazily compiled function.
shared->SetThisPropertyAssignmentsInfo(
lit->has_only_this_property_assignments(),
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
@ -462,18 +475,137 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
}
Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
Handle<Script> script,
AstVisitor* caller) {
#ifdef DEBUG
// We should not try to compile the same function literal more than
// once.
literal->mark_as_compiled();
#endif
// Determine if the function can be lazily compiled. This is
// necessary to allow some of our builtin JS files to be lazily
// compiled. These builtins cannot be handled lazily by the parser,
// since we have to know if a function uses the special natives
// syntax, which is something the parser records.
bool allow_lazy = literal->AllowsLazyCompilation();
// Generate code
Handle<Code> code;
if (FLAG_lazy && allow_lazy) {
code = ComputeLazyCompile(literal->num_parameters());
} else {
// The bodies of function literals have not yet been visited by
// the AST optimizer/analyzer.
if (!Rewriter::Optimize(literal)) {
return Handle<JSFunction>::null();
}
// Generate code and return it.
bool is_compiled = false;
if (FLAG_fast_compiler && literal->try_fast_codegen()) {
CodeGenSelector selector;
CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
if (code_gen == CodeGenSelector::FAST) {
code = FastCodeGenerator::MakeCode(literal,
script,
false); // Not eval.
is_compiled = true;
}
}
if (!is_compiled) {
// We didn't try the fast compiler, or we failed to select it.
code = CodeGenerator::MakeCode(literal,
script,
false); // Not eval.
}
// Check for stack-overflow exception.
if (code.is_null()) {
caller->SetStackOverflow();
return Handle<JSFunction>::null();
}
// Function compilation complete.
LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
#ifdef ENABLE_OPROFILE_AGENT
OProfileAgent::CreateNativeCodeRegion(*literal->name(),
code->instruction_start(),
code->instruction_size());
#endif
}
// Create a boilerplate function.
Handle<JSFunction> function =
Factory::NewFunctionBoilerplate(literal->name(),
literal->materialized_literal_count(),
code);
SetFunctionInfo(function, literal, false, script);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger that a new function has been added.
Debugger::OnNewFunction(function);
#endif
// Set the expected number of properties for instances and return
// the resulting function.
SetExpectedNofPropertiesFromEstimate(function,
literal->expected_property_count());
return function;
}
// Sets the function info on a function.
// The start_position points to the first '(' character after the function name
// in the full script source. When counting characters in the script source,
// the first character is number 0 (not 1).
void Compiler::SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script) {
fun->shared()->set_length(lit->num_parameters());
fun->shared()->set_formal_parameter_count(lit->num_parameters());
fun->shared()->set_script(*script);
fun->shared()->set_function_token_position(lit->function_token_position());
fun->shared()->set_start_position(lit->start_position());
fun->shared()->set_end_position(lit->end_position());
fun->shared()->set_is_expression(lit->is_expression());
fun->shared()->set_is_toplevel(is_toplevel);
fun->shared()->set_inferred_name(*lit->inferred_name());
fun->shared()->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
fun->shared()->set_try_fast_codegen(lit->try_fast_codegen());
}
CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
Scope* scope = fun->scope();
if (!scope->is_global_scope()) {
if (FLAG_trace_bailout) PrintF("Non-global scope\n");
if (scope->num_heap_slots() > 0) {
// We support functions with a local context if they do not have
// parameters that need to be copied into the context.
for (int i = 0, len = scope->num_parameters(); i < len; i++) {
Slot* slot = scope->parameter(i)->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
if (FLAG_trace_bailout) {
PrintF("function has context-allocated parameters");
}
return NORMAL;
}
}
}
if (scope->arguments() != NULL) {
if (FLAG_trace_bailout) PrintF("function uses 'arguments'\n");
return NORMAL;
}
ASSERT(scope->num_heap_slots() == 0);
ASSERT(scope->arguments() == NULL);
has_supported_syntax_ = true;
VisitDeclarations(fun->scope()->declarations());
VisitDeclarations(scope->declarations());
if (!has_supported_syntax_) return NORMAL;
VisitStatements(fun->body());
@ -513,34 +645,9 @@ void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
}
void CodeGenSelector::VisitAsEffect(Expression* expr) {
if (location_.is_effect()) {
Visit(expr);
} else {
Location saved = location_;
location_ = Location::Effect();
Visit(expr);
location_ = saved;
}
}
void CodeGenSelector::VisitAsValue(Expression* expr) {
if (location_.is_value()) {
Visit(expr);
} else {
Location saved = location_;
location_ = Location::Value();
Visit(expr);
location_ = saved;
}
}
void CodeGenSelector::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->var();
if (!var->is_global() || var->mode() == Variable::CONST) {
BAILOUT("Non-global declaration");
if (decl->fun() != NULL) {
ProcessExpression(decl->fun(), Expression::kValue);
}
}
@ -551,7 +658,7 @@ void CodeGenSelector::VisitBlock(Block* stmt) {
void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
VisitAsEffect(stmt->expression());
ProcessExpression(stmt->expression(), Expression::kEffect);
}
@ -561,7 +668,11 @@ void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {
void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
BAILOUT("IfStatement");
ProcessExpression(stmt->condition(), Expression::kTest);
CHECK_BAILOUT;
Visit(stmt->then_statement());
CHECK_BAILOUT;
Visit(stmt->else_statement());
}
@ -576,7 +687,7 @@ void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
VisitAsValue(stmt->expression());
ProcessExpression(stmt->expression(), Expression::kValue);
}
@ -596,17 +707,39 @@ void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) {
void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
BAILOUT("DoWhileStatement");
// We do not handle loops with break or continue statements in their
// body. We will bail out when we hit those statements in the body.
ProcessExpression(stmt->cond(), Expression::kTest);
CHECK_BAILOUT;
Visit(stmt->body());
}
void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
BAILOUT("WhileStatement");
// We do not handle loops with break or continue statements in their
// body. We will bail out when we hit those statements in the body.
ProcessExpression(stmt->cond(), Expression::kTest);
CHECK_BAILOUT;
Visit(stmt->body());
}
void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
BAILOUT("ForStatement");
// We do not handle loops with break or continue statements in their
// body. We will bail out when we hit those statements in the body.
if (stmt->init() != NULL) {
Visit(stmt->init());
CHECK_BAILOUT;
}
if (stmt->cond() != NULL) {
ProcessExpression(stmt->cond(), Expression::kTest);
CHECK_BAILOUT;
}
Visit(stmt->body());
if (stmt->next() != NULL) {
CHECK_BAILOUT;
Visit(stmt->next());
}
}
@ -626,15 +759,12 @@ void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {
BAILOUT("DebuggerStatement");
// Debugger statement is supported.
}
void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {
if (!expr->AllowsLazyCompilation()) {
BAILOUT("FunctionLiteral does not allow lazy compilation");
}
expr->set_location(location_);
// Function literal is supported.
}
@ -645,7 +775,11 @@ void CodeGenSelector::VisitFunctionBoilerplateLiteral(
void CodeGenSelector::VisitConditional(Conditional* expr) {
BAILOUT("Conditional");
ProcessExpression(expr->condition(), Expression::kTest);
CHECK_BAILOUT;
ProcessExpression(expr->then_expression(), context_);
CHECK_BAILOUT;
ProcessExpression(expr->else_expression(), context_);
}
@ -660,28 +794,27 @@ void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
if (rewrite != NULL) {
// Non-global.
Slot* slot = rewrite->AsSlot();
if (slot == NULL) {
// This is a variable rewritten to an explicit property access
// on the arguments object.
BAILOUT("non-global/non-slot variable reference");
}
Slot::Type type = slot->type();
if (type != Slot::PARAMETER && type != Slot::LOCAL) {
BAILOUT("non-parameter/non-local slot reference");
if (slot != NULL) {
Slot::Type type = slot->type();
// When LOOKUP slots are enabled, some currently dead code
// implementing unary typeof will become live.
if (type == Slot::LOOKUP) {
BAILOUT("Lookup slot");
}
} else {
BAILOUT("access to arguments object");
}
}
expr->set_location(location_);
}
void CodeGenSelector::VisitLiteral(Literal* expr) {
expr->set_location(location_);
/* Nothing to do. */
}
void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {
expr->set_location(location_);
/* Nothing to do. */
}
@ -711,14 +844,13 @@ void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER: // Fall through.
case ObjectLiteral::Property::SETTER: // Fall through.
case ObjectLiteral::Property::PROTOTYPE:
VisitAsValue(property->key());
ProcessExpression(property->key(), Expression::kValue);
CHECK_BAILOUT;
break;
}
VisitAsValue(property->value());
ProcessExpression(property->value(), Expression::kValue);
CHECK_BAILOUT;
}
expr->set_location(location_);
}
@ -728,10 +860,9 @@ void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
Expression* subexpr = subexprs->at(i);
if (subexpr->AsLiteral() != NULL) continue;
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
VisitAsValue(subexpr);
ProcessExpression(subexpr, Expression::kValue);
CHECK_BAILOUT;
}
expr->set_location(location_);
}
@ -741,13 +872,8 @@ void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
void CodeGenSelector::VisitAssignment(Assignment* expr) {
// We support plain non-compound assignments to parameters and
// non-context (stack-allocated) locals.
if (expr->starts_initialization_block() ||
expr->ends_initialization_block()) {
BAILOUT("initialization block start");
}
// We support plain non-compound assignments to properties, parameters and
// non-context (stack-allocated) locals, and global variables.
Token::Value op = expr->op();
if (op == Token::INIT_CONST) BAILOUT("initialize constant");
if (op != Token::ASSIGN && op != Token::INIT_VAR) {
@ -755,18 +881,39 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
}
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
if (var == NULL) BAILOUT("non-variable assignment");
if (!var->is_global()) {
ASSERT(var->slot() != NULL);
Slot::Type type = var->slot()->type();
if (type != Slot::PARAMETER && type != Slot::LOCAL) {
BAILOUT("non-parameter/non-local slot assignment");
Property* prop = expr->target()->AsProperty();
if (var != NULL) {
// All global variables are supported.
if (!var->is_global()) {
if (var->slot() == NULL) {
// This is a parameter that has been rewritten to an arguments access.
BAILOUT("non-global/non-slot assignment");
}
Slot::Type type = var->slot()->type();
if (type == Slot::LOOKUP) {
BAILOUT("Lookup slot");
}
}
} else if (prop != NULL) {
ProcessExpression(prop->obj(), Expression::kValue);
CHECK_BAILOUT;
// We will only visit the key during code generation for keyed property
// stores. Leave its expression context uninitialized for named
// property stores.
Literal* lit = prop->key()->AsLiteral();
uint32_t ignored;
if (lit == NULL ||
!lit->handle()->IsSymbol() ||
String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
ProcessExpression(prop->key(), Expression::kValue);
CHECK_BAILOUT;
}
} else {
// This is a throw reference error.
BAILOUT("non-variable/non-property assignment");
}
VisitAsValue(expr->value());
expr->set_location(location_);
ProcessExpression(expr->value(), Expression::kValue);
}
@ -776,10 +923,9 @@ void CodeGenSelector::VisitThrow(Throw* expr) {
void CodeGenSelector::VisitProperty(Property* expr) {
VisitAsValue(expr->obj());
ProcessExpression(expr->obj(), Expression::kValue);
CHECK_BAILOUT;
VisitAsValue(expr->key());
expr->set_location(location_);
ProcessExpression(expr->key(), Expression::kValue);
}
@ -790,33 +936,45 @@ void CodeGenSelector::VisitCall(Call* expr) {
// Check for supported calls
if (var != NULL && var->is_possibly_eval()) {
BAILOUT("Call to a function named 'eval'");
BAILOUT("call to the identifier 'eval'");
} else if (var != NULL && !var->is_this() && var->is_global()) {
// ----------------------------------
// JavaScript example: 'foo(1, 2, 3)' // foo is global
// ----------------------------------
// Calls to global variables are supported.
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
BAILOUT("call to a lookup slot");
} else if (fun->AsProperty() != NULL) {
Property* prop = fun->AsProperty();
Literal* literal_key = prop->key()->AsLiteral();
if (literal_key != NULL && literal_key->handle()->IsSymbol()) {
ProcessExpression(prop->obj(), Expression::kValue);
CHECK_BAILOUT;
} else {
ProcessExpression(prop->obj(), Expression::kValue);
CHECK_BAILOUT;
ProcessExpression(prop->key(), Expression::kValue);
CHECK_BAILOUT;
}
} else {
BAILOUT("Call to a non-global function");
// Otherwise the call is supported if the function expression is.
ProcessExpression(fun, Expression::kValue);
}
// Check all arguments to the call. (Relies on TEMP meaning STACK.)
// Check all arguments to the call.
for (int i = 0; i < args->length(); i++) {
VisitAsValue(args->at(i));
ProcessExpression(args->at(i), Expression::kValue);
CHECK_BAILOUT;
}
expr->set_location(location_);
}
void CodeGenSelector::VisitCallNew(CallNew* expr) {
VisitAsValue(expr->expression());
ProcessExpression(expr->expression(), Expression::kValue);
CHECK_BAILOUT;
ZoneList<Expression*>* args = expr->arguments();
// Check all arguments to the call
for (int i = 0; i < args->length(); i++) {
VisitAsValue(args->at(i));
ProcessExpression(args->at(i), Expression::kValue);
CHECK_BAILOUT;
}
expr->set_location(location_);
}
@ -830,37 +988,88 @@ void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
}
// Check all arguments to the call.
for (int i = 0; i < expr->arguments()->length(); i++) {
VisitAsValue(expr->arguments()->at(i));
ProcessExpression(expr->arguments()->at(i), Expression::kValue);
CHECK_BAILOUT;
}
expr->set_location(location_);
}
void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
BAILOUT("UnaryOperation");
switch (expr->op()) {
case Token::VOID:
ProcessExpression(expr->expression(), Expression::kEffect);
break;
case Token::NOT:
ProcessExpression(expr->expression(), Expression::kTest);
break;
case Token::TYPEOF:
ProcessExpression(expr->expression(), Expression::kValue);
break;
default:
BAILOUT("UnaryOperation");
}
}
void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
BAILOUT("CountOperation");
// We support postfix count operations on global variables.
if (expr->is_prefix()) BAILOUT("Prefix CountOperation");
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
if (var == NULL || !var->is_global()) BAILOUT("non-global postincrement");
ProcessExpression(expr->expression(), Expression::kValue);
}
void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
case Token::COMMA:
VisitAsEffect(expr->left());
ProcessExpression(expr->left(), Expression::kEffect);
CHECK_BAILOUT;
Visit(expr->right()); // Location is the same as the parent location.
ProcessExpression(expr->right(), context_);
break;
case Token::OR:
VisitAsValue(expr->left());
switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect: // Fall through.
case Expression::kTest: // Fall through.
case Expression::kTestValue:
// The left subexpression's value is not needed; it is in a pure
// test context.
ProcessExpression(expr->left(), Expression::kTest);
break;
case Expression::kValue: // Fall through.
case Expression::kValueTest:
// The left subexpression's value is needed; it is in a hybrid
// value/test context.
ProcessExpression(expr->left(), Expression::kValueTest);
break;
}
CHECK_BAILOUT;
// The location for the right subexpression is the same as for the
// whole expression so we call Visit directly.
Visit(expr->right());
ProcessExpression(expr->right(), context_);
break;
case Token::AND:
switch (context_) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect: // Fall through.
case Expression::kTest: // Fall through.
case Expression::kValueTest:
// The left subexpression's value is not needed; it is in a pure
// test context.
ProcessExpression(expr->left(), Expression::kTest);
break;
case Expression::kValue: // Fall through.
case Expression::kTestValue:
// The left subexpression's value is needed; it is in a hybrid
// test/value context.
ProcessExpression(expr->left(), Expression::kTestValue);
break;
}
CHECK_BAILOUT;
ProcessExpression(expr->right(), context_);
break;
case Token::ADD:
@ -874,20 +1083,21 @@ void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
case Token::SHL:
case Token::SHR:
case Token::SAR:
VisitAsValue(expr->left());
ProcessExpression(expr->left(), Expression::kValue);
CHECK_BAILOUT;
VisitAsValue(expr->right());
ProcessExpression(expr->right(), Expression::kValue);
break;
default:
BAILOUT("Unsupported binary operation");
}
expr->set_location(location_);
}
void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
BAILOUT("CompareOperation");
ProcessExpression(expr->left(), Expression::kValue);
CHECK_BAILOUT;
ProcessExpression(expr->right(), Expression::kValue);
}

13
deps/v8/src/compiler.h

@ -71,6 +71,19 @@ class Compiler : public AllStatic {
// true on success and false if the compilation resulted in a stack
// overflow.
static bool CompileLazy(Handle<SharedFunctionInfo> shared, int loop_nesting);
// Compile a function boilerplate object (the function is possibly
// lazily compiled). Called recursively from a backend code
// generator 'caller' to build the boilerplate.
static Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node,
Handle<Script> script,
AstVisitor* caller);
// Set the function info for a newly compiled function.
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script);
};

31
deps/v8/src/conversions.cc

@ -50,7 +50,7 @@ int HexValue(uc32 c) {
// Provide a common interface to getting a character at a certain
// index from a char* or a String object.
static inline int GetChar(const char* str, int index) {
ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
ASSERT(index >= 0 && index < StrLength(str));
return str[index];
}
@ -61,7 +61,7 @@ static inline int GetChar(String* str, int index) {
static inline int GetLength(const char* str) {
return strlen(str);
return StrLength(str);
}
@ -101,7 +101,7 @@ static inline void ReleaseCString(String* original, const char* str) {
static inline bool IsSpace(const char* str, int index) {
ASSERT(index >= 0 && index < static_cast<int>(strlen(str)));
ASSERT(index >= 0 && index < StrLength(str));
return Scanner::kIsWhiteSpace.get(str[index]);
}
@ -121,13 +121,13 @@ static inline bool SubStringEquals(const char* str,
static inline bool SubStringEquals(String* str, int index, const char* other) {
HandleScope scope;
int str_length = str->length();
int other_length = strlen(other);
int other_length = StrLength(other);
int end = index + other_length < str_length ?
index + other_length :
str_length;
Handle<String> slice =
Factory::NewStringSlice(Handle<String>(str), index, end);
return slice->IsEqualTo(Vector<const char>(other, other_length));
Handle<String> substring =
Factory::NewSubString(Handle<String>(str), index, end);
return substring->IsEqualTo(Vector<const char>(other, other_length));
}
@ -319,7 +319,7 @@ static double InternalStringToDouble(S* str,
ReleaseCString(str, cstr);
if (result != 0.0 || end != cstr) {
// It appears that strtod worked
index += end - cstr;
index += static_cast<int>(end - cstr);
} else {
// Check for {+,-,}Infinity
bool is_negative = (GetChar(str, index) == '-');
@ -383,7 +383,7 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
int sign;
char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL);
int length = strlen(decimal_rep);
int length = StrLength(decimal_rep);
if (sign) builder.AddCharacter('-');
@ -465,7 +465,7 @@ char* DoubleToFixedCString(double value, int f) {
int decimal_point;
int sign;
char* decimal_rep = dtoa(abs_value, 3, f, &decimal_point, &sign, NULL);
int decimal_rep_length = strlen(decimal_rep);
int decimal_rep_length = StrLength(decimal_rep);
// Create a representation that is padded with zeros if needed.
int zero_prefix_length = 0;
@ -526,7 +526,8 @@ static char* CreateExponentialRepresentation(char* decimal_rep,
if (significant_digits != 1) {
builder.AddCharacter('.');
builder.AddString(decimal_rep + 1);
builder.AddPadding('0', significant_digits - strlen(decimal_rep));
int rep_length = StrLength(decimal_rep);
builder.AddPadding('0', significant_digits - rep_length);
}
builder.AddCharacter('e');
@ -553,11 +554,11 @@ char* DoubleToExponentialCString(double value, int f) {
char* decimal_rep = NULL;
if (f == -1) {
decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL);
f = strlen(decimal_rep) - 1;
f = StrLength(decimal_rep) - 1;
} else {
decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL);
}
int decimal_rep_length = strlen(decimal_rep);
int decimal_rep_length = StrLength(decimal_rep);
ASSERT(decimal_rep_length > 0);
ASSERT(decimal_rep_length <= f + 1);
USE(decimal_rep_length);
@ -585,7 +586,7 @@ char* DoubleToPrecisionCString(double value, int p) {
int decimal_point;
int sign;
char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL);
int decimal_rep_length = strlen(decimal_rep);
int decimal_rep_length = StrLength(decimal_rep);
ASSERT(decimal_rep_length <= p);
int exponent = decimal_point - 1;
@ -619,7 +620,7 @@ char* DoubleToPrecisionCString(double value, int p) {
builder.AddCharacter('.');
const int extra = negative ? 2 : 1;
if (decimal_rep_length > decimal_point) {
const int len = strlen(decimal_rep + decimal_point);
const int len = StrLength(decimal_rep + decimal_point);
const int n = Min(len, p - (builder.position() - extra));
builder.AddSubstring(decimal_rep + decimal_point, n);
}
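The strlen-to-StrLength changes in this file are 64-bit hygiene: strlen returns size_t while all of these interfaces traffic in int. A plausible sketch of such a helper, assuming a checked narrowing (the real one lives in V8's utility headers):

#include <cassert>
#include <cstring>

// Returns the length as an int and asserts that it actually fits,
// instead of letting a 64-bit size_t truncate silently.
static inline int StrLength(const char* string) {
  size_t length = strlen(string);
  assert(length == static_cast<size_t>(static_cast<int>(length)));
  return static_cast<int>(length);
}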

10
deps/v8/src/debug-agent.cc

@ -105,7 +105,7 @@ void DebuggerAgent::CreateSession(Socket* client) {
if (session_ != NULL) {
static const char* message = "Remote debugging session already active\r\n";
client->Send(message, strlen(message));
client->Send(message, StrLength(message));
delete client;
return;
}
@ -172,14 +172,15 @@ void DebuggerAgentSession::Run() {
}
// Convert UTF-8 to UTF-16.
unibrow::Utf8InputBuffer<> buf(*message, strlen(*message));
unibrow::Utf8InputBuffer<> buf(*message,
StrLength(*message));
int len = 0;
while (buf.has_more()) {
buf.GetNext();
len++;
}
int16_t* temp = NewArray<int16_t>(len + 1);
buf.Reset(*message, strlen(*message));
buf.Reset(*message, StrLength(*message));
for (int i = 0; i < len; i++) {
temp[i] = buf.GetNext();
}
@ -203,7 +204,8 @@ void DebuggerAgentSession::Shutdown() {
const char* DebuggerAgentUtil::kContentLength = "Content-Length";
int DebuggerAgentUtil::kContentLengthSize = strlen(kContentLength);
int DebuggerAgentUtil::kContentLengthSize =
StrLength(kContentLength);
SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {

9
deps/v8/src/debug.cc

@ -108,12 +108,13 @@ void BreakLocationIterator::Next() {
// current value of these.
if (RelocInfo::IsPosition(rmode())) {
if (RelocInfo::IsStatementPosition(rmode())) {
statement_position_ =
rinfo()->data() - debug_info_->shared()->start_position();
statement_position_ = static_cast<int>(
rinfo()->data() - debug_info_->shared()->start_position());
}
// Always update the position as we don't want that to be before the
// statement position.
position_ = rinfo()->data() - debug_info_->shared()->start_position();
position_ = static_cast<int>(
rinfo()->data() - debug_info_->shared()->start_position());
ASSERT(position_ >= 0);
ASSERT(statement_position_ >= 0);
}
@ -182,7 +183,7 @@ void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
// Check if this break point is closer that what was previously found.
if (this->pc() < pc && pc - this->pc() < distance) {
closest_break_point = break_point();
distance = pc - this->pc();
distance = static_cast<int>(pc - this->pc());
// Check whether we can't get any closer.
if (distance == 0) break;
}

6
deps/v8/src/debug.h

@ -102,7 +102,9 @@ class BreakLocationIterator {
void ClearAllDebugBreak();
inline int code_position() { return pc() - debug_info_->code()->entry(); }
inline int code_position() {
return static_cast<int>(pc() - debug_info_->code()->entry());
}
inline int break_point() { return break_point_; }
inline int position() { return position_; }
inline int statement_position() { return statement_position_; }
@ -377,6 +379,8 @@ class Debug {
static const int kX64CallInstructionLength = 13;
static const int kX64JSReturnSequenceLength = 13;
static const int kARMJSReturnSequenceLength = 4;
// Code generator routines.
static void GenerateLoadICDebugBreak(MacroAssembler* masm);
static void GenerateStoreICDebugBreak(MacroAssembler* masm);

4
deps/v8/src/disassembler.cc

@ -74,7 +74,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
}
if (code_ != NULL) {
int offs = pc - code_->instruction_start();
int offs = static_cast<int>(pc - code_->instruction_start());
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_->instruction_size()) {
OS::SNPrintF(buffer, "%d (%p)", offs, pc);
@ -289,7 +289,7 @@ static int DecodeIt(FILE* f,
}
delete it;
return pc - begin;
return static_cast<int>(pc - begin);
}
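Same theme as the StrLength changes: subtracting two byte pointers yields a ptrdiff_t, which is 64 bits wide on x64, so returning the difference as int now narrows explicitly. A minimal illustration (the function name is mine):

#include <cstddef>

// Offset of pc within a code object, as the 32-bit int V8 uses for
// instruction offsets. The cast is safe because code objects are far
// smaller than 2GB; making it explicit silences 64-bit truncation
// warnings and documents that assumption.
int OffsetWithin(const unsigned char* begin, const unsigned char* pc) {
  ptrdiff_t diff = pc - begin;
  return static_cast<int>(diff);
}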

39
deps/v8/src/execution.cc

@ -31,18 +31,8 @@
#include "api.h"
#include "codegen-inl.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/simulator-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#else
#error Unsupported target architecture.
#endif
#include "debug.h"
#include "simulator.h"
#include "v8threads.h"
namespace v8 {
@ -237,15 +227,14 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
// If the current limits are special (eg due to a pending interrupt) then
// leave them alone.
uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit);
if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
thread_local_.jslimit_ = jslimit;
Heap::SetStackLimit(jslimit);
}
if (thread_local_.climit_ == thread_local_.initial_climit_) {
if (thread_local_.climit_ == thread_local_.real_climit_) {
thread_local_.climit_ = limit;
}
thread_local_.initial_climit_ = limit;
thread_local_.initial_jslimit_ = jslimit;
thread_local_.real_climit_ = limit;
thread_local_.real_jslimit_ = jslimit;
}
@ -354,7 +343,7 @@ char* StackGuard::ArchiveStackGuard(char* to) {
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access;
memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
Heap::SetStackLimit(thread_local_.jslimit_);
Heap::SetStackLimits();
return from + sizeof(ThreadLocal);
}
@ -366,33 +355,33 @@ static internal::Thread::LocalStorageKey stack_limit_key =
void StackGuard::FreeThreadResources() {
Thread::SetThreadLocal(
stack_limit_key,
reinterpret_cast<void*>(thread_local_.initial_climit_));
reinterpret_cast<void*>(thread_local_.real_climit_));
}
void StackGuard::ThreadLocal::Clear() {
initial_jslimit_ = kIllegalLimit;
real_jslimit_ = kIllegalLimit;
jslimit_ = kIllegalLimit;
initial_climit_ = kIllegalLimit;
real_climit_ = kIllegalLimit;
climit_ = kIllegalLimit;
nesting_ = 0;
postpone_interrupts_nesting_ = 0;
interrupt_flags_ = 0;
Heap::SetStackLimit(kIllegalLimit);
Heap::SetStackLimits();
}
void StackGuard::ThreadLocal::Initialize() {
if (initial_climit_ == kIllegalLimit) {
if (real_climit_ == kIllegalLimit) {
// Takes the address of the limit variable in order to find out where
// the top of stack is right now.
uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
initial_climit_ = limit;
real_climit_ = limit;
climit_ = limit;
Heap::SetStackLimit(SimulatorStack::JsLimitFromCLimit(limit));
Heap::SetStackLimits();
}
nesting_ = 0;
postpone_interrupts_nesting_ = 0;

41
deps/v8/src/execution.h

@ -150,10 +150,6 @@ class StackGuard : public AllStatic {
// is assumed to grow downwards.
static void SetStackLimit(uintptr_t limit);
static Address address_of_jslimit() {
return reinterpret_cast<Address>(&thread_local_.jslimit_);
}
// Threading support.
static char* ArchiveStackGuard(char* to);
static char* RestoreStackGuard(char* from);
@ -181,16 +177,24 @@ class StackGuard : public AllStatic {
#endif
static void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limit for the current
// This provides an asynchronous read of the stack limits for the current
// thread. There are no locks protecting this, but it is assumed that you
// have the global V8 lock if you are using multiple V8 threads.
static uintptr_t climit() {
return thread_local_.climit_;
}
static uintptr_t jslimit() {
return thread_local_.jslimit_;
}
static uintptr_t real_jslimit() {
return thread_local_.real_jslimit_;
}
static Address address_of_jslimit() {
return reinterpret_cast<Address>(&thread_local_.jslimit_);
}
static Address address_of_real_jslimit() {
return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
}
private:
// You should hold the ExecutionAccess lock when calling this method.
@ -198,17 +202,17 @@ class StackGuard : public AllStatic {
// You should hold the ExecutionAccess lock when calling this method.
static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
Heap::SetStackLimit(value);
thread_local_.jslimit_ = value;
thread_local_.climit_ = value;
Heap::SetStackLimits();
}
// Reset limits to initial values. For example after handling interrupt.
// Reset limits to actual values. For example after handling interrupt.
// You should hold the ExecutionAccess lock when calling this method.
static void reset_limits(const ExecutionAccess& lock) {
thread_local_.jslimit_ = thread_local_.initial_jslimit_;
Heap::SetStackLimit(thread_local_.jslimit_);
thread_local_.climit_ = thread_local_.initial_climit_;
thread_local_.jslimit_ = thread_local_.real_jslimit_;
thread_local_.climit_ = thread_local_.real_climit_;
Heap::SetStackLimits();
}
// Enable or disable interrupts.
@ -232,10 +236,21 @@ class StackGuard : public AllStatic {
// Clear.
void Initialize();
void Clear();
uintptr_t initial_jslimit_;
// The stack limit is split into a JavaScript and a C++ stack limit. These
// two are the same except when running on a simulator where the C++ and
// JavaScript stacks are separate. Each of the two stack limits has two
// values. The one with the real_ prefix is the actual stack limit
// set for the VM. The one without the real_ prefix has the same value as
// the actual stack limit except when there is an interruption (e.g. debug
// break or preemption) in which case it is lowered to make stack checks
// fail. Both the generated code and the runtime system check against the
// one without the real_ prefix.
uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
uintptr_t jslimit_;
uintptr_t initial_climit_;
uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
uintptr_t climit_;
int nesting_;
int postpone_interrupts_nesting_;
int interrupt_flags_;
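A hedged sketch of the mechanism the comment above describes: interrupts are delivered by moving the effective limit so that the very next stack check fails, and real_jslimit_ remembers what to restore afterwards. The struct and helpers below are illustrative, not V8 API.

#include <cstdint>

struct GuardState {
  uintptr_t real_jslimit_;  // The genuine limit for this thread's stack.
  uintptr_t jslimit_;       // What generated stack checks compare against.
};

// Stacks grow downwards, so a check is roughly "sp < jslimit_ => bail out".
// Raising the effective limit above any possible stack pointer makes every
// subsequent check fail, diverting execution into the runtime, which then
// inspects the interrupt flags to see what it was asked to do.
void RequestInterrupt(GuardState* s) {
  s->jslimit_ = ~static_cast<uintptr_t>(0);
}

// Once the interrupt has been handled, restore the real limit
// (this is what reset_limits() does above).
void ClearInterrupt(GuardState* s) {
  s->jslimit_ = s->real_jslimit_;
}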

11
deps/v8/src/factory.cc

@ -106,10 +106,10 @@ Handle<String> Factory::NewConsString(Handle<String> first,
}
Handle<String> Factory::NewStringSlice(Handle<String> str,
int begin,
int end) {
CALL_HEAP_FUNCTION(str->Slice(begin, end), String);
Handle<String> Factory::NewSubString(Handle<String> str,
int begin,
int end) {
CALL_HEAP_FUNCTION(str->SubString(begin, end), String);
}
@ -188,7 +188,8 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
script->set_wrapper(*wrapper);
script->set_line_ends(Heap::undefined_value());
script->set_line_ends_fixed_array(Heap::undefined_value());
script->set_line_ends_js_array(Heap::undefined_value());
script->set_eval_from_function(Heap::undefined_value());
script->set_eval_from_instructions_offset(Smi::FromInt(0));

9
deps/v8/src/factory.h

@ -106,11 +106,10 @@ class Factory : public AllStatic {
static Handle<String> NewConsString(Handle<String> first,
Handle<String> second);
// Create a new sliced string object which represents a substring of a
// backing string.
static Handle<String> NewStringSlice(Handle<String> str,
int begin,
int end);
// Create a new string object which holds a substring of a string.
static Handle<String> NewSubString(Handle<String> str,
int begin,
int end);
// Creates a new external String object. There are two String encodings
// in the system: ASCII and two byte. Unlike other String types, it does

396
deps/v8/src/fast-codegen.cc

@ -28,6 +28,7 @@
#include "v8.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "fast-codegen.h"
#include "stub-cache.h"
#include "debug.h"
@ -35,6 +36,8 @@
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
Handle<Script> script,
bool is_eval) {
@ -71,119 +74,57 @@ int FastCodeGenerator::SlotOffset(Slot* slot) {
}
// All platform macro assemblers in {ia32,x64,arm} have a push(Register)
// function.
void FastCodeGenerator::Move(Location destination, Register source) {
switch (destination.type()) {
case Location::kUninitialized:
UNREACHABLE();
case Location::kEffect:
break;
case Location::kValue:
masm_->push(source);
break;
}
}
// All platform macro assemblers in {ia32,x64,arm} have a pop(Register)
// function.
void FastCodeGenerator::Move(Register destination, Location source) {
switch (source.type()) {
case Location::kUninitialized: // Fall through.
case Location::kEffect:
UNREACHABLE();
case Location::kValue:
masm_->pop(destination);
}
}
void FastCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
int length = declarations->length();
int globals = 0;
for (int i = 0; i < length; i++) {
Declaration* node = declarations->at(i);
Variable* var = node->proxy()->var();
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
Slot* slot = var->slot();
// If it was not possible to allocate the variable at compile
// time, we need to "declare" it at runtime to make sure it
// actually exists in the local context.
if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
UNREACHABLE();
VisitDeclaration(decl);
} else {
// Count global variables and functions for later processing
globals++;
}
}
// Return in case of no declared global functions or variables.
if (globals == 0) return;
// Compute array of global variable and function declarations.
Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
for (int j = 0, i = 0; i < length; i++) {
Declaration* node = declarations->at(i);
Variable* var = node->proxy()->var();
Slot* slot = var->slot();
if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
array->set(j++, *(var->name()));
if (node->fun() == NULL) {
if (var->mode() == Variable::CONST) {
// In case this is const property use the hole.
array->set_the_hole(j++);
// Do nothing in case of no declared global functions or variables.
if (globals > 0) {
Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
for (int j = 0, i = 0; i < length; i++) {
Declaration* decl = declarations->at(i);
Variable* var = decl->proxy()->var();
Slot* slot = var->slot();
if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
array->set(j++, *(var->name()));
if (decl->fun() == NULL) {
if (var->mode() == Variable::CONST) {
// In case this is const property use the hole.
array->set_the_hole(j++);
} else {
array->set_undefined(j++);
}
} else {
array->set_undefined(j++);
Handle<JSFunction> function =
Compiler::BuildBoilerplate(decl->fun(), script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
array->set(j++, *function);
}
} else {
Handle<JSFunction> function = BuildBoilerplate(node->fun());
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
array->set(j++, *function);
}
}
// Invoke the platform-dependent code generator to do the actual
// declaration of the global variables and functions.
DeclareGlobals(array);
}
// Invoke the platform-dependent code generator to do the actual
// declaration of the global variables and functions.
DeclareGlobals(array);
}
Handle<JSFunction> FastCodeGenerator::BuildBoilerplate(FunctionLiteral* fun) {
#ifdef DEBUG
// We should not try to compile the same function literal more than
// once.
fun->mark_as_compiled();
#endif
// Generate code
Handle<Code> code = CodeGenerator::ComputeLazyCompile(fun->num_parameters());
// Check for stack-overflow exception.
if (code.is_null()) {
SetStackOverflow();
return Handle<JSFunction>::null();
}
// Create a boilerplate function.
Handle<JSFunction> function =
Factory::NewFunctionBoilerplate(fun->name(),
fun->materialized_literal_count(),
code);
CodeGenerator::SetFunctionInfo(function, fun, false, script_);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger that a new function has been added.
Debugger::OnNewFunction(function);
#endif
// Set the expected number of properties for instances and return
// the resulting function.
SetExpectedNofPropertiesFromEstimate(function,
fun->expected_property_count());
return function;
}
@ -215,8 +156,77 @@ void FastCodeGenerator::SetSourcePosition(int pos) {
}
void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
#ifdef DEBUG
Expression::Context expected = Expression::kUninitialized;
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect: // Fall through.
case Expression::kTest:
// The value of the left subexpression is not needed.
expected = Expression::kTest;
break;
case Expression::kValue:
// The value of the left subexpression is needed and its specific
// context depends on the operator.
expected = (expr->op() == Token::OR)
? Expression::kValueTest
: Expression::kTestValue;
break;
case Expression::kValueTest:
// The value of the left subexpression is needed for OR.
expected = (expr->op() == Token::OR)
? Expression::kValueTest
: Expression::kTest;
break;
case Expression::kTestValue:
// The value of the left subexpression is needed for AND.
expected = (expr->op() == Token::OR)
? Expression::kTest
: Expression::kTestValue;
break;
}
ASSERT_EQ(expected, expr->left()->context());
ASSERT_EQ(expr->context(), expr->right()->context());
#endif
Label eval_right, done;
Label* saved_true = true_label_;
Label* saved_false = false_label_;
// Set up the appropriate context for the left subexpression based on the
// operation and our own context.
if (expr->op() == Token::OR) {
// If there is no usable true label in the OR expression's context, use
// the end of this expression, otherwise inherit the same true label.
if (expr->context() == Expression::kEffect ||
expr->context() == Expression::kValue) {
true_label_ = &done;
}
// The false label is the label of the second subexpression.
false_label_ = &eval_right;
} else {
ASSERT_EQ(Token::AND, expr->op());
// The true label is the label of the second subexpression.
true_label_ = &eval_right;
// If there is no usable false label in the AND expression's context,
// use the end of the expression, otherwise inherit the same false
// label.
if (expr->context() == Expression::kEffect ||
expr->context() == Expression::kValue) {
false_label_ = &done;
}
}
Visit(expr->left());
true_label_ = saved_true;
false_label_ = saved_false;
__ bind(&eval_right);
Visit(expr->right());
__ bind(&done);
}
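The context dispatch above is easiest to read as a small truth table: given the operator and the context of the whole expression, it computes the context in which the left operand must be compiled. A standalone sketch (not V8 code; all names are local to the example) that mirrors the switch and checks two representative cases:

#include <cassert>

// Local mirror of the Expression::Context values used above.
enum Context { kUninitialized, kEffect, kValue, kTest, kValueTest, kTestValue };
enum Op { OR, AND };

// Context in which the left subexpression of "left op right" is compiled.
Context LeftContext(Op op, Context outer) {
  switch (outer) {
    case kEffect:  // Fall through.
    case kTest:
      return kTest;  // The value of the left subexpression is not needed.
    case kValue:
      return op == OR ? kValueTest : kTestValue;
    case kValueTest:
      return op == OR ? kValueTest : kTest;
    case kTestValue:
      return op == OR ? kTest : kTestValue;
    default:
      assert(false);
      return kUninitialized;
  }
}

int main() {
  // For "a || b" used as a value, "a" is compiled in kValueTest: its value
  // is needed when it is true, and control falls through to "b" when false.
  assert(LeftContext(OR, kValue) == kValueTest);
  // For "a && b" evaluated only for effect, "a" is compiled as a pure test.
  assert(LeftContext(AND, kEffect) == kTest);
  return 0;
}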
@ -241,7 +251,29 @@ void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
UNREACHABLE();
Comment cmnt(masm_, "[ IfStatement");
// Expressions cannot recursively enter statements; there are no labels in
// the state.
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
Label then_part, else_part, done;
// Do not worry about optimizing for empty then or else bodies.
true_label_ = &then_part;
false_label_ = &else_part;
ASSERT(stmt->condition()->context() == Expression::kTest);
Visit(stmt->condition());
true_label_ = NULL;
false_label_ = NULL;
__ bind(&then_part);
Visit(stmt->then_statement());
__ jmp(&done);
__ bind(&else_part);
Visit(stmt->else_statement());
__ bind(&done);
}
@ -271,17 +303,91 @@ void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
UNREACHABLE();
Comment cmnt(masm_, "[ DoWhileStatement");
increment_loop_depth();
Label body, exit;
// Emit the test at the bottom of the loop.
__ bind(&body);
Visit(stmt->body());
// We are not in an expression context because we have been compiling
// statements. Set up a test expression context for the condition.
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
false_label_ = &exit;
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
false_label_ = NULL;
__ bind(&exit);
decrement_loop_depth();
}
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
UNREACHABLE();
Comment cmnt(masm_, "[ WhileStatement");
increment_loop_depth();
Label test, body, exit;
// Emit the test at the bottom of the loop.
__ jmp(&test);
__ bind(&body);
Visit(stmt->body());
__ bind(&test);
// We are not in an expression context because we have been compiling
// statements. Set up a test expression context for the condition.
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
false_label_ = &exit;
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
false_label_ = NULL;
__ bind(&exit);
decrement_loop_depth();
}
void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
UNREACHABLE();
Comment cmnt(masm_, "[ ForStatement");
Label test, body, exit;
if (stmt->init() != NULL) Visit(stmt->init());
increment_loop_depth();
// Emit the test at the bottom of the loop (even if empty).
__ jmp(&test);
__ bind(&body);
Visit(stmt->body());
if (stmt->next() != NULL) Visit(stmt->next());
__ bind(&test);
if (stmt->cond() == NULL) {
// For an empty test jump to the top of the loop.
__ jmp(&body);
} else {
// We are not in an expression context because we have been compiling
// statements. Set up a test expression context for the condition.
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
false_label_ = &exit;
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
false_label_ = NULL;
}
__ bind(&exit);
decrement_loop_depth();
}
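All three loop visitors emit the body first and the condition once, at the bottom, so each iteration costs a single conditional branch rather than a test at the top plus an unconditional jump at the bottom. A plain C++ analogue (illustrative only) of the jump structure emitted for a three-iteration for loop:

#include <cstdio>

int main() {
  int i = 0;               // init
  goto test;               // enter the loop through the bottom test
body:
  std::printf("%d\n", i);  // loop body
  i++;                     // next clause
test:
  if (i < 3) goto body;    // cond: true -> body, false falls through to exit
  return 0;                // exit
}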
@ -301,7 +407,12 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
UNREACHABLE();
#ifdef ENABLE_DEBUGGER_SUPPORT
Comment cmnt(masm_, "[ DebuggerStatement");
SetStatementPosition(stmt);
__ CallRuntime(Runtime::kDebugBreak, 0);
// Ignore the return value.
#endif
}
@ -312,7 +423,37 @@ void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
void FastCodeGenerator::VisitConditional(Conditional* expr) {
UNREACHABLE();
Comment cmnt(masm_, "[ Conditional");
ASSERT_EQ(Expression::kTest, expr->condition()->context());
ASSERT_EQ(expr->context(), expr->then_expression()->context());
ASSERT_EQ(expr->context(), expr->else_expression()->context());
Label true_case, false_case, done;
Label* saved_true = true_label_;
Label* saved_false = false_label_;
true_label_ = &true_case;
false_label_ = &false_case;
Visit(expr->condition());
true_label_ = saved_true;
false_label_ = saved_false;
__ bind(&true_case);
Visit(expr->then_expression());
// If control flow falls through Visit, jump to done.
if (expr->context() == Expression::kEffect ||
expr->context() == Expression::kValue) {
__ jmp(&done);
}
__ bind(&false_case);
Visit(expr->else_expression());
// If control flow falls through Visit, merge it with true case here.
if (expr->context() == Expression::kEffect ||
expr->context() == Expression::kValue) {
__ bind(&done);
}
}
@ -323,7 +464,48 @@ void FastCodeGenerator::VisitSlot(Slot* expr) {
void FastCodeGenerator::VisitLiteral(Literal* expr) {
Move(expr->location(), expr);
Comment cmnt(masm_, "[ Literal");
Move(expr->context(), expr);
}
void FastCodeGenerator::VisitAssignment(Assignment* expr) {
Comment cmnt(masm_, "[ Assignment");
ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
// Record source code position of the (possible) IC call.
SetSourcePosition(expr->position());
Expression* rhs = expr->value();
// Left-hand side can only be a property, a global or a (parameter or
// local) slot.
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
if (var != NULL) {
Visit(rhs);
ASSERT_EQ(Expression::kValue, rhs->context());
EmitVariableAssignment(expr);
} else if (prop != NULL) {
// Assignment to a property.
Visit(prop->obj());
ASSERT_EQ(Expression::kValue, prop->obj()->context());
// Use the expression context of the key subexpression to detect whether
// we have decided to use a named or keyed IC.
if (prop->key()->context() == Expression::kUninitialized) {
ASSERT(prop->key()->AsLiteral() != NULL);
Visit(rhs);
ASSERT_EQ(Expression::kValue, rhs->context());
EmitNamedPropertyAssignment(expr);
} else {
Visit(prop->key());
ASSERT_EQ(Expression::kValue, prop->key()->context());
Visit(rhs);
ASSERT_EQ(Expression::kValue, rhs->context());
EmitKeyedPropertyAssignment(expr);
}
} else {
UNREACHABLE();
}
}
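In JavaScript terms, the three cases correspond to x = v (a variable target), o.name = v (a named property: the key is a literal left in the uninitialized context), and o[k] = v (a keyed property: the key is itself compiled as a value). The context of the key is thus enough to choose between the named and keyed store paths without a separate flag on the AST node.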
@ -337,24 +519,12 @@ void FastCodeGenerator::VisitThrow(Throw* expr) {
}
void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
UNREACHABLE();
}
#undef __
} } // namespace v8::internal

57  deps/v8/src/fast-codegen.h

@ -39,7 +39,13 @@ namespace internal {
class FastCodeGenerator: public AstVisitor {
public:
FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
: masm_(masm), function_(NULL), script_(script), is_eval_(is_eval) {
: masm_(masm),
function_(NULL),
script_(script),
is_eval_(is_eval),
loop_depth_(0),
true_label_(NULL),
false_label_(NULL) {
}
static Handle<Code> MakeCode(FunctionLiteral* fun,
@ -51,26 +57,54 @@ class FastCodeGenerator: public AstVisitor {
private:
int SlotOffset(Slot* slot);
void Move(Location destination, Register source);
void Move(Location destination, Slot* source);
void Move(Location destination, Literal* source);
void Move(Register destination, Location source);
void Move(Slot* destination, Location source);
void Move(Expression::Context destination, Register source);
void Move(Expression::Context destination, Slot* source);
void Move(Expression::Context destination, Literal* source);
// Drop the TOS, and store source to destination.
// If destination is TOS, just overwrite TOS with source.
void DropAndMove(Location destination, Register source);
void DropAndMove(Expression::Context destination, Register source);
// Test the JavaScript value in source as if in a test context, compiling
// control flow to a pair of labels.
void TestAndBranch(Register source, Label* true_label, Label* false_label);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* fun);
void DeclareGlobals(Handle<FixedArray> pairs);
// Platform-specific return sequence
void EmitReturnSequence(int position);
// Platform-specific code sequences for calls
void EmitCallWithStub(Call* expr);
void EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info);
// Platform-specific support for compiling assignments.
// Complete a variable assignment. The right-hand-side value is expected
// on top of the stack.
void EmitVariableAssignment(Assignment* expr);
// Complete a named property assignment. The receiver and right-hand-side
// value are expected on top of the stack.
void EmitNamedPropertyAssignment(Assignment* expr);
// Complete a keyed property assignment. The receiver, key, and
// right-hand-side value are expected on top of the stack.
void EmitKeyedPropertyAssignment(Assignment* expr);
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
void SetSourcePosition(int pos);
int loop_depth() { return loop_depth_; }
void increment_loop_depth() { loop_depth_++; }
void decrement_loop_depth() {
ASSERT(loop_depth_ > 0);
loop_depth_--;
}
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
@ -83,6 +117,11 @@ class FastCodeGenerator: public AstVisitor {
FunctionLiteral* function_;
Handle<Script> script_;
bool is_eval_;
Label return_label_;
int loop_depth_;
Label* true_label_;
Label* false_label_;
DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
};

3  deps/v8/src/flag-definitions.h

@ -114,6 +114,8 @@ DEFINE_bool(enable_rdtsc, true,
"enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available (ARM only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@ -196,6 +198,7 @@ DEFINE_bool(canonicalize_object_literal_maps, true,
// mksnapshot.cc
DEFINE_bool(h, false, "print this message")
DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")

4  deps/v8/src/flags.cc

@ -303,8 +303,8 @@ static void SplitArgument(const char* arg,
// get the value if any
if (*arg == '=') {
// make a copy so we can NUL-terminate flag name
int n = arg - *name;
CHECK(n < buffer_size); // buffer is too small
size_t n = arg - *name;
CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
memcpy(buffer, *name, n);
buffer[n] = '\0';
*name = buffer;
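A note on the type change: on 64-bit targets the pointer difference arg - *name is a 64-bit ptrdiff_t, so storing it in an int is a narrowing conversion. Keeping it in a size_t and casting buffer_size on the other side puts both operands of the CHECK in the same unsigned type; the same pattern recurs in the frames.cc and handles.cc casts below.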

22  deps/v8/src/frames.cc

@ -393,8 +393,19 @@ Code* EntryConstructFrame::code() const {
}
Object*& ExitFrame::code_slot() const {
const int offset = ExitFrameConstants::kCodeOffset;
return Memory::Object_at(fp() + offset);
}
Code* ExitFrame::code() const {
return Heap::c_entry_code();
Object* code = code_slot();
if (code->IsSmi()) {
return Heap::c_entry_debug_break_code();
} else {
return Code::cast(code);
}
}
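The upshot is that a debug-break exit frame is no longer a distinct frame type: the frame's own code slot records which code object was used, and a Smi stored there acts as the sentinel for the debug-break case. The removal of ExitDebugFrame and the new ExitFrame::Mode enum in frames.h below are the other half of this change.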
@ -412,11 +423,6 @@ Address ExitFrame::GetCallerStackPointer() const {
}
Code* ExitDebugFrame::code() const {
return Heap::c_entry_debug_break_code();
}
Address StandardFrame::GetExpressionAddress(int n) const {
const int offset = StandardFrameConstants::kExpressionsOffset;
return fp() + offset - n * kPointerSize;
@ -430,7 +436,7 @@ int StandardFrame::ComputeExpressionsCount() const {
Address limit = sp();
ASSERT(base >= limit); // stack grows downwards
// Include register-allocated locals in number of expressions.
return (base - limit) / kPointerSize;
return static_cast<int>((base - limit) / kPointerSize);
}
@ -460,7 +466,7 @@ Object* JavaScriptFrame::GetParameter(int index) const {
int JavaScriptFrame::ComputeParametersCount() const {
Address base = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
return (base - limit) / kPointerSize;
return static_cast<int>((base - limit) / kPointerSize);
}

25  deps/v8/src/frames.h

@ -93,7 +93,6 @@ class StackHandler BASE_EMBEDDED {
V(ENTRY, EntryFrame) \
V(ENTRY_CONSTRUCT, EntryConstructFrame) \
V(EXIT, ExitFrame) \
V(EXIT_DEBUG, ExitDebugFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
@ -119,7 +118,6 @@ class StackFrame BASE_EMBEDDED {
bool is_entry() const { return type() == ENTRY; }
bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
bool is_exit() const { return type() == EXIT; }
bool is_exit_debug() const { return type() == EXIT_DEBUG; }
bool is_java_script() const { return type() == JAVA_SCRIPT; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_internal() const { return type() == INTERNAL; }
@ -260,10 +258,13 @@ class EntryConstructFrame: public EntryFrame {
// Exit frames are used to exit JavaScript execution and go to C.
class ExitFrame: public StackFrame {
public:
enum Mode { MODE_NORMAL, MODE_DEBUG };
virtual Type type() const { return EXIT; }
virtual Code* code() const;
Object*& code_slot() const;
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
@ -289,26 +290,6 @@ class ExitFrame: public StackFrame {
};
class ExitDebugFrame: public ExitFrame {
public:
virtual Type type() const { return EXIT_DEBUG; }
virtual Code* code() const;
static ExitDebugFrame* cast(StackFrame* frame) {
ASSERT(frame->is_exit_debug());
return static_cast<ExitDebugFrame*>(frame);
}
protected:
explicit ExitDebugFrame(StackFrameIterator* iterator)
: ExitFrame(iterator) { }
private:
friend class StackFrameIterator;
};
class StandardFrame: public StackFrame {
public:
// Testers.

18  deps/v8/src/global-handles.cc

@ -165,6 +165,9 @@ class GlobalHandles::Node : public Malloced {
// It's fine though to reuse nodes that were destroyed in weak callback
// as those cannot be deallocated until we are back from the callback.
set_first_free(NULL);
if (first_deallocated()) {
first_deallocated()->set_next(head());
}
// Leaving V8.
VMState state(EXTERNAL);
func(object, par);
@ -270,6 +273,7 @@ Handle<Object> GlobalHandles::Create(Object* value) {
// Next try deallocated list
result = first_deallocated();
set_first_deallocated(result->next_free());
ASSERT(result->next() == head());
set_head(result);
} else {
// Allocate a new node.
@ -390,8 +394,8 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
}
void GlobalHandles::IterateRoots(ObjectVisitor* v) {
// Traversal of global handles marked as NORMAL or NEAR_DEATH.
void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
// Traversal of global handles marked as NORMAL.
for (Node* current = head_; current != NULL; current = current->next()) {
if (current->state_ == Node::NORMAL) {
v->VisitPointer(&current->object_);
@ -399,6 +403,16 @@ void GlobalHandles::IterateRoots(ObjectVisitor* v) {
}
}
void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
for (Node* current = head_; current != NULL; current = current->next()) {
if (current->state_ != Node::DESTROYED) {
v->VisitPointer(&current->object_);
}
}
}
void GlobalHandles::TearDown() {
// Reset all the lists.
set_head(NULL);

8  deps/v8/src/global-handles.h

@ -48,7 +48,8 @@ namespace internal {
class ObjectGroup : public Malloced {
public:
ObjectGroup() : objects_(4) {}
explicit ObjectGroup(size_t capacity) : objects_(capacity) {}
explicit ObjectGroup(size_t capacity)
: objects_(static_cast<int>(capacity)) { }
List<Object**> objects_;
};
@ -95,8 +96,11 @@ class GlobalHandles : public AllStatic {
// Process pending weak handles.
static void PostGarbageCollectionProcessing();
// Iterates over all strong handles.
static void IterateStrongRoots(ObjectVisitor* v);
// Iterates over all handles.
static void IterateRoots(ObjectVisitor* v);
static void IterateAllRoots(ObjectVisitor* v);
// Iterates over all weak roots in heap.
static void IterateWeakRoots(ObjectVisitor* v);

18  deps/v8/src/globals.h

@ -103,6 +103,10 @@ typedef byte* Address;
#define V8PRIxPTR "lx"
#endif
#if defined(__APPLE__) && defined(__MACH__)
#define USING_MAC_ABI
#endif
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
@ -248,7 +252,6 @@ class Variable;
class VariableProxy;
class RelocInfo;
class Deserializer;
class GenericDeserializer; // TODO(erikcorry): Get rid of this.
class MessageLocation;
class ObjectGroup;
class TickSample;
@ -291,6 +294,8 @@ enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG };
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
@ -570,6 +575,17 @@ inline Dest bit_cast(const Source& source) {
}
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
enum CpuFeature { SSE3 = 32, // x86
SSE2 = 26, // x86
CMOV = 15, // x86
RDTSC = 4, // x86
CPUID = 10, // x86
VFP3 = 1, // ARM
SAHF = 0}; // x86
} } // namespace v8::internal
#endif // V8_GLOBALS_H_
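The numbering of CpuFeature follows the CPUID feature bits where one exists: SSE2 (26), CMOV (15), and RDTSC (4) are their EDX bit positions in CPUID leaf 1, while SSE3, which is reported in ECX, is folded in above bit 31. CPUID itself and the ARM-only VFP3 have no natural position, so they borrow otherwise reserved bits. Hoisting the enum into globals.h lets platform-independent code name features without pulling in an architecture-specific assembler header.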

61  deps/v8/src/handles.cc

@ -37,6 +37,7 @@
#include "global-handles.h"
#include "natives.h"
#include "runtime.h"
#include "stub-cache.h"
namespace v8 {
namespace internal {
@ -49,8 +50,8 @@ v8::ImplementationUtilities::HandleScopeData HandleScope::current_ =
int HandleScope::NumberOfHandles() {
int n = HandleScopeImplementer::instance()->blocks()->length();
if (n == 0) return 0;
return ((n - 1) * kHandleBlockSize) +
(current_.next - HandleScopeImplementer::instance()->blocks()->last());
return ((n - 1) * kHandleBlockSize) + static_cast<int>(
(current_.next - HandleScopeImplementer::instance()->blocks()->last()));
}
@ -105,6 +106,21 @@ void HandleScope::ZapRange(Object** start, Object** end) {
}
Address HandleScope::current_extensions_address() {
return reinterpret_cast<Address>(&current_.extensions);
}
Address HandleScope::current_next_address() {
return reinterpret_cast<Address>(&current_.next);
}
Address HandleScope::current_limit_address() {
return reinterpret_cast<Address>(&current_.limit);
}
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
Handle<JSArray> array) {
CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray);
@ -285,7 +301,9 @@ Handle<Object> GetPrototype(Handle<Object> obj) {
Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
bool create_if_needed) {
Handle<String> key = Factory::hidden_symbol();
Object* holder = obj->BypassGlobalProxy();
if (holder->IsUndefined()) return Factory::undefined_value();
obj = Handle<JSObject>(JSObject::cast(holder));
if (obj->HasFastProperties()) {
// If the object has fast properties, check whether the first slot
@ -294,7 +312,7 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = obj->map()->instance_descriptors();
if ((descriptors->number_of_descriptors() > 0) &&
(descriptors->GetKey(0) == *key) &&
(descriptors->GetKey(0) == Heap::hidden_symbol()) &&
descriptors->IsProperty(0)) {
ASSERT(descriptors->GetType(0) == FIELD);
return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
@ -304,17 +322,17 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
// Only attempt to find the hidden properties in the local object and not
// in the prototype chain. Note that HasLocalProperty() can cause a GC in
// the general case in the presence of interceptors.
if (!obj->HasLocalProperty(*key)) {
if (!obj->HasHiddenPropertiesObject()) {
// Hidden properties object not found. Allocate a new hidden properties
// object if requested. Otherwise return the undefined value.
if (create_if_needed) {
Handle<Object> hidden_obj = Factory::NewJSObject(Top::object_function());
return SetProperty(obj, key, hidden_obj, DONT_ENUM);
CALL_HEAP_FUNCTION(obj->SetHiddenPropertiesObject(*hidden_obj), Object);
} else {
return Factory::undefined_value();
}
}
return GetProperty(obj, key);
return Handle<Object>(obj->GetHiddenPropertiesObject());
}
@ -338,7 +356,7 @@ Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
Handle<String> SubString(Handle<String> str, int start, int end) {
CALL_HEAP_FUNCTION(str->Slice(start, end), String);
CALL_HEAP_FUNCTION(str->SubString(start, end), String);
}
@ -411,12 +429,12 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
// Init line_ends array with code positions of line ends inside script
// source.
void InitScriptLineEnds(Handle<Script> script) {
if (!script->line_ends()->IsUndefined()) return;
if (!script->line_ends_fixed_array()->IsUndefined()) return;
if (!script->source()->IsString()) {
ASSERT(script->source()->IsUndefined());
script->set_line_ends(*(Factory::NewJSArray(0)));
ASSERT(script->line_ends()->IsJSArray());
script->set_line_ends_fixed_array(*(Factory::NewFixedArray(0)));
ASSERT(script->line_ends_fixed_array()->IsFixedArray());
return;
}
@ -449,9 +467,8 @@ void InitScriptLineEnds(Handle<Script> script) {
}
ASSERT(array_index == line_count);
Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
script->set_line_ends(*object);
ASSERT(script->line_ends()->IsJSArray());
script->set_line_ends_fixed_array(*array);
ASSERT(script->line_ends_fixed_array()->IsFixedArray());
}
@ -459,17 +476,18 @@ void InitScriptLineEnds(Handle<Script> script) {
int GetScriptLineNumber(Handle<Script> script, int code_pos) {
InitScriptLineEnds(script);
AssertNoAllocation no_allocation;
JSArray* line_ends_array = JSArray::cast(script->line_ends());
const int line_ends_len = (Smi::cast(line_ends_array->length()))->value();
FixedArray* line_ends_array =
FixedArray::cast(script->line_ends_fixed_array());
const int line_ends_len = line_ends_array->length();
int line = -1;
if (line_ends_len > 0 &&
code_pos <= (Smi::cast(line_ends_array->GetElement(0)))->value()) {
code_pos <= (Smi::cast(line_ends_array->get(0)))->value()) {
line = 0;
} else {
for (int i = 1; i < line_ends_len; ++i) {
if ((Smi::cast(line_ends_array->GetElement(i - 1)))->value() < code_pos &&
code_pos <= (Smi::cast(line_ends_array->GetElement(i)))->value()) {
if ((Smi::cast(line_ends_array->get(i - 1)))->value() < code_pos &&
code_pos <= (Smi::cast(line_ends_array->get(i)))->value()) {
line = i;
break;
}
@ -672,6 +690,11 @@ OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
}
Handle<Code> ComputeLazyCompile(int argc) {
CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
}
OptimizedObjectForAddingMultipleProperties::
~OptimizedObjectForAddingMultipleProperties() {
// Reoptimize the object to allow fast property access.

17  deps/v8/src/handles.h

@ -133,6 +133,13 @@ class HandleScope {
return result;
}
// Deallocates any extensions used by the current scope.
static void DeleteExtensions();
static Address current_extensions_address();
static Address current_next_address();
static Address current_limit_address();
private:
// Prevent heap allocation or illegal handle scopes.
HandleScope(const HandleScope&);
@ -166,9 +173,6 @@ class HandleScope {
// Extend the handle scope making room for more handles.
static internal::Object** Extend();
// Deallocates any extensions used by the current scope.
static void DeleteExtensions();
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
@ -304,8 +308,8 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
Handle<Object> prototype);
// Do lazy compilation of the given function. Returns true on success
// and false if the compilation resulted in a stack overflow.
// Does lazy compilation of the given function. Returns true on success and
// false if the compilation resulted in a stack overflow.
enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
@ -315,6 +319,9 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
// Returns the lazy compilation stub for argc arguments.
Handle<Code> ComputeLazyCompile(int argc);
// These deal with lazily loaded properties.
void SetupLazy(Handle<JSObject> obj,
int index,

2  deps/v8/src/heap-profiler.cc

@ -536,7 +536,7 @@ RetainerHeapProfile::RetainerHeapProfile()
: zscope_(DELETE_ON_EXIT) {
JSObjectsCluster roots(JSObjectsCluster::ROOTS);
ReferencesExtractor extractor(roots, this);
Heap::IterateRoots(&extractor);
Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
}

158  deps/v8/src/heap.cc

@ -733,10 +733,7 @@ void Heap::Scavenge() {
ScavengeVisitor scavenge_visitor;
// Copy roots.
IterateRoots(&scavenge_visitor);
// Copy objects reachable from weak pointers.
GlobalHandles::IterateWeakRoots(&scavenge_visitor);
IterateRoots(&scavenge_visitor, VISIT_ALL);
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
@ -1730,6 +1727,7 @@ Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate proxies in paged spaces.
STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
if (always_allocate()) space = OLD_DATA_SPACE;
Object* result = Allocate(proxy_map(), space);
if (result->IsFailure()) return result;
@ -1766,10 +1764,14 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
Object* Heap::AllocateConsString(String* first, String* second) {
int first_length = first->length();
if (first_length == 0) return second;
if (first_length == 0) {
return second;
}
int second_length = second->length();
if (second_length == 0) return first;
if (second_length == 0) {
return first;
}
int length = first_length + second_length;
bool is_ascii = first->IsAsciiRepresentation()
@ -1821,54 +1823,18 @@ Object* Heap::AllocateConsString(String* first, String* second) {
: long_cons_string_map();
}
Object* result = Allocate(map, NEW_SPACE);
Object* result = Allocate(map,
always_allocate() ? OLD_POINTER_SPACE : NEW_SPACE);
if (result->IsFailure()) return result;
ASSERT(InNewSpace(result));
ConsString* cons_string = ConsString::cast(result);
cons_string->set_first(first, SKIP_WRITE_BARRIER);
cons_string->set_second(second, SKIP_WRITE_BARRIER);
WriteBarrierMode mode = cons_string->GetWriteBarrierMode();
cons_string->set_first(first, mode);
cons_string->set_second(second, mode);
cons_string->set_length(length);
return result;
}
Object* Heap::AllocateSlicedString(String* buffer,
int start,
int end) {
int length = end - start;
// If the resulting string is small make a sub string.
if (length <= String::kMinNonFlatLength) {
return Heap::AllocateSubString(buffer, start, end);
}
Map* map;
if (length <= String::kMaxShortSize) {
map = buffer->IsAsciiRepresentation() ?
short_sliced_ascii_string_map() :
short_sliced_string_map();
} else if (length <= String::kMaxMediumSize) {
map = buffer->IsAsciiRepresentation() ?
medium_sliced_ascii_string_map() :
medium_sliced_string_map();
} else {
map = buffer->IsAsciiRepresentation() ?
long_sliced_ascii_string_map() :
long_sliced_string_map();
}
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
SlicedString* sliced_string = SlicedString::cast(result);
sliced_string->set_buffer(buffer);
sliced_string->set_start(start);
sliced_string->set_length(length);
return result;
}
Object* Heap::AllocateSubString(String* buffer,
int start,
int end) {
@ -1888,22 +1854,19 @@ Object* Heap::AllocateSubString(String* buffer,
? AllocateRawAsciiString(length)
: AllocateRawTwoByteString(length);
if (result->IsFailure()) return result;
String* string_result = String::cast(result);
// Copy the characters into the new object.
String* string_result = String::cast(result);
StringHasher hasher(length);
int i = 0;
for (; i < length && hasher.is_array_index(); i++) {
uc32 c = buffer->Get(start + i);
hasher.AddCharacter(c);
string_result->Set(i, c);
}
for (; i < length; i++) {
uc32 c = buffer->Get(start + i);
hasher.AddCharacterNoIndex(c);
string_result->Set(i, c);
if (buffer->IsAsciiRepresentation()) {
ASSERT(string_result->IsAsciiRepresentation());
char* dest = SeqAsciiString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
} else {
ASSERT(string_result->IsTwoByteRepresentation());
uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
}
string_result->set_length_field(hasher.GetHashField());
return result;
}
@ -1911,20 +1874,24 @@ Object* Heap::AllocateSubString(String* buffer,
Object* Heap::AllocateExternalStringFromAscii(
ExternalAsciiString::Resource* resource) {
Map* map;
int length = resource->length();
if (length <= String::kMaxShortSize) {
size_t length = resource->length();
if (length <= static_cast<size_t>(String::kMaxShortSize)) {
map = short_external_ascii_string_map();
} else if (length <= String::kMaxMediumSize) {
} else if (length <= static_cast<size_t>(String::kMaxMediumSize)) {
map = medium_external_ascii_string_map();
} else {
} else if (length <= static_cast<size_t>(String::kMaxLength)) {
map = long_external_ascii_string_map();
} else {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
Object* result = Allocate(map, NEW_SPACE);
Object* result = Allocate(map,
always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
if (result->IsFailure()) return result;
ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
external_string->set_length(length);
external_string->set_length(static_cast<int>(length));
external_string->set_resource(resource);
return result;
@ -1933,14 +1900,18 @@ Object* Heap::AllocateExternalStringFromAscii(
Object* Heap::AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource) {
int length = resource->length();
Map* map = ExternalTwoByteString::StringMap(length);
Object* result = Allocate(map, NEW_SPACE);
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
Map* map = ExternalTwoByteString::StringMap(static_cast<int>(length));
Object* result = Allocate(map,
always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
if (result->IsFailure()) return result;
ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
external_string->set_length(length);
external_string->set_length(static_cast<int>(length));
external_string->set_resource(resource);
return result;
@ -2321,6 +2292,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
if (always_allocate()) space = OLD_POINTER_SPACE;
Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj;
@ -2603,20 +2575,6 @@ Map* Heap::SymbolMapForString(String* string) {
return long_cons_ascii_symbol_map();
}
if (map == short_sliced_string_map()) return short_sliced_symbol_map();
if (map == medium_sliced_string_map()) return medium_sliced_symbol_map();
if (map == long_sliced_string_map()) return long_sliced_symbol_map();
if (map == short_sliced_ascii_string_map()) {
return short_sliced_ascii_symbol_map();
}
if (map == medium_sliced_ascii_string_map()) {
return medium_sliced_ascii_symbol_map();
}
if (map == long_sliced_ascii_string_map()) {
return long_sliced_ascii_symbol_map();
}
if (map == short_external_string_map()) {
return short_external_symbol_map();
}
@ -3117,7 +3075,7 @@ void Heap::Verify() {
ASSERT(HasBeenSetup());
VerifyPointersVisitor visitor;
IterateRoots(&visitor);
IterateRoots(&visitor, VISIT_ONLY_STRONG);
new_space_.Verify();
@ -3244,14 +3202,14 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
}
void Heap::IterateRoots(ObjectVisitor* v) {
IterateStrongRoots(v);
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
}
void Heap::IterateStrongRoots(ObjectVisitor* v) {
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize("strong_root_list");
@ -3284,7 +3242,11 @@ void Heap::IterateStrongRoots(ObjectVisitor* v) {
v->Synchronize("builtins");
// Iterate over global handles.
GlobalHandles::IterateRoots(v);
if (mode == VISIT_ONLY_STRONG) {
GlobalHandles::IterateStrongRoots(v);
} else {
GlobalHandles::IterateAllRoots(v);
}
v->Synchronize("globalhandles");
// Iterate over pointers being held by inactive threads.
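The VisitMode parameter replaces the old split between IterateRoots and an explicit GlobalHandles::IterateWeakRoots call: with VISIT_ALL, as the scavenger now uses, every global handle that is not destroyed counts as a root, while VISIT_ONLY_STRONG, used by Verify and the path tracers, visits only handles in the NORMAL state.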
@ -3455,14 +3417,18 @@ bool Heap::Setup(bool create_heap_objects) {
}
void Heap::SetStackLimit(intptr_t limit) {
void Heap::SetStackLimits() {
// On 64 bit machines, pointers are generally out of range of Smis. We write
// something that looks like an out of range Smi to the GC.
// Set up the special root array entry containing the stack guard.
// This is actually an address, but the tag makes the GC ignore it.
// Set up the special root array entries containing the stack limits.
// These are actually addresses, but the tag makes the GC ignore them.
roots_[kStackLimitRootIndex] =
reinterpret_cast<Object*>((limit & ~kSmiTagMask) | kSmiTag);
reinterpret_cast<Object*>(
(StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
roots_[kRealStackLimitRootIndex] =
reinterpret_cast<Object*>(
(StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
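The tagging trick deserves spelling out. A minimal standalone sketch, assuming the 32-bit Smi scheme where kSmiTag is 0 and kSmiTagMask is 1:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kSmiTagMask = 1;
  const intptr_t kSmiTag = 0;
  intptr_t limit = 0x7fff1235;  // hypothetical jslimit address
  // Clearing the low bit makes the address indistinguishable from a Smi, so
  // a GC scanning the roots array will not follow it as a heap pointer.
  intptr_t disguised = (limit & ~kSmiTagMask) | kSmiTag;
  assert((disguised & kSmiTagMask) == kSmiTag);
  return 0;
}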
@ -3889,7 +3855,7 @@ void Heap::TracePathToObject() {
search_for_any_global = false;
MarkRootVisitor root_visitor;
IterateRoots(&root_visitor);
IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
@ -3901,7 +3867,7 @@ void Heap::TracePathToGlobal() {
search_for_any_global = true;
MarkRootVisitor root_visitor;
IterateRoots(&root_visitor);
IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
#endif

35  deps/v8/src/heap.h

@ -77,12 +77,6 @@ namespace internal {
V(Map, short_cons_ascii_symbol_map, ShortConsAsciiSymbolMap) \
V(Map, medium_cons_ascii_symbol_map, MediumConsAsciiSymbolMap) \
V(Map, long_cons_ascii_symbol_map, LongConsAsciiSymbolMap) \
V(Map, short_sliced_symbol_map, ShortSlicedSymbolMap) \
V(Map, medium_sliced_symbol_map, MediumSlicedSymbolMap) \
V(Map, long_sliced_symbol_map, LongSlicedSymbolMap) \
V(Map, short_sliced_ascii_symbol_map, ShortSlicedAsciiSymbolMap) \
V(Map, medium_sliced_ascii_symbol_map, MediumSlicedAsciiSymbolMap) \
V(Map, long_sliced_ascii_symbol_map, LongSlicedAsciiSymbolMap) \
V(Map, short_external_symbol_map, ShortExternalSymbolMap) \
V(Map, medium_external_symbol_map, MediumExternalSymbolMap) \
V(Map, long_external_symbol_map, LongExternalSymbolMap) \
@ -95,12 +89,6 @@ namespace internal {
V(Map, short_cons_ascii_string_map, ShortConsAsciiStringMap) \
V(Map, medium_cons_ascii_string_map, MediumConsAsciiStringMap) \
V(Map, long_cons_ascii_string_map, LongConsAsciiStringMap) \
V(Map, short_sliced_string_map, ShortSlicedStringMap) \
V(Map, medium_sliced_string_map, MediumSlicedStringMap) \
V(Map, long_sliced_string_map, LongSlicedStringMap) \
V(Map, short_sliced_ascii_string_map, ShortSlicedAsciiStringMap) \
V(Map, medium_sliced_ascii_string_map, MediumSlicedAsciiStringMap) \
V(Map, long_sliced_ascii_string_map, LongSlicedAsciiStringMap) \
V(Map, short_external_string_map, ShortExternalStringMap) \
V(Map, medium_external_string_map, MediumExternalStringMap) \
V(Map, long_external_string_map, LongExternalStringMap) \
@ -148,6 +136,7 @@ namespace internal {
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
V(Object, last_script_id, LastScriptId) \
V(Smi, real_stack_limit, RealStackLimit) \
#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
#define STRONG_ROOT_LIST(V) \
@ -250,10 +239,10 @@ class Heap : public AllStatic {
// Destroys all memory allocated by the heap.
static void TearDown();
// Sets the stack limit in the roots_ array. Some architectures generate code
// that looks here, because it is faster than loading from the static jslimit_
// variable.
static void SetStackLimit(intptr_t limit);
// Set the stack limit in the roots_ array. Some architectures generate
// code that looks here, because it is faster than loading from the static
// jslimit_/real_jslimit_ variable in the StackGuard.
static void SetStackLimits();
// Returns whether Setup has been called.
static bool HasBeenSetup();
@ -586,16 +575,6 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateConsString(String* first, String* second);
// Allocates a new sliced string object which is a slice of an underlying
// string buffer stretching from the index start (inclusive) to the index
// end (exclusive).
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
static Object* AllocateSlicedString(String* buffer,
int start,
int end);
// Allocates a new sub string object which is a substring of an underlying
// string buffer stretching from the index start (inclusive) to the index
// end (exclusive).
@ -729,9 +708,9 @@ class Heap : public AllStatic {
static String* hidden_symbol() { return hidden_symbol_; }
// Iterates over all roots in the heap.
static void IterateRoots(ObjectVisitor* v);
static void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
static void IterateStrongRoots(ObjectVisitor* v);
static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);

2  deps/v8/src/ia32/assembler-ia32-inl.h

@ -89,7 +89,7 @@ Object* RelocInfo::target_object() {
}
Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(pc_);
}

80  deps/v8/src/ia32/assembler-ia32.cc

@ -49,6 +49,7 @@ namespace internal {
// Safe default is no features.
uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::enabled_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
// The Probe method needs executable memory, so it uses Heap::CreateCode.
@ -56,7 +57,10 @@ uint64_t CpuFeatures::enabled_ = 0;
void CpuFeatures::Probe() {
ASSERT(Heap::HasBeenSetup());
ASSERT(supported_ == 0);
if (Serializer::enabled()) return; // No features if we might serialize.
if (Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
return; // No features if we might serialize.
}
Assembler assm(NULL, 0);
Label cpuid, done;
@ -124,6 +128,10 @@ void CpuFeatures::Probe() {
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();
found_by_runtime_probing_ = supported_;
uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
supported_ |= os_guarantees;
found_by_runtime_probing_ &= ~os_guarantees;
}
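The bookkeeping around found_by_runtime_probing_ is subtle enough for a standalone model (illustrative bit values; the real positions come from the CpuFeature enum). Features the platform guarantees stay usable even under the serializer, because a snapshot may rely on them on every target machine, whereas features found only by probing must not leak into a portable snapshot:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kSSE2 = 1ull << 26;
  const uint64_t kSSE3 = 1ull << 32;
  uint64_t probed = kSSE2 | kSSE3;  // what the runtime CPUID probe reported
  uint64_t os_guarantees = kSSE2;   // e.g. a platform that mandates SSE2

  uint64_t supported = probed | os_guarantees;
  uint64_t found_by_runtime_probing = probed & ~os_guarantees;

  assert(supported == (kSSE2 | kSSE3));
  // Only SSE3 would trip the CpuFeatures::Scope assert when serializing.
  assert(found_by_runtime_probing == kSSE3);
  return 0;
}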
@ -360,7 +368,7 @@ void Assembler::Align(int m) {
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@ -712,7 +720,7 @@ void Assembler::movzx_w(Register dst, const Operand& src) {
void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@ -723,7 +731,7 @@ void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
UNIMPLEMENTED();
@ -734,7 +742,7 @@ void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV));
ASSERT(CpuFeatures::IsEnabled(CMOV));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Opcode: 0f 40 + cc /r
@ -1083,7 +1091,7 @@ void Assembler::sar(Register dst, uint8_t imm8) {
}
void Assembler::sar(Register dst) {
void Assembler::sar_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD3);
@ -1123,7 +1131,7 @@ void Assembler::shl(Register dst, uint8_t imm8) {
}
void Assembler::shl(Register dst) {
void Assembler::shl_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD3);
@ -1144,24 +1152,21 @@ void Assembler::shr(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
ASSERT(is_uint5(imm8)); // illegal shift count
EMIT(0xC1);
EMIT(0xE8 | dst.code());
EMIT(imm8);
}
void Assembler::shr(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD3);
EMIT(0xE8 | dst.code());
if (imm8 == 1) {
EMIT(0xD1);
EMIT(0xE8 | dst.code());
} else {
EMIT(0xC1);
EMIT(0xE8 | dst.code());
EMIT(imm8);
}
}
void Assembler::shr_cl(Register dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xD1);
EMIT(0xD3);
EMIT(0xE8 | dst.code());
}
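For reference, the IA-32 encodings in play: C1 /5 ib is SHR r/m32 by an immediate, D1 /5 is the one-byte-shorter SHR r/m32 by 1, and D3 /5 is SHR r/m32 by CL. The EMIT(0xE8 | dst.code()) byte is the ModR/M byte with reg field 5 and the destination register. Renaming the CL-count variants to sar_cl, shl_cl, and shr_cl makes the implicit CL operand visible at call sites, and the immediate form now selects the D1 encoding when imm8 == 1.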
@ -1316,7 +1321,7 @@ void Assembler::nop() {
void Assembler::rdtsc() {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::RDTSC));
ASSERT(CpuFeatures::IsEnabled(RDTSC));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x0F);
@ -1662,7 +1667,7 @@ void Assembler::fistp_s(const Operand& adr) {
void Assembler::fisttp_s(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDB);
@ -1923,7 +1928,7 @@ void Assembler::setcc(Condition cc, Register reg) {
void Assembler::cvttss2si(Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
@ -1934,7 +1939,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) {
void Assembler::cvttsd2si(Register dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -1945,7 +1950,7 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) {
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -1956,7 +1961,7 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -1967,7 +1972,7 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -1978,7 +1983,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -1989,7 +1994,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) {
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2);
@ -2000,7 +2005,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
@ -2025,7 +2030,7 @@ void Assembler::movdbl(const Operand& dst, XMMRegister src) {
void Assembler::movsd(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@ -2036,7 +2041,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) {
void Assembler::movsd(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2));
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF2); // double
@ -2245,10 +2250,15 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode reloc_info) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
ASSERT(rmode != RelocInfo::NONE);
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
!Serializer::enabled() &&
!FLAG_debug_code) {
return;
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
}
#endif
if (!Serializer::enabled() && !FLAG_debug_code) {
return;
}
}
RelocInfo rinfo(pc_, rmode, data);
reloc_info_writer.Write(&rinfo);

33  deps/v8/src/ia32/assembler-ia32.h

@ -37,6 +37,8 @@
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
#include "serialize.h"
namespace v8 {
namespace internal {
@ -358,15 +360,11 @@ class Displacement BASE_EMBEDDED {
// }
class CpuFeatures : public AllStatic {
public:
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(Feature f) {
static bool IsSupported(CpuFeature f) {
if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
@ -374,29 +372,32 @@ class CpuFeatures : public AllStatic {
return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
static bool IsEnabled(Feature f) {
static bool IsEnabled(CpuFeature f) {
return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(Feature f) {
explicit Scope(CpuFeature f) {
uint64_t mask = static_cast<uint64_t>(1) << f;
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0);
old_enabled_ = CpuFeatures::enabled_;
CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f);
CpuFeatures::enabled_ |= mask;
}
~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
uint64_t old_enabled_;
#else
public:
explicit Scope(Feature f) {}
explicit Scope(CpuFeature f) {}
#endif
};
private:
static uint64_t supported_;
static uint64_t enabled_;
static uint64_t found_by_runtime_probing_;
};
@ -440,12 +441,21 @@ class Assembler : public Malloced {
inline static void set_target_address_at(Address pc, Address target);
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void set_target_at(Address instruction_payload,
Address target) {
set_target_address_at(instruction_payload, target);
}
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches to runtime code.
inline static void set_external_target_at(Address instruction_payload,
Address target) {
set_target_address_at(instruction_payload, target);
}
static const int kCallTargetSize = kPointerSize;
static const int kExternalTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
// and the return address
@ -587,19 +597,18 @@ class Assembler : public Malloced {
void rcl(Register dst, uint8_t imm8);
void sar(Register dst, uint8_t imm8);
void sar(Register dst);
void sar_cl(Register dst);
void sbb(Register dst, const Operand& src);
void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8);
void shl(Register dst);
void shl_cl(Register dst);
void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8);
void shr(Register dst);
void shr_cl(Register dst);
void subb(const Operand& dst, int8_t imm8);

41  deps/v8/src/ia32/builtins-ia32.cc

@ -522,43 +522,26 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(Operand(ebp, 2 * kPointerSize)); // push arguments
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
// Check the stack for overflow or a break request.
// We need to catch preemptions right here, otherwise an unlucky preemption
// could show up as a failed apply.
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
Label retry_preemption;
Label no_preemption;
__ bind(&retry_preemption);
__ mov(edi, Operand::StaticVariable(stack_guard_limit));
__ cmp(esp, Operand(edi));
__ j(above, &no_preemption, taken);
// Preemption!
// Because builtins always remove the receiver from the stack, we
// have to fake one to avoid underflowing the stack.
__ push(eax);
__ push(Immediate(Smi::FromInt(0)));
// Do call to runtime routine.
__ CallRuntime(Runtime::kStackGuard, 1);
__ pop(eax);
__ jmp(&retry_preemption);
__ bind(&no_preemption);
// Check the stack for overflow. We are not trying to catch interruptions
// (e.g. debug break and preemption) here, so the "real stack limit" is
// checked.
Label okay;
// Make ecx the space we have left.
ExternalReference real_stack_limit =
ExternalReference::address_of_real_stack_limit();
__ mov(edi, Operand::StaticVariable(real_stack_limit));
// Make ecx the space we have left. The stack might already be overflowed
// here, which will cause ecx to become negative.
__ mov(ecx, Operand(esp));
__ sub(ecx, Operand(edi));
// Make edx the space we need for the array when it is unrolled onto the
// stack.
__ mov(edx, Operand(eax));
__ shl(edx, kPointerSizeLog2 - kSmiTagSize);
// Check if the arguments will overflow the stack.
__ cmp(ecx, Operand(edx));
__ j(greater, &okay, taken);
__ j(greater, &okay, taken); // Signed comparison.
// Too bad: Out of stack space.
// Out of stack space.
__ push(Operand(ebp, 4 * kPointerSize)); // push this
__ push(eax);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
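The arithmetic: ecx gets esp minus the real stack limit, i.e. the headroom (negative if the stack is already blown), while eax holds the argument count as a Smi, so shifting it left by kPointerSizeLog2 - kSmiTagSize turns it directly into the byte size of the arguments once unrolled onto the stack. A signed compare of the two then decides between proceeding and invoking APPLY_OVERFLOW.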
@ -898,7 +881,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// be preserved.
static void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
Label *call_generic_code) {
Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
// Push the constructor and argc. No need to tag argc as a smi, as there will

338  deps/v8/src/ia32/codegen-ia32.cc

@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
@ -75,7 +76,6 @@ void DeferredCode::RestoreRegisters() {
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
typeof_state_(NOT_INSIDE_TYPEOF),
destination_(NULL),
previous_(NULL) {
owner_->set_state(this);
@ -83,10 +83,8 @@ CodeGenState::CodeGenState(CodeGenerator* owner)
CodeGenState::CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
ControlDestination* destination)
: owner_(owner),
typeof_state_(typeof_state),
destination_(destination),
previous_(owner->state()) {
owner_->set_state(this);
@ -415,13 +413,12 @@ Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
// partially compiled) into control flow to the control destination.
// If force_control is true, control flow is forced.
void CodeGenerator::LoadCondition(Expression* x,
TypeofState typeof_state,
ControlDestination* dest,
bool force_control) {
ASSERT(!in_spilled_code());
int original_height = frame_->height();
{ CodeGenState new_state(this, typeof_state, dest);
{ CodeGenState new_state(this, dest);
Visit(x);
// If we hit a stack overflow, we may not have actually visited
@ -450,17 +447,16 @@ void CodeGenerator::LoadCondition(Expression* x,
}
void CodeGenerator::LoadAndSpill(Expression* expression,
TypeofState typeof_state) {
void CodeGenerator::LoadAndSpill(Expression* expression) {
ASSERT(in_spilled_code());
set_in_spilled_code(false);
Load(expression, typeof_state);
Load(expression);
frame_->SpillAll();
set_in_spilled_code(true);
}
void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
void CodeGenerator::Load(Expression* expr) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
@ -468,7 +464,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
JumpTarget true_target;
JumpTarget false_target;
ControlDestination dest(&true_target, &false_target, true);
LoadCondition(x, typeof_state, &dest, false);
LoadCondition(expr, &dest, false);
if (dest.false_was_fall_through()) {
// The false target was just bound.
@ -543,23 +539,25 @@ void CodeGenerator::LoadGlobalReceiver() {
}
// TODO(1241834): Get rid of this function in favor of just using Load, now
// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
// variables w/o reference errors elsewhere.
void CodeGenerator::LoadTypeofExpression(Expression* x) {
Variable* variable = x->AsVariableProxy()->AsVariable();
void CodeGenerator::LoadTypeofExpression(Expression* expr) {
// Special handling of identifiers as subexpressions of typeof.
Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// NOTE: This is somewhat nasty. We force the compiler to load
// the variable as if through '<global>.<variable>' to make sure we
// do not get reference errors.
// For a global variable we build the property reference
// <global>.<variable> and perform a (regular non-contextual) property
// load to make sure we do not get reference errors.
Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
Literal key(variable->name());
// TODO(1241834): Fetch the position from the variable instead of using
// no position.
Property property(&global, &key, RelocInfo::kNoPosition);
Load(&property);
Reference ref(this, &property);
ref.GetValue();
} else if (variable != NULL && variable->slot() != NULL) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
} else {
Load(x, INSIDE_TYPEOF);
// Anything else can be handled normally.
Load(expr);
}
}
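The special casing exists because typeof must never throw: typeof some_undeclared_name is specified to yield "undefined" rather than a ReferenceError. Loading a global through the property reference <global>.<variable> gives the forgiving property-miss behavior, and slot loads are flagged with INSIDE_TYPEOF for the same reason.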
@ -1190,12 +1188,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// Perform the operation.
switch (op) {
case Token::SAR:
__ sar(answer.reg());
__ sar_cl(answer.reg());
// No checks of result necessary
break;
case Token::SHR: {
Label result_ok;
__ shr(answer.reg());
__ shr_cl(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
// * 0x80000000: high bit would be lost when smi tagging.
@ -1216,7 +1214,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
}
case Token::SHL: {
Label result_ok;
__ shl(answer.reg());
__ shl_cl(answer.reg());
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
__ j(positive, &result_ok);
@ -1970,27 +1968,6 @@ void CodeGenerator::Comparison(Condition cc,
}
class CallFunctionStub: public CodeStub {
public:
CallFunctionStub(int argc, InLoopFlag in_loop)
: argc_(argc), in_loop_(in_loop) { }
void Generate(MacroAssembler* masm);
private:
int argc_;
InLoopFlag in_loop_;
#ifdef DEBUG
void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
#endif
Major MajorKey() { return CallFunction; }
int MinorKey() { return argc_; }
InLoopFlag InLoop() { return in_loop_; }
};
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
@ -2027,7 +2004,7 @@ void CodeGenerator::CallApplyLazy(Property* apply,
// Load the apply function onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Reference ref(this, apply);
ref.GetValue(NOT_INSIDE_TYPEOF);
ref.GetValue();
ASSERT(ref.type() == Reference::NAMED);
// Load the receiver and the existing arguments object onto the
@ -2204,9 +2181,9 @@ void DeferredStackCheck::Generate() {
void CodeGenerator::CheckStack() {
DeferredStackCheck* deferred = new DeferredStackCheck;
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ cmp(esp, Operand::StaticVariable(stack_guard_limit));
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
__ cmp(esp, Operand::StaticVariable(stack_limit));
deferred->Branch(below);
deferred->BindExit();
}
@ -2366,7 +2343,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
JumpTarget then;
JumpTarget else_;
ControlDestination dest(&then, &else_, true);
LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
@ -2393,7 +2370,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_else_stm);
JumpTarget then;
ControlDestination dest(&then, &exit, true);
LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@ -2413,7 +2390,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_then_stm);
JumpTarget else_;
ControlDestination dest(&exit, &else_, false);
LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
LoadCondition(node->condition(), &dest, true);
if (dest.true_was_fall_through()) {
// The exit label was bound. We may have dangling jumps to the
@ -2435,7 +2412,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
// or control flow effect). LoadCondition is called without
// forcing control flow.
ControlDestination dest(&exit, &exit, true);
LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false);
LoadCondition(node->condition(), &dest, false);
if (!dest.is_used()) {
// We got a value on the frame rather than (or in addition to)
// control flow.
@ -2472,6 +2449,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
CodeForStatementPosition(node);
Load(node->expression());
Result return_value = frame_->Pop();
masm()->WriteRecordedPositions();
if (function_return_is_shadowed_) {
function_return_.Jump(&return_value);
} else {
@ -2735,8 +2713,10 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
node->continue_target()->Bind();
}
if (has_valid_frame()) {
Comment cmnt(masm_, "[ DoWhileCondition");
CodeForDoWhileConditionPosition(node);
ControlDestination dest(&body, node->break_target(), false);
LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
LoadCondition(node->cond(), &dest, true);
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
@ -2791,7 +2771,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@ -2838,7 +2818,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
// The break target is the fall-through (body is a backward
// jump from here and thus an invalid fall-through).
ControlDestination dest(&body, node->break_target(), false);
LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
LoadCondition(node->cond(), &dest, true);
}
} else {
// If we have chosen not to recompile the test at the bottom,
@ -2929,7 +2909,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// Compile the test with the body as the true target and preferred
// fall-through and with the break target as the false target.
ControlDestination dest(&body, node->break_target(), true);
LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
LoadCondition(node->cond(), &dest, true);
if (dest.false_was_fall_through()) {
// If we got the break target as fall-through, the test may have
@ -2999,7 +2979,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// The break target is the fall-through (body is a backward
// jump from here).
ControlDestination dest(&body, node->break_target(), false);
LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
LoadCondition(node->cond(), &dest, true);
}
} else {
// Otherwise, jump back to the test at the top.
@ -3574,7 +3554,8 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
Handle<JSFunction> boilerplate = BuildBoilerplate(node);
Handle<JSFunction> boilerplate =
Compiler::BuildBoilerplate(node, script_, this);
// Check for stack-overflow exception.
if (HasStackOverflow()) return;
InstantiateBoilerplate(boilerplate);
@ -3594,25 +3575,25 @@ void CodeGenerator::VisitConditional(Conditional* node) {
JumpTarget else_;
JumpTarget exit;
ControlDestination dest(&then, &else_, true);
LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true);
LoadCondition(node->condition(), &dest, true);
if (dest.false_was_fall_through()) {
// The else target was bound, so we compile the else part first.
Load(node->else_expression(), typeof_state());
Load(node->else_expression());
if (then.is_linked()) {
exit.Jump();
then.Bind();
Load(node->then_expression(), typeof_state());
Load(node->then_expression());
}
} else {
// The then target was bound, so we compile the then part first.
Load(node->then_expression(), typeof_state());
Load(node->then_expression());
if (else_.is_linked()) {
exit.Jump();
else_.Bind();
Load(node->else_expression(), typeof_state());
Load(node->else_expression());
}
}
@ -3934,7 +3915,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
LoadFromSlotCheckForArguments(node, typeof_state());
LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
}
@ -3947,7 +3928,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
} else {
ASSERT(var->is_global());
Reference ref(this, node);
ref.GetValue(typeof_state());
ref.GetValue();
}
}
@ -3958,12 +3939,28 @@ void CodeGenerator::VisitLiteral(Literal* node) {
}
void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
ASSERT(value->IsSmi());
int bits = reinterpret_cast<int>(*value);
__ push(Immediate(bits & 0x0000FFFF));
__ or_(Operand(esp, 0), Immediate(bits & 0xFFFF0000));
}
void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
ASSERT(value->IsSmi());
int bits = reinterpret_cast<int>(*value);
__ mov(Operand(ebp, offset), Immediate(bits & 0x0000FFFF));
__ or_(Operand(ebp, offset), Immediate(bits & 0xFFFF0000));
}
void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
ASSERT(target.is_valid());
ASSERT(value->IsSmi());
int bits = reinterpret_cast<int>(*value);
__ Set(target, Immediate(bits & 0x0000FFFF));
__ xor_(target, bits & 0xFFFF0000);
__ or_(target, bits & 0xFFFF0000);
}
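The three helpers above share one idea, sketched here (a simplification, not V8 code): the smi's bit pattern is split into two 16-bit halves so that no single immediate in the generated code carries more than 16 bits of user-controlled data, which keeps attacker-chosen bytes from forming usable machine code inside constants.
#include <cstdint>
// Recompose a 32-bit value from the two halves the helpers emit: the
// first instruction materializes the low half, the second ORs in the
// high half (the or_ replacing xor_ above is equivalent because the
// two halves occupy disjoint bits).
inline uint32_t ComposeUnsafeSmi(uint32_t bits) {
  uint32_t low = bits & 0x0000FFFFu;   // immediate of the first op
  uint32_t high = bits & 0xFFFF0000u;  // immediate of the second op
  return low | high;                   // equals the original bits
}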
@ -4354,9 +4351,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
// the target, with an implicit promise that it will be written to again
// before it is read.
if (literal != NULL || (right_var != NULL && right_var != var)) {
target.TakeValue(NOT_INSIDE_TYPEOF);
target.TakeValue();
} else {
target.GetValue(NOT_INSIDE_TYPEOF);
target.GetValue();
}
Load(node->value());
GenericBinaryOperation(node->binary_op(),
@ -4404,7 +4401,7 @@ void CodeGenerator::VisitThrow(Throw* node) {
void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
Reference property(this, node);
property.GetValue(typeof_state());
property.GetValue();
}
@ -4589,7 +4586,7 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the function to call from the property through a reference.
Reference ref(this, property);
ref.GetValue(NOT_INSIDE_TYPEOF);
ref.GetValue();
// Pass receiver to called function.
if (property->is_synthetic()) {
@ -4699,10 +4696,10 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
// This generates code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It can handle flat and sliced strings, 8 and 16 bit characters and
// cons strings where the answer is found in the left hand branch of the
// cons. The slow case will flatten the string, which will ensure that
// the answer is in the left hand side the next time around.
// It can handle flat, 8 and 16 bit characters and cons strings where the
// answer is found in the left hand branch of the cons. The slow case will
// flatten the string, which will ensure that the answer is in the left hand
// side the next time around.
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Comment cmnt(masm_, "[ GenerateFastCharCodeAt");
ASSERT(args->length() == 2);
@ -4710,7 +4707,6 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Label slow_case;
Label end;
Label not_a_flat_string;
Label a_cons_string;
Label try_again_with_new_string;
Label ascii_string;
Label got_char_code;
@ -4792,7 +4788,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ add(Operand(ecx), Immediate(String::kLongLengthShift));
// Fetch the length field into the temporary register.
__ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
__ shr(temp.reg()); // The shift amount in ecx is implicit operand.
__ shr_cl(temp.reg());
// Check for index out of range.
__ cmp(index.reg(), Operand(temp.reg()));
__ j(greater_equal, &slow_case);
@ -4832,21 +4828,16 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ bind(&not_a_flat_string);
__ and_(temp.reg(), kStringRepresentationMask);
__ cmp(temp.reg(), kConsStringTag);
__ j(equal, &a_cons_string);
__ cmp(temp.reg(), kSlicedStringTag);
__ j(not_equal, &slow_case);
// SlicedString.
// Add the offset to the index and trigger the slow case on overflow.
__ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
__ j(overflow, &slow_case);
// Getting the underlying string is done by running the cons string code.
// ConsString.
__ bind(&a_cons_string);
// Get the first of the two strings. Both sliced and cons strings
// store their source string at the same offset.
ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
// Check that the right hand side is the empty string (i.e. if this is really a
// flat string in a cons string). If that is not the case we would rather go
// to the runtime system now, to flatten the string.
__ mov(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset));
__ cmp(Operand(temp.reg()), Immediate(Handle<String>(Heap::empty_string())));
__ j(not_equal, &slow_case);
// Get the first of the two strings.
__ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
__ jmp(&try_again_with_new_string);
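Sketch of the new cons-string fast path (hypothetical types, not V8's): a cons whose second child is the empty string is effectively flat, so the lookup can retry on the first child; any other cons falls to the runtime, which flattens the string so the next attempt succeeds.
#include <cstddef>
struct Str { Str* first; Str* second; };  // stand-in cons layout
// Returns the string to retry charCodeAt on, or NULL for the slow case.
Str* FastConsTarget(Str* cons, Str* empty_string) {
  return (cons->second == empty_string) ? cons->first : NULL;
}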
@ -5224,9 +5215,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
// Note that because of NOT and an optimization in comparison of a typeof
// expression to a literal string, this function can fail to leave a value
// on top of the frame or in the cc register.
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
@ -5235,7 +5223,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
// Swap the true and false targets but keep the same actual label
// as the fall through.
destination()->Invert();
LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true);
LoadCondition(node->expression(), destination(), true);
// Swap the labels back.
destination()->Invert();
@ -5485,7 +5473,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
if (!is_postfix) frame_->Push(Smi::FromInt(0));
return;
}
target.TakeValue(NOT_INSIDE_TYPEOF);
target.TakeValue();
Result new_value = frame_->Pop();
new_value.ToRegister();
@ -5563,9 +5551,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// Note that due to an optimization in comparison operations (typeof
// compared to a string literal), we can evaluate a binary expression such
// as AND or OR and not leave a value on the frame or in the cc register.
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@ -5581,7 +5566,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
if (op == Token::AND) {
JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true);
LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
LoadCondition(node->left(), &dest, false);
if (dest.false_was_fall_through()) {
// The current false target was used as the fall-through. If
@ -5600,7 +5585,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
is_true.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
LoadCondition(node->right(), destination(), false);
} else {
// We have actually just jumped to or bound the current false
// target but the current control destination is not marked as
@ -5611,7 +5596,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_true
// was just bound), so the right is free to do so as well.
LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@ -5644,7 +5629,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (op == Token::OR) {
JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false);
LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false);
LoadCondition(node->left(), &dest, false);
if (dest.true_was_fall_through()) {
// The current true target was used as the fall-through. If
@ -5663,7 +5648,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
is_false.Bind();
// The left subexpression compiled to control flow, so the
// right one is free to do so as well.
LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
LoadCondition(node->right(), destination(), false);
} else {
// We have just jumped to or bound the current true target but
// the current control destination is not marked as used.
@ -5673,7 +5658,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
} else if (dest.is_used()) {
// The left subexpression compiled to control flow (and is_false
// was just bound), so the right is free to do so as well.
LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false);
LoadCondition(node->right(), destination(), false);
} else {
// We have a materialized value on the frame, so we exit with
@ -5805,6 +5790,9 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->false_target()->Branch(zero);
frame_->Spill(answer.reg());
__ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
destination()->true_target()->Branch(equal);
// Regular expressions are callable so typeof == 'function'.
__ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
answer.Unuse();
destination()->Split(equal);
@ -5814,10 +5802,13 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ cmp(answer.reg(), Factory::null_value());
destination()->true_target()->Branch(equal);
// It can be an undetectable object.
Result map = allocator()->Allocate();
ASSERT(map.is_valid());
__ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
// Regular expressions are typeof == 'function', not 'object'.
__ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
destination()->false_target()->Branch(equal);
// It can be an undetectable object.
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset));
__ test(map.reg(), Immediate(1 << Map::kIsUndetectable));
destination()->false_target()->Branch(not_zero);
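Taken together, the two hunks above implement the classification sketched below (hypothetical names, a simplification of the generated code): regular expressions are callable, so they must answer 'function' and be excluded from 'object'.
enum SketchType { FUNCTION_TYPE, REGEXP_TYPE, OBJECT_TYPE };
const char* TypeofName(SketchType type, bool is_undetectable) {
  if (type == FUNCTION_TYPE || type == REGEXP_TYPE) return "function";
  if (is_undetectable) return "undefined";
  return "object";
}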
@ -6066,7 +6057,7 @@ Handle<String> Reference::GetName() {
}
void Reference::GetValue(TypeofState typeof_state) {
void Reference::GetValue() {
ASSERT(!cgen_->in_spilled_code());
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
@ -6083,17 +6074,11 @@ void Reference::GetValue(TypeofState typeof_state) {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
break;
}
case NAMED: {
// TODO(1241834): Make sure that it is safe to ignore the
// distinction between expressions in a typeof and not in a
// typeof. If there is a chance that reference errors can be
// thrown below, we must distinguish between the two kinds of
// loads (typeof expression loads must not throw a reference
// error).
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
@ -6163,8 +6148,6 @@ void Reference::GetValue(TypeofState typeof_state) {
}
case KEYED: {
// TODO(1241834): Make sure that it is safe to ignore the
// distinction between expressions in a typeof and not in a typeof.
Comment cmnt(masm, "[ Load from keyed Property");
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
@ -6283,13 +6266,13 @@ void Reference::GetValue(TypeofState typeof_state) {
}
void Reference::TakeValue(TypeofState typeof_state) {
void Reference::TakeValue() {
// For non-constant frame-allocated slots, we invalidate the value in the
// slot. For all others, we fall back on GetValue.
ASSERT(!cgen_->in_spilled_code());
ASSERT(!is_illegal());
if (type_ != SLOT) {
GetValue(typeof_state);
GetValue();
return;
}
@ -6299,7 +6282,7 @@ void Reference::TakeValue(TypeofState typeof_state) {
slot->type() == Slot::CONTEXT ||
slot->var()->mode() == Variable::CONST ||
slot->is_arguments()) {
GetValue(typeof_state);
GetValue();
return;
}
@ -6728,11 +6711,11 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform the operation.
switch (op_) {
case Token::SAR:
__ sar(eax);
__ sar_cl(eax);
// No checks of result necessary
break;
case Token::SHR:
__ shr(eax);
__ shr_cl(eax);
// Check that the *unsigned* result fits in a smi.
// Neither of the two high-order bits can be set:
// - 0x80000000: high bit would be lost when smi tagging.
@ -6743,7 +6726,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
__ j(not_zero, slow, not_taken);
break;
case Token::SHL:
__ shl(eax);
__ shl_cl(eax);
// Check that the *signed* result fits in a smi.
__ cmp(eax, 0xc0000000);
__ j(sign, slow, not_taken);
@ -6793,8 +6776,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// eax: y
// edx: x
if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
switch (op_) {
@ -6889,7 +6872,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
if (use_sse3_) {
// Truncate the operands to 32-bit integers and check for
// exceptions in doing so.
CpuFeatures::Scope scope(CpuFeatures::SSE3);
CpuFeatures::Scope scope(SSE3);
__ fisttp_s(Operand(esp, 0 * kPointerSize));
__ fisttp_s(Operand(esp, 1 * kPointerSize));
__ fnstsw_ax();
@ -6918,9 +6901,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
case Token::SAR: __ sar(eax); break;
case Token::SHL: __ shl(eax); break;
case Token::SHR: __ shr(eax); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
@ -7516,9 +7499,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call builtin if operands are not floating point or smi.
Label check_for_symbols;
Label unordered;
if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
CpuFeatures::Scope use_sse2(CpuFeatures::SSE2);
CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
CpuFeatures::Scope use_cmov(CMOV);
FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
__ comisd(xmm0, xmm1);
@ -7707,11 +7690,84 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
}
// If true, a Handle<T> passed by value is passed and returned by
// using the location_ field directly. If false, it is passed and
// returned as a pointer to a handle.
#ifdef USING_MAC_ABI
static const bool kPassHandlesDirectly = true;
#else
static const bool kPassHandlesDirectly = false;
#endif
void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
Label get_result;
Label prologue;
Label promote_scheduled_exception;
__ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
ASSERT_EQ(kArgc, 4);
if (kPassHandlesDirectly) {
// When handles are passed directly we don't have to allocate extra
// space for, and pass, an out parameter.
__ mov(Operand(esp, 0 * kPointerSize), ebx); // name.
__ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer.
} else {
// The function expects three arguments to be passed but we allocate
// four to get space for the output cell. The argument slots are filled
// as follows:
//
// 3: output cell
// 2: arguments pointer
// 1: name
// 0: pointer to the output cell
//
// Note that this is one more "argument" than the function expects
// so the out cell will have to be popped explicitly after returning
// from the function.
__ mov(Operand(esp, 1 * kPointerSize), ebx); // name.
__ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer.
__ mov(ebx, esp);
__ add(Operand(ebx), Immediate(3 * kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), ebx); // output
__ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell.
}
// Call the api function!
__ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
// Check if the function scheduled an exception.
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address();
__ cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(Factory::the_hole_value()));
__ j(not_equal, &promote_scheduled_exception, not_taken);
if (!kPassHandlesDirectly) {
// The returned value is a pointer to the handle holding the result.
// Dereference this to get to the location.
__ mov(eax, Operand(eax, 0));
}
// Check if the result handle holds 0
__ test(eax, Operand(eax));
__ j(not_zero, &get_result, taken);
// It was zero; the result is undefined.
__ mov(eax, Factory::undefined_value());
__ jmp(&prologue);
// It was non-zero. Dereference to get the result value.
__ bind(&get_result);
__ mov(eax, Operand(eax, 0));
__ bind(&prologue);
__ LeaveExitFrame(ExitFrame::MODE_NORMAL);
__ ret(0);
__ bind(&promote_scheduled_exception);
__ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
0,
1);
}
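A hypothetical C-level view of the four stack slots the stub fills when kPassHandlesDirectly is false (field names are mine, not V8's): the callee sees only the first three as arguments; the fourth is the storage addressed through slot 0 and is popped by the stub itself after the call.
struct ApiGetterFrame {
  void** out_cell_ptr;  // slot 0: pointer to the out cell (slot 3)
  void*  name;          // slot 1: property name
  void*  args;          // slot 2: arguments pointer
  void*  out_cell;      // slot 3: the out cell, one more "argument"
                        //         than the callee expects
};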
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
StackFrame::Type frame_type,
ExitFrame::Mode mode,
bool do_gc,
bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
@ -7761,7 +7817,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &failure_returned, not_taken);
// Exit the JavaScript to C++ exit frame.
__ LeaveExitFrame(frame_type);
__ LeaveExitFrame(mode);
__ ret(0);
// Handling of failure.
@ -7860,12 +7916,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// of a proper result. The builtin entry handles this by performing
// a garbage collection and retrying the builtin (twice).
StackFrame::Type frame_type = is_debug_break ?
StackFrame::EXIT_DEBUG :
StackFrame::EXIT;
ExitFrame::Mode mode = is_debug_break
? ExitFrame::MODE_DEBUG
: ExitFrame::MODE_NORMAL;
// Enter the exit frame that transitions from JavaScript to C++.
__ EnterExitFrame(frame_type);
__ EnterExitFrame(mode);
// eax: result parameter for PerformGC, if any (setup below)
// ebx: pointer to builtin function (C callee-saved)
@ -7883,7 +7939,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
frame_type,
mode,
false,
false);
@ -7892,7 +7948,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
frame_type,
mode,
true,
false);
@ -7903,7 +7959,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
&throw_normal_exception,
&throw_termination_exception,
&throw_out_of_memory_exception,
frame_type,
mode,
true,
true);

62
deps/v8/src/ia32/codegen-ia32.h

@ -77,12 +77,12 @@ class Reference BASE_EMBEDDED {
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
void GetValue(TypeofState typeof_state);
void GetValue();
// Like GetValue except that the slot is expected to be written to before
being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue(TypeofState typeof_state);
void TakeValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
@ -241,28 +241,20 @@ class CodeGenState BASE_EMBEDDED {
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state may or may not be inside a typeof, and has its
// own control destination.
CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
ControlDestination* destination);
// state. The new state has its own control destination.
CodeGenState(CodeGenerator* owner, ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state.
TypeofState typeof_state() const { return typeof_state_; }
ControlDestination* destination() const { return destination_; }
private:
// The owning code generator.
CodeGenerator* owner_;
// A flag indicating whether we are compiling the immediate subexpression
// of a typeof expression.
TypeofState typeof_state_;
// A control destination in case the expression has a control-flow
// effect.
ControlDestination* destination_;
@ -307,17 +299,12 @@ class CodeGenerator: public AstVisitor {
static bool ShouldGenerateLog(Expression* type);
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script);
static void RecordPositions(MacroAssembler* masm, int pos);
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
Handle<Script> script() { return script_; }
bool has_valid_frame() const { return frame_ != NULL; }
@ -352,7 +339,6 @@ class CodeGenerator: public AstVisitor {
void ProcessDeferred();
// State
TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
@ -412,18 +398,16 @@ class CodeGenerator: public AstVisitor {
}
void LoadCondition(Expression* x,
TypeofState typeof_state,
ControlDestination* destination,
bool force_control);
void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
void LoadAndSpill(Expression* expression,
TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void LoadAndSpill(Expression* expression);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
@ -484,9 +468,11 @@ class CodeGenerator: public AstVisitor {
// than 16 bits.
static const int kMaxSmiInlinedBits = 16;
bool IsUnsafeSmi(Handle<Object> value);
// Load an integer constant x into a register target using
// Load an integer constant x into a register target or into the stack using
// at most 16 bits of user-controlled data per assembly operation.
void LoadUnsafeSmi(Register target, Handle<Object> value);
void MoveUnsafeSmi(Register target, Handle<Object> value);
void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
void PushUnsafeSmi(Handle<Object> value);
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
@ -511,8 +497,6 @@ class CodeGenerator: public AstVisitor {
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
@ -574,6 +558,7 @@ class CodeGenerator: public AstVisitor {
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* stmt);
void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@ -626,6 +611,27 @@ class CodeGenerator: public AstVisitor {
};
class CallFunctionStub: public CodeStub {
public:
CallFunctionStub(int argc, InLoopFlag in_loop)
: argc_(argc), in_loop_(in_loop) { }
void Generate(MacroAssembler* masm);
private:
int argc_;
InLoopFlag in_loop_;
#ifdef DEBUG
void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
#endif
Major MajorKey() { return CallFunction; }
int MinorKey() { return argc_; }
InLoopFlag InLoop() { return in_loop_; }
};
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
@ -655,7 +661,7 @@ class GenericBinaryOpStub: public CodeStub {
flags_(flags),
args_in_registers_(false),
args_reversed_(false) {
use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}

40
deps/v8/src/ia32/disasm-ia32.cc

@ -272,6 +272,17 @@ class DisassemblerIA32 {
};
enum ShiftOpcodeExtension {
kROL = 0,
kROR = 1,
kRCL = 2,
kRCR = 3,
kSHL = 4,
kSHR = 5,
kSAR = 7
};
const char* NameOfCPURegister(int reg) const {
return converter_.NameOfCPURegister(reg);
}
@ -536,31 +547,22 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
int num_bytes = 2;
if (mod == 3) {
const char* mnem = NULL;
switch (regop) {
case kROL: mnem = "rol"; break;
case kROR: mnem = "ror"; break;
case kRCL: mnem = "rcl"; break;
case kSHL: mnem = "shl"; break;
case kSHR: mnem = "shr"; break;
case kSAR: mnem = "sar"; break;
default: UnimplementedInstruction();
}
if (op == 0xD1) {
imm8 = 1;
switch (regop) {
case edx: mnem = "rcl"; break;
case edi: mnem = "sar"; break;
case esp: mnem = "shl"; break;
default: UnimplementedInstruction();
}
} else if (op == 0xC1) {
imm8 = *(data+2);
num_bytes = 3;
switch (regop) {
case edx: mnem = "rcl"; break;
case esp: mnem = "shl"; break;
case ebp: mnem = "shr"; break;
case edi: mnem = "sar"; break;
default: UnimplementedInstruction();
}
} else if (op == 0xD3) {
switch (regop) {
case esp: mnem = "shl"; break;
case ebp: mnem = "shr"; break;
case edi: mnem = "sar"; break;
default: UnimplementedInstruction();
}
// Shift/rotate by cl.
}
ASSERT_NE(NULL, mnem);
AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
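The table-driven rewrite above replaces three per-opcode switches with a single lookup on the ModR/M reg field; a standalone sketch of the decoding (opcode 0xD1 shifts by 1, 0xC1 by imm8, 0xD3 by CL, with the reg field selecting the operation):
#include <cstddef>
// regop is (modrm >> 3) & 7, as in the enum above.
static const char* ShiftMnemonic(int regop) {
  switch (regop) {
    case 0: return "rol";
    case 1: return "ror";
    case 2: return "rcl";
    case 4: return "shl";
    case 5: return "shr";
    case 7: return "sar";
    default: return NULL;  // 3 (rcr) and 6 are not disassembled here
  }
}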

1251
deps/v8/src/ia32/fast-codegen-ia32.cc

File diff suppressed because it is too large

13
deps/v8/src/ia32/frames-ia32.cc

@ -56,19 +56,14 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
state->fp = fp;
state->sp = sp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
// Determine frame type.
if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
return EXIT_DEBUG;
} else {
return EXIT;
}
return EXIT;
}
void ExitFrame::Iterate(ObjectVisitor* v) const {
// Exit frames on IA-32 do not contain any pointers. The arguments
// are traversed as part of the expression stack of the calling
// frame.
v->VisitPointer(&code_slot());
// The arguments are traversed as part of the expression stack of
// the calling frame.
}

2
deps/v8/src/ia32/frames-ia32.h

@ -76,7 +76,7 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
static const int kDebugMarkOffset = -2 * kPointerSize;
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;

102
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -319,7 +319,7 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
void MacroAssembler::FCmp() {
if (CpuFeatures::IsSupported(CpuFeatures::CMOV)) {
if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
ffree(0);
fincstp();
@ -355,10 +355,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
leave();
}
void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
// Setup the frame structure on the stack.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@ -369,23 +366,24 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Reserve room for entry stack pointer and push the debug marker.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // saved entry sp, patched before call
push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
if (mode == ExitFrame::MODE_DEBUG) {
push(Immediate(0));
} else {
push(Immediate(CodeObject()));
}
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
ExternalReference context_address(Top::k_context_address);
mov(Operand::StaticVariable(c_entry_fp_address), ebp);
mov(Operand::StaticVariable(context_address), esi);
}
// Setup argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset));
void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
if (mode == ExitFrame::MODE_DEBUG) {
// TODO(1243899): This should be symmetric to
// CopyRegistersFromStackToMemory() but it isn't! esp is assumed
// correct here, but computed for the other call. Very error
@ -396,8 +394,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
#endif
// Reserve space for two arguments: argc and argv.
sub(Operand(esp), Immediate(2 * kPointerSize));
// Reserve space for arguments.
sub(Operand(esp), Immediate(argc * kPointerSize));
// Get the required frame alignment for the OS.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
@ -411,15 +409,39 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
EnterExitFramePrologue(mode);
// Setup argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset));
EnterExitFrameEpilogue(mode, 2);
}
void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
int stack_space,
int argc) {
EnterExitFramePrologue(mode);
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
EnterExitFrameEpilogue(mode, argc);
}
void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
if (mode == ExitFrame::MODE_DEBUG) {
// It's okay to clobber register ebx below because we don't need
// the function pointer after this.
const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
lea(ebx, Operand(ebp, kOffset));
CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
}
@ -931,6 +953,52 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
}
void MacroAssembler::PushHandleScope(Register scratch) {
// Push the number of extensions, smi-tagged so the gc will ignore it.
ExternalReference extensions_address =
ExternalReference::handle_scope_extensions_address();
mov(scratch, Operand::StaticVariable(extensions_address));
ASSERT_EQ(0, kSmiTag);
shl(scratch, kSmiTagSize);
push(scratch);
mov(Operand::StaticVariable(extensions_address), Immediate(0));
// Push next and limit pointers which will be wordsize aligned and
// hence automatically smi tagged.
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
push(Operand::StaticVariable(next_address));
ExternalReference limit_address =
ExternalReference::handle_scope_limit_address();
push(Operand::StaticVariable(limit_address));
}
void MacroAssembler::PopHandleScope(Register saved, Register scratch) {
ExternalReference extensions_address =
ExternalReference::handle_scope_extensions_address();
Label write_back;
mov(scratch, Operand::StaticVariable(extensions_address));
cmp(Operand(scratch), Immediate(0));
j(equal, &write_back);
// Calling a runtime function messes with registers so we save and
// restore any one we're asked not to change.
if (saved.is_valid()) push(saved);
CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
if (saved.is_valid()) pop(saved);
bind(&write_back);
ExternalReference limit_address =
ExternalReference::handle_scope_limit_address();
pop(Operand::StaticVariable(limit_address));
ExternalReference next_address =
ExternalReference::handle_scope_next_address();
pop(Operand::StaticVariable(next_address));
pop(scratch);
shr(scratch, kSmiTagSize);
mov(Operand::StaticVariable(extensions_address), scratch);
}
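A conceptual sketch of the save/restore pair above, assuming a record like V8's handle scope data (next, limit, extensions); the real code smi-tags the extensions count before pushing so the GC ignores it, and calls kDeleteHandleScopeExtensions when the count is non-zero:
struct HandleScopeData { void** next; void** limit; int extensions; };
struct SavedScope { void** next; void** limit; int extensions; };
SavedScope PushHandleScopeSketch(HandleScopeData* d) {
  SavedScope saved = { d->next, d->limit, d->extensions };
  d->extensions = 0;  // the new scope starts with no extensions
  return saved;       // the stub keeps these three words on the stack
}
void PopHandleScopeSketch(HandleScopeData* d, const SavedScope& saved) {
  if (d->extensions > 0) {
    // runtime call: free the extensions allocated in this scope
  }
  d->next = saved.next;
  d->limit = saved.limit;
  d->extensions = saved.extensions;
}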
void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));

19
deps/v8/src/ia32/macro-assembler-ia32.h

@ -77,16 +77,18 @@ class MacroAssembler: public Assembler {
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter specific kind of exit frame; either EXIT or
// EXIT_DEBUG. Expects the number of arguments in register eax and
// Enter specific kind of exit frame; either in normal or debug mode.
// Expects the number of arguments in register eax and
// sets up the number of arguments in register edi and the pointer
// to the first argument in register esi.
void EnterExitFrame(StackFrame::Type type);
void EnterExitFrame(ExitFrame::Mode mode);
void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
void LeaveExitFrame(StackFrame::Type type);
void LeaveExitFrame(ExitFrame::Mode mode);
// ---------------------------------------------------------------------------
@ -269,6 +271,12 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
void PushHandleScope(Register scratch);
// Pops a handle scope using the specified scratch register and
// ensuring that the saved register, if it is not no_reg, is left unchanged.
void PopHandleScope(Register saved, Register scratch);
// Jump to a runtime routine.
void JumpToRuntime(const ExternalReference& ext);
@ -346,6 +354,9 @@ class MacroAssembler: public Assembler {
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
void EnterExitFramePrologue(ExitFrame::Mode mode);
void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
// Allocation support helpers.
void LoadAllocationTopHelper(Register result,
Register result_end,

12
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -598,10 +598,10 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
Label stack_limit_hit;
Label stack_ok;
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
__ mov(ecx, esp);
__ sub(ecx, Operand::StaticVariable(stack_guard_limit));
__ sub(ecx, Operand::StaticVariable(stack_limit));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit, not_taken);
// Check if there is room for the variable number of registers above
@ -1081,9 +1081,9 @@ void RegExpMacroAssemblerIA32::Pop(Register target) {
void RegExpMacroAssemblerIA32::CheckPreemption() {
// Check for preemption.
Label no_preempt;
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ cmp(esp, Operand::StaticVariable(stack_guard_limit));
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above, &no_preempt, taken);
SafeCall(&check_preempt_label_);

4
deps/v8/src/ia32/register-allocator-ia32.cc

@ -42,7 +42,7 @@ void Result::ToRegister() {
Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
ASSERT(fresh.is_valid());
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
} else {
CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
Immediate(handle()));
@ -64,7 +64,7 @@ void Result::ToRegister(Register target) {
} else {
ASSERT(is_constant());
if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle());
} else {
CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
Immediate(handle()));

9
deps/v8/src/ia32/simulator-ia32.h

@ -43,6 +43,12 @@ class SimulatorStack : public v8::internal::AllStatic {
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return c_limit;
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
return try_catch_address;
}
static inline void UnregisterCTryCatch() { }
};
// Call the generated regexp code directly. The entry function pointer should
@ -50,4 +56,7 @@ class SimulatorStack : public v8::internal::AllStatic {
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
#endif // V8_IA32_SIMULATOR_IA32_H_

42
deps/v8/src/ia32/stub-cache-ia32.cc

@ -240,7 +240,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
// ecx is also the receiver.
__ lea(ecx, Operand(scratch, String::kLongLengthShift));
__ shr(eax); // ecx is implicit shift register.
__ shr_cl(eax);
__ shl(eax, kSmiTagSize);
__ ret(0);
@ -776,20 +776,40 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
Handle<AccessorInfo> callback_handle(callback);
Register other = reg.is(scratch1) ? scratch2 : scratch1;
__ EnterInternalFrame();
__ PushHandleScope(other);
// Push the stack address where the list of arguments ends
__ mov(other, esp);
__ sub(Operand(other), Immediate(2 * kPointerSize));
__ push(other);
__ push(receiver); // receiver
__ push(reg); // holder
__ mov(reg, Immediate(Handle<AccessorInfo>(callback))); // callback data
__ push(reg);
__ push(FieldOperand(reg, AccessorInfo::kDataOffset));
__ mov(other, Immediate(callback_handle));
__ push(other);
__ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data
__ push(name_reg); // name
__ push(scratch2); // restore return address
// Save a pointer to where we pushed the arguments pointer.
// This will be passed as the const Arguments& to the C++ callback.
__ mov(eax, esp);
__ add(Operand(eax), Immediate(5 * kPointerSize));
__ mov(ebx, esp);
// Do call through the api.
ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace);
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ApiGetterEntryStub stub(callback_handle, &fun);
__ CallStub(&stub);
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(load_callback_property, 5, 1);
// We need to avoid using eax since that now holds the result.
Register tmp = other.is(eax) ? reg : other;
__ PopHandleScope(eax, tmp);
__ LeaveInternalFrame();
__ ret(0);
}

18
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -75,10 +75,7 @@ void VirtualFrame::SyncElementBelowStackPointer(int index) {
case FrameElement::CONSTANT:
if (cgen()->IsUnsafeSmi(element.handle())) {
Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
__ mov(Operand(ebp, fp_relative(index)), temp.reg());
cgen()->StoreUnsafeSmiToLocal(fp_relative(index), element.handle());
} else {
__ Set(Operand(ebp, fp_relative(index)),
Immediate(element.handle()));
@ -127,10 +124,7 @@ void VirtualFrame::SyncElementByPushing(int index) {
case FrameElement::CONSTANT:
if (cgen()->IsUnsafeSmi(element.handle())) {
Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
cgen()->LoadUnsafeSmi(temp.reg(), element.handle());
__ push(temp.reg());
cgen()->PushUnsafeSmi(element.handle());
} else {
__ push(Immediate(element.handle()));
}
@ -161,7 +155,7 @@ void VirtualFrame::SyncRange(int begin, int end) {
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
// Emit normal 'push' instructions for elements above stack pointer
// Emit normal push instructions for elements above stack pointer
// and use mov instructions if we are below stack pointer.
for (int i = start; i <= end; i++) {
if (!elements_[i].is_synced()) {
@ -199,7 +193,7 @@ void VirtualFrame::MakeMergable() {
// Emit a move.
if (element.is_constant()) {
if (cgen()->IsUnsafeSmi(element.handle())) {
cgen()->LoadUnsafeSmi(fresh.reg(), element.handle());
cgen()->MoveUnsafeSmi(fresh.reg(), element.handle());
} else {
__ Set(fresh.reg(), Immediate(element.handle()));
}
@ -300,7 +294,7 @@ void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
if (!source.is_synced()) {
if (cgen()->IsUnsafeSmi(source.handle())) {
esi_caches = i;
cgen()->LoadUnsafeSmi(esi, source.handle());
cgen()->MoveUnsafeSmi(esi, source.handle());
__ mov(Operand(ebp, fp_relative(i)), esi);
} else {
__ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
@ -408,7 +402,7 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
case FrameElement::CONSTANT:
if (cgen()->IsUnsafeSmi(source.handle())) {
cgen()->LoadUnsafeSmi(target_reg, source.handle());
cgen()->MoveUnsafeSmi(target_reg, source.handle());
} else {
__ Set(target_reg, Immediate(source.handle()));
}

3
deps/v8/src/ic.cc

@ -126,7 +126,8 @@ Address IC::OriginalCodeAddress() {
// Return the address in the original code. This is the place where
// the call which has been overwritten by the DebugBreakXXX resides
// and the place where the inline cache system should look.
int delta = original_code->instruction_start() - code->instruction_start();
intptr_t delta =
original_code->instruction_start() - code->instruction_start();
return addr + delta;
}
#endif

23
deps/v8/src/interpreter-irregexp.cc

@ -117,17 +117,17 @@ static void TraceInterpreter(const byte* code_base,
}
#define BYTECODE(name) \
case BC_##name: \
TraceInterpreter(code_base, \
pc, \
backtrack_sp - backtrack_stack_base, \
current, \
current_char, \
BC_##name##_LENGTH, \
#define BYTECODE(name) \
case BC_##name: \
TraceInterpreter(code_base, \
pc, \
static_cast<int>(backtrack_sp - backtrack_stack_base), \
current, \
current_char, \
BC_##name##_LENGTH, \
#name);
#else
#define BYTECODE(name) \
#define BYTECODE(name) \
case BC_##name:
#endif
@ -250,13 +250,14 @@ static bool RawMatch(const byte* code_base,
pc += BC_SET_CP_TO_REGISTER_LENGTH;
break;
BYTECODE(SET_REGISTER_TO_SP)
registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack_base;
registers[insn >> BYTECODE_SHIFT] =
static_cast<int>(backtrack_sp - backtrack_stack_base);
pc += BC_SET_REGISTER_TO_SP_LENGTH;
break;
BYTECODE(SET_SP_TO_REGISTER)
backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT];
backtrack_stack_space = backtrack_stack.max_size() -
(backtrack_sp - backtrack_stack_base);
static_cast<int>(backtrack_sp - backtrack_stack_base);
pc += BC_SET_SP_TO_REGISTER_LENGTH;
break;
BYTECODE(POP_CP)
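These casts fix portability rather than behavior: a pointer difference has type ptrdiff_t, which is 64 bits on x64 targets, so storing it in the int-typed register array needs an explicit narrowing. A minimal sketch:
#include <cstddef>
// Assumes the backtrack stack height always fits in an int, as the
// interpreter does.
inline int StackHeight(const int* sp, const int* base) {
  std::ptrdiff_t diff = sp - base;  // 64-bit on LP64/LLP64 targets
  return static_cast<int>(diff);
}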

157
deps/v8/src/jsregexp.cc

@ -2432,16 +2432,19 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
}
void TextNode::MakeCaseIndependent() {
void TextNode::MakeCaseIndependent(bool is_ascii) {
int element_count = elms_->length();
for (int i = 0; i < element_count; i++) {
TextElement elm = elms_->at(i);
if (elm.type == TextElement::CHAR_CLASS) {
RegExpCharacterClass* cc = elm.data.u_char_class;
// None of the standard character classes is different in the case
// independent case and it slows us down if we don't know that.
if (cc->is_standard()) continue;
ZoneList<CharacterRange>* ranges = cc->ranges();
int range_count = ranges->length();
for (int i = 0; i < range_count; i++) {
ranges->at(i).AddCaseEquivalents(ranges);
for (int j = 0; j < range_count; j++) {
ranges->at(j).AddCaseEquivalents(ranges, is_ascii);
}
}
}
@ -3912,19 +3915,31 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base,
}
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
int bottom,
int top);
void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
bool is_ascii) {
uc16 bottom = from();
uc16 top = to();
if (is_ascii) {
if (bottom > String::kMaxAsciiCharCode) return;
if (top > String::kMaxAsciiCharCode) top = String::kMaxAsciiCharCode;
}
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
if (IsSingleton()) {
if (top == bottom) {
// If this is a singleton we just expand the one character.
int length = uncanonicalize.get(from(), '\0', chars);
int length = uncanonicalize.get(bottom, '\0', chars);
for (int i = 0; i < length; i++) {
uc32 chr = chars[i];
if (chr != from()) {
if (chr != bottom) {
ranges->Add(CharacterRange::Singleton(chars[i]));
}
}
} else if (from() <= kRangeCanonicalizeMax &&
to() <= kRangeCanonicalizeMax) {
} else if (bottom <= kRangeCanonicalizeMax &&
top <= kRangeCanonicalizeMax) {
// If this is a range we expand the characters block by block,
// expanding contiguous subranges (blocks) one at a time.
// The approach is as follows. For a given start character we
@ -3943,14 +3958,14 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
// completely contained in a block we do this for all the blocks
// covered by the range.
unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
// First, look up the block that contains the 'from' character.
int length = canonrange.get(from(), '\0', range);
// First, look up the block that contains the 'bottom' character.
int length = canonrange.get(bottom, '\0', range);
if (length == 0) {
range[0] = from();
range[0] = bottom;
} else {
ASSERT_EQ(1, length);
}
int pos = from();
int pos = bottom;
// The start of the current block. Note that except for the first
// iteration 'start' is always equal to 'pos'.
int start;
@ -3961,10 +3976,10 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
} else {
start = pos;
}
// Then we add the ranges on at a time, incrementing the current
// Then we add the ranges one at a time, incrementing the current
// position to be after the last block each time. The position
// always points to the start of a block.
while (pos < to()) {
while (pos < top) {
length = canonrange.get(start, '\0', range);
if (length == 0) {
range[0] = start;
@ -3975,20 +3990,122 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) {
// The start point of a block contains the distance to the end
// of the range.
int block_end = start + (range[0] & kPayloadMask) - 1;
int end = (block_end > to()) ? to() : block_end;
int end = (block_end > top) ? top : block_end;
length = uncanonicalize.get(start, '\0', range);
for (int i = 0; i < length; i++) {
uc32 c = range[i];
uc16 range_from = c + (pos - start);
uc16 range_to = c + (end - start);
if (!(from() <= range_from && range_to <= to())) {
if (!(bottom <= range_from && range_to <= top)) {
ranges->Add(CharacterRange(range_from, range_to));
}
}
start = pos = block_end + 1;
}
} else {
// TODO(plesner) when we've fixed the 2^11 bug in unibrow.
// Unibrow ranges don't work for high characters due to the "2^11 bug".
// Therefore we do something dumber for these ranges.
AddUncanonicals(ranges, bottom, top);
}
}
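A schematic of the three-way dispatch the rewritten function performs (helper names are mine; the real work is inline above):
void AddCaseEquivalentsSketch(int bottom, int top, bool is_ascii,
                              int range_canonicalize_max) {
  if (is_ascii) {
    if (bottom > 0x7f) return;    // String::kMaxAsciiCharCode
    if (top > 0x7f) top = 0x7f;   // equivalents above ASCII are moot
  }
  if (top == bottom) {
    // singleton: expand via a single uncanonicalize lookup
  } else if (top <= range_canonicalize_max) {
    // expand contiguous blocks one at a time, as above
  } else {
    // AddUncanonicals fallback for high characters (the "2^11 bug")
  }
}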
static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
int bottom,
int top) {
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
// Zones with no case mappings. There is a DEBUG-mode loop to assert that
// this table is correct.
// 0x0600 - 0x0fff
// 0x1100 - 0x1cff
// 0x2000 - 0x20ff
// 0x2200 - 0x23ff
// 0x2500 - 0x2bff
// 0x2e00 - 0xa5ff
// 0xa800 - 0xfaff
// 0xfc00 - 0xfeff
const int boundary_count = 18;
// The ASCII boundary and the kRangeCanonicalizeMax boundary are also in this
// array. This is to split up big ranges and not because they actually denote
// a case-mapping-free zone.
ASSERT(CharacterRange::kRangeCanonicalizeMax < 0x600);
const int kFirstRealCaselessZoneIndex = 2;
int boundaries[] = {0x80, CharacterRange::kRangeCanonicalizeMax,
0x600, 0x1000, 0x1100, 0x1d00, 0x2000, 0x2100, 0x2200, 0x2400, 0x2500,
0x2c00, 0x2e00, 0xa600, 0xa800, 0xfb00, 0xfc00, 0xff00};
// Special ASCII rule from spec can save us some work here.
if (bottom == 0x80 && top == 0xffff) return;
// We have optimized support for this range.
if (top <= CharacterRange::kRangeCanonicalizeMax) {
CharacterRange range(bottom, top);
range.AddCaseEquivalents(ranges, false);
return;
}
// Split up very large ranges. This helps remove ranges where there are no
// case mappings.
for (int i = 0; i < boundary_count; i++) {
if (bottom < boundaries[i] && top >= boundaries[i]) {
AddUncanonicals(ranges, bottom, boundaries[i] - 1);
AddUncanonicals(ranges, boundaries[i], top);
return;
}
}
// If we are completely in a zone with no case mappings then we are done.
// We start at 2 so as not to exempt the ASCII range from mappings.
for (int i = kFirstRealCaselessZoneIndex; i < boundary_count; i += 2) {
if (bottom >= boundaries[i] && top < boundaries[i + 1]) {
#ifdef DEBUG
for (int j = bottom; j <= top; j++) {
unsigned current_char = j;
int length = uncanonicalize.get(current_char, '\0', chars);
for (int k = 0; k < length; k++) {
ASSERT(chars[k] == current_char);
}
}
#endif
return;
}
}
// Step through the range finding equivalent characters.
ZoneList<unibrow::uchar> *characters = new ZoneList<unibrow::uchar>(100);
for (int i = bottom; i <= top; i++) {
int length = uncanonicalize.get(i, '\0', chars);
for (int j = 0; j < length; j++) {
uc32 chr = chars[j];
if (chr != i && (chr < bottom || chr > top)) {
characters->Add(chr);
}
}
}
// Step through the equivalent characters finding simple ranges and
// adding ranges to the character class.
if (characters->length() > 0) {
int new_from = characters->at(0);
int new_to = new_from;
for (int i = 1; i < characters->length(); i++) {
int chr = characters->at(i);
if (chr == new_to + 1) {
new_to++;
} else {
if (new_to == new_from) {
ranges->Add(CharacterRange::Singleton(new_from));
} else {
ranges->Add(CharacterRange(new_from, new_to));
}
new_from = new_to = chr;
}
}
if (new_to == new_from) {
ranges->Add(CharacterRange::Singleton(new_from));
} else {
ranges->Add(CharacterRange(new_from, new_to));
}
}
}
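The loop above coalesces the collected equivalents into maximal consecutive ranges, emitting a Singleton for runs of length one. A standalone sketch of that step (illustrative names, not V8's; like the original, it only merges runs that happen to arrive adjacent in the input order):

#include <utility>
#include <vector>

std::vector<std::pair<int, int> > CoalesceRuns(const std::vector<int>& chars) {
  std::vector<std::pair<int, int> > ranges;
  if (chars.empty()) return ranges;
  int from = chars[0];
  int to = from;
  for (size_t i = 1; i < chars.size(); i++) {
    int chr = chars[i];
    if (chr == to + 1) {
      to++;  // extend the current run
    } else {
      ranges.push_back(std::make_pair(from, to));  // close the finished run
      from = to = chr;
    }
  }
  ranges.push_back(std::make_pair(from, to));  // close the final run
  return ranges;
}

For example, the input {0x41, 0x42, 0x43, 0x61} yields the range (0x41, 0x43) plus the singleton (0x61, 0x61), which is exactly the shape AddUncanonicals hands to the character class.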
@ -4234,7 +4351,7 @@ void TextNode::CalculateOffsets() {
void Analysis::VisitText(TextNode* that) {
if (ignore_case_) {
that->MakeCaseIndependent();
that->MakeCaseIndependent(is_ascii_);
}
EnsureAnalyzed(that->on_success());
if (!has_failed()) {
@ -4452,7 +4569,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
}
}
data->node = node;
Analysis analysis(ignore_case);
Analysis analysis(ignore_case, is_ascii);
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();

11
deps/v8/src/jsregexp.h

@ -200,7 +200,7 @@ class CharacterRange {
bool is_valid() { return from_ <= to_; }
bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
bool IsSingleton() { return (from_ == to_); }
void AddCaseEquivalents(ZoneList<CharacterRange>* ranges);
void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii);
static void Split(ZoneList<CharacterRange>* base,
Vector<const uc16> overlay,
ZoneList<CharacterRange>** included,
@ -703,7 +703,7 @@ class TextNode: public SeqRegExpNode {
int characters_filled_in,
bool not_at_start);
ZoneList<TextElement>* elements() { return elms_; }
void MakeCaseIndependent();
void MakeCaseIndependent(bool is_ascii);
virtual int GreedyLoopTextLength();
virtual TextNode* Clone() {
TextNode* result = new TextNode(*this);
@ -1212,8 +1212,10 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
// +-------+ +------------+
class Analysis: public NodeVisitor {
public:
explicit Analysis(bool ignore_case)
: ignore_case_(ignore_case), error_message_(NULL) { }
Analysis(bool ignore_case, bool is_ascii)
: ignore_case_(ignore_case),
is_ascii_(is_ascii),
error_message_(NULL) { }
void EnsureAnalyzed(RegExpNode* node);
#define DECLARE_VISIT(Type) \
@ -1232,6 +1234,7 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
}
private:
bool ignore_case_;
bool is_ascii_;
const char* error_message_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);

4
deps/v8/src/list.h

@ -59,7 +59,9 @@ class List {
Initialize(0);
}
INLINE(void* operator new(size_t size)) { return P::New(size); }
INLINE(void* operator new(size_t size)) {
return P::New(static_cast<int>(size));
}
INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); }
// Returns a reference to the element at index i. This reference is
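The operator new change above is representative of many edits in this commit: operator new receives a size_t, while V8's allocation policies take an int, so the narrowing is now written out explicitly, presumably to keep 64-bit builds warning-clean. A minimal sketch of the pattern, with a made-up MallocPolicy standing in for the real policy classes:

#include <cstddef>
#include <cstdlib>

// Hypothetical policy; V8's real policies forward to its own allocators.
struct MallocPolicy {
  static void* New(int size) { return malloc(size); }
  static void Delete(void* p) { free(p); }
};

template <typename T, class P>
class List {
 public:
  // operator new hands us size_t; the policy wants int. The cast documents
  // (and silences) the 64-bit truncation in one visible place.
  void* operator new(size_t size) {
    return P::New(static_cast<int>(size));
  }
  void operator delete(void* p, size_t) { P::Delete(p); }
};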

10
deps/v8/src/log-utils.cc

@ -155,7 +155,7 @@ void Log::OpenMemoryBuffer() {
ASSERT(!IsEnabled());
output_buffer_ = new LogDynamicBuffer(
kDynamicBufferBlockSize, kMaxDynamicBufferSize,
kDynamicBufferSeal, strlen(kDynamicBufferSeal));
kDynamicBufferSeal, StrLength(kDynamicBufferSeal));
Write = WriteToMemory;
Init();
}
@ -195,7 +195,7 @@ int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
// Find previous log line boundary.
char* end_pos = dest_buf + actual_size - 1;
while (end_pos >= dest_buf && *end_pos != '\n') --end_pos;
actual_size = end_pos - dest_buf + 1;
actual_size = static_cast<int>(end_pos - dest_buf + 1);
ASSERT(actual_size <= max_size);
return actual_size;
}
@ -352,7 +352,7 @@ void LogMessageBuilder::WriteToLogFile() {
void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
const int len = strlen(str);
const int len = StrLength(str);
const int written = Log::Write(str, len);
if (written != len && write_failure_handler != NULL) {
write_failure_handler();
@ -461,7 +461,7 @@ bool LogRecordCompressor::RetrievePreviousCompressed(
--data_ptr;
}
const intptr_t truncated_len = prev_end - prev_ptr;
const int copy_from_pos = data_ptr - data.start();
const int copy_from_pos = static_cast<int>(data_ptr - data.start());
// Check if the length of compressed tail is enough.
if (truncated_len <= kMaxBackwardReferenceSize
&& truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
@ -493,7 +493,7 @@ bool LogRecordCompressor::RetrievePreviousCompressed(
prev_record->start() + unchanged_len, best.backref_size + 1);
PrintBackwardReference(backref, best.distance, best.copy_from_pos);
ASSERT(strlen(backref.start()) - best.backref_size == 0);
prev_record->Truncate(unchanged_len + best.backref_size);
prev_record->Truncate(static_cast<int>(unchanged_len + best.backref_size));
}
return true;
}
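The recurring change in this file swaps strlen, which returns size_t, for a StrLength helper returning int, so results can initialize int constants and parameters without truncation warnings. A plausible shape for that helper (an assumption; the real definition lives elsewhere in the V8 tree):

#include <cassert>
#include <cstring>

// Funnel every size_t -> int narrowing through one asserted place.
static inline int StrLength(const char* string) {
  size_t length = strlen(string);
  assert(length == static_cast<size_t>(static_cast<int>(length)));
  return static_cast<int>(length);
}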

7
deps/v8/src/log-utils.h

@ -129,9 +129,10 @@ class Log : public AllStatic {
// Implementation of writing to a log file.
static int WriteToFile(const char* msg, int length) {
ASSERT(output_handle_ != NULL);
int rv = fwrite(msg, 1, length, output_handle_);
ASSERT(length == rv);
return rv;
size_t rv = fwrite(msg, 1, length, output_handle_);
ASSERT(static_cast<size_t>(length) == rv);
USE(rv);
return length;
}
// Implementation of writing to a memory buffer.

47
deps/v8/src/log.cc

@ -915,8 +915,9 @@ void Logger::HeapSampleJSRetainersEvent(
// Event starts with comma, so we don't have it in the format string.
static const char* event_text = "heap-js-ret-item,%s";
// We take placeholder strings into account, but it's OK to be conservative.
static const int event_text_len = strlen(event_text);
const int cons_len = strlen(constructor), event_len = strlen(event);
static const int event_text_len = StrLength(event_text);
const int cons_len = StrLength(constructor);
const int event_len = StrLength(event);
int pos = 0;
// Retainer lists can be long. We may need to split them into multiple events.
do {
@ -1120,6 +1121,48 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis) {
}
void Logger::LogCodeObject(Object* object) {
if (FLAG_log_code) {
Code* code_object = Code::cast(object);
LogEventsAndTags tag = Logger::STUB_TAG;
const char* description = "Unknown code from the snapshot";
switch (code_object->kind()) {
case Code::FUNCTION:
return; // We log this later using LogCompiledFunctions.
case Code::STUB:
description = CodeStub::MajorName(code_object->major_key());
tag = Logger::STUB_TAG;
break;
case Code::BUILTIN:
description = "A builtin from the snapshot";
tag = Logger::BUILTIN_TAG;
break;
case Code::KEYED_LOAD_IC:
description = "A keyed load IC from the snapshot";
tag = Logger::KEYED_LOAD_IC_TAG;
break;
case Code::LOAD_IC:
description = "A load IC from the snapshot";
tag = Logger::LOAD_IC_TAG;
break;
case Code::STORE_IC:
description = "A store IC from the snapshot";
tag = Logger::STORE_IC_TAG;
break;
case Code::KEYED_STORE_IC:
description = "A keyed store IC from the snapshot";
tag = Logger::KEYED_STORE_IC_TAG;
break;
case Code::CALL_IC:
description = "A call IC from the snapshot";
tag = Logger::CALL_IC_TAG;
break;
}
LOG(CodeCreateEvent(tag, code_object, description));
}
}
void Logger::LogCompiledFunctions() {
HandleScope scope;
const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);

2
deps/v8/src/log.h

@ -265,6 +265,8 @@ class Logger {
// Logs all compiled functions found in the heap.
static void LogCompiledFunctions();
// Used for logging stubs found in the snapshot.
static void LogCodeObject(Object* code_object);
private:

5
deps/v8/src/macros.py

@ -77,12 +77,13 @@ const kMonthShift = 5;
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
macro IS_FUNCTION(arg) = (typeof(arg) === 'function');
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_OBJECT(arg) = (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_ARRAY(arg) = (%_IsArray(arg));
# IS_FUNCTION uses %_ClassOf rather than typeof so as to exclude regexps.
macro IS_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
macro IS_REGEXP(arg) = (%_ClassOf(arg) === 'RegExp');
macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date');
macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number');

19
deps/v8/src/mark-compact.cc

@ -572,9 +572,8 @@ class SymbolMarkingVisitor : public ObjectVisitor {
void MarkCompactCollector::MarkSymbolTable() {
// Objects reachable from symbols are marked as live so as to ensure
// that if the symbol itself remains alive after GC for any reason,
// and if it is a sliced string or a cons string backed by an
// external string (even indirectly), then the external string does
// not receive a weak reference callback.
// and if it is a cons string backed by an external string (even indirectly),
// then the external string does not receive a weak reference callback.
SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
// Mark the symbol table itself.
SetMark(symbol_table);
@ -593,7 +592,7 @@ void MarkCompactCollector::MarkSymbolTable() {
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
Heap::IterateStrongRoots(visitor);
Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
// Handle the symbol table specially.
MarkSymbolTable();
@ -1074,7 +1073,7 @@ inline void EncodeForwardingAddressesInRange(Address start,
}
#endif
if (!is_prev_alive) { // Transition from non-live to live.
EncodeFreeRegion(free_start, current - free_start);
EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
is_prev_alive = true;
}
} else { // Non-live object.
@ -1088,7 +1087,9 @@ inline void EncodeForwardingAddressesInRange(Address start,
}
// If we ended on a free region, mark it.
if (!is_prev_alive) EncodeFreeRegion(free_start, end - free_start);
if (!is_prev_alive) {
EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
}
}
@ -1169,7 +1170,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
dealloc(free_start, current - free_start);
dealloc(free_start, static_cast<int>(current - free_start));
is_previous_alive = true;
}
} else {
@ -1189,7 +1190,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// If the last region was not live we need to deallocate from
// free_start to the allocation top in the page.
if (!is_previous_alive) {
int free_size = p->AllocationTop() - free_start;
int free_size = static_cast<int>(p->AllocationTop() - free_start);
if (free_size > 0) {
dealloc(free_start, free_size);
}
@ -1455,7 +1456,7 @@ void MarkCompactCollector::UpdatePointers() {
state_ = UPDATE_POINTERS;
#endif
UpdatingVisitor updating_visitor;
Heap::IterateRoots(&updating_visitor);
Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&updating_visitor);
int live_maps = IterateLiveObjects(Heap::map_space(),

65
deps/v8/src/mirror-delay.js

@ -848,6 +848,33 @@ FunctionMirror.prototype.script = function() {
};
/**
* Returns the script source position for the function. Only makes sense
* for functions which have a script defined.
* @return {Number or undefined} in-script position for the function
*/
FunctionMirror.prototype.sourcePosition_ = function() {
// Return the position if the function is resolved. Otherwise just fall through
// to return undefined.
if (this.resolved()) {
return %FunctionGetScriptSourcePosition(this.value_);
}
};
/**
* Returns the script source location object for the function. Only makes sense
* for functions which have a script defined.
* @return {Location or undefined} in-script location for the function begin
*/
FunctionMirror.prototype.sourceLocation = function() {
if (this.resolved() && this.script()) {
return this.script().locationFromPosition(this.sourcePosition_(),
true);
}
};
/**
* Returns objects constructed by this function.
* @param {number} opt_max_instances Optional parameter specifying the maximum
@ -2119,6 +2146,9 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
if (mirror.script()) {
content.script = this.serializeReference(mirror.script());
content.scriptId = mirror.script().id();
serializeLocationFields(mirror.sourceLocation(), content);
}
}
@ -2150,6 +2180,31 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
}
/**
* Serialize location information to the following JSON format:
*
* "position":"<position>",
* "line":"<line>",
* "column":"<column>",
*
* @param {SourceLocation} location The location to serialize, may be undefined.
*/
function serializeLocationFields (location, content) {
if (!location) {
return;
}
content.position = location.position;
var line = location.line;
if (!IS_UNDEFINED(line)) {
content.line = line;
}
var column = location.column;
if (!IS_UNDEFINED(column)) {
content.column = column;
}
}
/**
* Serialize property information to the following JSON format for building the
* array of properties.
@ -2218,15 +2273,7 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
x[i] = local;
}
content.locals = x;
content.position = mirror.sourcePosition();
var line = mirror.sourceLine();
if (!IS_UNDEFINED(line)) {
content.line = line;
}
var column = mirror.sourceColumn();
if (!IS_UNDEFINED(column)) {
content.column = column;
}
serializeLocationFields(mirror.sourceLocation(), content);
var source_line_text = mirror.sourceLineText();
if (!IS_UNDEFINED(source_line_text)) {
content.sourceLineText = source_line_text;

106
deps/v8/src/mksnapshot.cc

@ -87,57 +87,53 @@ class CounterCollection {
// We statically allocate a set of local counters to be used if we
// don't want to store the stats in a memory-mapped file
static CounterCollection local_counters;
static CounterCollection* counters = &local_counters;
typedef std::map<std::string, int*> CounterMap;
typedef std::map<std::string, int*>::iterator CounterMapIterator;
static CounterMap counter_table_;
// Callback receiver when v8 has a counter to track.
static int* counter_callback(const char* name) {
std::string counter = name;
// See if this counter name is already known.
if (counter_table_.find(counter) != counter_table_.end())
return counter_table_[counter];
Counter* ctr = counters->GetNextCounter();
if (ctr == NULL) return NULL;
int* ptr = ctr->Bind(name);
counter_table_[counter] = ptr;
return ptr;
}
class CppByteSink : public i::SnapshotByteSink {
public:
explicit CppByteSink(const char* snapshot_file) : bytes_written_(0) {
fp_ = i::OS::FOpen(snapshot_file, "wb");
if (fp_ == NULL) {
i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
exit(1);
}
fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
fprintf(fp_, "#include \"v8.h\"\n");
fprintf(fp_, "#include \"platform.h\"\n\n");
fprintf(fp_, "#include \"snapshot.h\"\n\n");
fprintf(fp_, "namespace v8 {\nnamespace internal {\n\n");
fprintf(fp_, "const byte Snapshot::data_[] = {");
}
// Write C++ code that defines Snapshot::snapshot_ to contain the snapshot
// to the file given by filename. Only the first size chars are written.
static int WriteInternalSnapshotToFile(const char* filename,
const v8::internal::byte* bytes,
int size) {
FILE* f = i::OS::FOpen(filename, "wb");
if (f == NULL) {
i::OS::PrintError("Cannot open file %s for reading.\n", filename);
return 0;
virtual ~CppByteSink() {
if (fp_ != NULL) {
fprintf(fp_, "};\n\n");
fprintf(fp_, "int Snapshot::size_ = %d;\n\n", bytes_written_);
fprintf(fp_, "} } // namespace v8::internal\n");
fclose(fp_);
}
}
fprintf(f, "// Autogenerated snapshot file. Do not edit.\n\n");
fprintf(f, "#include \"v8.h\"\n");
fprintf(f, "#include \"platform.h\"\n\n");
fprintf(f, "#include \"snapshot.h\"\n\n");
fprintf(f, "namespace v8 {\nnamespace internal {\n\n");
fprintf(f, "const byte Snapshot::data_[] = {");
int written = 0;
written += fprintf(f, "0x%x", bytes[0]);
for (int i = 1; i < size; ++i) {
written += fprintf(f, ",0x%x", bytes[i]);
// The following is needed to keep the line length low on Visual C++:
if (i % 512 == 0) fprintf(f, "\n");
virtual void Put(int byte, const char* description) {
if (bytes_written_ != 0) {
fprintf(fp_, ",");
}
fprintf(fp_, "%d", byte);
bytes_written_++;
if ((bytes_written_ & 0x3f) == 0) {
fprintf(fp_, "\n");
}
}
fprintf(f, "};\n\n");
fprintf(f, "int Snapshot::size_ = %d;\n\n", size);
fprintf(f, "} } // namespace v8::internal\n");
fclose(f);
return written;
}
private:
FILE* fp_;
int bytes_written_;
};
int main(int argc, char** argv) {
@ -153,34 +149,20 @@ int main(int argc, char** argv) {
i::FlagList::PrintHelp();
return !i::FLAG_help;
}
v8::V8::SetCounterFunction(counter_callback);
v8::HandleScope scope;
const int kExtensionCount = 1;
const char* extension_list[kExtensionCount] = { "v8/gc" };
v8::ExtensionConfiguration extensions(kExtensionCount, extension_list);
i::Serializer::Enable();
v8::Context::New(&extensions);
Persistent<Context> context = v8::Context::New();
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
i::Bootstrapper::NativesSourceLookup(i);
}
}
// Get rid of unreferenced scripts with a global GC.
i::Heap::CollectAllGarbage(false);
i::Serializer ser;
context.Dispose();
CppByteSink sink(argv[1]);
i::Serializer ser(&sink);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
i::Heap::CollectAllGarbage(true);
ser.Serialize();
v8::internal::byte* bytes;
int len;
ser.Finalize(&bytes, &len);
WriteInternalSnapshotToFile(argv[1], bytes, len);
i::DeleteArray(bytes);
return 0;
}
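The new CppByteSink above replaces the old write-everything-at-the-end WriteInternalSnapshotToFile: the serializer now streams each byte into the generated C++ file as it is produced, and the destructor emits the array's closing brace and size. A freestanding sketch of the same idea (simplified, hypothetical names):

#include <cstdio>

class ByteArraySink {
 public:
  explicit ByteArraySink(FILE* fp) : fp_(fp), bytes_written_(0) {
    fprintf(fp_, "const unsigned char snapshot_data[] = {");
  }
  ~ByteArraySink() {  // closing the sink finishes the generated file
    fprintf(fp_, "};\nconst int snapshot_size = %d;\n", bytes_written_);
  }
  void Put(int byte) {
    if (bytes_written_ != 0) fprintf(fp_, ",");
    fprintf(fp_, "%d", byte);
    bytes_written_++;
    if ((bytes_written_ & 0x3f) == 0) fprintf(fp_, "\n");  // 64 entries per line
  }
 private:
  FILE* fp_;
  int bytes_written_;
};

Capping lines at 64 entries serves the same purpose as the removed "line length low on Visual C++" hack: some compilers choke on very long source lines.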

18
deps/v8/src/objects-debug.cc

@ -553,12 +553,6 @@ static const char* TypeToString(InstanceType type) {
case SHORT_ASCII_SYMBOL_TYPE:
case MEDIUM_ASCII_SYMBOL_TYPE:
case LONG_ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
case SHORT_SLICED_SYMBOL_TYPE:
case MEDIUM_SLICED_SYMBOL_TYPE:
case LONG_SLICED_SYMBOL_TYPE: return "SLICED_SYMBOL";
case SHORT_SLICED_ASCII_SYMBOL_TYPE:
case MEDIUM_SLICED_ASCII_SYMBOL_TYPE:
case LONG_SLICED_ASCII_SYMBOL_TYPE: return "SLICED_ASCII_SYMBOL";
case SHORT_CONS_SYMBOL_TYPE:
case MEDIUM_CONS_SYMBOL_TYPE:
case LONG_CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
@ -583,12 +577,6 @@ static const char* TypeToString(InstanceType type) {
case SHORT_CONS_ASCII_STRING_TYPE:
case MEDIUM_CONS_ASCII_STRING_TYPE:
case LONG_CONS_ASCII_STRING_TYPE: return "CONS_STRING";
case SHORT_SLICED_STRING_TYPE:
case MEDIUM_SLICED_STRING_TYPE:
case LONG_SLICED_STRING_TYPE:
case SHORT_SLICED_ASCII_STRING_TYPE:
case MEDIUM_SLICED_ASCII_STRING_TYPE:
case LONG_SLICED_ASCII_STRING_TYPE: return "SLICED_STRING";
case SHORT_EXTERNAL_ASCII_STRING_TYPE:
case MEDIUM_EXTERNAL_ASCII_STRING_TYPE:
case LONG_EXTERNAL_ASCII_STRING_TYPE:
@ -796,8 +784,6 @@ void SharedFunctionInfo::SharedFunctionInfoPrint() {
PrintF("\n - debug info = ");
debug_info()->ShortPrint();
PrintF("\n - length = %d", length());
PrintF("\n - has_only_this_property_assignments = %d",
has_only_this_property_assignments());
PrintF("\n - has_only_simple_this_property_assignments = %d",
has_only_simple_this_property_assignments());
PrintF("\n - this_property_assignments = ");
@ -979,6 +965,7 @@ void AccessorInfo::AccessorInfoVerify() {
VerifyPointer(name());
VerifyPointer(data());
VerifyPointer(flag());
VerifyPointer(load_stub_cache());
}
void AccessorInfo::AccessorInfoPrint() {
@ -1153,7 +1140,8 @@ void Script::ScriptVerify() {
VerifyPointer(data());
VerifyPointer(wrapper());
type()->SmiVerify();
VerifyPointer(line_ends());
VerifyPointer(line_ends_fixed_array());
VerifyPointer(line_ends_js_array());
VerifyPointer(id());
}

113
deps/v8/src/objects-inl.h

@ -163,11 +163,6 @@ bool Object::IsConsString() {
}
#ifdef DEBUG
// These are for cast checks. If you need one of these in release
// mode you should consider using a StringShape before moving it out
// of the ifdef
bool Object::IsSeqString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential();
@ -208,15 +203,6 @@ bool Object::IsExternalTwoByteString() {
}
bool Object::IsSlicedString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSliced();
}
#endif // DEBUG
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
set_valid();
@ -246,9 +232,6 @@ bool StringShape::IsSymbol() {
bool String::IsAsciiRepresentation() {
uint32_t type = map()->instance_type();
if ((type & kStringRepresentationMask) == kSlicedStringTag) {
return SlicedString::cast(this)->buffer()->IsAsciiRepresentation();
}
if ((type & kStringRepresentationMask) == kConsStringTag &&
ConsString::cast(this)->second()->length() == 0) {
return ConsString::cast(this)->first()->IsAsciiRepresentation();
@ -259,9 +242,7 @@ bool String::IsAsciiRepresentation() {
bool String::IsTwoByteRepresentation() {
uint32_t type = map()->instance_type();
if ((type & kStringRepresentationMask) == kSlicedStringTag) {
return SlicedString::cast(this)->buffer()->IsTwoByteRepresentation();
} else if ((type & kStringRepresentationMask) == kConsStringTag &&
if ((type & kStringRepresentationMask) == kConsStringTag &&
ConsString::cast(this)->second()->length() == 0) {
return ConsString::cast(this)->first()->IsTwoByteRepresentation();
}
@ -274,11 +255,6 @@ bool StringShape::IsCons() {
}
bool StringShape::IsSliced() {
return (type_ & kStringRepresentationMask) == kSlicedStringTag;
}
bool StringShape::IsExternal() {
return (type_ & kStringRepresentationMask) == kExternalStringTag;
}
@ -879,7 +855,7 @@ Failure* Failure::RetryAfterGC(int requested_bytes) {
requested = static_cast<intptr_t>(
(~static_cast<uintptr_t>(0)) >> (tag_bits + 1));
}
int value = (requested << kSpaceTagSize) | NEW_SPACE;
int value = static_cast<int>(requested << kSpaceTagSize) | NEW_SPACE;
return Construct(RETRY_AFTER_GC, value);
}
@ -1033,9 +1009,9 @@ Address MapWord::DecodeMapAddress(MapSpace* map_space) {
int MapWord::DecodeOffset() {
// The offset field is represented in the kForwardingOffsetBits
// most-significant bits.
int offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
return offset;
uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
return static_cast<int>(offset);
}
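The DecodeOffset rewrite just above applies the same 64-bit discipline as the other casts in this commit: the shifts now happen in uintptr_t, so no high bits are lost before the bounds check, and the narrowing to int happens once, explicitly, at the end. A self-contained restatement with illustrative constants (the real values live in V8's headers):

#include <cassert>
#include <cstdint>

static const int kForwardingOffsetShift = 12;   // illustrative value
static const int kObjectAlignmentBits = 2;      // illustrative value
static const uintptr_t kObjectAreaSize = 8 * 1024;

int DecodeOffset(uintptr_t value) {
  // Shift in the unsigned word-sized type, then narrow once.
  uintptr_t offset = (value >> kForwardingOffsetShift) << kObjectAlignmentBits;
  assert(offset < kObjectAreaSize);
  return static_cast<int>(offset);
}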
@ -1610,7 +1586,6 @@ CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqAsciiString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalAsciiString)
CAST_ACCESSOR(ExternalTwoByteString)
@ -1721,9 +1696,6 @@ uint16_t String::Get(int index) {
case kConsStringTag | kAsciiStringTag:
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(this)->ConsStringGet(index);
case kSlicedStringTag | kAsciiStringTag:
case kSlicedStringTag | kTwoByteStringTag:
return SlicedString::cast(this)->SlicedStringGet(index);
case kExternalStringTag | kAsciiStringTag:
return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
case kExternalStringTag | kTwoByteStringTag:
@ -1754,11 +1726,6 @@ bool String::IsFlat() {
// Only flattened strings have second part empty.
return second->length() == 0;
}
case kSlicedStringTag: {
StringRepresentationTag tag =
StringShape(SlicedString::cast(this)->buffer()).representation_tag();
return tag == kSeqStringTag || tag == kExternalStringTag;
}
default:
return true;
}
@ -1872,27 +1839,6 @@ void ConsString::set_second(String* value, WriteBarrierMode mode) {
}
String* SlicedString::buffer() {
return String::cast(READ_FIELD(this, kBufferOffset));
}
void SlicedString::set_buffer(String* buffer) {
WRITE_FIELD(this, kBufferOffset, buffer);
WRITE_BARRIER(this, kBufferOffset);
}
int SlicedString::start() {
return READ_INT_FIELD(this, kStartOffset);
}
void SlicedString::set_start(int start) {
WRITE_INT_FIELD(this, kStartOffset, start);
}
ExternalAsciiString::Resource* ExternalAsciiString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
@ -2436,6 +2382,7 @@ ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
ACCESSORS(AccessorInfo, load_stub_cache, Object, kLoadStubCacheOffset)
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@ -2494,7 +2441,8 @@ ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
ACCESSORS(Script, type, Smi, kTypeOffset)
ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
ACCESSORS(Script, line_ends_fixed_array, Object, kLineEndsFixedArrayOffset)
ACCESSORS(Script, line_ends_js_array, Object, kLineEndsJSArrayOffset)
ACCESSORS(Script, eval_from_function, Object, kEvalFromFunctionOffset)
ACCESSORS(Script, eval_from_instructions_offset, Smi,
kEvalFrominstructionsOffsetOffset)
@ -2532,13 +2480,13 @@ BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
BOOL_GETTER(SharedFunctionInfo, compiler_hints,
has_only_this_property_assignments,
kHasOnlyThisPropertyAssignments)
BOOL_GETTER(SharedFunctionInfo, compiler_hints,
has_only_simple_this_property_assignments,
kHasOnlySimpleThisPropertyAssignments)
BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
try_fast_codegen,
kTryFastCodegen)
INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
@ -3046,6 +2994,43 @@ PropertyAttributes JSObject::GetPropertyAttribute(String* key) {
return GetPropertyAttributeWithReceiver(this, key);
}
// TODO(504): this may be useful in other places too where JSGlobalProxy
// is used.
Object* JSObject::BypassGlobalProxy() {
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return Heap::undefined_value();
ASSERT(proto->IsJSGlobalObject());
return proto;
}
return this;
}
bool JSObject::HasHiddenPropertiesObject() {
ASSERT(!IsJSGlobalProxy());
return GetPropertyAttributePostInterceptor(this,
Heap::hidden_symbol(),
false) != ABSENT;
}
Object* JSObject::GetHiddenPropertiesObject() {
ASSERT(!IsJSGlobalProxy());
PropertyAttributes attributes;
return GetLocalPropertyPostInterceptor(this,
Heap::hidden_symbol(),
&attributes);
}
Object* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
ASSERT(!IsJSGlobalProxy());
return SetPropertyPostInterceptor(Heap::hidden_symbol(),
hidden_obj,
DONT_ENUM);
}
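Taken together, BypassGlobalProxy and the three hidden-properties accessors above let callers treat a global proxy and an ordinary object uniformly. A sketch of the proxy-bypass contract with simplified stand-in types (not V8's real object model):

// Illustrative stand-ins; V8's Object/JSGlobalProxy hierarchy is richer.
struct Object {
  virtual bool IsJSGlobalProxy() { return false; }
  virtual bool IsJSGlobalObject() { return false; }
  virtual Object* GetPrototype() { return 0; }  // 0 plays the role of null
  virtual ~Object() {}
};

Object* BypassGlobalProxy(Object* receiver, Object* undefined_value) {
  if (receiver->IsJSGlobalProxy()) {
    Object* proto = receiver->GetPrototype();
    if (proto == 0) return undefined_value;  // detached proxy
    // In V8 this is asserted to be the real JSGlobalObject.
    return proto;
  }
  return receiver;  // ordinary objects pass through unchanged
}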
bool JSObject::HasElement(uint32_t index) {
return HasElementWithReceiver(this, index);

201
deps/v8/src/objects.cc

@ -683,23 +683,6 @@ Object* String::TryFlatten() {
#endif
switch (StringShape(this).representation_tag()) {
case kSlicedStringTag: {
SlicedString* ss = SlicedString::cast(this);
// The SlicedString constructor should ensure that there are no
// SlicedStrings that are constructed directly on top of other
// SlicedStrings.
String* buf = ss->buffer();
ASSERT(!buf->IsSlicedString());
Object* ok = buf->TryFlatten();
if (ok->IsFailure()) return ok;
// Under certain circumstances (TryFlattenIfNotFlat fails in
// String::Slice) we can have a cons string under a slice.
// In this case we need to get the flat string out of the cons!
if (StringShape(String::cast(ok)).IsCons()) {
ss->set_buffer(ConsString::cast(ok)->first());
}
return this;
}
case kConsStringTag: {
ConsString* cs = ConsString::cast(this);
if (cs->second()->length() == 0) {
@ -1135,8 +1118,14 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case kConsStringTag:
reinterpret_cast<ConsString*>(this)->ConsStringIterateBody(v);
break;
case kSlicedStringTag:
reinterpret_cast<SlicedString*>(this)->SlicedStringIterateBody(v);
case kExternalStringTag:
if ((type & kStringEncodingMask) == kAsciiStringTag) {
reinterpret_cast<ExternalAsciiString*>(this)->
ExternalAsciiStringIterateBody(v);
} else {
reinterpret_cast<ExternalTwoByteString*>(this)->
ExternalTwoByteStringIterateBody(v);
}
break;
}
return;
@ -3562,12 +3551,7 @@ Vector<const char> String::ToAsciiVector() {
int length = this->length();
StringRepresentationTag string_tag = StringShape(this).representation_tag();
String* string = this;
if (string_tag == kSlicedStringTag) {
SlicedString* sliced = SlicedString::cast(string);
offset += sliced->start();
string = sliced->buffer();
string_tag = StringShape(string).representation_tag();
} else if (string_tag == kConsStringTag) {
if (string_tag == kConsStringTag) {
ConsString* cons = ConsString::cast(string);
ASSERT(cons->second()->length() == 0);
string = cons->first();
@ -3593,12 +3577,7 @@ Vector<const uc16> String::ToUC16Vector() {
int length = this->length();
StringRepresentationTag string_tag = StringShape(this).representation_tag();
String* string = this;
if (string_tag == kSlicedStringTag) {
SlicedString* sliced = SlicedString::cast(string);
offset += sliced->start();
string = String::cast(sliced->buffer());
string_tag = StringShape(string).representation_tag();
} else if (string_tag == kConsStringTag) {
if (string_tag == kConsStringTag) {
ConsString* cons = ConsString::cast(string);
ASSERT(cons->second()->length() == 0);
string = cons->first();
@ -3689,17 +3668,6 @@ const uc16* String::GetTwoByteData(unsigned start) {
case kExternalStringTag:
return ExternalTwoByteString::cast(this)->
ExternalTwoByteStringGetData(start);
case kSlicedStringTag: {
SlicedString* sliced_string = SlicedString::cast(this);
String* buffer = sliced_string->buffer();
if (StringShape(buffer).IsCons()) {
ConsString* cs = ConsString::cast(buffer);
// Flattened string.
ASSERT(cs->second()->length() == 0);
buffer = cs->first();
}
return buffer->GetTwoByteData(start + sliced_string->start());
}
case kConsStringTag:
UNREACHABLE();
return NULL;
@ -3854,22 +3822,6 @@ const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb,
}
const unibrow::byte* SlicedString::SlicedStringReadBlock(ReadBlockBuffer* rbb,
unsigned* offset_ptr,
unsigned max_chars) {
String* backing = buffer();
unsigned offset = start() + *offset_ptr;
unsigned length = backing->length();
if (max_chars > length - offset) {
max_chars = length - offset;
}
const unibrow::byte* answer =
String::ReadBlock(backing, rbb, &offset, max_chars);
*offset_ptr = offset - start();
return answer;
}
uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
ASSERT(index >= 0 && index < length());
return resource()->data()[index];
@ -3993,10 +3945,6 @@ const unibrow::byte* String::ReadBlock(String* input,
return ConsString::cast(input)->ConsStringReadBlock(rbb,
offset_ptr,
max_chars);
case kSlicedStringTag:
return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
offset_ptr,
max_chars);
case kExternalStringTag:
if (input->IsAsciiRepresentation()) {
return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
@ -4139,20 +4087,15 @@ void String::ReadBlockIntoBuffer(String* input,
offset_ptr,
max_chars);
return;
case kSlicedStringTag:
SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
offset_ptr,
max_chars);
return;
case kExternalStringTag:
if (input->IsAsciiRepresentation()) {
ExternalAsciiString::cast(input)->
ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
} else {
ExternalTwoByteString::cast(input)->
ExternalTwoByteStringReadBlockIntoBuffer(rbb,
offset_ptr,
max_chars);
ExternalAsciiString::cast(input)->
ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
} else {
ExternalTwoByteString::cast(input)->
ExternalTwoByteStringReadBlockIntoBuffer(rbb,
offset_ptr,
max_chars);
}
return;
default:
@ -4258,20 +4201,6 @@ void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
}
void SlicedString::SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
unsigned* offset_ptr,
unsigned max_chars) {
String* backing = buffer();
unsigned offset = start() + *offset_ptr;
unsigned length = backing->length();
if (max_chars > length - offset) {
max_chars = length - offset;
}
String::ReadBlockIntoBuffer(backing, rbb, &offset, max_chars);
*offset_ptr = offset - start();
}
void ConsString::ConsStringIterateBody(ObjectVisitor* v) {
IteratePointers(v, kFirstOffset, kSecondOffset + kPointerSize);
}
@ -4350,15 +4279,6 @@ void String::WriteToFlat(String* src,
to - from);
return;
}
case kAsciiStringTag | kSlicedStringTag:
case kTwoByteStringTag | kSlicedStringTag: {
SlicedString* sliced_string = SlicedString::cast(source);
int start = sliced_string->start();
from += start;
to += start;
source = String::cast(sliced_string->buffer());
break;
}
case kAsciiStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString* cons_string = ConsString::cast(source);
@ -4394,18 +4314,23 @@ void String::WriteToFlat(String* src,
}
void SlicedString::SlicedStringIterateBody(ObjectVisitor* v) {
IteratePointer(v, kBufferOffset);
#define FIELD_ADDR(p, offset) \
(reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
typedef v8::String::ExternalAsciiStringResource Resource;
v->VisitExternalAsciiString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
uint16_t SlicedString::SlicedStringGet(int index) {
ASSERT(index >= 0 && index < this->length());
// Delegate to the buffer string.
String* underlying = buffer();
return underlying->Get(start() + index);
void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) {
typedef v8::String::ExternalStringResource Resource;
v->VisitExternalTwoByteString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
#undef FIELD_ADDR
template <typename IteratorA, typename IteratorB>
static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
@ -4705,43 +4630,10 @@ uint32_t String::ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
}
Object* String::Slice(int start, int end) {
Object* String::SubString(int start, int end) {
if (start == 0 && end == length()) return this;
if (StringShape(this).representation_tag() == kSlicedStringTag) {
// Translate slices of a SlicedString into slices of the
// underlying string buffer.
SlicedString* str = SlicedString::cast(this);
String* buf = str->buffer();
return Heap::AllocateSlicedString(buf,
str->start() + start,
str->start() + end);
}
Object* result = Heap::AllocateSlicedString(this, start, end);
if (result->IsFailure()) {
return result;
}
// Due to the way we retry after GC on allocation failure we are not allowed
// to fail on allocation after this point. This is the one-allocation rule.
// Try to flatten a cons string that is under the sliced string.
// This is to avoid memory leaks and possible stack overflows caused by
// building 'towers' of sliced strings on cons strings.
// This may fail due to an allocation failure (when a GC is needed), but it
// will succeed often enough to avoid the problem. We only have to do this
// if Heap::AllocateSlicedString actually returned a SlicedString. It will
// return flat strings for small slices for efficiency reasons.
String* answer = String::cast(result);
if (StringShape(answer).IsSliced() &&
StringShape(this).representation_tag() == kConsStringTag) {
TryFlatten();
// If the flatten succeeded we might as well make the sliced string point
// to the flat string rather than the cons string.
String* second = ConsString::cast(this)->second();
if (second->length() == 0) {
SlicedString::cast(answer)->set_buffer(ConsString::cast(this)->first());
}
}
return answer;
Object* result = Heap::AllocateSubString(this, start, end);
return result;
}
@ -4921,12 +4813,8 @@ int SharedFunctionInfo::CalculateInObjectProperties() {
void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
bool only_this_property_assignments,
bool only_simple_this_property_assignments,
FixedArray* assignments) {
set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlyThisPropertyAssignments,
only_this_property_assignments));
set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlySimpleThisPropertyAssignments,
only_simple_this_property_assignments));
@ -4936,9 +4824,6 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlyThisPropertyAssignments,
false));
set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlySimpleThisPropertyAssignments,
false));
@ -4994,7 +4879,7 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
return;
}
// Get the slice of the source for this function.
// Get the source for the script which this function came from.
// Don't use String::cast because we don't want more assertion errors while
// we are already creating a stack dump.
String* script_source =
@ -5083,7 +4968,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
}
void Code::Relocate(int delta) {
void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
}
@ -5149,8 +5034,9 @@ int Code::SourcePosition(Address pc) {
// Only look at positions after the current pc.
if (it.rinfo()->pc() < pc) {
// Get position and distance.
int dist = pc - it.rinfo()->pc();
int pos = it.rinfo()->data();
int dist = static_cast<int>(pc - it.rinfo()->pc());
int pos = static_cast<int>(it.rinfo()->data());
// If this position is closer than the current candidate or if it has the
// same distance as the current candidate and the position is higher then
// this position is the new candidate.
@ -5177,7 +5063,7 @@ int Code::SourceStatementPosition(Address pc) {
RelocIterator it(this, RelocInfo::kPositionMask);
while (!it.done()) {
if (RelocInfo::IsStatementPosition(it.rinfo()->rmode())) {
int p = it.rinfo()->data();
int p = static_cast<int>(it.rinfo()->data());
if (statement_position < p && p <= position) {
statement_position = p;
}
@ -6284,6 +6170,17 @@ Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver,
return pt->GetPropertyWithReceiver(receiver, name, attributes);
}
Object* JSObject::GetLocalPropertyPostInterceptor(
JSObject* receiver,
String* name,
PropertyAttributes* attributes) {
// Check local property in holder, ignore interceptor.
LookupResult result;
LocalLookupRealNamedProperty(name, &result);
if (!result.IsValid()) return Heap::undefined_value();
return GetProperty(receiver, &result, name, attributes);
}
Object* JSObject::GetPropertyWithInterceptor(
JSObject* receiver,

229
deps/v8/src/objects.h

@ -78,7 +78,6 @@
// - SeqAsciiString
// - SeqTwoByteString
// - ConsString
// - SlicedString
// - ExternalString
// - ExternalAsciiString
// - ExternalTwoByteString
@ -210,7 +209,7 @@ enum PropertyNormalizationMode {
// considered TWO_BYTE. It is not mentioned in the name. ASCII encoding is
// mentioned explicitly in the name. Likewise, the default representation is
// considered sequential. It is not mentioned in the name. The other
// representations (eg, CONS, SLICED, EXTERNAL) are explicitly mentioned.
// representations (eg, CONS, EXTERNAL) are explicitly mentioned.
// Finally, the string is either a SYMBOL_TYPE (if it is a symbol) or a
// STRING_TYPE (if it is not a symbol).
//
@ -235,12 +234,6 @@ enum PropertyNormalizationMode {
V(SHORT_CONS_ASCII_SYMBOL_TYPE) \
V(MEDIUM_CONS_ASCII_SYMBOL_TYPE) \
V(LONG_CONS_ASCII_SYMBOL_TYPE) \
V(SHORT_SLICED_SYMBOL_TYPE) \
V(MEDIUM_SLICED_SYMBOL_TYPE) \
V(LONG_SLICED_SYMBOL_TYPE) \
V(SHORT_SLICED_ASCII_SYMBOL_TYPE) \
V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE) \
V(LONG_SLICED_ASCII_SYMBOL_TYPE) \
V(SHORT_EXTERNAL_SYMBOL_TYPE) \
V(MEDIUM_EXTERNAL_SYMBOL_TYPE) \
V(LONG_EXTERNAL_SYMBOL_TYPE) \
@ -259,12 +252,6 @@ enum PropertyNormalizationMode {
V(SHORT_CONS_ASCII_STRING_TYPE) \
V(MEDIUM_CONS_ASCII_STRING_TYPE) \
V(LONG_CONS_ASCII_STRING_TYPE) \
V(SHORT_SLICED_STRING_TYPE) \
V(MEDIUM_SLICED_STRING_TYPE) \
V(LONG_SLICED_STRING_TYPE) \
V(SHORT_SLICED_ASCII_STRING_TYPE) \
V(MEDIUM_SLICED_ASCII_STRING_TYPE) \
V(LONG_SLICED_ASCII_STRING_TYPE) \
V(SHORT_EXTERNAL_STRING_TYPE) \
V(MEDIUM_EXTERNAL_STRING_TYPE) \
V(LONG_EXTERNAL_STRING_TYPE) \
@ -380,30 +367,6 @@ enum PropertyNormalizationMode {
ConsString::kSize, \
long_cons_ascii_symbol, \
LongConsAsciiSymbol) \
V(SHORT_SLICED_SYMBOL_TYPE, \
SlicedString::kSize, \
short_sliced_symbol, \
ShortSlicedSymbol) \
V(MEDIUM_SLICED_SYMBOL_TYPE, \
SlicedString::kSize, \
medium_sliced_symbol, \
MediumSlicedSymbol) \
V(LONG_SLICED_SYMBOL_TYPE, \
SlicedString::kSize, \
long_sliced_symbol, \
LongSlicedSymbol) \
V(SHORT_SLICED_ASCII_SYMBOL_TYPE, \
SlicedString::kSize, \
short_sliced_ascii_symbol, \
ShortSlicedAsciiSymbol) \
V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE, \
SlicedString::kSize, \
medium_sliced_ascii_symbol, \
MediumSlicedAsciiSymbol) \
V(LONG_SLICED_ASCII_SYMBOL_TYPE, \
SlicedString::kSize, \
long_sliced_ascii_symbol, \
LongSlicedAsciiSymbol) \
V(SHORT_EXTERNAL_SYMBOL_TYPE, \
ExternalTwoByteString::kSize, \
short_external_symbol, \
@ -476,30 +439,6 @@ enum PropertyNormalizationMode {
ConsString::kSize, \
long_cons_ascii_string, \
LongConsAsciiString) \
V(SHORT_SLICED_STRING_TYPE, \
SlicedString::kSize, \
short_sliced_string, \
ShortSlicedString) \
V(MEDIUM_SLICED_STRING_TYPE, \
SlicedString::kSize, \
medium_sliced_string, \
MediumSlicedString) \
V(LONG_SLICED_STRING_TYPE, \
SlicedString::kSize, \
long_sliced_string, \
LongSlicedString) \
V(SHORT_SLICED_ASCII_STRING_TYPE, \
SlicedString::kSize, \
short_sliced_ascii_string, \
ShortSlicedAsciiString) \
V(MEDIUM_SLICED_ASCII_STRING_TYPE, \
SlicedString::kSize, \
medium_sliced_ascii_string, \
MediumSlicedAsciiString) \
V(LONG_SLICED_ASCII_STRING_TYPE, \
SlicedString::kSize, \
long_sliced_ascii_string, \
LongSlicedAsciiString) \
V(SHORT_EXTERNAL_STRING_TYPE, \
ExternalTwoByteString::kSize, \
short_external_string, \
@ -591,7 +530,6 @@ const uint32_t kStringRepresentationMask = 0x03;
enum StringRepresentationTag {
kSeqStringTag = 0x0,
kConsStringTag = 0x1,
kSlicedStringTag = 0x2,
kExternalStringTag = 0x3
};
@ -627,15 +565,6 @@ enum InstanceType {
kMediumStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
LONG_CONS_ASCII_SYMBOL_TYPE =
kLongStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag,
SHORT_SLICED_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSlicedStringTag,
MEDIUM_SLICED_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSlicedStringTag,
LONG_SLICED_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSlicedStringTag,
SHORT_SLICED_ASCII_SYMBOL_TYPE =
kShortStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
MEDIUM_SLICED_ASCII_SYMBOL_TYPE =
kMediumStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
LONG_SLICED_ASCII_SYMBOL_TYPE =
kLongStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag,
SHORT_EXTERNAL_SYMBOL_TYPE =
kShortStringTag | kSymbolTag | kExternalStringTag,
MEDIUM_EXTERNAL_SYMBOL_TYPE =
@ -662,15 +591,6 @@ enum InstanceType {
kMediumStringTag | kAsciiStringTag | kConsStringTag,
LONG_CONS_ASCII_STRING_TYPE =
kLongStringTag | kAsciiStringTag | kConsStringTag,
SHORT_SLICED_STRING_TYPE = kShortStringTag | kSlicedStringTag,
MEDIUM_SLICED_STRING_TYPE = kMediumStringTag | kSlicedStringTag,
LONG_SLICED_STRING_TYPE = kLongStringTag | kSlicedStringTag,
SHORT_SLICED_ASCII_STRING_TYPE =
kShortStringTag | kAsciiStringTag | kSlicedStringTag,
MEDIUM_SLICED_ASCII_STRING_TYPE =
kMediumStringTag | kAsciiStringTag | kSlicedStringTag,
LONG_SLICED_ASCII_STRING_TYPE =
kLongStringTag | kAsciiStringTag | kSlicedStringTag,
SHORT_EXTERNAL_STRING_TYPE = kShortStringTag | kExternalStringTag,
MEDIUM_EXTERNAL_STRING_TYPE = kMediumStringTag | kExternalStringTag,
LONG_EXTERNAL_STRING_TYPE = kLongStringTag | kExternalStringTag,
@ -790,16 +710,13 @@ class Object BASE_EMBEDDED {
inline bool IsHeapNumber();
inline bool IsString();
inline bool IsSymbol();
#ifdef DEBUG
// See objects-inl.h for more details
inline bool IsSeqString();
inline bool IsSlicedString();
inline bool IsExternalString();
inline bool IsExternalTwoByteString();
inline bool IsExternalAsciiString();
inline bool IsSeqTwoByteString();
inline bool IsSeqAsciiString();
#endif // DEBUG
inline bool IsConsString();
inline bool IsNumber();
@ -1490,6 +1407,9 @@ class JSObject: public HeapObject {
Object* GetPropertyPostInterceptor(JSObject* receiver,
String* name,
PropertyAttributes* attributes);
Object* GetLocalPropertyPostInterceptor(JSObject* receiver,
String* name,
PropertyAttributes* attributes);
Object* GetLazyProperty(Object* receiver,
LookupResult* result,
String* name,
@ -1511,6 +1431,27 @@ class JSObject: public HeapObject {
return GetLocalPropertyAttribute(name) != ABSENT;
}
// If the receiver is a JSGlobalProxy this method will return its prototype,
// otherwise the result is the receiver itself.
inline Object* BypassGlobalProxy();
// Accessors for hidden properties object.
//
// Hidden properties are not local properties of the object itself.
// Instead they are stored on an auxiliary JSObject stored as a local
// property with a special name Heap::hidden_symbol(). But if the
// receiver is a JSGlobalProxy then the auxiliary object is a property
// of its prototype.
//
// Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be
// a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real
// holder.
//
// These accessors do not touch interceptors or accessors.
inline bool HasHiddenPropertiesObject();
inline Object* GetHiddenPropertiesObject();
inline Object* SetHiddenPropertiesObject(Object* hidden_obj);
Object* DeleteProperty(String* name, DeleteMode mode);
Object* DeleteElement(uint32_t index, DeleteMode mode);
Object* DeleteLazyProperty(LookupResult* result,
@ -2873,7 +2814,7 @@ class Code: public HeapObject {
// Relocate the code by delta bytes. Called to signal that this code
// object has been moved by delta bytes.
void Relocate(int delta);
void Relocate(intptr_t delta);
// Migrate code described by desc.
void CopyFrom(const CodeDesc& desc);
@ -2910,7 +2851,8 @@ class Code: public HeapObject {
void CodeVerify();
#endif
// Code entry points are aligned to 32 bytes.
static const int kCodeAlignment = 32;
static const int kCodeAlignmentBits = 5;
static const int kCodeAlignment = 1 << kCodeAlignmentBits;
static const int kCodeAlignmentMask = kCodeAlignment - 1;
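Splitting the constant into kCodeAlignmentBits and a derived kCodeAlignment makes the standard power-of-two mask tricks available to the rest of the code. A quick standalone illustration (the helper names here are mine, not V8's):

#include <cstdint>

static const int kCodeAlignmentBits = 5;
static const uintptr_t kCodeAlignment = uintptr_t(1) << kCodeAlignmentBits;  // 32
static const uintptr_t kCodeAlignmentMask = kCodeAlignment - 1;              // 0x1f

bool IsCodeAligned(uintptr_t address) {
  return (address & kCodeAlignmentMask) == 0;
}

uintptr_t RoundUpToCodeAlignment(uintptr_t address) {
  return (address + kCodeAlignmentMask) & ~kCodeAlignmentMask;
}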
// Layout description.
@ -3238,8 +3180,11 @@ class Script: public Struct {
// [compilation_type]: how the script was compiled.
DECL_ACCESSORS(compilation_type, Smi)
// [line_ends]: array of line ends positions.
DECL_ACCESSORS(line_ends, Object)
// [line_ends_fixed_array]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends_fixed_array, Object)
// [line_ends_js_array]: JSArray of line ends positions.
DECL_ACCESSORS(line_ends_js_array, Object)
// [eval_from_function]: for eval scripts the function from which eval was
// called.
@ -3269,8 +3214,16 @@ class Script: public Struct {
static const int kWrapperOffset = kContextOffset + kPointerSize;
static const int kTypeOffset = kWrapperOffset + kPointerSize;
static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
static const int kIdOffset = kLineEndsOffset + kPointerSize;
// We have the line ends array both in FixedArray form and in JSArray form.
// The FixedArray form is useful when we don't have a context and so can't
// create a JSArray. The JSArray form is useful when we want to see the
// array from JS code (e.g. debug-delay.js) which cannot handle unboxed
// FixedArray objects.
static const int kLineEndsFixedArrayOffset =
kCompilationTypeOffset + kPointerSize;
static const int kLineEndsJSArrayOffset =
kLineEndsFixedArrayOffset + kPointerSize;
static const int kIdOffset = kLineEndsJSArrayOffset + kPointerSize;
static const int kEvalFromFunctionOffset = kIdOffset + kPointerSize;
static const int kEvalFrominstructionsOffsetOffset =
kEvalFromFunctionOffset + kPointerSize;
@ -3371,21 +3324,19 @@ class SharedFunctionInfo: public HeapObject {
// Add information on assignments of the form this.x = ...;
void SetThisPropertyAssignmentsInfo(
bool has_only_this_property_assignments,
bool has_only_simple_this_property_assignments,
FixedArray* this_property_assignments);
// Clear information on assignments of the form this.x = ...;
void ClearThisPropertyAssignmentsInfo();
// Indicate that this function only consists of assignments of the form
// this.x = ...;.
inline bool has_only_this_property_assignments();
// Indicate that this function only consists of assignments of the form
// this.x = y; where y is either a constant or refers to an argument.
inline bool has_only_simple_this_property_assignments();
inline bool try_fast_codegen();
inline void set_try_fast_codegen(bool flag);
// For functions which only contains this property assignments this provides
// access to the names for the properties assigned.
DECL_ACCESSORS(this_property_assignments, Object)
@ -3464,8 +3415,8 @@ class SharedFunctionInfo: public HeapObject {
static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
// Bit positions in compiler_hints.
static const int kHasOnlyThisPropertyAssignments = 0;
static const int kHasOnlySimpleThisPropertyAssignments = 1;
static const int kHasOnlySimpleThisPropertyAssignments = 0;
static const int kTryFastCodegen = 1;
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@ -3917,7 +3868,6 @@ class StringShape BASE_EMBEDDED {
inline bool IsSequential();
inline bool IsExternal();
inline bool IsCons();
inline bool IsSliced();
inline bool IsExternalAscii();
inline bool IsExternalTwoByte();
inline bool IsSequentialAscii();
@ -3975,9 +3925,8 @@ class String: public HeapObject {
inline uint16_t Get(int index);
// Try to flatten the top level ConsString that is hiding behind this
// string. This is a no-op unless the string is a ConsString or a
// SlicedString. Flatten mutates the ConsString and might return a
// failure.
// string. This is a no-op unless the string is a ConsString. Flatten
// mutates the ConsString and might return a failure.
Object* TryFlatten();
// Try to flatten the string. Checks first inline to see if it is necessary.
@ -3993,8 +3942,8 @@ class String: public HeapObject {
// ascii and two byte string types.
bool MarkAsUndetectable();
// Slice the string and return a substring.
Object* Slice(int from, int to);
// Return a substring.
Object* SubString(int from, int to);
// String equality operations.
inline bool Equals(String* other);
@ -4079,7 +4028,7 @@ class String: public HeapObject {
static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
static const int kMaxUC16CharCode = 0xffff;
// Minimum length for a cons or sliced string.
// Minimum length for a cons string.
static const int kMinNonFlatLength = 13;
// Mask constant for checking if a string has a computed hash code
@ -4152,12 +4101,6 @@ class String: public HeapObject {
unsigned remaining;
};
// NOTE: If you call StringInputBuffer routines on strings that are
// too deeply nested trees of cons and slice strings, then this
// routine will overflow the stack. Strings that are merely deeply
// nested trees of cons strings do not have a problem apart from
// performance.
static inline const unibrow::byte* ReadBlock(String* input,
ReadBlockBuffer* buffer,
unsigned* offset,
@ -4342,56 +4285,6 @@ class ConsString: public String {
};
// The SlicedString class describes string values that are slices of
// some other string. SlicedStrings consist of a reference to an
// underlying heap-allocated string value, a start index, and the
// length field common to all strings.
class SlicedString: public String {
public:
// The underlying string buffer.
inline String* buffer();
inline void set_buffer(String* buffer);
// The start index of the slice.
inline int start();
inline void set_start(int start);
// Dispatched behavior.
uint16_t SlicedStringGet(int index);
// Casting.
static inline SlicedString* cast(Object* obj);
// Garbage collection support.
void SlicedStringIterateBody(ObjectVisitor* v);
// Layout description
#if V8_HOST_ARCH_64_BIT
// Optimizations expect buffer to be located at same offset as a ConsString's
// first substring. In 64 bit mode we have room for the start offset before
// the buffer.
static const int kStartOffset = String::kSize;
static const int kBufferOffset = kStartOffset + kIntSize;
static const int kSize = kBufferOffset + kPointerSize;
#else
static const int kBufferOffset = String::kSize;
static const int kStartOffset = kBufferOffset + kPointerSize;
static const int kSize = kStartOffset + kIntSize;
#endif
// Support for StringInputBuffer.
inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
unsigned* offset_ptr,
unsigned chars);
inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset_ptr,
unsigned chars);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
};
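The whole SlicedString class is deleted by this commit, matching the String::Slice to String::SubString change in objects.cc above. One classic motivation, offered here as background (the diff itself does not state a rationale): a slice is a (buffer, start) view, so even a tiny slice pins its entire backing string in memory, and slices stacked on cons strings created the "towers" the removed comments mention. A sketch of the trade-off in standard C++ terms:

#include <memory>
#include <string>
#include <utility>

struct Slice {                                // view: O(1) to create...
  std::shared_ptr<const std::string> buffer;  // ...but pins the whole backing store
  int start;
  int length;
};

Slice MakeSlice(std::shared_ptr<const std::string> buf, int start, int length) {
  return Slice{std::move(buf), start, length};
}

std::string MakeCopy(const std::string& buf, int start, int length) {
  return buf.substr(start, length);           // O(length), but buf may now die
}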
// The ExternalString class describes string values that are backed by
// a string resource that lies outside the V8 heap. ExternalStrings
// consist of the length field common to all strings, a pointer to the
@ -4433,6 +4326,9 @@ class ExternalAsciiString: public ExternalString {
// Casting.
static inline ExternalAsciiString* cast(Object* obj);
// Garbage collection support.
void ExternalAsciiStringIterateBody(ObjectVisitor* v);
// Support for StringInputBuffer.
const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
unsigned* offset,
@ -4468,6 +4364,9 @@ class ExternalTwoByteString: public ExternalString {
// Casting.
static inline ExternalTwoByteString* cast(Object* obj);
// Garbage collection support.
void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
// Support for StringInputBuffer.
void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset_ptr,
@ -4719,6 +4618,7 @@ class AccessorInfo: public Struct {
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(name, Object)
DECL_ACCESSORS(flag, Smi)
DECL_ACCESSORS(load_stub_cache, Object)
inline bool all_can_read();
inline void set_all_can_read(bool value);
@ -4744,7 +4644,8 @@ class AccessorInfo: public Struct {
static const int kDataOffset = kSetterOffset + kPointerSize;
static const int kNameOffset = kDataOffset + kPointerSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
static const int kSize = kFlagOffset + kPointerSize;
static const int kLoadStubCacheOffset = kFlagOffset + kPointerSize;
static const int kSize = kLoadStubCacheOffset + kPointerSize;
private:
// Bit positions in flag.
@ -5097,6 +4998,12 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a runtime entry in the instruction stream.
virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
// Visits the resource of an ASCII or two-byte string.
virtual void VisitExternalAsciiString(
v8::String::ExternalAsciiStringResource** resource) {}
virtual void VisitExternalTwoByteString(
v8::String::ExternalStringResource** resource) {}
// Visits a debug call target in the instruction stream.
virtual void VisitDebugTarget(RelocInfo* rinfo);

Some files were not shown because too many files changed in this diff
