
Merge branch 'develop' into net2

Conflicts:
	src/node.cc
Ryan Dahl
commit 653bf580bf
Changed files (number of changed lines in parentheses):

  1. ChangeLog (25)
  2. LICENSE (6)
  3. deps/evcom/evcom.c (20)
  4. deps/udns/Makefile.in (2)
  5. deps/udns/configure (1)
  6. deps/v8/ChangeLog (16)
  7. deps/v8/SConstruct (23)
  8. deps/v8/include/v8-debug.h (5)
  9. deps/v8/include/v8.h (1)
  10. deps/v8/src/SConscript (4)
  11. deps/v8/src/api.cc (4)
  12. deps/v8/src/arm/assembler-arm-inl.h (16)
  13. deps/v8/src/arm/codegen-arm-inl.h (10)
  14. deps/v8/src/arm/codegen-arm.cc (647)
  15. deps/v8/src/arm/codegen-arm.h (18)
  16. deps/v8/src/arm/cpu-arm.cc (10)
  17. deps/v8/src/arm/fast-codegen-arm.cc (656)
  18. deps/v8/src/arm/ic-arm.cc (9)
  19. deps/v8/src/arm/macro-assembler-arm.cc (6)
  20. deps/v8/src/arm/macro-assembler-arm.h (6)
  21. deps/v8/src/arm/regexp-macro-assembler-arm.cc (105)
  22. deps/v8/src/arm/regexp-macro-assembler-arm.h (3)
  23. deps/v8/src/arm/simulator-arm.h (10)
  24. deps/v8/src/arm/virtual-frame-arm.cc (22)
  25. deps/v8/src/arm/virtual-frame-arm.h (8)
  26. deps/v8/src/array.js (82)
  27. deps/v8/src/assembler.cc (13)
  28. deps/v8/src/assembler.h (5)
  29. deps/v8/src/ast.cc (2)
  30. deps/v8/src/ast.h (48)
  31. deps/v8/src/bootstrapper.cc (1)
  32. deps/v8/src/builtins.cc (5)
  33. deps/v8/src/builtins.h (4)
  34. deps/v8/src/code-stubs.h (5)
  35. deps/v8/src/codegen.cc (22)
  36. deps/v8/src/codegen.h (60)
  37. deps/v8/src/compiler.cc (60)
  38. deps/v8/src/contexts.h (2)
  39. deps/v8/src/date-delay.js (171)
  40. deps/v8/src/debug.cc (19)
  41. deps/v8/src/debug.h (3)
  42. deps/v8/src/dtoa-config.c (7)
  43. deps/v8/src/fast-codegen.cc (203)
  44. deps/v8/src/fast-codegen.h (55)
  45. deps/v8/src/flag-definitions.h (7)
  46. deps/v8/src/frames.cc (4)
  47. deps/v8/src/heap-inl.h (26)
  48. deps/v8/src/heap.cc (290)
  49. deps/v8/src/heap.h (47)
  50. deps/v8/src/ia32/assembler-ia32.cc (73)
  51. deps/v8/src/ia32/assembler-ia32.h (10)
  52. deps/v8/src/ia32/builtins-ia32.cc (15)
  53. deps/v8/src/ia32/codegen-ia32-inl.h (10)
  54. deps/v8/src/ia32/codegen-ia32.cc (1769)
  55. deps/v8/src/ia32/codegen-ia32.h (83)
  56. deps/v8/src/ia32/disasm-ia32.cc (55)
  57. deps/v8/src/ia32/fast-codegen-ia32.cc (657)
  58. deps/v8/src/ia32/ic-ia32.cc (43)
  59. deps/v8/src/ia32/macro-assembler-ia32.cc (35)
  60. deps/v8/src/ia32/macro-assembler-ia32.h (25)
  61. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (143)
  62. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (6)
  63. deps/v8/src/ia32/simulator-ia32.h (6)
  64. deps/v8/src/ia32/stub-cache-ia32.cc (18)
  65. deps/v8/src/ia32/virtual-frame-ia32.cc (22)
  66. deps/v8/src/ia32/virtual-frame-ia32.h (5)
  67. deps/v8/src/ic.cc (4)
  68. deps/v8/src/ic.h (4)
  69. deps/v8/src/jsregexp.cc (706)
  70. deps/v8/src/jsregexp.h (183)
  71. deps/v8/src/jump-target.h (3)
  72. deps/v8/src/macros.py (13)
  73. deps/v8/src/mark-compact.cc (245)
  74. deps/v8/src/mark-compact.h (10)
  75. deps/v8/src/math.js (8)
  76. deps/v8/src/messages.js (4)
  77. deps/v8/src/mksnapshot.cc (1)
  78. deps/v8/src/objects-debug.cc (1)
  79. deps/v8/src/objects-inl.h (15)
  80. deps/v8/src/objects.cc (67)
  81. deps/v8/src/objects.h (145)
  82. deps/v8/src/parser.cc (27)
  83. deps/v8/src/parser.h (1)
  84. deps/v8/src/platform-freebsd.cc (24)
  85. deps/v8/src/platform-linux.cc (24)
  86. deps/v8/src/platform-macos.cc (24)
  87. deps/v8/src/platform-openbsd.cc (24)
  88. deps/v8/src/platform-posix.cc (25)
  89. deps/v8/src/platform-solaris.cc (686)
  90. deps/v8/src/platform.h (14)
  91. deps/v8/src/regexp-delay.js (12)
  92. deps/v8/src/regexp-macro-assembler-tracer.cc (9)
  93. deps/v8/src/regexp-macro-assembler-tracer.h (2)
  94. deps/v8/src/regexp-macro-assembler.cc (17)
  95. deps/v8/src/regexp-macro-assembler.h (2)
  96. deps/v8/src/regexp-stack.h (12)
  97. deps/v8/src/runtime.cc (308)
  98. deps/v8/src/runtime.h (7)
  99. deps/v8/src/runtime.js (50)
  100. deps/v8/src/scopes.cc (2)

25
ChangeLog

@ -1,4 +1,27 @@
2010.01.09, Version 0.1.25
2010.01.20, Version 0.1.26
* Bugfix, HTTP eof causing crash (Ben Williamson)
* Better error message on SyntaxError
* API: Move Promise and EventEmitter into 'events' module
* API: Add process.nextTick()
* Allow optional params to setTimeout, setInterval
(Micheil Smith)
* API: change some Promise behavior (Felix Geisendörfer)
- Removed Promise.cancel()
- Support late callback binding
- Make unhandled Promise errors throw an exception
* Upgrade V8 to 2.0.6.1
* Solaris port
2010.01.09, Version 0.1.25, 39ca93549af91575ca9d4cbafd1e170fbcef3dfa
* sys.inspect() improvements (Tim Caswell)

6
LICENSE

@ -12,10 +12,6 @@ are:
- JSMin JavaScript minifier, located at tools/jsmin.py. This code is
copyrighted by Douglas Crockford and Baruch Even and has an MIT license.
- parseUri, a URI parser, is located in lib/http.js. This is just a small
snippit. It is copyrighted 2007 by Steven Levithan and released under an
MIT license.
- WAF build system, located at tools/waf. Copyrighted Thomas Nagy.
Released under an MIT license.
@ -30,7 +26,7 @@ Other external libraries are my own and all use the same license as Node.
Node's license follows:
Copyright 2009, Ryan Lienhart Dahl. All rights reserved.
Copyright 2009, 2010 Ryan Lienhart Dahl. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the

20
deps/evcom/evcom.c

@ -849,21 +849,27 @@ static inline socklen_t
address_length (struct sockaddr *address)
{
struct sockaddr_un* unix_address = (struct sockaddr_un*)address;
int length = 0;
switch (address->sa_family) {
case AF_INET:
return sizeof(struct sockaddr_in);
length = sizeof(struct sockaddr_in);
break;
case AF_INET6:
return sizeof(struct sockaddr_in6);
length = sizeof(struct sockaddr_in6);
break;
case AF_UNIX:
return strlen(unix_address->sun_path) + sizeof(unix_address->sun_family);
#ifdef SUN_LEN
length = SUN_LEN(unix_address);
#else
length = strlen(unix_address->sun_path) + sizeof(unix_address->sun_family);
#endif
break;
default:
assert(0 && "Unsupported socket family");
}
return 0;
return length;
}
int
@ -1311,7 +1317,7 @@ int evcom_stream_pair (evcom_stream *a, evcom_stream *b)
int sv[2];
int old_errno;
int r = socketpair(PF_LOCAL, SOCK_STREAM, 0, sv);
int r = socketpair(PF_UNIX, SOCK_STREAM, 0, sv);
if (r < 0) return -1;
r = set_nonblock(sv[0]);
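
PF_LOCAL and PF_UNIX name the same address family; the hunk above simply switches to PF_UNIX, the more widely available spelling. As an illustration only (not part of the patch, and the make_stream_pair name is a hypothetical helper), the same call in isolation:

#include <sys/socket.h>

/* Sketch only: create a connected pair of local stream sockets, as
 * evcom_stream_pair() does after this change. */
static int make_stream_pair(int sv[2]) {
  int r = socketpair(PF_UNIX, SOCK_STREAM, 0, sv);
  if (r < 0) return -1;  /* errno describes the failure */
  return 0;
}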

2
deps/udns/Makefile.in

@ -57,7 +57,7 @@ CDEFS = @CDEFS@
PICFLAGS = -fPIC
AWK = awk
all: static
all: staticlib
.SUFFIXES: .c .o .lo

1
deps/udns/configure

@ -83,6 +83,7 @@ ac_ign \
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
int main() {
char buf[64];
long x = 0;

16
deps/v8/ChangeLog

@ -1,3 +1,19 @@
2010-01-14: Version 2.0.6
Added ES5 Object.getPrototypeOf, GetOwnPropertyDescriptor,
GetOwnProperty, FromPropertyDescriptor.
Fixed Mac x64 build errors.
Improved performance of some math and string operations.
Improved performance of some regexp operations.
Improved performance of context creation.
Improved performance of hash tables.
2009-12-18: Version 2.0.5
Extended to upper limit of map space to allow for 7 times as many map

23
deps/v8/SConstruct

@ -35,7 +35,6 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c, utils
# ANDROID_TOP is the top of the Android checkout, fetched from the environment
# variable 'TOP'. You will also need to set the CXX, CC, AR and RANLIB
# environment variables to the cross-compiling tools.
@ -157,6 +156,11 @@ LIBRARY_FLAGS = {
'LIBPATH' : ['/usr/local/lib'],
'CCFLAGS': ['-ansi'],
},
'os:solaris': {
'CPPPATH' : ['/usr/local/include'],
'LIBPATH' : ['/usr/local/lib'],
'CCFLAGS': ['-ansi'],
},
'os:win32': {
'CCFLAGS': ['-DWIN32'],
'CXXFLAGS': ['-DWIN32'],
@ -312,6 +316,9 @@ MKSNAPSHOT_EXTRA_FLAGS = {
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
'os:solaris': {
'LIBS': ['pthread', 'socket', 'nsl', 'rt']
},
'os:openbsd': {
'LIBS': ['execinfo', 'pthread']
},
@ -361,6 +368,9 @@ CCTEST_EXTRA_FLAGS = {
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
'os:solaris': {
'LIBS': ['pthread', 'socket', 'nsl', 'rt']
},
'os:openbsd': {
'LIBS': ['execinfo', 'pthread']
},
@ -419,6 +429,10 @@ SAMPLE_FLAGS = {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
},
'os:solaris': {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['pthread', 'socket', 'nsl', 'rt']
},
'os:openbsd': {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
@ -528,6 +542,9 @@ D8_FLAGS = {
'os:freebsd': {
'LIBS': ['pthread'],
},
'os:solaris': {
'LIBS': ['pthread', 'socket', 'nsl', 'rt']
},
'os:openbsd': {
'LIBS': ['pthread'],
},
@ -581,7 +598,7 @@ SIMPLE_OPTIONS = {
'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd'],
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
'default': OS_GUESS,
'help': 'the os to build for (' + OS_GUESS + ')'
},
@ -935,6 +952,7 @@ def BuildSpecific(env, mode, env_overrides):
# Link the object files into a library.
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
if context.options['library'] == 'static':
library = env.StaticLibrary(library_name, object_files)
@ -948,6 +966,7 @@ def BuildSpecific(env, mode, env_overrides):
d8_env = Environment()
d8_env.Replace(**context.flags['d8'])
context.ApplyEnvOverrides(d8_env)
shell = d8_env.Program('d8' + suffix, object_files + shell_files)
context.d8_targets.append(shell)

5
deps/v8/include/v8-debug.h

@ -258,8 +258,11 @@ class EXPORT Debug {
* supplied TCP/IP port for remote debugger connection.
* \param name the name of the embedding application
* \param port the TCP/IP port to listen on
* \param wait_for_connection whether V8 should pause on a first statement
* allowing remote debugger to connect before anything interesting happened
*/
static bool EnableAgent(const char* name, int port);
static bool EnableAgent(const char* name, int port,
bool wait_for_connection = false);
};
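
A hedged usage sketch of the new optional parameter follows; the embedder name and port are arbitrary example values, and the agent is only available in builds with ENABLE_DEBUGGER_SUPPORT:

#include <v8-debug.h>

// Sketch only: start the debug agent and ask V8 to pause on the first
// statement until a remote debugger attaches.
void StartDebugAgentExample() {
  v8::Debug::EnableAgent("my-embedder", 5858, /* wait_for_connection */ true);
}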

1
deps/v8/include/v8.h

@ -503,6 +503,7 @@ class V8EXPORT ScriptData { // NOLINT
virtual int Length() = 0;
virtual unsigned* Data() = 0;
virtual bool HasError() = 0;
};

4
deps/v8/src/SConscript

@ -168,6 +168,7 @@ SOURCES = {
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
@ -196,6 +197,9 @@ D8_FILES = {
'os:openbsd': [
'd8-posix.cc'
],
'os:solaris': [
'd8-posix.cc'
],
'os:win32': [
'd8-windows.cc'
],

4
deps/v8/src/api.cc

@ -3741,8 +3741,8 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
}
bool Debug::EnableAgent(const char* name, int port) {
return i::Debugger::StartAgent(name, port);
bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
return i::Debugger::StartAgent(name, port, wait_for_connection);
}
#endif // ENABLE_DEBUGGER_SUPPORT

16
deps/v8/src/arm/assembler-arm-inl.h

@ -229,14 +229,24 @@ void Assembler::emit(Instr x) {
Address Assembler::target_address_address_at(Address pc) {
Instr instr = Memory::int32_at(pc);
// Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
Address target_pc = pc;
Instr instr = Memory::int32_at(target_pc);
// If we have a bx instruction, the instruction before the bx is
// what we need to patch.
static const int32_t kBxInstMask = 0x0ffffff0;
static const int32_t kBxInstPattern = 0x012fff10;
if ((instr & kBxInstMask) == kBxInstPattern) {
target_pc -= kInstrSize;
instr = Memory::int32_at(target_pc);
}
// Verify that the instruction to patch is a
// ldr<cond> <Rd>, [pc +/- offset_12].
ASSERT((instr & 0x0f7f0000) == 0x051f0000);
int offset = instr & 0xfff; // offset_12 is unsigned
if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
// Verify that the constant pool comes after the instruction referencing it.
ASSERT(offset >= -4);
return pc + offset + 8;
return target_pc + offset + 8;
}

10
deps/v8/src/arm/codegen-arm-inl.h

@ -67,16 +67,6 @@ void Reference::GetValueAndSpill() {
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
GenerateFastMathOp(SIN, args);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
GenerateFastMathOp(COS, args);
}
#undef __
} } // namespace v8::internal

647
deps/v8/src/arm/codegen-arm.cc

@ -44,7 +44,8 @@ namespace internal {
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc);
Condition cc,
bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* rhs_not_nan,
Label* slow,
@ -186,12 +187,18 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
function_return_is_shadowed_ = false;
VirtualFrame::SpilledScope spilled_scope;
if (scope_->num_heap_slots() > 0) {
int heap_slots = scope_->num_heap_slots();
if (heap_slots > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
__ ldr(r0, frame_->Function());
frame_->EmitPush(r0);
frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
frame_->CallStub(&stub, 1);
} else {
frame_->CallRuntime(Runtime::kNewContext, 1);
}
#ifdef DEBUG
JumpTarget verified_true;
@ -240,14 +247,16 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// initialization because the arguments object may be stored in the
// context.
if (scope_->arguments() != NULL) {
ASSERT(scope_->arguments_shadow() != NULL);
Comment cmnt(masm_, "[ allocate arguments object");
{ Reference shadow_ref(this, scope_->arguments_shadow());
{ Reference arguments_ref(this, scope_->arguments());
ASSERT(scope_->arguments_shadow() != NULL);
Variable* arguments = scope_->arguments()->var();
Variable* shadow = scope_->arguments_shadow()->var();
ASSERT(arguments != NULL && arguments->slot() != NULL);
ASSERT(shadow != NULL && shadow->slot() != NULL);
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
__ ldr(r2, frame_->Function());
// The receiver is below the arguments, the return address,
// and the frame pointer on the stack.
// The receiver is below the arguments, the return address, and the
// frame pointer on the stack.
const int kReceiverDisplacement = 2 + scope_->num_parameters();
__ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
__ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
@ -255,13 +264,18 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
__ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
frame_->CallStub(&stub, 3);
frame_->EmitPush(r0);
arguments_ref.SetValue(NOT_CONST_INIT);
}
shadow_ref.SetValue(NOT_CONST_INIT);
}
StoreToSlot(arguments->slot(), NOT_CONST_INIT);
StoreToSlot(shadow->slot(), NOT_CONST_INIT);
frame_->Drop(); // Value is no longer needed.
}
// Initialize ThisFunction reference if present.
if (scope_->is_function_scope() && scope_->function() != NULL) {
__ mov(ip, Operand(Factory::the_hole_value()));
frame_->EmitPush(ip);
StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
}
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@ -613,15 +627,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
// The expression is either a property or a variable proxy that rewrites
// to a property.
LoadAndSpill(property->obj());
// We use a named reference if the key is a literal symbol, unless it is
// a string that can be legally parsed as an integer. This is because
// otherwise we will not get into the slow case code that handles [] on
// String objects.
Literal* literal = property->key()->AsLiteral();
uint32_t dummy;
if (literal != NULL &&
literal->handle()->IsSymbol() &&
!String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
if (property->key()->IsPropertyName()) {
ref->set_type(Reference::NAMED);
} else {
LoadAndSpill(property->key());
@ -1986,13 +1992,9 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
frame_->EmitPush(r0);
// Store the caught exception in the catch variable.
{ Reference ref(this, node->catch_var());
ASSERT(ref.is_slot());
// Here we make use of the convenient property that it doesn't matter
// whether a value is immediately on top of or underneath a zero-sized
// reference.
ref.SetValue(NOT_CONST_INIT);
}
Variable* catch_var = node->catch_var()->var();
ASSERT(catch_var != NULL && catch_var->slot() != NULL);
StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@ -2298,13 +2300,22 @@ void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(boilerplate->IsBoilerplate());
__ mov(r0, Operand(boilerplate));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
FastNewClosureStub stub;
frame_->EmitPush(r0);
frame_->CallStub(&stub, 1);
frame_->EmitPush(r0);
} else {
// Create a new closure.
frame_->EmitPush(cp);
__ mov(r0, Operand(boilerplate));
frame_->EmitPush(r0);
frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->EmitPush(r0);
}
}
void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
@ -2444,6 +2455,87 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
}
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
// For now, just do a runtime call.
frame_->EmitPush(cp);
__ mov(r0, Operand(slot->var()->name()));
frame_->EmitPush(r0);
if (init_state == CONST_INIT) {
// Same as the case for a normal store, but ignores attribute
// (e.g. READ_ONLY) of context slot so that we can initialize
// const properties (introduced via eval("const foo = (some
// expr);")). Also, uses the current function context instead of
// the top context.
//
// Note that we must declare the foo upon entry of eval(), via a
// context slot declaration, but we cannot initialize it at the
// same time, because the const declaration may be at the end of
// the eval code (sigh...) and the const variable may have been
// used before (where its value is 'undefined'). Thus, we can only
// do the initialization when we actually encounter the expression
// and when the expression operands are defined and valid, and
// thus we need the split into 2 operations: declaration of the
// context slot followed by initialization.
frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
}
// Storing a variable must keep the (new) value on the expression
// stack. This is necessary for compiling assignment expressions.
frame_->EmitPush(r0);
} else {
ASSERT(!slot->var()->is_dynamic());
JumpTarget exit;
if (init_state == CONST_INIT) {
ASSERT(slot->var()->mode() == Variable::CONST);
// Only the first const initialization must be executed (the slot
// still contains 'the hole' value). When the assignment is
// executed, the code is identical to a normal store (see below).
Comment cmnt(masm_, "[ Init const");
__ ldr(r2, SlotOperand(slot, r2));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
exit.Branch(ne);
}
// We must execute the store. Storing a variable must keep the
// (new) value on the stack. This is necessary for compiling
// assignment expressions.
//
// Note: We will reach here even with slot->var()->mode() ==
// Variable::CONST because of const declarations which will
// initialize consts to 'the hole' value and by doing so, end up
// calling this code. r2 may be loaded with context; used below in
// RecordWrite.
frame_->EmitPop(r0);
__ str(r0, SlotOperand(slot, r2));
frame_->EmitPush(r0);
if (slot->type() == Slot::CONTEXT) {
// Skip write barrier if the written value is a smi.
__ tst(r0, Operand(kSmiTagMask));
exit.Branch(eq);
// r2 is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
__ mov(r3, Operand(offset));
__ RecordWrite(r2, r3, r1);
}
// If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole
// optimization.
if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
exit.Bind();
}
}
}
void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
@ -2601,42 +2693,6 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
}
// This deferred code stub will be used for creating the boilerplate
// by calling Runtime_CreateObjectLiteralBoilerplate.
// Each created boilerplate is stored in the JSFunction and they are
// therefore context dependent.
class DeferredObjectLiteral: public DeferredCode {
public:
explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
set_comment("[ DeferredObjectLiteral");
}
virtual void Generate();
private:
ObjectLiteral* node_;
};
void DeferredObjectLiteral::Generate() {
// Argument is passed in r1.
// If the entry is undefined we call the runtime system to compute
// the literal.
// Literal array (0).
__ push(r1);
// Literal index (1).
__ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
__ push(r0);
// Constant properties (2).
__ mov(r0, Operand(node_->constant_properties()));
__ push(r0);
__ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
__ mov(r2, Operand(r0));
// Result is returned in r2.
}
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
@ -2644,39 +2700,22 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ObjectLiteral");
DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
// Retrieve the literal array and check the allocated entry.
// Load the function of this activation.
__ ldr(r1, frame_->Function());
// Load the literals array of the function.
__ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index.
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ ldr(r2, FieldMemOperand(r1, literal_offset));
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r2, Operand(ip));
deferred->Branch(eq);
deferred->BindExit();
// Push the object literal boilerplate.
frame_->EmitPush(r2);
// Clone the boilerplate object.
Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
if (node->depth() == 1) {
clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
__ ldr(r2, frame_->Function());
// Literal array.
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
// Literal index.
__ mov(r1, Operand(Smi::FromInt(node->literal_index())));
// Constant properties.
__ mov(r0, Operand(node->constant_properties()));
frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
} else {
frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
frame_->CallRuntime(clone_function_id, 1);
frame_->EmitPush(r0); // save the result
// r0: cloned object literal
// r0: created object literal
for (int i = 0; i < node->properties()->length(); i++) {
ObjectLiteral::Property* property = node->properties()->at(i);
@ -2724,42 +2763,6 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
}
// This deferred code stub will be used for creating the boilerplate
// by calling Runtime_CreateArrayLiteralBoilerplate.
// Each created boilerplate is stored in the JSFunction and they are
// therefore context dependent.
class DeferredArrayLiteral: public DeferredCode {
public:
explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
set_comment("[ DeferredArrayLiteral");
}
virtual void Generate();
private:
ArrayLiteral* node_;
};
void DeferredArrayLiteral::Generate() {
// Argument is passed in r1.
// If the entry is undefined we call the runtime system to computed
// the literal.
// Literal array (0).
__ push(r1);
// Literal index (1).
__ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
__ push(r0);
// Constant properties (2).
__ mov(r0, Operand(node_->literals()));
__ push(r0);
__ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
__ mov(r2, Operand(r0));
// Result is returned in r2.
}
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
@ -2767,39 +2770,22 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ArrayLiteral");
DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
// Retrieve the literal array and check the allocated entry.
// Load the function of this activation.
__ ldr(r1, frame_->Function());
// Load the literals array of the function.
__ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
// Load the literal at the ast saved index.
int literal_offset =
FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
__ ldr(r2, FieldMemOperand(r1, literal_offset));
// Check whether we need to materialize the object literal boilerplate.
// If so, jump to the deferred code.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r2, Operand(ip));
deferred->Branch(eq);
deferred->BindExit();
// Push the object literal boilerplate.
frame_->EmitPush(r2);
// Clone the boilerplate object.
Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
if (node->depth() == 1) {
clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
__ ldr(r2, frame_->Function());
// Literals array.
__ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
// Literal index.
__ mov(r1, Operand(Smi::FromInt(node->literal_index())));
// Constant elements.
__ mov(r0, Operand(node->constant_elements()));
frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
if (node->depth() > 1) {
frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
frame_->CallRuntime(clone_function_id, 1);
frame_->EmitPush(r0); // save the result
// r0: cloned object literal
// r0: created object literal
// Generate code to set the elements in the array that are not
// literals.
@ -2998,13 +2984,15 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->EmitPush(r2);
}
// Push the receiver.
__ ldr(r1, frame_->Receiver());
frame_->EmitPush(r1);
// Resolve the call.
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
// Touch up stack with the right values for the function and the receiver.
__ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
__ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
__ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ str(r1, MemOperand(sp, arg_count * kPointerSize));
// Call the function.
@ -3544,28 +3532,49 @@ void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope;
LoadAndSpill(args->at(0));
switch (op) {
case SIN:
frame_->CallRuntime(Runtime::kMath_sin, 1);
break;
case COS:
frame_->CallRuntime(Runtime::kMath_cos, 1);
break;
void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
Load(args->at(0));
Load(args->at(1));
frame_->CallRuntime(Runtime::kStringAdd, 2);
frame_->EmitPush(r0);
}
void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
ASSERT_EQ(3, args->length());
Load(args->at(0));
Load(args->at(1));
Load(args->at(2));
frame_->CallRuntime(Runtime::kSubString, 3);
frame_->EmitPush(r0);
}
void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
Load(args->at(0));
Load(args->at(1));
frame_->CallRuntime(Runtime::kStringAdd, 2);
frame_->CallRuntime(Runtime::kStringCompare, 2);
frame_->EmitPush(r0);
}
void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
ASSERT_EQ(4, args->length());
Load(args->at(0));
Load(args->at(1));
Load(args->at(2));
Load(args->at(3));
frame_->CallRuntime(Runtime::kRegExpExec, 4);
frame_->EmitPush(r0);
}
@ -3713,7 +3722,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
UnarySubStub stub(overwrite);
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
break;
}
@ -4343,83 +4352,7 @@ void Reference::SetValue(InitState init_state) {
case SLOT: {
Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
// For now, just do a runtime call.
frame->EmitPush(cp);
__ mov(r0, Operand(slot->var()->name()));
frame->EmitPush(r0);
if (init_state == CONST_INIT) {
// Same as the case for a normal store, but ignores attribute
// (e.g. READ_ONLY) of context slot so that we can initialize
// const properties (introduced via eval("const foo = (some
// expr);")). Also, uses the current function context instead of
// the top context.
//
// Note that we must declare the foo upon entry of eval(), via a
// context slot declaration, but we cannot initialize it at the
// same time, because the const declaration may be at the end of
// the eval code (sigh...) and the const variable may have been
// used before (where its value is 'undefined'). Thus, we can only
// do the initialization when we actually encounter the expression
// and when the expression operands are defined and valid, and
// thus we need the split into 2 operations: declaration of the
// context slot followed by initialization.
frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
frame->CallRuntime(Runtime::kStoreContextSlot, 3);
}
// Storing a variable must keep the (new) value on the expression
// stack. This is necessary for compiling assignment expressions.
frame->EmitPush(r0);
} else {
ASSERT(!slot->var()->is_dynamic());
JumpTarget exit;
if (init_state == CONST_INIT) {
ASSERT(slot->var()->mode() == Variable::CONST);
// Only the first const initialization must be executed (the slot
// still contains 'the hole' value). When the assignment is
// executed, the code is identical to a normal store (see below).
Comment cmnt(masm, "[ Init const");
__ ldr(r2, cgen_->SlotOperand(slot, r2));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
exit.Branch(ne);
}
// We must execute the store. Storing a variable must keep the
// (new) value on the stack. This is necessary for compiling
// assignment expressions.
//
// Note: We will reach here even with slot->var()->mode() ==
// Variable::CONST because of const declarations which will
// initialize consts to 'the hole' value and by doing so, end up
// calling this code. r2 may be loaded with context; used below in
// RecordWrite.
frame->EmitPop(r0);
__ str(r0, cgen_->SlotOperand(slot, r2));
frame->EmitPush(r0);
if (slot->type() == Slot::CONTEXT) {
// Skip write barrier if the written value is a smi.
__ tst(r0, Operand(kSmiTagMask));
exit.Branch(eq);
// r2 is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
__ mov(r3, Operand(offset));
__ RecordWrite(r2, r3, r1);
}
// If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole
// optimization.
if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
exit.Bind();
}
}
cgen_->StoreToSlot(slot, init_state);
break;
}
@ -4466,6 +4399,103 @@ void Reference::SetValue(InitState init_state) {
}
void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Clone the boilerplate in new space. Set the context to the
// current context in cp.
Label gc;
// Pop the boilerplate function from the stack.
__ pop(r3);
// Attempt to allocate new JSFunction in new space.
__ AllocateInNewSpace(JSFunction::kSize / kPointerSize,
r0,
r1,
r2,
&gc,
TAG_OBJECT);
// Compute the function map in the current global context and set that
// as the map of the allocated object.
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
__ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
// Clone the rest of the boilerplate fields. We don't have to update
// the write barrier because the allocated object is in new space.
for (int offset = kPointerSize;
offset < JSFunction::kSize;
offset += kPointerSize) {
if (offset == JSFunction::kContextOffset) {
__ str(cp, FieldMemOperand(r0, offset));
} else {
__ ldr(r1, FieldMemOperand(r3, offset));
__ str(r1, FieldMemOperand(r0, offset));
}
}
// Return result. The argument boilerplate has been popped already.
__ Ret();
// Create a new closure through the slower runtime call.
__ bind(&gc);
__ push(cp);
__ push(r3);
__ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
}
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
// Attempt to allocate the context in new space.
__ AllocateInNewSpace(length + (FixedArray::kHeaderSize / kPointerSize),
r0,
r1,
r2,
&gc,
TAG_OBJECT);
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0 * kPointerSize));
// Setup the object header.
__ LoadRoot(r2, Heap::kContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(length));
__ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
// Setup the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
// Copy the global object from the surrounding context.
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ str(r1, MemOperand(r0, Context::SlotOffset(i)));
}
// Remove the on-stack argument and return.
__ mov(cp, r0);
__ pop();
__ Ret();
// Need to collect. Call into runtime system.
__ bind(&gc);
__ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
}
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
// (31 instead of 32).
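
The comment above describes the software fallback used on pre-ARMv5 cores, which reports 31 rather than 32 for an input of 0. A rough sketch of the usual binary-search technique for such a fallback, written here purely for illustration and, unlike the routine described above, special-casing 0:

#include <stdint.h>

// Sketch only: portable count of leading zero bits in a 32-bit word.
static int CountLeadingZeros32(uint32_t x) {
  if (x == 0) return 32;  // avoid the "31 for 0" pitfall noted above
  int n = 0;
  if ((x & 0xFFFF0000u) == 0) { n += 16; x <<= 16; }
  if ((x & 0xFF000000u) == 0) { n += 8;  x <<= 8; }
  if ((x & 0xF0000000u) == 0) { n += 4;  x <<= 4; }
  if ((x & 0xC0000000u) == 0) { n += 2;  x <<= 2; }
  if ((x & 0x80000000u) == 0) { n += 1; }
  return n;
}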
@ -4692,17 +4722,21 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc) {
Condition cc,
bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = r5;
__ cmp(r0, Operand(r1));
__ b(ne, &not_identical);
Register exp_mask_reg = r5;
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
if (cc != eq || !never_nan_nan) {
__ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
Label heap_number, return_equal;
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
if (cc == lt || cc == gt) {
@ -4716,8 +4750,8 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
__ b(ge, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but (undefined <= undefined)
// == false! See ECMAScript 11.8.5.
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
if (cc == le || cc == ge) {
__ cmp(r4, Operand(ODDBALL_TYPE));
__ b(ne, &return_equal);
@ -4725,14 +4759,18 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ cmp(r0, Operand(r2));
__ b(ne, &return_equal);
if (cc == le) {
__ mov(r0, Operand(GREATER)); // undefined <= undefined should fail.
// undefined <= undefined should fail.
__ mov(r0, Operand(GREATER));
} else {
__ mov(r0, Operand(LESS)); // undefined >= undefined should fail.
// undefined >= undefined should fail.
__ mov(r0, Operand(LESS));
}
__ mov(pc, Operand(lr)); // Return.
}
}
}
}
__ bind(&return_equal);
if (cc == lt) {
__ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
@ -4743,6 +4781,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ mov(pc, Operand(lr)); // Return.
if (cc != eq || !never_nan_nan) {
// For less and greater we don't have to check for NaN since the result of
// x < x is false regardless. For the others here is some code to check
// for NaN.
@ -4780,6 +4819,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
__ mov(pc, Operand(lr)); // Return.
}
// No fall through here.
}
__ bind(&not_identical);
}
@ -4979,6 +5019,14 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
// Check for oddballs: true, false, null, undefined.
__ cmp(r3, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
// Now that we have the types we might as well check for symbol-symbol.
// Ensure that no non-strings have the symbol bit set.
ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
ASSERT(kSymbolTag != 0);
__ and_(r2, r2, Operand(r3));
__ tst(r2, Operand(kIsSymbolMask));
__ b(ne, &return_not_equal);
}
@ -5005,12 +5053,13 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
// r2 is object type of r0.
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, slow);
// Ensure that no non-strings have the symbol bit set.
ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
ASSERT(kSymbolTag != 0);
__ tst(r2, Operand(kIsSymbolMask));
__ b(eq, slow);
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, slow);
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ tst(r3, Operand(kIsSymbolMask));
__ b(eq, slow);
@ -5032,7 +5081,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
EmitIdenticalObjectComparison(masm, &slow, cc_);
EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@ -5096,19 +5145,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
&slow);
__ bind(&check_for_symbols);
if (cc_ == eq) {
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
// symbols.
if (cc_ == eq && !strict_) {
// Either jumps to slow or returns the answer. Assumes that r2 is the type
// of r0 on entry.
EmitCheckForSymbols(masm, &slow);
}
__ bind(&slow);
__ push(lr);
__ push(r1);
__ push(r0);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
int arg_count = 1; // Not counting receiver.
if (cc_ == eq) {
native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
@ -5120,16 +5169,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
ASSERT(cc_ == gt || cc_ == ge); // remaining cases
ncr = LESS;
}
arg_count++;
__ mov(r0, Operand(Smi::FromInt(ncr)));
__ push(r0);
}
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, CALL_JS);
__ cmp(r0, Operand(0));
__ pop(pc);
__ InvokeBuiltin(native, JUMP_JS);
}
@ -5955,7 +6001,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
void UnarySubStub::Generate(MacroAssembler* masm) {
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
ASSERT(op_ == Token::SUB);
Label undo;
Label slow;
Label not_smi;
@ -6579,10 +6627,53 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
const char* CompareStub::GetName() {
switch (cc_) {
case lt: return "CompareStub_LT";
case gt: return "CompareStub_GT";
case le: return "CompareStub_LE";
case ge: return "CompareStub_GE";
case ne: {
if (strict_) {
if (never_nan_nan_) {
return "CompareStub_NE_STRICT_NO_NAN";
} else {
return "CompareStub_NE_STRICT";
}
} else {
if (never_nan_nan_) {
return "CompareStub_NE_NO_NAN";
} else {
return "CompareStub_NE";
}
}
}
case eq: {
if (strict_) {
if (never_nan_nan_) {
return "CompareStub_EQ_STRICT_NO_NAN";
} else {
return "CompareStub_EQ_STRICT";
}
} else {
if (never_nan_nan_) {
return "CompareStub_EQ_NO_NAN";
} else {
return "CompareStub_EQ";
}
}
}
default: return "CompareStub";
}
}
int CompareStub::MinorKey() {
// Encode the two parameters in a unique 16 bit value.
ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
// Encode the three parameters in a unique 16 bit value.
ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
int nnn_value = (never_nan_nan_ ? 2 : 0);
if (cc_ != eq) nnn_value = 0; // Avoid duplicate stubs.
return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
}
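
For illustration, the new three-way key packing can be restated as a free function (the function and parameter names below are hypothetical): bit 0 carries strictness, bit 1 carries never_nan_nan (kept only for equality compares to avoid duplicate stubs), and the shifted condition code fills the higher bits.

// Sketch only: mirrors CompareStub::MinorKey() after this change.
unsigned PackCompareMinorKey(unsigned cc, bool is_equality,
                             bool never_nan_nan, bool strict) {
  unsigned nnn_value = (is_equality && never_nan_nan) ? 2u : 0u;
  return (cc >> 26) | nnn_value | (strict ? 1u : 0u);
}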

18
deps/v8/src/arm/codegen-arm.h

@ -272,6 +272,9 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
@ -360,15 +363,18 @@ class CodeGenerator: public AstVisitor {
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
enum MathOp { SIN, COS };
void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
// Fast support for SubString.
void GenerateSubString(ZoneList<Expression*>* args);
// Fast support for StringCompare.
void GenerateStringCompare(ZoneList<Expression*>* args);
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,

10
deps/v8/src/arm/cpu-arm.cc

@ -61,28 +61,32 @@ void CPU::FlushICache(void* start, size_t size) {
reinterpret_cast<uint32_t>(start) + size;
register uint32_t flg asm("a3") = 0;
#ifdef __ARM_EABI__
register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
#if defined (__arm__) && !defined(__thumb__)
// __arm__ may be defined in thumb mode.
register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
asm volatile(
"swi 0x0"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (scno));
#else
// r7 is reserved by the EABI in thumb mode.
asm volatile(
"@ Enter ARM Mode \n\t"
"adr r3, 1f \n\t"
"bx r3 \n\t"
".ALIGN 4 \n\t"
".ARM \n"
"1: swi 0x0 \n\t"
"1: push {r7} \n\t"
"mov r7, %4 \n\t"
"swi 0x0 \n\t"
"pop {r7} \n\t"
"@ Enter THUMB Mode\n\t"
"adr r3, 2f+1 \n\t"
"bx r3 \n\t"
".THUMB \n"
"2: \n\t"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (scno)
: "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
: "r3");
#endif
#else

656
deps/v8/src/arm/fast-codegen-arm.cc

File diff suppressed because it is too large

9
deps/v8/src/arm/ic-arm.cc

@ -618,6 +618,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- sp[0] : key
// -- sp[4] : receiver
GenerateGeneric(masm);
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// TODO(476): port specialized code.

6
deps/v8/src/arm/macro-assembler-arm.cc

@ -162,9 +162,9 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
}
void MacroAssembler::Drop(int stack_elements, Condition cond) {
if (stack_elements > 0) {
add(sp, sp, Operand(stack_elements * kPointerSize), LeaveCC, cond);
void MacroAssembler::Drop(int count, Condition cond) {
if (count > 0) {
add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
}
}

6
deps/v8/src/arm/macro-assembler-arm.h

@ -64,7 +64,11 @@ class MacroAssembler: public Assembler {
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al);
void Drop(int stack_elements, Condition cond = al);
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count, Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// Jumps to the label at the index given by the Smi in "index".

105
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -59,15 +59,19 @@ namespace internal {
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
* - at_start (if 1, start at start of string, if 0, don't)
* - at_start (if 1, we are starting at the start of the
* string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - link address
* - backup of registers r4..r11
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
* - start index (character index of start)
* --- frame pointer ----
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
@ -85,11 +89,13 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
* int (*match)(String* input_string,
* int start_index,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
* byte* stack_area_base)
* byte* stack_area_base,
* bool direct_call)
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc).
*/
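
The entry point now takes eight arguments; the trailing direct_call flag matches the CALL_GENERATED_REGEXP_CODE change in simulator-arm.h further down. Purely as an illustration, the shape of that function pointer, using stand-in aliases for the V8-internal String, Address and byte types:

class String;                 // stand-in forward declaration
typedef unsigned char byte;   // stand-in for v8::internal::byte
typedef byte* Address;        // stand-in for v8::internal::Address

// Sketch only: the entry signature described in the comment above.
typedef int (*RegExpCodeEntry)(String* input_string,
                               int start_index,
                               Address start,
                               Address end,
                               int* capture_output_array,
                               bool at_start,
                               byte* stack_area_base,
                               bool direct_call);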
@ -459,8 +465,6 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
int cp_offset,
bool check_offset,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check
@ -469,11 +473,6 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
// Match space-characters
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
Label success;
__ cmp(current_character(), Operand(' '));
__ b(eq, &success);
@ -487,11 +486,6 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
return false;
case 'S':
// Match non-space characters.
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match, 1);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
__ cmp(current_character(), Operand(' '));
@ -504,33 +498,18 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match, 1);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
__ sub(r0, current_character(), Operand('0'));
__ cmp(current_character(), Operand('9' - '0'));
BranchOrBacktrack(hi, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match, 1);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
__ sub(r0, current_character(), Operand('0'));
__ cmp(r0, Operand('9' - '0'));
BranchOrBacktrack(ls, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match, 1);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
__ eor(r0, current_character(), Operand(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(r0, r0, Operand(0x0b));
@ -546,13 +525,71 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
}
return true;
}
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ eor(r0, current_character(), Operand(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(r0, r0, Operand(0x0b));
__ cmp(r0, Operand(0x0c - 0x0b));
if (mode_ == ASCII) {
BranchOrBacktrack(hi, on_no_match);
} else {
Label done;
__ b(ls, &done);
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
__ sub(r0, r0, Operand(0x2028 - 0x0b));
__ cmp(r0, Operand(1));
BranchOrBacktrack(hi, on_no_match);
__ bind(&done);
}
return true;
}
case 'w': {
// Match word character (0-9, A-Z, a-z and _).
Label digits, done;
__ cmp(current_character(), Operand('9'));
__ b(ls, &digits);
__ cmp(current_character(), Operand('_'));
__ b(eq, &done);
__ orr(r0, current_character(), Operand(0x20));
__ sub(r0, r0, Operand('a'));
__ cmp(r0, Operand('z' - 'a'));
BranchOrBacktrack(hi, on_no_match);
__ jmp(&done);
__ bind(&digits);
__ cmp(current_character(), Operand('0'));
BranchOrBacktrack(lo, on_no_match);
__ bind(&done);
return true;
}
case 'W': {
// Match non-word character (not 0-9, A-Z, a-z and _).
Label digits, done;
__ cmp(current_character(), Operand('9'));
__ b(ls, &digits);
__ cmp(current_character(), Operand('_'));
BranchOrBacktrack(eq, on_no_match);
__ orr(r0, current_character(), Operand(0x20));
__ sub(r0, r0, Operand('a'));
__ cmp(r0, Operand('z' - 'a'));
BranchOrBacktrack(ls, on_no_match);
__ jmp(&done);
__ bind(&digits);
__ cmp(current_character(), Operand('0'));
BranchOrBacktrack(hs, on_no_match);
__ bind(&done);
return true;
}
case '*':
// Match any character.
if (check_offset) {
CheckPosition(cp_offset, on_no_match);
}
return true;
// No custom implementation (yet): w, W, s(UC16), S(UC16).
// No custom implementation (yet): s(UC16), S(UC16).
default:
return false;
}
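
Among the character classes added above, 'w' and 'W' test for a word character with a digit check, an explicit '_' check, and a case-folded letter range check. A hedged C++ restatement of that test for ASCII input (the function name is hypothetical):

// Sketch only: mirrors the generated 'w' (word character) test above.
static bool IsAsciiWordChar(unsigned c) {
  if (c <= '9') return c >= '0';          // digits handled first
  if (c == '_') return true;              // underscore matches directly
  unsigned folded = c | 0x20;             // lower-cases ASCII letters
  return (folded - 'a') <= ('z' - 'a');   // one unsigned range check covers a..z
}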

3
deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -80,8 +80,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
int cp_offset,
bool check_offset,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
@ -127,6 +125,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kReturnAddress + kPointerSize;
static const int kAtStart = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kAtStart + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.

10
deps/v8/src/arm/simulator-arm.h

@ -62,9 +62,9 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
// expect eight int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@ -79,9 +79,9 @@ class SimulatorStack : public v8::internal::AllStatic {
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
assembler::arm::Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
FUNCTION_ADDR(entry), 8, p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \

22
deps/v8/src/arm/virtual-frame-arm.cc

@ -145,11 +145,24 @@ void VirtualFrame::AllocateStackSlots() {
Adjust(count);
// Initialize stack slots with 'undefined' value.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
}
__ LoadRoot(r2, Heap::kStackLimitRootIndex);
if (count < kLocalVarBound) {
// For less locals the unrolled loop is more compact.
for (int i = 0; i < count; i++) {
__ push(ip);
}
} else {
// For more locals a loop in generated code is more compact.
Label alloc_locals_loop;
__ mov(r1, Operand(count));
__ bind(&alloc_locals_loop);
__ push(ip);
__ sub(r1, r1, Operand(1), SetCC);
__ b(ne, &alloc_locals_loop);
}
} else {
__ LoadRoot(r2, Heap::kStackLimitRootIndex);
}
// Check the stack for overflow or a break request.
// Put the lr setup instruction in the delay slot. The kInstrSize is added
// to the implicit 8 byte offset that always applies to operations with pc
@ -387,6 +400,13 @@ void VirtualFrame::EmitPush(Register reg) {
}
void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
ASSERT(stack_pointer_ == element_count() - 1);
Adjust(count);
__ stm(db_w, sp, src_regs);
}
#undef __
} } // namespace v8::internal

8
deps/v8/src/arm/virtual-frame-arm.h

@ -180,6 +180,9 @@ class VirtualFrame : public ZoneObject {
// shared return site. Emits code for spills.
void PrepareForReturn();
// Number of local variables after when we use a loop for allocating.
static const int kLocalVarBound = 5;
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
@ -346,6 +349,11 @@ class VirtualFrame : public ZoneObject {
// corresponding push instruction.
void EmitPush(Register reg);
// Push multiple registers on the stack and the virtual frame
// Register are selected by setting bit in src_regs and
// are pushed in decreasing order: r15 .. r0.
void EmitPushMultiple(int count, int src_regs);
// Push an element on the virtual frame.
void Push(Register reg);
void Push(Handle<Object> value);

82
deps/v8/src/array.js

@ -70,19 +70,22 @@ function GetSortedArrayKeys(array, intervals) {
// Optimized for sparse arrays if separator is ''.
function SparseJoin(array, len, convert) {
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
var builder = new StringBuilder();
var last_key = -1;
var keys_length = keys.length;
var elements = new $Array(keys_length);
var elements_length = 0;
for (var i = 0; i < keys_length; i++) {
var key = keys[i];
if (key != last_key) {
var e = array[key];
if (typeof(e) !== 'string') e = convert(e);
builder.add(e);
if (!IS_STRING(e)) e = convert(e);
elements[elements_length++] = e;
last_key = key;
}
}
return builder.generate();
return %StringBuilderConcat(elements, elements_length, '');
}
@ -107,7 +110,7 @@ function Join(array, length, separator, convert) {
// Attempt to convert the elements.
try {
if (UseSparseVariant(array, length, is_array) && separator === '') {
if (UseSparseVariant(array, length, is_array) && (separator.length == 0)) {
return SparseJoin(array, length, convert);
}
@ -115,39 +118,37 @@ function Join(array, length, separator, convert) {
if (length == 1) {
var e = array[0];
if (!IS_UNDEFINED(e) || (0 in array)) {
if (typeof(e) === 'string') return e;
if (IS_STRING(e)) return e;
return convert(e);
}
}
var builder = new StringBuilder();
// Construct an array for the elements.
var elements;
var elements_length = 0;
// We pull the empty separator check outside the loop for speed!
if (separator.length == 0) {
elements = new $Array(length);
for (var i = 0; i < length; i++) {
var e = array[i];
if (!IS_UNDEFINED(e) || (i in array)) {
if (typeof(e) !== 'string') e = convert(e);
if (e.length > 0) {
var elements = builder.elements;
elements[elements.length] = e;
}
if (!IS_STRING(e)) e = convert(e);
elements[elements_length++] = e;
}
}
} else {
elements = new $Array(length << 1);
for (var i = 0; i < length; i++) {
var e = array[i];
if (i != 0) builder.add(separator);
if (i != 0) elements[elements_length++] = separator;
if (!IS_UNDEFINED(e) || (i in array)) {
if (typeof(e) !== 'string') e = convert(e);
if (e.length > 0) {
var elements = builder.elements;
elements[elements.length] = e;
}
if (!IS_STRING(e)) e = convert(e);
elements[elements_length++] = e;
}
}
}
return builder.generate();
return %StringBuilderConcat(elements, elements_length, '');
} finally {
// Make sure to pop the visited array no matter what happens.
if (is_array) visited_arrays.pop();
@ -156,16 +157,15 @@ function Join(array, length, separator, convert) {
function ConvertToString(e) {
if (typeof(e) === 'string') return e;
if (e == null) return '';
else return ToString(e);
}
function ConvertToLocaleString(e) {
if (typeof(e) === 'string') return e;
if (e == null) return '';
else {
if (e == null) {
return '';
} else {
// e_obj's toLocaleString might be overwritten, check if it is a function.
// Call ToString if toLocaleString is not a function.
// See issue 877615.
@ -359,16 +359,20 @@ function ArrayToLocaleString() {
function ArrayJoin(separator) {
if (IS_UNDEFINED(separator)) separator = ',';
else separator = ToString(separator);
return Join(this, ToUint32(this.length), separator, ConvertToString);
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
separator = ToString(separator);
}
var length = TO_UINT32(this.length);
return Join(this, length, separator, ConvertToString);
}
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
var n = ToUint32(this.length);
var n = TO_UINT32(this.length);
if (n == 0) {
this.length = n;
return;
@ -384,7 +388,7 @@ function ArrayPop() {
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
var n = ToUint32(this.length);
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
@ -452,7 +456,7 @@ function SparseReverse(array, len) {
function ArrayReverse() {
var j = ToUint32(this.length) - 1;
var j = TO_UINT32(this.length) - 1;
if (UseSparseVariant(this, j, IS_ARRAY(this))) {
SparseReverse(this, j+1);
@ -483,7 +487,7 @@ function ArrayReverse() {
function ArrayShift() {
var len = ToUint32(this.length);
var len = TO_UINT32(this.length);
if (len === 0) {
this.length = 0;
@ -504,7 +508,7 @@ function ArrayShift() {
function ArrayUnshift(arg1) { // length == 1
var len = ToUint32(this.length);
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
if (IS_ARRAY(this))
@ -523,7 +527,7 @@ function ArrayUnshift(arg1) { // length == 1
function ArraySlice(start, end) {
var len = ToUint32(this.length);
var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@ -568,7 +572,7 @@ function ArraySplice(start, delete_count) {
// compatibility.
if (num_arguments == 0) return;
var len = ToUint32(this.length);
var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
if (start_i < 0) {
@ -850,7 +854,7 @@ function ArraySort(comparefn) {
return first_undefined;
}
length = ToUint32(this.length);
length = TO_UINT32(this.length);
if (length < 2) return this;
var is_array = IS_ARRAY(this);
@ -915,7 +919,7 @@ function ArrayForEach(f, receiver) {
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = this.length;
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
@ -933,7 +937,7 @@ function ArraySome(f, receiver) {
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = this.length;
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
@ -950,25 +954,23 @@ function ArrayEvery(f, receiver) {
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = this.length;
var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (!f.call(receiver, current, i, this)) return false;
}
}
return true;
}
function ArrayMap(f, receiver) {
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = this.length;
var length = TO_UINT32(this.length);
var result = new $Array(length);
for (var i = 0; i < length; i++) {
var current = this[i];
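
The array.js changes above replace the StringBuilder helper with a flat elements array handed to the %StringBuilderConcat runtime call, and read this.length through TO_UINT32 before looping. Both of those names are V8-internal; the observable behaviour can be sketched in ordinary JavaScript (toUint32, arrayLike and console.log below are illustrative assumptions, not part of the patch):

// The >>> 0 truncation mirrors what the TO_UINT32 macro does to `length`.
function toUint32(x) { return x >>> 0; }

var arrayLike = { 0: 'a', 4: 'b', length: 5.9 };
var len = toUint32(arrayLike.length);          // 5 -- fraction dropped

var elements = [];                             // flat buffer, as in SparseJoin
var elementsLength = 0;
for (var i = 0; i < len; i++) {
  var e = arrayLike[i];
  if (e !== undefined || (i in arrayLike)) {   // skip holes, keep real values
    elements[elementsLength++] = (typeof e === 'string') ? e : String(e);
  }
}
console.log(elements.join(''));                        // "ab"
console.log(Array.prototype.join.call(arrayLike, '')); // "ab" as well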

13
deps/v8/src/assembler.cc

@ -674,6 +674,19 @@ ExternalReference ExternalReference::re_case_insensitive_compare_uc16() {
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
ExternalReference ExternalReference::address_of_static_offsets_vector() {
return ExternalReference(OffsetsVector::static_offsets_vector_address());
}
ExternalReference ExternalReference::address_of_regexp_stack_memory_address() {
return ExternalReference(RegExpStack::memory_address());
}
ExternalReference ExternalReference::address_of_regexp_stack_memory_size() {
return ExternalReference(RegExpStack::memory_size_address());
}
#endif

5
deps/v8/src/assembler.h

@ -420,6 +420,11 @@ class ExternalReference BASE_EMBEDDED {
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit();
// Static variables for RegExp.
static ExternalReference address_of_static_offsets_vector();
static ExternalReference address_of_regexp_stack_memory_address();
static ExternalReference address_of_regexp_stack_memory_size();
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start();
static ExternalReference heap_always_allocate_scope_depth();

2
deps/v8/src/ast.cc

@ -433,7 +433,7 @@ void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
} else {
stream()->Add("%i ", that->max());
}
stream()->Add(that->is_greedy() ? "g " : "n ");
stream()->Add(that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
that->body()->Accept(this, data);
stream()->Add(")");
return NULL;

48
deps/v8/src/ast.h

@ -187,6 +187,11 @@ class Expression: public AstNode {
virtual bool IsValidJSON() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
// Symbols that cannot be parsed as array indices are considered property
// names. We do not treat symbols that can be array indexes as property
// names because [] for string objects is handled only by keyed ICs.
virtual bool IsPropertyName() { return false; }
// Mark the expression as being compiled as an expression
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
@ -642,21 +647,20 @@ class TryStatement: public Statement {
class TryCatchStatement: public TryStatement {
public:
TryCatchStatement(Block* try_block,
Expression* catch_var,
VariableProxy* catch_var,
Block* catch_block)
: TryStatement(try_block),
catch_var_(catch_var),
catch_block_(catch_block) {
ASSERT(catch_var->AsVariableProxy() != NULL);
}
virtual void Accept(AstVisitor* v);
Expression* catch_var() const { return catch_var_; }
VariableProxy* catch_var() const { return catch_var_; }
Block* catch_block() const { return catch_block_; }
private:
Expression* catch_var_;
VariableProxy* catch_var_;
Block* catch_block_;
};
@ -707,6 +711,14 @@ class Literal: public Expression {
virtual bool IsValidJSON() { return true; }
virtual bool IsPropertyName() {
if (handle_->IsSymbol()) {
uint32_t ignored;
return !String::cast(*handle_)->AsArrayIndex(&ignored);
}
return false;
}
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@ -827,24 +839,24 @@ class RegExpLiteral: public MaterializedLiteral {
// for minimizing the work when constructing it at runtime.
class ArrayLiteral: public MaterializedLiteral {
public:
ArrayLiteral(Handle<FixedArray> literals,
ArrayLiteral(Handle<FixedArray> constant_elements,
ZoneList<Expression*>* values,
int literal_index,
bool is_simple,
int depth)
: MaterializedLiteral(literal_index, is_simple, depth),
literals_(literals),
constant_elements_(constant_elements),
values_(values) {}
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
virtual bool IsValidJSON();
Handle<FixedArray> literals() const { return literals_; }
Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
private:
Handle<FixedArray> literals_;
Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
};
@ -1526,6 +1538,7 @@ class CharacterSet BASE_EMBEDDED {
standard_set_type_ = special_set_type;
}
bool is_standard() { return standard_set_type_ != 0; }
void Canonicalize();
private:
ZoneList<CharacterRange>* ranges_;
// If non-zero, the value represents a standard set (e.g., all whitespace
@ -1619,12 +1632,13 @@ class RegExpText: public RegExpTree {
class RegExpQuantifier: public RegExpTree {
public:
RegExpQuantifier(int min, int max, bool is_greedy, RegExpTree* body)
: min_(min),
enum Type { GREEDY, NON_GREEDY, POSSESSIVE };
RegExpQuantifier(int min, int max, Type type, RegExpTree* body)
: body_(body),
min_(min),
max_(max),
is_greedy_(is_greedy),
body_(body),
min_match_(min * body->min_match()) {
min_match_(min * body->min_match()),
type_(type) {
if (max > 0 && body->max_match() > kInfinity / max) {
max_match_ = kInfinity;
} else {
@ -1648,15 +1662,17 @@ class RegExpQuantifier: public RegExpTree {
virtual int max_match() { return max_match_; }
int min() { return min_; }
int max() { return max_; }
bool is_greedy() { return is_greedy_; }
bool is_possessive() { return type_ == POSSESSIVE; }
bool is_non_greedy() { return type_ == NON_GREEDY; }
bool is_greedy() { return type_ == GREEDY; }
RegExpTree* body() { return body_; }
private:
RegExpTree* body_;
int min_;
int max_;
bool is_greedy_;
RegExpTree* body_;
int min_match_;
int max_match_;
Type type_;
};
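
RegExpQuantifier now records a GREEDY / NON_GREEDY / POSSESSIVE type instead of a single is_greedy_ flag; the possessive case is only reachable via the regexp_possessive_quantifier flag added in flag-definitions.h further down. JavaScript source has no native possessive syntax, so the sketch below emulates it with the usual capturing-lookahead trick -- purely an illustration of the matching difference, not of the engine's implementation:

// Greedy: a+ gives one 'a' back during backtracking, so the match succeeds.
console.log(/^(a+)ab$/.test('aaab'));        // true

// Non-greedy: a+? grows only as far as needed, also succeeds.
console.log(/^(a+?)ab$/.test('aaab'));       // true

// Emulated possessive: (?=(a+))\1 consumes every 'a' and never backtracks
// into the lookahead, so nothing is left for the trailing 'a' and it fails.
console.log(/^(?=(a+))\1ab$/.test('aaab'));  // false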

1
deps/v8/src/bootstrapper.cc

@ -992,6 +992,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
INSTALL_NATIVE(JSFunction, "ToBoolean", to_boolean_fun);
INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
configure_instance_fun);

5
deps/v8/src/builtins.cc

@ -544,6 +544,11 @@ static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
}
static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
KeyedLoadIC::GenerateString(masm);
}
static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
}

4
deps/v8/src/builtins.h

@ -74,6 +74,7 @@ namespace internal {
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalUnsignedByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
@ -147,7 +148,8 @@ namespace internal {
V(STRING_ADD_LEFT, 1) \
V(STRING_ADD_RIGHT, 1) \
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
V(APPLY_OVERFLOW, 1) \
V(STRING_CHAR_AT, 1)
class ObjectVisitor;

5
deps/v8/src/code-stubs.h

@ -37,6 +37,8 @@ namespace internal {
V(CallFunction) \
V(GenericBinaryOp) \
V(StringAdd) \
V(SubString) \
V(StringCompare) \
V(SmiOp) \
V(Compare) \
V(RecordWrite) \
@ -46,12 +48,13 @@ namespace internal {
V(FastNewClosure) \
V(FastNewContext) \
V(FastCloneShallowArray) \
V(UnarySub) \
V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
V(Instanceof) \
V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpExec) \
V(Runtime) \
V(CEntry) \
V(JSEntry)

22
deps/v8/src/codegen.cc

@ -342,11 +342,12 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
{&CodeGenerator::GenerateLog, "_Log"},
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
{&CodeGenerator::GenerateMathSin, "_Math_sin"},
{&CodeGenerator::GenerateMathCos, "_Math_cos"},
{&CodeGenerator::GenerateIsObject, "_IsObject"},
{&CodeGenerator::GenerateIsFunction, "_IsFunction"},
{&CodeGenerator::GenerateStringAdd, "_StringAdd"},
{&CodeGenerator::GenerateSubString, "_SubString"},
{&CodeGenerator::GenerateStringCompare, "_StringCompare"},
{&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
};
@ -450,6 +451,23 @@ const char* RuntimeStub::GetName() {
}
const char* GenericUnaryOpStub::GetName() {
switch (op_) {
case Token::SUB:
return overwrite_
? "GenericUnaryOpStub_SUB_Overwrite"
: "GenericUnaryOpStub_SUB_Alloc";
case Token::BIT_NOT:
return overwrite_
? "GenericUnaryOpStub_BIT_NOT_Overwrite"
: "GenericUnaryOpStub_BIT_NOT_Alloc";
default:
UNREACHABLE();
return "<unknown>";
}
}
void RuntimeStub::Generate(MacroAssembler* masm) {
Runtime::Function* f = Runtime::FunctionForId(id_);
masm->TailCallRuntime(ExternalReference(f),

60
deps/v8/src/codegen.h

@ -294,30 +294,53 @@ class InstanceofStub: public CodeStub {
};
class UnarySubStub : public CodeStub {
class GenericUnaryOpStub : public CodeStub {
public:
explicit UnarySubStub(bool overwrite)
: overwrite_(overwrite) { }
GenericUnaryOpStub(Token::Value op, bool overwrite)
: op_(op), overwrite_(overwrite) { }
private:
Token::Value op_;
bool overwrite_;
Major MajorKey() { return UnarySub; }
int MinorKey() { return overwrite_ ? 1 : 0; }
class OverwriteField: public BitField<int, 0, 1> {};
class OpField: public BitField<Token::Value, 1, kMinorBits - 1> {};
Major MajorKey() { return GenericUnaryOp; }
int MinorKey() {
return OpField::encode(op_) | OverwriteField::encode(overwrite_);
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "UnarySubStub"; }
const char* GetName();
};
enum NaNInformation {
kBothCouldBeNaN,
kCantBothBeNaN
};
class CompareStub: public CodeStub {
public:
CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
CompareStub(Condition cc,
bool strict,
NaNInformation nan_info = kBothCouldBeNaN) :
cc_(cc), strict_(strict), never_nan_nan_(nan_info == kCantBothBeNaN) { }
void Generate(MacroAssembler* masm);
private:
Condition cc_;
bool strict_;
// Only used for 'equal' comparisons. Tells the stub that we already know
// that at least one side of the comparison is not NaN. This allows the
// stub to use object identity in the positive case. We ignore it when
// generating the minor key for other comparisons to avoid creating more
// stubs.
bool never_nan_nan_;
Major MajorKey() { return Compare; }
@ -329,6 +352,9 @@ class CompareStub: public CodeStub {
Register object,
Register scratch);
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s)\n",
@ -470,6 +496,26 @@ class ArgumentsAccessStub: public CodeStub {
};
class RegExpExecStub: public CodeStub {
public:
RegExpExecStub() { }
private:
Major MajorKey() { return RegExpExec; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "RegExpExecStub"; }
#ifdef DEBUG
void Print() {
PrintF("RegExpExecStub\n");
}
#endif
};
} // namespace internal
} // namespace v8
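
The new kCantBothBeNaN hint on CompareStub matters because object identity is only a valid shortcut for equality when neither operand can be NaN. The JavaScript fact the stub leans on is simply that NaN never compares equal to anything, itself included:

var x = NaN;
console.log(x == x);       // false -- same value, still not equal
console.log(x === x);      // false
console.log(0.5 === 0.5);  // true  -- for non-NaN values identity does imply
                           //          (strict) equality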

60
deps/v8/src/compiler.cc

@ -538,7 +538,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
#ifdef ENABLE_OPROFILE_AGENT
OProfileAgent::CreateNativeCodeRegion(*node->name(),
OProfileAgent::CreateNativeCodeRegion(*literal->name(),
code->instruction_start(),
code->instruction_size());
#endif
@ -649,12 +649,6 @@ void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
void CodeGenSelector::VisitDeclaration(Declaration* decl) {
Property* prop = decl->proxy()->AsProperty();
if (prop != NULL) {
// Property rewrites are shared, ensure we are not changing its
// expression context state.
ASSERT(prop->obj()->context() == Expression::kUninitialized ||
prop->obj()->context() == Expression::kValue);
ASSERT(prop->key()->context() == Expression::kUninitialized ||
prop->key()->context() == Expression::kValue);
ProcessExpression(prop->obj(), Expression::kValue);
ProcessExpression(prop->key(), Expression::kValue);
}
@ -746,7 +740,9 @@ void CodeGenSelector::VisitForInStatement(ForInStatement* stmt) {
void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
BAILOUT("TryCatchStatement");
Visit(stmt->try_block());
CHECK_BAILOUT;
Visit(stmt->catch_block());
}
@ -876,7 +872,9 @@ void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
BAILOUT("CatchExtensionObject");
ProcessExpression(expr->key(), Expression::kValue);
CHECK_BAILOUT;
ProcessExpression(expr->value(), Expression::kValue);
}
@ -890,6 +888,9 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
if (var->mode() == Variable::CONST) {
BAILOUT("Assignment to const");
}
// All global variables are supported.
if (!var->is_global()) {
ASSERT(var->slot() != NULL);
@ -899,20 +900,12 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
}
}
} else if (prop != NULL) {
ASSERT(prop->obj()->context() == Expression::kUninitialized ||
prop->obj()->context() == Expression::kValue);
ProcessExpression(prop->obj(), Expression::kValue);
CHECK_BAILOUT;
// We will only visit the key during code generation for keyed property
// stores. Leave its expression context uninitialized for named
// property stores.
Literal* lit = prop->key()->AsLiteral();
uint32_t ignored;
if (lit == NULL ||
!lit->handle()->IsSymbol() ||
String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
ASSERT(prop->key()->context() == Expression::kUninitialized ||
prop->key()->context() == Expression::kValue);
if (!prop->key()->IsPropertyName()) {
ProcessExpression(prop->key(), Expression::kValue);
CHECK_BAILOUT;
}
@ -926,7 +919,7 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
void CodeGenSelector::VisitThrow(Throw* expr) {
BAILOUT("Throw");
ProcessExpression(expr->exception(), Expression::kValue);
}
@ -1018,11 +1011,32 @@ void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
// We support postfix count operations on global variables.
if (expr->is_prefix()) BAILOUT("Prefix CountOperation");
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
if (var == NULL || !var->is_global()) BAILOUT("non-global postincrement");
ProcessExpression(expr->expression(), Expression::kValue);
Property* prop = expr->expression()->AsProperty();
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
// All global variables are supported.
if (!var->is_global()) {
ASSERT(var->slot() != NULL);
Slot::Type type = var->slot()->type();
if (type == Slot::LOOKUP) {
BAILOUT("CountOperation with lookup slot");
}
}
} else if (prop != NULL) {
ProcessExpression(prop->obj(), Expression::kValue);
CHECK_BAILOUT;
// We will only visit the key during code generation for keyed property
// stores. Leave its expression context uninitialized for named
// property stores.
if (!prop->key()->IsPropertyName()) {
ProcessExpression(prop->key(), Expression::kValue);
CHECK_BAILOUT;
}
} else {
// This is a throw reference error.
BAILOUT("CountOperation non-variable/non-property expression");
}
}
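
Both ast.h and the CodeGenSelector changes above route this decision through Expression::IsPropertyName(): a store is "named" when its key is a symbol that cannot be parsed as an array index, and "keyed" otherwise. In plain source terms (the object and keys below are only examples):

var o = {};
o.foo = 1;      // named store: "foo" is a symbol, not an array index
o['bar'] = 2;   // also named: a constant string key that is not an index
o[0] = 3;       // keyed store: numeric key
o['10'] = 4;    // keyed store: "10" parses as an array index, so it is
                // deliberately not treated as a property name
var k = 'baz';
o[k] = 5;       // keyed store: the key is not a compile-time literal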

2
deps/v8/src/contexts.h

@ -77,6 +77,7 @@ enum ContextLookupFlags {
V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
V(TO_BOOLEAN_FUN_INDEX, JSFunction, to_boolean_fun) \
V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
@ -202,6 +203,7 @@ class Context: public FixedArray {
TO_UINT32_FUN_INDEX,
TO_INT32_FUN_INDEX,
TO_BOOLEAN_FUN_INDEX,
GLOBAL_EVAL_FUN_INDEX,
INSTANTIATE_FUN_INDEX,
CONFIGURE_INSTANCE_FUN_INDEX,
SPECIAL_FUNCTION_TABLE_INDEX,
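
GLOBAL_EVAL_FUN_INDEX, together with the INSTALL_NATIVE(JSFunction, "GlobalEval", ...) line in bootstrapper.cc above, wires up the function used when eval is invoked indirectly. An indirect call evaluates its argument in the global scope rather than in the caller's scope; a small sketch of that behavioural difference (console.log is assumed for output):

var x = 'global';
function f() {
  var x = 'local';
  var direct = eval('x');            // 'local'  -- direct eval sees f's scope
  var indirectEval = eval;
  var indirect = indirectEval('x');  // 'global' -- indirect eval runs at
                                     //             global scope (GlobalEval)
  return [direct, indirect];
}
console.log(f());                    // ['local', 'global']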

171
deps/v8/src/date-delay.js

@ -45,12 +45,6 @@ function ThrowDateTypeError() {
throw new $TypeError('this is not a Date object.');
}
// ECMA 262 - 15.9.1.2
function Day(time) {
return FLOOR(time / msPerDay);
}
// ECMA 262 - 5.2
function Modulo(value, remainder) {
var mod = value % remainder;
@ -86,30 +80,13 @@ function TimeFromYear(year) {
}
function YearFromTime(time) {
return FromJulianDay(Day(time) + kDayZeroInJulianDay).year;
}
function InLeapYear(time) {
return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
}
// ECMA 262 - 15.9.1.4
function MonthFromTime(time) {
return FromJulianDay(Day(time) + kDayZeroInJulianDay).month;
return DaysInYear(YEAR_FROM_TIME(time)) == 366 ? 1 : 0;
}
function DayWithinYear(time) {
return Day(time) - DayFromYear(YearFromTime(time));
}
// ECMA 262 - 15.9.1.5
function DateFromTime(time) {
return FromJulianDay(Day(time) + kDayZeroInJulianDay).date;
return DAY(time) - DayFromYear(YEAR_FROM_TIME(time));
}
@ -136,7 +113,7 @@ function EquivalentTime(t) {
// we must do this, but for compatibility with other browsers, we use
// the actual year if it is in the range 1970..2037
if (t >= 0 && t <= 2.1e12) return t;
var day = MakeDay(EquivalentYear(YearFromTime(t)), MonthFromTime(t), DateFromTime(t));
var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
return TimeClip(MakeDate(day, TimeWithinDay(t)));
}
@ -232,7 +209,7 @@ function LocalTimezone(t) {
function WeekDay(time) {
return Modulo(Day(time) + 4, 7);
return Modulo(DAY(time) + 4, 7);
}
var local_time_offset = %DateLocalTimeOffset();
@ -243,7 +220,14 @@ function LocalTime(time) {
}
function LocalTimeNoCheck(time) {
return time + local_time_offset + DaylightSavingsOffset(time);
// Inline the DST offset cache checks for speed.
var cache = DST_offset_cache;
if (cache.start <= time && time <= cache.end) {
var dst_offset = cache.offset;
} else {
var dst_offset = DaylightSavingsOffset(time);
}
return time + local_time_offset + dst_offset;
}
@ -254,27 +238,6 @@ function UTC(time) {
}
// ECMA 262 - 15.9.1.10
function HourFromTime(time) {
return Modulo(FLOOR(time / msPerHour), HoursPerDay);
}
function MinFromTime(time) {
return Modulo(FLOOR(time / msPerMinute), MinutesPerHour);
}
function SecFromTime(time) {
return Modulo(FLOOR(time / msPerSecond), SecondsPerMinute);
}
function msFromTime(time) {
return Modulo(time, msPerSecond);
}
// ECMA 262 - 15.9.1.11
function MakeTime(hour, min, sec, ms) {
if (!$isFinite(hour)) return $NaN;
@ -468,7 +431,7 @@ var Date_cache = {
value = DateParse(year);
if (!NUMBER_IS_NAN(value)) {
cache.time = value;
cache.year = YearFromTime(LocalTimeNoCheck(value));
cache.year = YEAR_FROM_TIME(LocalTimeNoCheck(value));
cache.string = year;
}
}
@ -508,60 +471,59 @@ function GetTimeFrom(aDate) {
return DATE_VALUE(aDate);
}
function GetMillisecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return msFromTime(LocalTimeNoCheck(t));
return MS_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCMillisecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return msFromTime(t);
return MS_FROM_TIME(t);
}
function GetSecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return SecFromTime(LocalTimeNoCheck(t));
return SEC_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCSecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return SecFromTime(t);
return SEC_FROM_TIME(t);
}
function GetMinutesFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return MinFromTime(LocalTimeNoCheck(t));
return MIN_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCMinutesFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return MinFromTime(t);
return MIN_FROM_TIME(t);
}
function GetHoursFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return HourFromTime(LocalTimeNoCheck(t));
return HOUR_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCHoursFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return HourFromTime(t);
return HOUR_FROM_TIME(t);
}
@ -570,42 +532,42 @@ function GetFullYearFrom(aDate) {
if (NUMBER_IS_NAN(t)) return t;
var cache = Date_cache;
if (cache.time === t) return cache.year;
return YearFromTime(LocalTimeNoCheck(t));
return YEAR_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCFullYearFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return YearFromTime(t);
return YEAR_FROM_TIME(t);
}
function GetMonthFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return MonthFromTime(LocalTimeNoCheck(t));
return MONTH_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCMonthFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return MonthFromTime(t);
return MONTH_FROM_TIME(t);
}
function GetDateFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return DateFromTime(LocalTimeNoCheck(t));
return DATE_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCDateFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
return DateFromTime(t);
return DATE_FROM_TIME(t);
}
@ -622,7 +584,7 @@ function TwoDigitString(value) {
function DateString(time) {
var YMD = FromJulianDay(Day(time) + kDayZeroInJulianDay);
var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return WeekDays[WeekDay(time)] + ' '
+ Months[YMD.month] + ' '
+ TwoDigitString(YMD.date) + ' '
@ -635,7 +597,7 @@ var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July'
function LongDateString(time) {
var YMD = FromJulianDay(Day(time) + kDayZeroInJulianDay);
var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return LongWeekDays[WeekDay(time)] + ', '
+ LongMonths[YMD.month] + ' '
+ TwoDigitString(YMD.date) + ', '
@ -644,9 +606,9 @@ function LongDateString(time) {
function TimeString(time) {
return TwoDigitString(HourFromTime(time)) + ':'
+ TwoDigitString(MinFromTime(time)) + ':'
+ TwoDigitString(SecFromTime(time));
return TwoDigitString(HOUR_FROM_TIME(time)) + ':'
+ TwoDigitString(MIN_FROM_TIME(time)) + ':'
+ TwoDigitString(SEC_FROM_TIME(time));
}
@ -892,8 +854,8 @@ function DateSetTime(ms) {
function DateSetMilliseconds(ms) {
var t = LocalTime(DATE_VALUE(this));
ms = ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@ -901,8 +863,8 @@ function DateSetMilliseconds(ms) {
function DateSetUTCMilliseconds(ms) {
var t = DATE_VALUE(this);
ms = ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@ -911,8 +873,8 @@ function DateSetSeconds(sec, ms) {
var t = LocalTime(DATE_VALUE(this));
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@ -921,8 +883,8 @@ function DateSetUTCSeconds(sec, ms) {
var t = DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@ -933,8 +895,8 @@ function DateSetMinutes(min, sec, ms) {
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec);
ms = argc < 3 ? GetMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(HourFromTime(t), min, sec, ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@ -945,8 +907,8 @@ function DateSetUTCMinutes(min, sec, ms) {
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec);
ms = argc < 3 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(HourFromTime(t), min, sec, ms);
return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@ -959,7 +921,7 @@ function DateSetHours(hour, min, sec, ms) {
sec = argc < 3 ? GetSecondsFrom(this) : ToNumber(sec);
ms = argc < 4 ? GetMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@ -972,7 +934,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
sec = argc < 3 ? GetUTCSecondsFrom(this) : ToNumber(sec);
ms = argc < 4 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@ -980,7 +942,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
function DateSetDate(date) {
var t = LocalTime(DATE_VALUE(this));
date = ToNumber(date);
var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@ -989,7 +951,7 @@ function DateSetDate(date) {
function DateSetUTCDate(date) {
var t = DATE_VALUE(this);
date = ToNumber(date);
var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@ -999,7 +961,7 @@ function DateSetMonth(month, date) {
var t = LocalTime(DATE_VALUE(this));
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetDateFrom(this) : ToNumber(date);
var day = MakeDay(YearFromTime(t), month, date);
var day = MakeDay(YEAR_FROM_TIME(t), month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@ -1009,7 +971,7 @@ function DateSetUTCMonth(month, date) {
var t = DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date);
var day = MakeDay(YearFromTime(t), month, date);
var day = MakeDay(YEAR_FROM_TIME(t), month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@ -1020,8 +982,8 @@ function DateSetFullYear(year, month, date) {
t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
year = ToNumber(year);
var argc = %_ArgumentsLength();
month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
date = argc < 3 ? DateFromTime(t) : ToNumber(date);
month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@ -1033,8 +995,8 @@ function DateSetUTCFullYear(year, month, date) {
if (NUMBER_IS_NAN(t)) t = 0;
var argc = %_ArgumentsLength();
year = ToNumber(year);
month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
date = argc < 3 ? DateFromTime(t) : ToNumber(date);
month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@ -1046,9 +1008,9 @@ function DateToUTCString() {
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
return WeekDays[WeekDay(t)] + ', '
+ TwoDigitString(DateFromTime(t)) + ' '
+ Months[MonthFromTime(t)] + ' '
+ YearFromTime(t) + ' '
+ TwoDigitString(DATE_FROM_TIME(t)) + ' '
+ Months[MONTH_FROM_TIME(t)] + ' '
+ YEAR_FROM_TIME(t) + ' '
+ TimeString(t) + ' GMT';
}
@ -1057,7 +1019,7 @@ function DateToUTCString() {
function DateGetYear() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return $NaN;
return YearFromTime(LocalTimeNoCheck(t)) - 1900;
return YEAR_FROM_TIME(LocalTimeNoCheck(t)) - 1900;
}
@ -1069,7 +1031,7 @@ function DateSetYear(year) {
if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
var day = MakeDay(year, MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@ -1086,16 +1048,19 @@ function DateToGMTString() {
}
function PadInt(n) {
// Format integers to have at least two digits.
return n < 10 ? '0' + n : n;
function PadInt(n, digits) {
if (digits == 1) return n;
return n < MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
}
function DateToISOString() {
return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1) +
'-' + PadInt(this.getUTCDate()) + 'T' + PadInt(this.getUTCHours()) +
':' + PadInt(this.getUTCMinutes()) + ':' + PadInt(this.getUTCSeconds()) +
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1, 2) +
'-' + PadInt(this.getUTCDate(), 2) + 'T' + PadInt(this.getUTCHours(), 2) +
':' + PadInt(this.getUTCMinutes(), 2) + ':' + PadInt(this.getUTCSeconds(), 2) +
'.' + PadInt(this.getUTCMilliseconds(), 3) +
'Z';
}
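
PadInt now takes an explicit digit count so that DateToISOString can pad milliseconds to three digits, and the method returns the invalid-date string for NaN times instead of formatting garbage. The padding logic, restated as standalone JavaScript for illustration (padInt is just a local name for the sketch):

// Same shape as the new PadInt(n, digits): prepend zeros until n is at
// least `digits` characters wide. Recursive, like the library version.
function padInt(n, digits) {
  if (digits == 1) return n;
  return n < Math.pow(10, digits - 1) ? '0' + padInt(n, digits - 1) : n;
}

console.log(padInt(7, 2));     // "07"
console.log(padInt(7, 3));     // "007"
console.log(padInt(42, 3));    // "042"
console.log(padInt(2009, 4));  // 2009 (already wide enough)

// Those fixed-width fields are what give toISOString its shape, e.g.
// 2009-12-25T08:05:03.007Z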

19
deps/v8/src/debug.cc

@ -2483,7 +2483,24 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
}
bool Debugger::StartAgent(const char* name, int port) {
static void StubMessageHandler2(const v8::Debug::Message& message) {
// Simply ignore message.
}
bool Debugger::StartAgent(const char* name, int port,
bool wait_for_connection) {
if (wait_for_connection) {
// Suspend V8 if it is already running or set V8 to suspend whenever
// it starts.
// Provide a stub message handler; V8 auto-continues each suspend
// when there is no message handler installed, which we do not want here.
// Once suspended, V8 stays suspended until a remote
// debugger connects and issues a "continue" command.
Debugger::message_handler_ = StubMessageHandler2;
v8::Debug::DebugBreak();
}
if (Socket::Setup()) {
agent_ = new DebuggerAgent(name, port);
agent_->Start();

3
deps/v8/src/debug.h

@ -636,7 +636,8 @@ class Debugger {
bool* pending_exception);
// Start the debugger agent listening on the provided port.
static bool StartAgent(const char* name, int port);
static bool StartAgent(const char* name, int port,
bool wait_for_connection = false);
// Stop the debugger agent.
static void StopAgent();

7
deps/v8/src/dtoa-config.c

@ -38,7 +38,8 @@
*/
#if !(defined(__APPLE__) && defined(__MACH__)) && \
!defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__)
!defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__) && \
!defined(__sun)
#include <endian.h>
#endif
#include <math.h>
@ -47,7 +48,7 @@
/* The floating point word order on ARM is big endian when floating point
* emulation is used, even if the byte order is little endian */
#if !(defined(__APPLE__) && defined(__MACH__)) && !defined(WIN32) && \
!defined(__FreeBSD__) && !defined(__OpenBSD__) && \
!defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__sun) && \
__FLOAT_WORD_ORDER == __BIG_ENDIAN
#define IEEE_MC68k
#else
@ -56,7 +57,7 @@
#define __MATH_H__
#if defined(__APPLE__) && defined(__MACH__) || defined(__FreeBSD__) || \
defined(__OpenBSD__)
defined(__OpenBSD__) || defined(__sun)
/* stdlib.h on FreeBSD and Apple's 10.5 and later SDKs will mangle the
* name of strtod. If it's included after strtod is redefined as
* gay_strtod, it will mangle the name of gay_strtod, which is

203
deps/v8/src/fast-codegen.cc

@ -67,13 +67,47 @@ int FastCodeGenerator::SlotOffset(Slot* slot) {
case Slot::LOCAL:
offset += JavaScriptFrameConstants::kLocal0Offset;
break;
default:
case Slot::CONTEXT:
case Slot::LOOKUP:
UNREACHABLE();
}
return offset;
}
void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
break;
case Expression::kValue:
__ push(reg);
break;
case Expression::kTest:
TestAndBranch(reg, true_label_, false_label_);
break;
case Expression::kValueTest: {
Label discard;
__ push(reg);
TestAndBranch(reg, true_label_, &discard);
__ bind(&discard);
__ Drop(1);
__ jmp(false_label_);
break;
}
case Expression::kTestValue: {
Label discard;
__ push(reg);
TestAndBranch(reg, &discard, false_label_);
__ bind(&discard);
__ Drop(1);
__ jmp(true_label_);
}
}
}
void FastCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
int length = declarations->length();
@ -162,7 +196,7 @@ void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect: // Fall through.
case Expression::kEffect:
case Expression::kTest:
// The value of the left subexpression is not needed.
expected = Expression::kTest;
@ -192,36 +226,33 @@ void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
#endif
Label eval_right, done;
Label* saved_true = true_label_;
Label* saved_false = false_label_;
// Set up the appropriate context for the left subexpression based on the
// operation and our own context.
// Set up the appropriate context for the left subexpression based
// on the operation and our own context. Initially assume we can
// inherit both true and false labels from our context.
Label* if_true = true_label_;
Label* if_false = false_label_;
if (expr->op() == Token::OR) {
// If there is no usable true label in the OR expression's context, use
// the end of this expression, otherwise inherit the same true label.
// If we are not in some kind of a test context, we did not inherit a
// true label from our context. Use the end of the expression.
if (expr->context() == Expression::kEffect ||
expr->context() == Expression::kValue) {
true_label_ = &done;
if_true = &done;
}
// The false label is the label of the second subexpression.
false_label_ = &eval_right;
// The false label is the label of the right subexpression.
if_false = &eval_right;
} else {
ASSERT_EQ(Token::AND, expr->op());
// The true label is the label of the second subexpression.
true_label_ = &eval_right;
// If there is no usable false label in the AND expression's context,
// use the end of the expression, otherwise inherit the same false
// label.
// The true label is the label of the right subexpression.
if_true = &eval_right;
// If we are not in some kind of a test context, we did not inherit a
// false label from our context. Use the end of the expression.
if (expr->context() == Expression::kEffect ||
expr->context() == Expression::kValue) {
false_label_ = &done;
if_false = &done;
}
}
Visit(expr->left());
true_label_ = saved_true;
false_label_ = saved_false;
VisitForControl(expr->left(), if_true, if_false);
__ bind(&eval_right);
Visit(expr->right());
@ -254,19 +285,11 @@ void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
Comment cmnt(masm_, "[ IfStatement");
// Expressions cannot recursively enter statements, there are no labels in
// the state.
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
SetStatementPosition(stmt);
Label then_part, else_part, done;
// Do not worry about optimizing for empty then or else bodies.
true_label_ = &then_part;
false_label_ = &else_part;
ASSERT(stmt->condition()->context() == Expression::kTest);
Visit(stmt->condition());
true_label_ = NULL;
false_label_ = NULL;
VisitForControl(stmt->condition(), &then_part, &else_part);
__ bind(&then_part);
Visit(stmt->then_statement());
@ -281,6 +304,7 @@ void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
Comment cmnt(masm_, "[ ContinueStatement");
SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
while (!current->IsContinueTarget(stmt->target())) {
@ -296,6 +320,7 @@ void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
Comment cmnt(masm_, "[ BreakStatement");
SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
while (!current->IsBreakTarget(stmt->target())) {
@ -311,6 +336,7 @@ void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
SetStatementPosition(stmt);
Expression* expr = stmt->expression();
// Complete the statement based on the type of the subexpression.
if (expr->AsLiteral() != NULL) {
@ -372,6 +398,7 @@ void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
Label body, stack_limit_hit, stack_check_success;
Iteration loop_statement(this, stmt);
@ -384,17 +411,8 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
// We are not in an expression context because we have been compiling
// statements. Set up a test expression context for the condition.
__ bind(loop_statement.continue_target());
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
false_label_ = loop_statement.break_target();
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
false_label_ = NULL;
VisitForControl(stmt->cond(), &body, loop_statement.break_target());
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
@ -409,6 +427,7 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Comment cmnt(masm_, "[ WhileStatement");
SetStatementPosition(stmt);
Label body, stack_limit_hit, stack_check_success;
Iteration loop_statement(this, stmt);
@ -425,16 +444,7 @@ void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
// We are not in an expression context because we have been compiling
// statements. Set up a test expression context for the condition.
ASSERT_EQ(NULL, true_label_);
ASSERT_EQ(NULL, false_label_);
true_label_ = &body;
false_label_ = loop_statement.break_target();
ASSERT(stmt->cond()->context() == Expression::kTest);
Visit(stmt->cond());
true_label_ = NULL;
false_label_ = NULL;
VisitForControl(stmt->cond(), &body, loop_statement.break_target());
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
@ -457,11 +467,52 @@ void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
UNREACHABLE();
Comment cmnt(masm_, "[ TryCatchStatement");
SetStatementPosition(stmt);
// The try block adds a handler to the exception handler chain
// before entering, and removes it again when exiting normally.
// If an exception is thrown during execution of the try block,
// control is passed to the handler, which also consumes the handler.
// At this point, the exception is in a register, and we store it in
// the temporary local variable (prints as ".catch-var") before
// executing the catch block. The catch block has been rewritten
// to introduce a new scope to bind the catch variable and to remove
// that scope again afterwards.
Label try_handler_setup, catch_entry, done;
__ Call(&try_handler_setup);
// Try handler code, exception in result register.
// Store exception in local .catch variable before executing catch block.
{
// The catch variable is *always* a variable proxy for a local variable.
Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(catch_var);
Slot* variable_slot = catch_var->slot();
ASSERT_NOT_NULL(variable_slot);
ASSERT_EQ(Slot::LOCAL, variable_slot->type());
StoreToFrameField(SlotOffset(variable_slot), result_register());
}
Visit(stmt->catch_block());
__ jmp(&done);
// Try block code. Sets up the exception handler chain.
__ bind(&try_handler_setup);
{
TryCatch try_block(this, &catch_entry);
__ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
Visit(stmt->try_block());
__ PopTryHandler();
}
__ bind(&done);
}
void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
Comment cmnt(masm_, "[ TryFinallyStatement");
SetStatementPosition(stmt);
// Try finally is compiled by setting up a try-handler on the stack while
// executing the try body, and removing it again afterwards.
//
@ -474,7 +525,7 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// its outward control transfer.
// 3. by exiting the try-block with a thrown exception.
// This can happen in nested function calls. It traverses the try-handler
// chaing and consumes the try-handler entry before jumping to the
// chain and consumes the try-handler entry before jumping to the
// handler code. The handler code then calls the finally-block before
// rethrowing the exception.
//
@ -497,14 +548,15 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// is retained by the finally block.
// Call the finally block and then rethrow the exception.
__ Call(&finally_entry);
ThrowException();
__ push(result_register());
__ CallRuntime(Runtime::kReThrow, 1);
}
__ bind(&finally_entry);
{
// Finally block implementation.
EnterFinallyBlock();
Finally finally_block(this);
EnterFinallyBlock();
Visit(stmt->finally_block());
ExitFinallyBlock(); // Return to the calling code.
}
@ -512,9 +564,9 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
__ bind(&try_handler_setup);
{
// Setup try handler (stack pointer registers).
__ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
TryFinally try_block(this, &finally_entry);
VisitStatements(stmt->try_block()->statements());
__ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
Visit(stmt->try_block());
__ PopTryHandler();
}
// Execute the finally block on the way out.
@ -546,14 +598,7 @@ void FastCodeGenerator::VisitConditional(Conditional* expr) {
Label true_case, false_case, done;
Label* saved_true = true_label_;
Label* saved_false = false_label_;
true_label_ = &true_case;
false_label_ = &false_case;
Visit(expr->condition());
true_label_ = saved_true;
false_label_ = saved_false;
VisitForControl(expr->condition(), &true_case, &false_case);
__ bind(&true_case);
Visit(expr->then_expression());
@ -581,7 +626,7 @@ void FastCodeGenerator::VisitSlot(Slot* expr) {
void FastCodeGenerator::VisitLiteral(Literal* expr) {
Comment cmnt(masm_, "[ Literal");
Move(expr->context(), expr);
Apply(expr->context(), expr);
}
@ -634,7 +679,7 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(prop, Expression::kValue);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(Expression::kValue);
EmitKeyedPropertyLoad(prop, Expression::kValue);
break;
}
}
@ -652,7 +697,8 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr);
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->context());
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@ -665,12 +711,29 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
UNREACHABLE();
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
// Push key string.
ASSERT_EQ(Expression::kValue, expr->key()->context());
Visit(expr->key());
ASSERT_EQ(Expression::kValue, expr->value()->context());
Visit(expr->value());
// Create catch extension object.
__ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
__ push(result_register());
}
void FastCodeGenerator::VisitThrow(Throw* expr) {
UNREACHABLE();
Comment cmnt(masm_, "[ Throw");
Visit(expr->exception());
// Exception is on stack.
__ CallRuntime(Runtime::kThrow, 1);
// Never returns here.
}
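
The fast code generator's new try/catch support parks the thrown value in a frame-allocated ".catch-var" slot and relies on the parser having rewritten the catch block to open a fresh scope for the catch variable and close it afterwards. The user-visible contract being preserved is ordinary catch-variable scoping:

var e = 'outer';
try {
  throw 'thrown';
} catch (e) {
  // Inside the catch block, `e` is the freshly bound catch variable.
  console.log(e);  // 'thrown'
}
// The catch binding does not leak out of the block.
console.log(e);    // 'outer'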

55
deps/v8/src/fast-codegen.h

@ -212,26 +212,48 @@ class FastCodeGenerator: public AstVisitor {
int SlotOffset(Slot* slot);
void Move(Expression::Context destination, Register source);
void Move(Expression::Context destination, Slot* source, Register scratch);
void Move(Expression::Context destination, Literal* source);
// Emit code to complete the evaluation of an expression based on its
// expression context and given its value is in a register, non-lookup
// slot, or a literal.
void Apply(Expression::Context context, Register reg);
void Apply(Expression::Context context, Slot* slot, Register scratch);
void Apply(Expression::Context context, Literal* lit);
// Emit code to complete the evaluation of an expression based on its
// expression context and given its value is on top of the stack.
void ApplyTOS(Expression::Context context);
// Emit code to discard count elements from the top of stack, then
// complete the evaluation of an expression based on its expression
// context and given its value is in a register.
void DropAndApply(int count, Expression::Context context, Register reg);
void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
void Move(Register dst, Slot* source);
// Templated to allow for Operand on intel and MemOperand on ARM.
template <typename MemoryLocation>
MemoryLocation CreateSlotOperand(Slot* slot, Register scratch);
// Drop the TOS, and store source to destination.
// If destination is TOS, just overwrite TOS with source.
void DropAndMove(Expression::Context destination,
Register source,
int drop_count = 1);
// Return an operand used to read/write to a known (ie, non-LOOKUP) slot.
// May emit code to traverse the context chain, destroying the scratch
// register.
MemOperand EmitSlotSearch(Slot* slot, Register scratch);
// Test the JavaScript value in source as if in a test context, compile
// control flow to a pair of labels.
void TestAndBranch(Register source, Label* true_label, Label* false_label);
void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
ASSERT(expr->context() == Expression::kTest ||
expr->context() == Expression::kValueTest ||
expr->context() == Expression::kTestValue);
Label* saved_true = true_label_;
Label* saved_false = false_label_;
true_label_ = if_true;
false_label_ = if_false;
Visit(expr);
true_label_ = saved_true;
false_label_ = saved_false;
}
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
@ -247,13 +269,13 @@ class FastCodeGenerator: public AstVisitor {
// Platform-specific support for compiling assignments.
// Load a value from a named property and push the result on the stack.
// Load a value from a named property.
// The receiver is left on the stack by the IC.
void EmitNamedPropertyLoad(Property* expr, Expression::Context context);
// Load a value from a named property and push the result on the stack.
// Load a value from a keyed property.
// The receiver and the key is left on the stack by the IC.
void EmitKeyedPropertyLoad(Expression::Context context);
void EmitKeyedPropertyLoad(Property* expr, Expression::Context context);
// Apply the compound assignment operator. Expects both operands on top
// of the stack.
@ -261,7 +283,7 @@ class FastCodeGenerator: public AstVisitor {
// Complete a variable assignment. The right-hand-side value is expected
// on top of the stack.
void EmitVariableAssignment(Assignment* expr);
void EmitVariableAssignment(Variable* var, Expression::Context context);
// Complete a named property assignment. The receiver and right-hand-side
// value are expected on top of the stack.
@ -279,7 +301,6 @@ class FastCodeGenerator: public AstVisitor {
// Non-local control flow support.
void EnterFinallyBlock();
void ExitFinallyBlock();
void ThrowException();
// Loop nesting counter.
int loop_depth() { return loop_depth_; }

7
deps/v8/src/flag-definitions.h

@ -198,6 +198,9 @@ DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
DEFINE_bool(canonicalize_object_literal_maps, true,
"Canonicalize maps for object literals.")
DEFINE_bool(use_big_map_space, true,
"Use big map space, but don't compact if it grew too big.")
// mksnapshot.cc
DEFINE_bool(h, false, "print this message")
DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
@ -228,6 +231,7 @@ DEFINE_bool(preemption, false,
// Regexp
DEFINE_bool(trace_regexps, false, "trace regexp execution")
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
@ -325,6 +329,9 @@ DEFINE_bool(collect_heap_spill_statistics, false,
"(requires heap_stats)")
// Regexp
DEFINE_bool(regexp_possessive_quantifier,
false,
"enable possessive quantifier syntax for testing")
DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
DEFINE_bool(trace_regexp_assembler,
false,

4
deps/v8/src/frames.cc

@ -306,7 +306,7 @@ void StackHandler::Cook(Code* code) {
void StackHandler::Uncook(Code* code) {
ASSERT(MarkCompactCollector::IsCompacting());
ASSERT(MarkCompactCollector::HasCompacted());
set_pc(code->instruction_start() + OffsetFrom(pc()));
ASSERT(code->contains(pc()));
}
@ -336,7 +336,7 @@ void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
// Only uncooking frames when the collector is compacting and thus moving code
// around.
ASSERT(MarkCompactCollector::IsCompacting());
ASSERT(MarkCompactCollector::HasCompacted());
ASSERT(thread->stack_is_cooked());
for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
it.frame()->Uncook();

26
deps/v8/src/heap-inl.h

@ -54,7 +54,8 @@ Object* Heap::AllocateRaw(int size_in_bytes,
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
retry_space == OLD_DATA_SPACE);
retry_space == OLD_DATA_SPACE ||
retry_space == LO_SPACE);
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
!disallow_allocation_failure_ &&
@ -196,12 +197,23 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
// other object types are promoted to old pointer space. We do not use
// object->IsHeapNumber() and object->IsSeqString() because we already
// know that object has the heap object tag.
ASSERT((type != CODE_TYPE) && (type != MAP_TYPE));
bool has_pointers =
type != HEAP_NUMBER_TYPE &&
(type >= FIRST_NONSTRING_TYPE ||
(type & kStringRepresentationMask) != kSeqStringTag);
return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
// These objects are never allocated in new space.
ASSERT(type != MAP_TYPE);
ASSERT(type != CODE_TYPE);
ASSERT(type != ODDBALL_TYPE);
ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);
if (type < FIRST_NONSTRING_TYPE) {
// There are three string representations: sequential strings, cons
// strings, and external strings. Only cons strings contain
// non-map-word pointers to heap objects.
return ((type & kStringRepresentationMask) == kConsStringTag)
? OLD_POINTER_SPACE
: OLD_DATA_SPACE;
} else {
return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
}
}

290
deps/v8/src/heap.cc

@ -479,6 +479,65 @@ static void VerifySymbolTable() {
}
void Heap::ReserveSpace(
int new_space_size,
int pointer_space_size,
int data_space_size,
int code_space_size,
int map_space_size,
int cell_space_size,
int large_object_size) {
NewSpace* new_space = Heap::new_space();
PagedSpace* old_pointer_space = Heap::old_pointer_space();
PagedSpace* old_data_space = Heap::old_data_space();
PagedSpace* code_space = Heap::code_space();
PagedSpace* map_space = Heap::map_space();
PagedSpace* cell_space = Heap::cell_space();
LargeObjectSpace* lo_space = Heap::lo_space();
bool gc_performed = true;
while (gc_performed) {
gc_performed = false;
if (!new_space->ReserveSpace(new_space_size)) {
Heap::CollectGarbage(new_space_size, NEW_SPACE);
gc_performed = true;
}
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
gc_performed = true;
}
if (!(old_data_space->ReserveSpace(data_space_size))) {
Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
gc_performed = true;
}
if (!(code_space->ReserveSpace(code_space_size))) {
Heap::CollectGarbage(code_space_size, CODE_SPACE);
gc_performed = true;
}
if (!(map_space->ReserveSpace(map_space_size))) {
Heap::CollectGarbage(map_space_size, MAP_SPACE);
gc_performed = true;
}
if (!(cell_space->ReserveSpace(cell_space_size))) {
Heap::CollectGarbage(cell_space_size, CELL_SPACE);
gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for the remembered
// set and a series of large-object allocations that are only just larger
// than the page size.
large_object_size *= 2;
// The ReserveSpace method on the large object space checks how much
// we can expand the old generation. This includes expansion caused by
// allocation in the other spaces.
large_object_size += cell_space_size + map_space_size + code_space_size +
data_space_size + pointer_space_size;
if (!(lo_space->ReserveSpace(large_object_size))) {
Heap::CollectGarbage(large_object_size, LO_SPACE);
gc_performed = true;
}
}
}
void Heap::EnsureFromSpaceIsCommitted() {
if (new_space_.CommitFromSpaceIfNeeded()) return;
@ -576,6 +635,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
Top::MarkCompactPrologue(is_compacting);
ThreadManager::MarkCompactPrologue(is_compacting);
if (is_compacting) FlushNumberStringCache();
}
@ -804,7 +865,8 @@ void Heap::ScavengeExternalStringTable() {
}
}
ExternalStringTable::ShrinkNewStrings(last - start);
ASSERT(last <= end);
ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
}
@ -1354,9 +1416,6 @@ Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
// New space can't cope with forced allocation.
if (always_allocate()) space = OLD_DATA_SPACE;
Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@ -1576,10 +1635,7 @@ bool Heap::CreateInitialObjects() {
CreateFixedStubs();
// Allocate the number->string conversion cache
obj = AllocateFixedArray(kNumberStringCacheSize * 2);
if (obj->IsFailure()) return false;
set_number_string_cache(FixedArray::cast(obj));
if (InitializeNumberStringCache()->IsFailure()) return false;
// Allocate cache for single character strings.
obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
@ -1610,25 +1666,45 @@ bool Heap::CreateInitialObjects() {
}
Object* Heap::InitializeNumberStringCache() {
// Compute the size of the number string cache based on the max heap size.
// max_semispace_size_ == 512 KB => number_string_cache_size = 32.
// max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
int number_string_cache_size = max_semispace_size_ / 512;
number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
Object* obj = AllocateFixedArray(number_string_cache_size * 2);
if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
return obj;
}
void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
int len = number_string_cache()->length();
for (int i = 0; i < len; i++) {
number_string_cache()->set_undefined(i);
}
}
static inline int double_get_hash(double d) {
DoubleRepresentation rep(d);
return ((static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) &
(Heap::kNumberStringCacheSize - 1));
return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}
static inline int smi_get_hash(Smi* smi) {
return (smi->value() & (Heap::kNumberStringCacheSize - 1));
return smi->value();
}
Object* Heap::GetNumberStringCache(Object* number) {
int hash;
int mask = (number_string_cache()->length() >> 1) - 1;
if (number->IsSmi()) {
hash = smi_get_hash(Smi::cast(number));
hash = smi_get_hash(Smi::cast(number)) & mask;
} else {
hash = double_get_hash(number->Number());
hash = double_get_hash(number->Number()) & mask;
}
Object* key = number_string_cache()->get(hash * 2);
if (key == number) {
@ -1644,11 +1720,12 @@ Object* Heap::GetNumberStringCache(Object* number) {
void Heap::SetNumberStringCache(Object* number, String* string) {
int hash;
int mask = (number_string_cache()->length() >> 1) - 1;
if (number->IsSmi()) {
hash = smi_get_hash(Smi::cast(number));
hash = smi_get_hash(Smi::cast(number)) & mask;
number_string_cache()->set(hash * 2, number, SKIP_WRITE_BARRIER);
} else {
hash = double_get_hash(number->Number());
hash = double_get_hash(number->Number()) & mask;
number_string_cache()->set(hash * 2, number);
}
number_string_cache()->set(hash * 2 + 1, string);
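The cache above is a flat FixedArray of (number, string) pairs, so the index math hashes the number and masks it down to a pair slot. A minimal sketch of that indexing, assuming only that the pair count is a power of two (the names here are illustrative, not v8 code):

#include <cstdint>
#include <cstring>

// Sketch of the masking used by GetNumberStringCache / SetNumberStringCache.
// 'entries' stands in for number_string_cache()->length() / 2.
int CacheSlot(double number, int entries) {
  uint64_t bits;
  std::memcpy(&bits, &number, sizeof(bits));   // DoubleRepresentation in v8
  int hash = static_cast<int>(bits) ^ static_cast<int>(bits >> 32);
  int mask = entries - 1;                      // (length() >> 1) - 1
  return (hash & mask) * 2;                    // key at slot, string at slot + 1
}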
@ -1762,7 +1839,6 @@ Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate proxies in paged spaces.
STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
if (always_allocate()) space = OLD_DATA_SPACE;
Object* result = Allocate(proxy_map(), space);
if (result->IsFailure()) return result;
@ -1902,8 +1978,7 @@ Object* Heap::AllocateConsString(String* first, String* second) {
Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
Object* result = Allocate(map,
always_allocate() ? OLD_POINTER_SPACE : NEW_SPACE);
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ConsString* cons_string = ConsString::cast(result);
WriteBarrierMode mode = cons_string->GetWriteBarrierMode();
@ -1967,8 +2042,7 @@ Object* Heap::AllocateExternalStringFromAscii(
}
Map* map = external_ascii_string_map();
Object* result = Allocate(map,
always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
@ -1989,8 +2063,7 @@ Object* Heap::AllocateExternalStringFromTwoByte(
}
Map* map = Heap::external_string_map();
Object* result = Allocate(map,
always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
@ -2025,15 +2098,16 @@ Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
return Failure::OutOfMemoryException();
}
if (pretenure == NOT_TENURED) {
return AllocateByteArray(length);
}
int size = ByteArray::SizeFor(length);
AllocationSpace space =
size > MaxObjectSizeInPagedSpace() ? LO_SPACE : OLD_DATA_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
Object* result = (size <= MaxObjectSizeInPagedSpace())
? old_data_space_->AllocateRaw(size)
: lo_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
@ -2043,15 +2117,13 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Object* Heap::AllocateByteArray(int length) {
if (length < 0 || length > ByteArray::kMaxLength) {
return Failure::OutOfMemoryException();
}
int size = ByteArray::SizeFor(length);
AllocationSpace space =
size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
// New space can't cope with forced allocation.
if (always_allocate()) space = LO_SPACE;
(size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
@ -2076,12 +2148,7 @@ Object* Heap::AllocatePixelArray(int length,
uint8_t* external_pointer,
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
// New space can't cope with forced allocation.
if (always_allocate()) space = OLD_DATA_SPACE;
Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
@ -2097,14 +2164,9 @@ Object* Heap::AllocateExternalArray(int length,
void* external_pointer,
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
// New space can't cope with forced allocation.
if (always_allocate()) space = OLD_DATA_SPACE;
Object* result = AllocateRaw(ExternalArray::kAlignedSize,
space,
OLD_DATA_SPACE);
if (result->IsFailure()) return result;
reinterpret_cast<ExternalArray*>(result)->set_map(
@ -2193,9 +2255,12 @@ Object* Heap::CopyCode(Code* code) {
Object* Heap::Allocate(Map* map, AllocationSpace space) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
Object* result = AllocateRaw(map->instance_size(),
space,
TargetSpaceId(map->instance_type()));
// If allocation failures are disallowed, we may allocate in a different
// space when new space is full and the object is not a large object.
AllocationSpace retry_space =
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
Object* result =
AllocateRaw(map->instance_size(), space, retry_space);
if (result->IsFailure()) return result;
HeapObject::cast(result)->set_map(map);
#ifdef ENABLE_LOGGING_AND_PROFILING
@ -2383,7 +2448,6 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
if (always_allocate()) space = OLD_POINTER_SPACE;
Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj;
@ -2658,12 +2722,16 @@ Map* Heap::SymbolMapForString(String* string) {
Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
int chars,
uint32_t hash_field) {
ASSERT(chars >= 0);
// Ensure the chars matches the number of characters in the buffer.
ASSERT(static_cast<unsigned>(chars) == buffer->Length());
// Determine whether the string is ascii.
bool is_ascii = true;
while (buffer->has_more() && is_ascii) {
if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) is_ascii = false;
while (buffer->has_more()) {
if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
is_ascii = false;
break;
}
}
buffer->Rewind();
@ -2672,17 +2740,23 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Map* map;
if (is_ascii) {
if (chars > SeqAsciiString::kMaxLength) {
return Failure::OutOfMemoryException();
}
map = ascii_symbol_map();
size = SeqAsciiString::SizeFor(chars);
} else {
if (chars > SeqTwoByteString::kMaxLength) {
return Failure::OutOfMemoryException();
}
map = symbol_map();
size = SeqTwoByteString::SizeFor(chars);
}
// Allocate string.
AllocationSpace space =
(size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_DATA_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
Object* result = (size > MaxObjectSizeInPagedSpace())
? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
reinterpret_cast<HeapObject*>(result)->set_map(map);
@ -2702,22 +2776,28 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
// New space can't cope with forced allocation.
if (always_allocate()) space = OLD_DATA_SPACE;
if (length < 0 || length > SeqAsciiString::kMaxLength) {
return Failure::OutOfMemoryException();
}
int size = SeqAsciiString::SizeFor(length);
ASSERT(size <= SeqAsciiString::kMaxSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
AllocationSpace retry_space = OLD_DATA_SPACE;
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
: lo_space_->AllocateRaw(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (size > kMaxObjectSizeInNewSpace) {
// Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
} else if (size > MaxObjectSizeInPagedSpace()) {
// Allocate in new space, retry in large object space.
retry_space = LO_SPACE;
}
} else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
space = LO_SPACE;
}
Object* result = AllocateRaw(size, space, retry_space);
if (result->IsFailure()) return result;
// Partially initialize the object.
@ -2730,22 +2810,26 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
// New space can't cope with forced allocation.
if (always_allocate()) space = OLD_DATA_SPACE;
if (length < 0 || length > SeqTwoByteString::kMaxLength) {
return Failure::OutOfMemoryException();
}
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
AllocationSpace retry_space = OLD_DATA_SPACE;
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
: lo_space_->AllocateRaw(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (size > kMaxObjectSizeInNewSpace) {
// Allocate in large object space, retry space will be ignored.
space = LO_SPACE;
} else if (size > MaxObjectSizeInPagedSpace()) {
// Allocate in new space, retry in large object space.
retry_space = LO_SPACE;
}
} else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
space = LO_SPACE;
}
Object* result = AllocateRaw(size, space, retry_space);
if (result->IsFailure()) return result;
// Partially initialize the object.
@ -2769,6 +2853,9 @@ Object* Heap::AllocateEmptyFixedArray() {
Object* Heap::AllocateRawFixedArray(int length) {
if (length < 0 || length > FixedArray::kMaxLength) {
return Failure::OutOfMemoryException();
}
// Use the general function if we're forced to always allocate.
if (always_allocate()) return AllocateFixedArray(length, TENURED);
// Allocate the raw data for a fixed array.
@ -2820,29 +2907,47 @@ Object* Heap::AllocateFixedArray(int length) {
Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
ASSERT(length >= 0);
ASSERT(empty_fixed_array()->IsFixedArray());
if (length < 0 || length > FixedArray::kMaxLength) {
return Failure::OutOfMemoryException();
}
if (length == 0) return empty_fixed_array();
// New space can't cope with forced allocation.
if (always_allocate()) pretenure = TENURED;
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
int size = FixedArray::SizeFor(length);
Object* result = Failure::OutOfMemoryException();
if (pretenure != TENURED) {
result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
: lo_space_->AllocateRawFixedArray(size);
if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
// Too big for new space.
space = LO_SPACE;
} else if (space == OLD_POINTER_SPACE &&
size > MaxObjectSizeInPagedSpace()) {
// Too big for old pointer space.
space = LO_SPACE;
}
if (result->IsFailure()) {
if (size > MaxObjectSizeInPagedSpace()) {
// Specialize allocation for the space.
Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
// We cannot use Heap::AllocateRaw() because it will not properly
// allocate extra remembered set bits if always_allocate() is true and
// new space allocation fails.
result = new_space_.AllocateRaw(size);
if (result->IsFailure() && always_allocate()) {
if (size <= MaxObjectSizeInPagedSpace()) {
result = old_pointer_space_->AllocateRaw(size);
} else {
result = lo_space_->AllocateRawFixedArray(size);
}
}
} else if (space == OLD_POINTER_SPACE) {
result = old_pointer_space_->AllocateRaw(size);
} else {
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
result = AllocateRaw(size, space, OLD_POINTER_SPACE);
ASSERT(space == LO_SPACE);
result = lo_space_->AllocateRawFixedArray(size);
}
if (result->IsFailure()) return result;
}
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
@ -3437,7 +3542,10 @@ bool Heap::Setup(bool create_heap_objects) {
if (!code_space_->Setup(NULL, 0)) return false;
// Initialize map space.
map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
map_space_ = new MapSpace(FLAG_use_big_map_space
? max_old_generation_size_
: (MapSpace::kMaxMapPageIndex + 1) * Page::kPageSize,
MAP_SPACE);
if (map_space_ == NULL) return false;
if (!map_space_->Setup(NULL, 0)) return false;

47
deps/v8/src/heap.h

@ -804,9 +804,27 @@ class Heap : public AllStatic {
// Rebuild remembered set in old and map spaces.
static void RebuildRSets();
// Update an old object's remembered set
static int UpdateRSet(HeapObject* obj);
// Commits from space if it is uncommitted.
static void EnsureFromSpaceIsCommitted();
// Support for partial snapshots. After calling this we can allocate a
// certain number of bytes using only linear allocation (with a
// LinearAllocationScope and an AlwaysAllocateScope) without using freelists
// or causing a GC. It returns true if space was reserved or false if a GC is
// needed. For paged spaces the space requested must include the space wasted
// at the end of each page when allocating linearly.
static void ReserveSpace(
int new_space_size,
int pointer_space_size,
int data_space_size,
int code_space_size,
int map_space_size,
int cell_space_size,
int large_object_size);
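A hedged sketch of how a partial-snapshot deserializer might combine this with the scopes mentioned in the comment above; the byte counts and the surrounding code are illustrative only:

// Illustrative only: reserve room up front, then allocate linearly without
// triggering a GC mid-deserialization.
Heap::ReserveSpace(64 * KB,    // new space
                   32 * KB,    // old pointer space
                   32 * KB,    // old data space
                   16 * KB,    // code space
                   8 * KB,     // map space
                   4 * KB,     // cell space
                   128 * KB);  // large objects
{
  AlwaysAllocateScope always_allocate;
  LinearAllocationScope linear_only;
  // ... allocate the reserved bytes using only linear allocation ...
}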
//
// Support for the API.
//
@ -820,9 +838,6 @@ class Heap : public AllStatic {
// Update the cache with a new number-string pair.
static void SetNumberStringCache(Object* number, String* str);
// Entries in the cache. Must be a power of 2.
static const int kNumberStringCacheSize = 64;
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
@ -837,11 +852,15 @@ class Heap : public AllStatic {
> old_gen_promotion_limit_;
}
static intptr_t OldGenerationSpaceAvailable() {
return old_gen_allocation_limit_ -
(PromotedSpaceSize() + PromotedExternalMemorySize());
}
// True if we have reached the allocation limit in the old generation that
// should artificially cause a GC right now.
static bool OldGenerationAllocationLimitReached() {
return (PromotedSpaceSize() + PromotedExternalMemorySize())
> old_gen_allocation_limit_;
return OldGenerationSpaceAvailable() < 0;
}
// Can be called when the embedding application is idle.
@ -890,11 +909,6 @@ class Heap : public AllStatic {
static int linear_allocation_scope_depth_;
static bool context_disposed_pending_;
// The number of MapSpace pages is limited by the way we pack
// Map pointers during GC.
static const int kMaxMapSpaceSize =
(1 << (MapWord::kMapPageIndexBits)) * Page::kPageSize;
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 512*KB;
#else
@ -1060,7 +1074,7 @@ class Heap : public AllStatic {
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
static HeapObject* MigrateObject(HeapObject* source,
static inline HeapObject* MigrateObject(HeapObject* source,
HeapObject* target,
int size);
@ -1078,9 +1092,6 @@ class Heap : public AllStatic {
static void ReportStatisticsAfterGC();
#endif
// Update an old object's remembered set
static int UpdateRSet(HeapObject* obj);
// Rebuild remembered set in an old space.
static void RebuildRSets(PagedSpace* space);
@ -1103,6 +1114,12 @@ class Heap : public AllStatic {
SharedFunctionInfo* shared,
Object* prototype);
// Initializes the number to string cache based on the max semispace size.
static Object* InitializeNumberStringCache();
// Flush the number to string cache.
static void FlushNumberStringCache();
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@ -1234,7 +1251,7 @@ class OldSpaces BASE_EMBEDDED {
// Space iterator for iterating over all the paged spaces of the heap:
// Map space, old pointer space, old data space and code space.
// Map space, old pointer space, old data space, code space and cell space.
// Returns each space in turn, and null when it is done.
class PagedSpaces BASE_EMBEDDED {
public:

73
deps/v8/src/ia32/assembler-ia32.cc

@ -575,6 +575,7 @@ void Assembler::leave() {
void Assembler::mov_b(Register dst, const Operand& src) {
ASSERT(dst.code() < 4);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x8A);
@ -592,6 +593,7 @@ void Assembler::mov_b(const Operand& dst, int8_t imm8) {
void Assembler::mov_b(const Operand& dst, Register src) {
ASSERT(src.code() < 4);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x88);
@ -752,6 +754,14 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
}
void Assembler::rep_movs() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
EMIT(0xA5);
}
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1199,6 +1209,15 @@ void Assembler::sub(Register dst, const Operand& src) {
}
void Assembler::subb(Register dst, const Operand& src) {
ASSERT(dst.code() < 4);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x2A);
emit_operand(dst, src);
}
void Assembler::sub(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1586,7 +1605,6 @@ void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
// FPU instructions
void Assembler::fld(int i) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -1675,6 +1693,15 @@ void Assembler::fisttp_s(const Operand& adr) {
}
void Assembler::fisttp_d(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xDD);
emit_operand(ecx, adr);
}
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@ -2026,6 +2053,50 @@ void Assembler::comisd(XMMRegister dst, XMMRegister src) {
}
void Assembler::movdqa(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x7F);
emit_sse_operand(src, dst);
}
void Assembler::movdqa(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x6F);
emit_sse_operand(dst, src);
}
void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
EMIT(0x0F);
EMIT(0x7F);
emit_sse_operand(src, dst);
}
void Assembler::movdqu(XMMRegister dst, const Operand& src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0xF3);
EMIT(0x0F);
EMIT(0x6F);
emit_sse_operand(dst, src);
}
void Assembler::movdbl(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;

10
deps/v8/src/ia32/assembler-ia32.h

@ -540,6 +540,9 @@ class Assembler : public Malloced {
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, const Operand& src);
// Repetitive string instructions.
void rep_movs();
// Exchange two registers
void xchg(Register dst, Register src);
@ -614,6 +617,7 @@ class Assembler : public Malloced {
void shr_cl(Register dst);
void subb(const Operand& dst, int8_t imm8);
void subb(Register dst, const Operand& src);
void sub(const Operand& dst, const Immediate& x);
void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src);
@ -693,6 +697,7 @@ class Assembler : public Malloced {
void fistp_d(const Operand& adr);
void fisttp_s(const Operand& adr);
void fisttp_d(const Operand& adr);
void fabs();
void fchs();
@ -749,6 +754,11 @@ class Assembler : public Malloced {
void comisd(XMMRegister dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
void movdqu(const Operand& dst, XMMRegister src);
// Use either movsd or movlpd.
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);

15
deps/v8/src/ia32/builtins-ia32.cc

@ -86,7 +86,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ EnterConstructFrame();
// Store a smi-tagged arguments count on the stack.
__ shl(eax, kSmiTagSize);
__ SmiTag(eax);
__ push(eax);
// Push the function to invoke on the stack.
@ -255,7 +255,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Retrieve smi-tagged arguments count from the stack.
__ mov(eax, Operand(esp, 0));
__ shr(eax, kSmiTagSize);
__ SmiUntag(eax);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
@ -440,8 +440,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ EnterInternalFrame(); // preserves eax, ebx, edi
// Store the arguments count on the stack (smi tagged).
ASSERT(kSmiTag == 0);
__ shl(eax, kSmiTagSize);
__ SmiTag(eax);
__ push(eax);
__ push(edi); // save edi across the call
@ -452,7 +451,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Get the arguments count and untag it.
__ pop(eax);
__ shr(eax, kSmiTagSize);
__ SmiUntag(eax);
__ LeaveInternalFrame();
__ jmp(&patch_receiver);
@ -634,7 +633,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Invoke the function.
ParameterCount actual(eax);
__ shr(eax, kSmiTagSize);
__ SmiUntag(eax);
__ mov(edi, Operand(ebp, 4 * kPointerSize));
__ InvokeFunction(edi, actual, CALL_FUNCTION);
@ -831,7 +830,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array_end: start of next object
// array_size: size of array (smi)
ASSERT(kSmiTag == 0);
__ shr(array_size, kSmiTagSize); // Convert from smi to value.
__ SmiUntag(array_size); // Convert from smi to value.
__ mov(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
Label not_empty_2, fill_array;
@ -960,7 +959,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
// Handle construction of an array from a list of arguments.
__ bind(&argc_two_or_more);
ASSERT(kSmiTag == 0);
__ shl(eax, kSmiTagSize); // Convert argc to a smi.
__ SmiTag(eax); // Convert argc to a smi.
// eax: array_size (smi)
// edi: constructor
// esp[0] : argc

10
deps/v8/src/ia32/codegen-ia32-inl.h

@ -39,16 +39,6 @@ namespace internal {
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
GenerateFastMathOp(SIN, args);
}
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
GenerateFastMathOp(COS, args);
}
#undef __
} } // namespace v8::internal

1769
deps/v8/src/ia32/codegen-ia32.cc

File diff suppressed because it is too large

83
deps/v8/src/ia32/codegen-ia32.h

@ -541,15 +541,18 @@ class CodeGenerator: public AstVisitor {
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
// Fast support for Math.sin and Math.cos.
enum MathOp { SIN, COS };
void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
// Fast support for SubString.
void GenerateSubString(ZoneList<Expression*>* args);
// Fast support for StringCompare.
void GenerateStringCompare(ZoneList<Expression*>* args);
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@ -750,7 +753,32 @@ enum StringAddFlags {
};
class StringAddStub: public CodeStub {
class StringStubBase: public CodeStub {
public:
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
// overhead. Copying of overlapping regions is not supported.
void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii);
// Generate code for copying characters using the rep movs instruction.
// Copies ecx characters from esi to edi. Copying of overlapping regions is
// not supported.
void GenerateCopyCharactersREP(MacroAssembler* masm,
Register dest, // Must be edi.
Register src, // Must be esi.
Register count, // Must be ecx.
Register scratch, // Neither of the above.
bool ascii);
};
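Because GenerateCopyCharactersREP is built on rep movs, its register assignments are fixed; a sketch of a call site (the stub name is hypothetical and assumed to derive from StringStubBase):

// Sketch only: registers follow the constraints documented above.
void SomeStringStub::Generate(MacroAssembler* masm) {
  // ... load destination into edi, source into esi, char count into ecx ...
  GenerateCopyCharactersREP(masm,
                            edi,    // dest, must be edi
                            esi,    // src, must be esi
                            ecx,    // count, must be ecx
                            ebx,    // scratch, none of the above
                            true);  // ascii
  // ...
}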
class StringAddStub: public StringStubBase {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@ -762,18 +790,45 @@ class StringAddStub: public CodeStub {
void Generate(MacroAssembler* masm);
void GenerateCopyCharacters(MacroAssembler* masm,
Register desc,
Register src,
Register count,
Register scratch,
bool ascii);
// Should the stub check whether arguments are strings?
bool string_check_;
};
class SubStringStub: public StringStubBase {
public:
SubStringStub() {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public StringStubBase {
public:
explicit StringCompareStub() {
}
// Compare two flat ascii strings and return the result in eax after popping
// two arguments from the stack.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
Major MajorKey() { return StringCompare; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_

55
deps/v8/src/ia32/disasm-ia32.cc

@ -61,6 +61,7 @@ static ByteMnemonic two_operands_instr[] = {
{0x0B, "or", REG_OPER_OP_ORDER},
{0x1B, "sbb", REG_OPER_OP_ORDER},
{0x29, "sub", OPER_REG_OP_ORDER},
{0x2A, "subb", REG_OPER_OP_ORDER},
{0x2B, "sub", REG_OPER_OP_ORDER},
{0x85, "test", REG_OPER_OP_ORDER},
{0x31, "xor", OPER_REG_OP_ORDER},
@ -1007,7 +1008,16 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x80:
{ data++;
AppendToBuffer("%s ", "cmpb");
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
const char* mnem = NULL;
printf("%d\n", regop);
switch (regop) {
case 5: mnem = "subb"; break;
case 7: mnem = "cmpb"; break;
default: UnimplementedInstruction();
}
AppendToBuffer("%s ", mnem);
data += PrintRightOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
@ -1057,6 +1067,19 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x6F) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
} else if (*data == 0x7F) {
AppendToBuffer("movdqa ");
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
UnimplementedInstruction();
}
@ -1093,6 +1116,11 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2;
break;
case 0x2C:
AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
data += 2;
break;
case 0xA9:
AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
data += 5;
@ -1163,9 +1191,29 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xF3:
if (*(data+1) == 0x0F && *(data+2) == 0x2C) {
if (*(data+1) == 0x0F) {
if (*(data+2) == 0x2C) {
data += 3;
data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
} else if (*(data+2) == 0x6F) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
data += PrintRightOperand(data);
} else if (*(data+2) == 0x7F) {
AppendToBuffer("movdqu ");
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
UnimplementedInstruction();
}
} else if (*(data+1) == 0xA5) {
data += 2;
AppendToBuffer("rep_movs");
} else {
UnimplementedInstruction();
}
@ -1185,6 +1233,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
int instr_len = data - instr;
if (instr_len == 0) {
printf("%02x", *data);
}
ASSERT(instr_len > 0); // Ensure progress.
int outp = 0;

657
deps/v8/src/ia32/fast-codegen-ia32.cc

File diff suppressed because it is too large

43
deps/v8/src/ia32/ic-ia32.cc

@ -313,6 +313,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Is the string a symbol?
__ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
ASSERT(kSymbolTag != 0);
__ test(ebx, Immediate(kIsSymbolMask));
__ j(zero, &slow, not_taken);
@ -391,6 +392,48 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- esp[0] : return address
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
Label miss, index_ok;
// Pop return address.
// Performing the load early is better in the common case.
__ pop(eax);
__ mov(ebx, Operand(esp, 1 * kPointerSize));
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &miss);
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
__ test(ecx, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Check if key is a smi or a heap number.
__ mov(edx, Operand(esp, 0));
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &index_ok);
__ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(ecx, Factory::heap_number_map());
__ j(not_equal, &miss);
__ bind(&index_ok);
// Duplicate receiver and key since they are expected on the stack after
// the KeyedLoadIC call.
__ push(ebx); // receiver
__ push(edx); // key
__ push(eax); // return address
__ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_FUNCTION);
__ bind(&miss);
__ push(eax);
GenerateMiss(masm);
}
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------

35
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -325,6 +325,17 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
ASSERT(kNotStringTag != 0);
test(instance_type, Immediate(kIsNotStringMask));
return zero;
}
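The returned condition is zero, i.e. taken when the object is a string, so a caller branches on its negation to reach the non-string path. A sketch of such a call site (the labels and surrounding code are assumptions, not from this patch):

// Sketch only: fall through on the string fast path.
Label not_a_string;
Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
masm->j(NegateCondition(is_string), &not_a_string);
// ... fast path for strings ...
masm->bind(&not_a_string);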
void MacroAssembler::FCmp() {
if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
@ -729,13 +740,13 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
or_(Operand(result), Immediate(kHeapObjectTag));
lea(result, Operand(result, kHeapObjectTag));
}
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
@ -759,13 +770,13 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
or_(Operand(result), Immediate(kHeapObjectTag));
lea(result, Operand(result, kHeapObjectTag));
}
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}
@ -790,13 +801,13 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
or_(Operand(result), Immediate(kHeapObjectTag));
lea(result, Operand(result, kHeapObjectTag));
}
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
}

25
deps/v8/src/ia32/macro-assembler-ia32.h

@ -33,10 +33,13 @@
namespace v8 {
namespace internal {
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
// Forward declaration.
class JumpTarget;
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@ -138,10 +141,28 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
// Check if the object in register heap_object is a string. Afterwards the
// register map contains the object map and the register instance_type
// contains the instance_type. The registers map and instance_type can be the
// same in which case it contains the instance type afterwards. Either of the
// registers map and instance_type can be the same as heap_object.
Condition IsObjectStringType(Register heap_object,
Register map,
Register instance_type);
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
// Smi tagging support.
void SmiTag(Register reg) {
ASSERT(kSmiTag == 0);
shl(reg, kSmiTagSize);
}
void SmiUntag(Register reg) {
sar(reg, kSmiTagSize);
}
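On ia32 a smi is the integer shifted left by kSmiTagSize with a zero tag bit, so tagging and untagging are plain shifts. A worked arithmetic sketch, assuming kSmiTag == 0 and kSmiTagSize == 1 (not v8 code):

// SmiTagExample(5)    -> 5 << 1  == 10   (low bit 0 marks a smi)
// SmiUntagExample(10) -> 10 >> 1 == 5
// SmiUntagExample(-6) -> -6 >> 1 == -3   (arithmetic shift, hence 'sar' above)
static inline int SmiTagExample(int value) { return value << 1; }
static inline int SmiUntagExample(int smi) { return smi >> 1; }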
// ---------------------------------------------------------------------------
// Exception handling
@ -347,6 +368,8 @@ class MacroAssembler: public Assembler {
void Ret();
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the esp register.
void Drop(int element_count);
void Call(Label* target) { call(target); }

143
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -55,13 +55,17 @@ namespace internal {
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
* - direct_call (if 1, direct call from JavaScript code, if 0
* call through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
* - at_start (if 1, start at start of string, if 0, don't)
* - at_start (if 1, we are starting at the start of the
* string, otherwise 0)
* - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
* - void* input_string (location of a handle containing the string)
* - start index (character index of start)
* - String* input_string (location of a handle containing the string)
* --- frame alignment (if applicable) ---
* - return address
* ebp-> - old ebp
@ -81,11 +85,13 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
* int (*match)(String* input_string,
* int start_index,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
* byte* stack_area_base)
* byte* stack_area_base,
* bool direct_call)
*/
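The calling convention spelled out above can be restated as a function type; a sketch (the typedef name is an assumption, the argument order is taken from the comment):

typedef int (*RegExpEntryFunction)(String* input_string,
                                   int start_index,
                                   Address start,
                                   Address end,
                                   int* capture_output_array,
                                   bool at_start,
                                   byte* stack_area_base,
                                   bool direct_call);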
#define __ ACCESS_MASM(masm_)
@ -471,8 +477,6 @@ void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
int cp_offset,
bool check_offset,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check
@ -481,17 +485,12 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
// Match space-characters
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
Label success;
__ cmp(current_character(), ' ');
__ j(equal, &success);
// Check range 0x09..0x0d
__ sub(Operand(current_character()), Immediate('\t'));
__ cmp(current_character(), '\r' - '\t');
__ lea(eax, Operand(current_character(), -'\t'));
__ cmp(eax, '\r' - '\t');
BranchOrBacktrack(above, on_no_match);
__ bind(&success);
return true;
@ -499,72 +498,118 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
return false;
case 'S':
// Match non-space characters.
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match, 1);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
__ cmp(current_character(), ' ');
BranchOrBacktrack(equal, on_no_match);
__ sub(Operand(current_character()), Immediate('\t'));
__ cmp(current_character(), '\r' - '\t');
__ lea(eax, Operand(current_character(), -'\t'));
__ cmp(eax, '\r' - '\t');
BranchOrBacktrack(below_equal, on_no_match);
return true;
}
return false;
case 'd':
// Match ASCII digits ('0'..'9')
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match, 1);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
__ sub(Operand(current_character()), Immediate('0'));
__ cmp(current_character(), '9' - '0');
__ lea(eax, Operand(current_character(), -'0'));
__ cmp(eax, '9' - '0');
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match, 1);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
__ sub(Operand(current_character()), Immediate('0'));
__ cmp(current_character(), '9' - '0');
__ lea(eax, Operand(current_character(), -'0'));
__ cmp(eax, '9' - '0');
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
if (check_offset) {
LoadCurrentCharacter(cp_offset, on_no_match, 1);
} else {
LoadCurrentCharacterUnchecked(cp_offset, 1);
}
__ xor_(Operand(current_character()), Immediate(0x01));
__ mov(Operand(eax), current_character());
__ xor_(Operand(eax), Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(Operand(current_character()), Immediate(0x0b));
__ cmp(current_character(), 0x0c - 0x0b);
__ sub(Operand(eax), Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
__ sub(Operand(current_character()), Immediate(0x2028 - 0x0b));
__ cmp(current_character(), 1);
__ sub(Operand(eax), Immediate(0x2028 - 0x0b));
__ cmp(eax, 0x2029 - 0x2028);
BranchOrBacktrack(below_equal, on_no_match);
}
return true;
}
case 'w': {
Label done, check_digits;
__ cmp(Operand(current_character()), Immediate('9'));
__ j(less_equal, &check_digits);
__ cmp(Operand(current_character()), Immediate('_'));
__ j(equal, &done);
// Convert to lower case if letter.
__ mov(Operand(eax), current_character());
__ or_(eax, 0x20);
// check current character in range ['a'..'z'], nondestructively.
__ sub(Operand(eax), Immediate('a'));
__ cmp(Operand(eax), Immediate('z' - 'a'));
BranchOrBacktrack(above, on_no_match);
__ jmp(&done);
__ bind(&check_digits);
// Check current character in range ['0'..'9'].
__ cmp(Operand(current_character()), Immediate('0'));
BranchOrBacktrack(below, on_no_match);
__ bind(&done);
return true;
}
case 'W': {
Label done, check_digits;
__ cmp(Operand(current_character()), Immediate('9'));
__ j(less_equal, &check_digits);
__ cmp(Operand(current_character()), Immediate('_'));
BranchOrBacktrack(equal, on_no_match);
// Convert to lower case if letter.
__ mov(Operand(eax), current_character());
__ or_(eax, 0x20);
// check current character in range ['a'..'z'], nondestructively.
__ sub(Operand(eax), Immediate('a'));
__ cmp(Operand(eax), Immediate('z' - 'a'));
BranchOrBacktrack(below_equal, on_no_match);
__ jmp(&done);
__ bind(&check_digits);
// Check current character in range ['0'..'9'].
__ cmp(Operand(current_character()), Immediate('0'));
BranchOrBacktrack(above_equal, on_no_match);
__ bind(&done);
return true;
}
// Non-standard classes (with no syntactic shorthand) used internally.
case '*':
// Match any character.
if (check_offset) {
CheckPosition(cp_offset, on_no_match);
return true;
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
// The opposite of '.'.
__ mov(Operand(eax), current_character());
__ xor_(Operand(eax), Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(Operand(eax), Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
if (mode_ == ASCII) {
BranchOrBacktrack(above, on_no_match);
} else {
Label done;
BranchOrBacktrack(below_equal, &done);
ASSERT_EQ(UC16, mode_);
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
__ sub(Operand(eax), Immediate(0x2028 - 0x0b));
__ cmp(eax, 1);
BranchOrBacktrack(above, on_no_match);
__ bind(&done);
}
return true;
// No custom implementation (yet): w, W, s(UC16), S(UC16).
}
// No custom implementation (yet): s(UC16), S(UC16).
default:
return false;
}
@ -942,6 +987,12 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
// If not real stack overflow the stack guard was used to interrupt
// execution for another purpose.
// If this is a direct call from JavaScript retry the RegExp forcing the call
// through the runtime system. Currently the direct call cannot handle a GC.
if (frame_entry<int>(re_frame, kDirectCall) == 1) {
return RETRY;
}
// Prepare for possible GC.
HandleScope handles;
Handle<Code> code_handle(re_code);

6
deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@ -78,10 +78,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
int cp_offset,
bool check_offset,
Label* on_no_match);
virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
@ -128,6 +125,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kInputEnd + kPointerSize;
static const int kAtStart = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kAtStart + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
// the frame in GetCode.

6
deps/v8/src/ia32/simulator-ia32.h

@ -52,9 +52,9 @@ class SimulatorStack : public v8::internal::AllStatic {
};
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
entry(p0, p1, p2, p3, p4, p5, p6)
// expect eight int/pointer sized arguments and return an int.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)

18
deps/v8/src/ia32/stub-cache-ia32.cc

@ -236,7 +236,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// Load length from the string and convert to a smi.
__ bind(&load_length);
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
__ shl(eax, kSmiTagSize);
__ SmiTag(eax);
__ ret(0);
// Check if the object is a JSValue wrapper.
@ -1900,17 +1900,23 @@ Object* ConstructStubCompiler::CompileConstructStub(
// depending on the this.x = ...; assignment in the function.
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
Label not_passed;
// Set the property to undefined.
__ mov(Operand(edx, i * kPointerSize), edi);
// Check if the argument assigned to the property is actually passed.
// If argument is not passed the property is set to undefined,
// otherwise find it on the stack.
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
__ mov(ebx, edi);
__ cmp(eax, arg_number);
if (CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope use_cmov(CMOV);
__ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
} else {
Label not_passed;
__ j(below_equal, &not_passed);
// Argument passed - find it on the stack.
__ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
__ mov(Operand(edx, i * kPointerSize), ebx);
__ bind(&not_passed);
}
// Store value in the property.
__ mov(Operand(edx, i * kPointerSize), ebx);
} else {
// Set the property to the constant value.
Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));

22
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -513,13 +513,33 @@ void VirtualFrame::AllocateStackSlots() {
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
if (count == 1) {
__ push(Immediate(undefined));
} else if (count < kLocalVarBound) {
// For less locals the unrolled loop is more compact.
Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
__ Set(temp.reg(), Immediate(undefined));
for (int i = 0; i < count; i++) {
__ push(temp.reg());
}
} else {
// For more locals a loop in generated code is more compact.
Label alloc_locals_loop;
Result cnt = cgen()->allocator()->Allocate();
Result tmp = cgen()->allocator()->Allocate();
ASSERT(cnt.is_valid());
ASSERT(tmp.is_valid());
__ mov(cnt.reg(), Immediate(count));
__ mov(tmp.reg(), Immediate(undefined));
__ bind(&alloc_locals_loop);
__ push(tmp.reg());
__ dec(cnt.reg());
__ j(not_zero, &alloc_locals_loop);
}
for (int i = 0; i < count; i++) {
elements_.Add(initial_value);
stack_pointer_++;
__ push(temp.reg());
}
}
}

5
deps/v8/src/ia32/virtual-frame-ia32.h

@ -199,6 +199,9 @@ class VirtualFrame: public ZoneObject {
// shared return site. Emits code for spills.
void PrepareForReturn();
// Number of local variables above which a loop is used for allocating.
static const int kLocalVarBound = 10;
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
@ -392,6 +395,8 @@ class VirtualFrame: public ZoneObject {
// Pushing a result invalidates it (its contents become owned by the
// frame).
void Push(Result* result) {
// This assert will trigger if you try to push the same value twice.
ASSERT(result->is_valid());
if (result->is_register()) {
Push(result->reg());
} else {

4
deps/v8/src/ic.cc

@ -874,7 +874,9 @@ Object* KeyedLoadIC::Load(State state,
if (use_ic) {
Code* stub = generic_stub();
if (object->IsJSObject()) {
if (object->IsString() && key->IsNumber()) {
stub = string_stub();
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
stub = external_array_stub(receiver->GetElementsKind());

4
deps/v8/src/ic.h

@ -280,6 +280,7 @@ class KeyedLoadIC: public IC {
static void GenerateInitialize(MacroAssembler* masm);
static void GeneratePreMonomorphic(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
@ -313,6 +314,9 @@ class KeyedLoadIC: public IC {
static Code* pre_monomorphic_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
}
static Code* string_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_String);
}
static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static void Clear(Address address, Code* target);

706
deps/v8/src/jsregexp.cc

@ -112,37 +112,6 @@ static inline void ThrowRegExpException(Handle<JSRegExp> re,
// Generic RegExp methods. Dispatches to implementation specific methods.
class OffsetsVector {
public:
inline OffsetsVector(int num_registers)
: offsets_vector_length_(num_registers) {
if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
vector_ = NewArray<int>(offsets_vector_length_);
} else {
vector_ = static_offsets_vector_;
}
}
inline ~OffsetsVector() {
if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
DeleteArray(vector_);
vector_ = NULL;
}
}
inline int* vector() { return vector_; }
inline int length() { return offsets_vector_length_; }
private:
int* vector_;
int offsets_vector_length_;
static const int kStaticOffsetsVectorSize = 50;
static int static_offsets_vector_[kStaticOffsetsVectorSize];
};
int OffsetsVector::static_offsets_vector_[
OffsetsVector::kStaticOffsetsVectorSize];
Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
Handle<String> flag_str) {
@ -448,6 +417,14 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
// Capture values are relative to start_offset only.
// Convert them to be relative to start of string.
if (captures_vector[i] >= 0) {
captures_vector[i] += previous_index;
}
if (captures_vector[i + 1] >= 0) {
captures_vector[i + 1] += previous_index;
}
SetCapture(*array, i, captures_vector[i]);
SetCapture(*array, i + 1, captures_vector[i + 1]);
}
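A small worked example of the adjustment, with illustrative numbers only:

// The search started at character 10 of the full string; the first capture
// covers characters 2..4 of the searched substring (end is exclusive).
int previous_index = 10;
int captures_vector[] = { 2, 5 };
captures_vector[0] += previous_index;  // 12: absolute start
captures_vector[1] += previous_index;  // 15: absolute end + 1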
@ -1431,14 +1408,6 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
int cp_offset,
bool check_offset,
bool preloaded) {
if (cc->is_standard() &&
macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
cp_offset,
check_offset,
on_failure)) {
return;
}
ZoneList<CharacterRange>* ranges = cc->ranges();
int max_char;
if (ascii) {
@ -1489,6 +1458,12 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
}
if (cc->is_standard() &&
macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
on_failure)) {
return;
}
for (int i = 0; i < last_valid_range; i++) {
CharacterRange& range = ranges->at(i);
Label next_range;
@ -2049,6 +2024,12 @@ static void EmitWordCheck(RegExpMacroAssembler* assembler,
Label* word,
Label* non_word,
bool fall_through_on_word) {
if (assembler->CheckSpecialCharacterClass(
fall_through_on_word ? 'w' : 'W',
fall_through_on_word ? non_word : word)) {
// Optimized implementation available.
return;
}
assembler->CheckCharacterGT('z', non_word);
assembler->CheckCharacterLT('0', non_word);
assembler->CheckCharacterGT('a' - 1, word);
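For reference, a plain-C++ statement of the property the emitted fallback checks test for; the hunk above is truncated, so this mirrors the intent rather than every emitted branch:

// A word character for \w / \b purposes is [0-9a-zA-Z_].
bool IsAsciiWordChar(char c) {
  return (c >= '0' && c <= '9') ||
         (c >= 'a' && c <= 'z') ||
         (c >= 'A' && c <= 'Z') ||
         c == '_';
}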
@ -2085,17 +2066,60 @@ static void EmitHat(RegExpCompiler* compiler,
assembler->LoadCurrentCharacter(new_trace.cp_offset() -1,
new_trace.backtrack(),
false);
if (!assembler->CheckSpecialCharacterClass('n',
new_trace.backtrack())) {
// Newline means \n, \r, 0x2028 or 0x2029.
if (!compiler->ascii()) {
assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
}
assembler->CheckCharacter('\n', &ok);
assembler->CheckNotCharacter('\r', new_trace.backtrack());
}
assembler->Bind(&ok);
on_success->Emit(compiler, &new_trace);
}
// Emit the code to handle \b and \B (word-boundary or non-word-boundary)
// when we know whether the next character must be a word character or not.
static void EmitHalfBoundaryCheck(AssertionNode::AssertionNodeType type,
RegExpCompiler* compiler,
RegExpNode* on_success,
Trace* trace) {
RegExpMacroAssembler* assembler = compiler->macro_assembler();
Label done;
Trace new_trace(*trace);
bool expect_word_character = (type == AssertionNode::AFTER_WORD_CHARACTER);
Label* on_word = expect_word_character ? &done : new_trace.backtrack();
Label* on_non_word = expect_word_character ? new_trace.backtrack() : &done;
// Check whether previous character was a word character.
switch (trace->at_start()) {
case Trace::TRUE:
if (expect_word_character) {
assembler->GoTo(on_non_word);
}
break;
case Trace::UNKNOWN:
ASSERT_EQ(0, trace->cp_offset());
assembler->CheckAtStart(on_non_word);
// Fall through.
case Trace::FALSE:
int prev_char_offset = trace->cp_offset() - 1;
assembler->LoadCurrentCharacter(prev_char_offset, NULL, false, 1);
EmitWordCheck(assembler, on_word, on_non_word, expect_word_character);
// We may or may not have loaded the previous character.
new_trace.InvalidateCurrentCharacter();
}
assembler->Bind(&done);
on_success->Emit(compiler, &new_trace);
}
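A minimal sketch, in plain C++ rather than the macro-assembler API, of why knowing the word-ness of the following character lets \b collapse to a single check on the previous character (the helper names are made up for illustration):

#include <cctype>
#include <cstddef>

static bool IsWordChar(char c) {
  return std::isalnum(static_cast<unsigned char>(c)) != 0 || c == '_';
}

// Full \b check: word-ness must differ on the two sides of the position.
bool AtBoundary(const char* s, std::size_t len, std::size_t pos) {
  bool word_before = pos > 0 && IsWordChar(s[pos - 1]);
  bool word_after = pos < len && IsWordChar(s[pos]);
  return word_before != word_after;
}

// When the following character is already known to be a word character
// (e.g. the 'f' in /\bfoo/), the check collapses to one side only.
bool BoundaryBeforeKnownWordChar(const char* s, std::size_t pos) {
  return pos == 0 || !IsWordChar(s[pos - 1]);
}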
// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
static void EmitBoundaryCheck(AssertionNode::AssertionNodeType type,
RegExpCompiler* compiler,
@ -2205,11 +2229,16 @@ void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
case AFTER_NEWLINE:
EmitHat(compiler, on_success(), trace);
return;
case AT_NON_BOUNDARY:
case AT_BOUNDARY:
case AT_NON_BOUNDARY: {
EmitBoundaryCheck(type_, compiler, on_success(), trace);
return;
}
case AFTER_WORD_CHARACTER:
case AFTER_NONWORD_CHARACTER: {
EmitHalfBoundaryCheck(type_, compiler, on_success(), trace);
}
}
on_success()->Emit(compiler, trace);
}
@ -2791,7 +2820,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
// to generate probably can't use it.
if (i != first_normal_choice) {
alt_gen->expects_preload = false;
new_trace.set_characters_preloaded(0);
new_trace.InvalidateCurrentCharacter();
}
if (i < choice_count - 1) {
new_trace.set_backtrack(&alt_gen->after);
@ -3282,6 +3311,12 @@ void DotPrinter::VisitAssertion(AssertionNode* that) {
case AssertionNode::AFTER_NEWLINE:
stream()->Add("label=\"(?<=\\n)\", shape=septagon");
break;
case AssertionNode::AFTER_WORD_CHARACTER:
stream()->Add("label=\"(?<=\\w)\", shape=septagon");
break;
case AssertionNode::AFTER_NONWORD_CHARACTER:
stream()->Add("label=\"(?<=\\W)\", shape=septagon");
break;
}
stream()->Add("];\n");
PrintAttributes(that);
@ -3484,6 +3519,20 @@ bool RegExpCharacterClass::is_standard() {
set_.set_standard_set_type('.');
return true;
}
if (CompareRanges(set_.ranges(),
kLineTerminatorRanges,
kLineTerminatorRangeCount)) {
set_.set_standard_set_type('n');
return true;
}
if (CompareRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
set_.set_standard_set_type('w');
return true;
}
if (CompareInverseRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
set_.set_standard_set_type('W');
return true;
}
return false;
}
@ -4010,6 +4059,101 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
}
bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
ASSERT_NOT_NULL(ranges);
int n = ranges->length();
if (n <= 1) return true;
int max = ranges->at(0).to();
for (int i = 1; i < n; i++) {
CharacterRange next_range = ranges->at(i);
if (next_range.from() <= max + 1) return false;
max = next_range.to();
}
return true;
}
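A standalone sketch of the same canonical-form test over plain (from, to) pairs instead of ZoneList<CharacterRange> (inclusive ranges, as in the surrounding code):

#include <cstddef>
#include <utility>
#include <vector>

bool IsCanonicalSketch(const std::vector<std::pair<int, int> >& ranges) {
  for (std::size_t i = 1; i < ranges.size(); i++) {
    // Overlapping or adjacent to the previous range: not canonical.
    if (ranges[i].first <= ranges[i - 1].second + 1) return false;
  }
  return true;
}
// {['a','c'], ['e','g']} is canonical; {['a','c'], ['d','f']} is not,
// because the two ranges are adjacent and should have been merged.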
SetRelation CharacterRange::WordCharacterRelation(
ZoneList<CharacterRange>* range) {
ASSERT(IsCanonical(range));
int i = 0; // Word character range index.
int j = 0; // Argument range index.
ASSERT_NE(0, kWordRangeCount);
SetRelation result;
if (range->length() == 0) {
result.SetElementsInSecondSet();
return result;
}
CharacterRange argument_range = range->at(0);
CharacterRange word_range = CharacterRange(kWordRanges[0], kWordRanges[1]);
while (i < kWordRangeCount && j < range->length()) {
// Check the two ranges for the five cases:
// - no overlap.
// - partial overlap (each range has elements that aren't in the other,
// and there are also elements that are in both).
// - argument range entirely inside word range.
// - word range entirely inside argument range.
// - ranges are completely equal.
// First check for no overlap. The earlier range is not in the other set.
if (argument_range.from() > word_range.to()) {
// Ranges are disjoint. The earlier word range contains elements that
// cannot be in the argument set.
result.SetElementsInSecondSet();
} else if (word_range.from() > argument_range.to()) {
// Ranges are disjoint. The earlier argument range contains elements that
// cannot be in the word set.
result.SetElementsInFirstSet();
} else if (word_range.from() <= argument_range.from() &&
word_range.to() >= argument_range.from()) {
result.SetElementsInBothSets();
// argument range completely inside word range.
if (word_range.from() < argument_range.from() ||
word_range.to() > argument_range.from()) {
result.SetElementsInSecondSet();
}
} else if (word_range.from() >= argument_range.from() &&
word_range.to() <= argument_range.from()) {
result.SetElementsInBothSets();
result.SetElementsInFirstSet();
} else {
// There is overlap, and neither is a subrange of the other
result.SetElementsInFirstSet();
result.SetElementsInSecondSet();
result.SetElementsInBothSets();
}
if (result.NonTrivialIntersection()) {
// The result is as (im)precise as we can possibly make it.
return result;
}
// Progress the range(s) with minimal to-character.
uc16 word_to = word_range.to();
uc16 argument_to = argument_range.to();
if (argument_to <= word_to) {
j++;
if (j < range->length()) {
argument_range = range->at(j);
}
}
if (word_to <= argument_to) {
i += 2;
if (i < kWordRangeCount) {
word_range = CharacterRange(kWordRanges[i], kWordRanges[i + 1]);
}
}
}
// Check if anything wasn't compared in the loop.
if (i < kWordRangeCount) {
// Word range contains something not in argument range.
result.SetElementsInSecondSet();
} else if (j < range->length()) {
// Argument range contains something not in word range.
result.SetElementsInFirstSet();
}
return result;
}
static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
int bottom,
int top) {
@ -4119,6 +4263,287 @@ ZoneList<CharacterRange>* CharacterSet::ranges() {
}
// Move a number of elements in a zonelist to another position
// in the same list. Handles overlapping source and target areas.
static void MoveRanges(ZoneList<CharacterRange>* list,
int from,
int to,
int count) {
// Ranges are potentially overlapping.
if (from < to) {
for (int i = count - 1; i >= 0; i--) {
list->at(to + i) = list->at(from + i);
}
} else {
for (int i = 0; i < count; i++) {
list->at(to + i) = list->at(from + i);
}
}
}
static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
int count,
CharacterRange insert) {
// Inserts a range into list[0..count[, which must be sorted
// by from value and non-overlapping and non-adjacent, using at most
// list[0..count] for the result. Returns the number of resulting
// canonicalized ranges. Inserting a range may collapse existing ranges into
// fewer ranges, so the return value can be anything in the range 1..count+1.
uc16 from = insert.from();
uc16 to = insert.to();
int start_pos = 0;
int end_pos = count;
for (int i = count - 1; i >= 0; i--) {
CharacterRange current = list->at(i);
if (current.from() > to + 1) {
end_pos = i;
} else if (current.to() + 1 < from) {
start_pos = i + 1;
break;
}
}
// Inserted range overlaps, or is adjacent to, ranges at positions
// [start_pos..end_pos[. Ranges before start_pos or at or after end_pos are
// not affected by the insertion.
// If start_pos == end_pos, the range must be inserted before start_pos.
// if start_pos < end_pos, the entire range from start_pos to end_pos
// must be merged with the insert range.
if (start_pos == end_pos) {
// Insert between existing ranges at position start_pos.
if (start_pos < count) {
MoveRanges(list, start_pos, start_pos + 1, count - start_pos);
}
list->at(start_pos) = insert;
return count + 1;
}
if (start_pos + 1 == end_pos) {
// Replace single existing range at position start_pos.
CharacterRange to_replace = list->at(start_pos);
int new_from = Min(to_replace.from(), from);
int new_to = Max(to_replace.to(), to);
list->at(start_pos) = CharacterRange(new_from, new_to);
return count;
}
// Replace a number of existing ranges from start_pos to end_pos - 1.
// Move the remaining ranges down.
int new_from = Min(list->at(start_pos).from(), from);
int new_to = Max(list->at(end_pos - 1).to(), to);
if (end_pos < count) {
MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
}
list->at(start_pos) = CharacterRange(new_from, new_to);
return count - (end_pos - start_pos) + 1;
}
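A minimal sketch of the same end result using std::vector and sort-and-merge; the routine above achieves it in place and returns the new count instead:

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

typedef std::pair<int, int> Range;  // inclusive [from, to]

std::vector<Range> InsertCanonicalSketch(std::vector<Range> list,
                                         Range insert) {
  list.push_back(insert);
  std::sort(list.begin(), list.end());
  std::vector<Range> result;
  for (std::size_t i = 0; i < list.size(); i++) {
    // Merge anything that now overlaps or touches the previous range.
    if (!result.empty() && list[i].first <= result.back().second + 1) {
      result.back().second = std::max(result.back().second, list[i].second);
    } else {
      result.push_back(list[i]);
    }
  }
  return result;
}
// Inserting [5,6] into {[1,3], [8,9]} gives {[1,3], [5,6], [8,9]};
// inserting [4,7] into the same list collapses everything to {[1,9]}.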
void CharacterSet::Canonicalize() {
// Special/default classes are always considered canonical. The result
// of calling ranges() will be sorted.
if (ranges_ == NULL) return;
CharacterRange::Canonicalize(ranges_);
}
void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
if (character_ranges->length() <= 1) return;
// Check whether ranges are already canonical (increasing, non-overlapping,
// non-adjacent).
int n = character_ranges->length();
int max = character_ranges->at(0).to();
int i = 1;
while (i < n) {
CharacterRange current = character_ranges->at(i);
if (current.from() <= max + 1) {
break;
}
max = current.to();
i++;
}
// Canonical until the i'th range. If that's all of them, we are done.
if (i == n) return;
// The ranges at index i and forward are not canonicalized. Make them so by
// doing the equivalent of insertion sort (inserting each into the previous
// list, in order).
// Notice that inserting a range can reduce the number of ranges in the
// result due to combining of adjacent and overlapping ranges.
int read = i; // Range to insert.
int num_canonical = i; // Length of canonicalized part of list.
do {
num_canonical = InsertRangeInCanonicalList(character_ranges,
num_canonical,
character_ranges->at(read));
read++;
} while (read < n);
character_ranges->Rewind(num_canonical);
ASSERT(CharacterRange::IsCanonical(character_ranges));
}
// Utility function for CharacterRange::Merge. Adds a range at the end of
// a canonicalized range list, if necessary merging the range with the last
// range of the list.
static void AddRangeToSet(ZoneList<CharacterRange>* set, CharacterRange range) {
if (set == NULL) return;
ASSERT(set->length() == 0 || set->at(set->length() - 1).to() < range.from());
int n = set->length();
if (n > 0) {
CharacterRange lastRange = set->at(n - 1);
if (lastRange.to() == range.from() - 1) {
set->at(n - 1) = CharacterRange(lastRange.from(), range.to());
return;
}
}
set->Add(range);
}
static void AddRangeToSelectedSet(int selector,
ZoneList<CharacterRange>* first_set,
ZoneList<CharacterRange>* second_set,
ZoneList<CharacterRange>* intersection_set,
CharacterRange range) {
switch (selector) {
case kInsideFirst:
AddRangeToSet(first_set, range);
break;
case kInsideSecond:
AddRangeToSet(second_set, range);
break;
case kInsideBoth:
AddRangeToSet(intersection_set, range);
break;
}
}
void CharacterRange::Merge(ZoneList<CharacterRange>* first_set,
ZoneList<CharacterRange>* second_set,
ZoneList<CharacterRange>* first_set_only_out,
ZoneList<CharacterRange>* second_set_only_out,
ZoneList<CharacterRange>* both_sets_out) {
// Inputs are canonicalized.
ASSERT(CharacterRange::IsCanonical(first_set));
ASSERT(CharacterRange::IsCanonical(second_set));
// Outputs are empty, if applicable.
ASSERT(first_set_only_out == NULL || first_set_only_out->length() == 0);
ASSERT(second_set_only_out == NULL || second_set_only_out->length() == 0);
ASSERT(both_sets_out == NULL || both_sets_out->length() == 0);
// Merge sets by iterating through the lists in order of lowest "from" value,
// and putting intervals into one of three sets.
if (first_set->length() == 0) {
second_set_only_out->AddAll(*second_set);
return;
}
if (second_set->length() == 0) {
first_set_only_out->AddAll(*first_set);
return;
}
// Indices into input lists.
int i1 = 0;
int i2 = 0;
// Cache length of input lists.
int n1 = first_set->length();
int n2 = second_set->length();
// Current range. May be invalid if state is kInsideNone.
int from = 0;
int to = -1;
// Where current range comes from.
int state = kInsideNone;
while (i1 < n1 || i2 < n2) {
CharacterRange next_range;
int range_source;
if (i2 == n2 || first_set->at(i1).from() < second_set->at(i2).from()) {
next_range = first_set->at(i1++);
range_source = kInsideFirst;
} else {
next_range = second_set->at(i2++);
range_source = kInsideSecond;
}
if (to < next_range.from()) {
// Ranges disjoint: |current| |next|
AddRangeToSelectedSet(state,
first_set_only_out,
second_set_only_out,
both_sets_out,
CharacterRange(from, to));
from = next_range.from();
to = next_range.to();
state = range_source;
} else {
if (from < next_range.from()) {
AddRangeToSelectedSet(state,
first_set_only_out,
second_set_only_out,
both_sets_out,
CharacterRange(from, next_range.from()-1));
}
if (to < next_range.to()) {
// Ranges overlap: |current|
// |next|
AddRangeToSelectedSet(state | range_source,
first_set_only_out,
second_set_only_out,
both_sets_out,
CharacterRange(next_range.from(), to));
from = to + 1;
to = next_range.to();
state = range_source;
} else {
// Range included: |current| , possibly ending at same character.
// |next|
AddRangeToSelectedSet(
state | range_source,
first_set_only_out,
second_set_only_out,
both_sets_out,
CharacterRange(next_range.from(), next_range.to()));
from = next_range.to() + 1;
// If ranges end at same character, both ranges are consumed completely.
if (next_range.to() == to) state = kInsideNone;
}
}
}
AddRangeToSelectedSet(state,
first_set_only_out,
second_set_only_out,
both_sets_out,
CharacterRange(from, to));
}
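The contract of Merge can be sketched with sorted vectors of code points standing in for canonical range lists; this illustrates what the three outputs contain, not the streaming algorithm above:

#include <algorithm>
#include <iterator>
#include <vector>

// The three outputs partition the union of the two inputs.
void MergeSketch(const std::vector<int>& first, const std::vector<int>& second,
                 std::vector<int>* first_only, std::vector<int>* second_only,
                 std::vector<int>* both) {
  std::set_difference(first.begin(), first.end(),
                      second.begin(), second.end(),
                      std::back_inserter(*first_only));
  std::set_difference(second.begin(), second.end(),
                      first.begin(), first.end(),
                      std::back_inserter(*second_only));
  std::set_intersection(first.begin(), first.end(),
                        second.begin(), second.end(),
                        std::back_inserter(*both));
}
// With first = {a,b,c} and second = {b,c,d}: first_only = {a},
// second_only = {d}, both = {b,c}.

As the header comment in jsregexp.h below describes, the V8 routine additionally accepts aliased or NULL output lists; LoopChoiceNode::ComputeFirstCharacterSet in this patch uses the aliased form to build a plain union.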
void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
ZoneList<CharacterRange>* negated_ranges) {
ASSERT(CharacterRange::IsCanonical(ranges));
ASSERT_EQ(0, negated_ranges->length());
int range_count = ranges->length();
uc16 from = 0;
int i = 0;
if (range_count > 0 && ranges->at(0).from() == 0) {
from = ranges->at(0).to();
i = 1;
}
while (i < range_count) {
CharacterRange range = ranges->at(i);
negated_ranges->Add(CharacterRange(from + 1, range.from() - 1));
from = range.to();
i++;
}
if (from < String::kMaxUC16CharCode) {
negated_ranges->Add(CharacterRange(from + 1, String::kMaxUC16CharCode));
}
}
// -------------------------------------------------------------------
// Interest propagation
@ -4410,9 +4835,203 @@ void Analysis::VisitBackReference(BackReferenceNode* that) {
void Analysis::VisitAssertion(AssertionNode* that) {
EnsureAnalyzed(that->on_success());
AssertionNode::AssertionNodeType type = that->type();
if (type == AssertionNode::AT_BOUNDARY ||
type == AssertionNode::AT_NON_BOUNDARY) {
// Check if the following character is known to be a word character
// or known to not be a word character.
ZoneList<CharacterRange>* following_chars = that->FirstCharacterSet();
CharacterRange::Canonicalize(following_chars);
SetRelation word_relation =
CharacterRange::WordCharacterRelation(following_chars);
if (word_relation.ContainedIn()) {
// Following character is definitely a word character.
type = (type == AssertionNode::AT_BOUNDARY) ?
AssertionNode::AFTER_NONWORD_CHARACTER :
AssertionNode::AFTER_WORD_CHARACTER;
that->set_type(type);
} else if (word_relation.Disjoint()) {
// Following character is definitely *not* a word character.
type = (type == AssertionNode::AT_BOUNDARY) ?
AssertionNode::AFTER_WORD_CHARACTER :
AssertionNode::AFTER_NONWORD_CHARACTER;
that->set_type(type);
}
}
}
ZoneList<CharacterRange>* RegExpNode::FirstCharacterSet() {
if (first_character_set_ == NULL) {
if (ComputeFirstCharacterSet(kFirstCharBudget) < 0) {
// If we can't find an exact solution within the budget, we
// set the value to the set of every character, i.e., all characters
// are possible.
ZoneList<CharacterRange>* all_set = new ZoneList<CharacterRange>(1);
all_set->Add(CharacterRange::Everything());
first_character_set_ = all_set;
}
}
return first_character_set_;
}
int RegExpNode::ComputeFirstCharacterSet(int budget) {
// Default behavior is to not be able to determine the first character.
return kComputeFirstCharacterSetFail;
}
int LoopChoiceNode::ComputeFirstCharacterSet(int budget) {
budget--;
if (budget >= 0) {
// Find loop min-iteration. It's the value of the guarded choice node
// with a GEQ guard, if any.
int min_repetition = 0;
for (int i = 0; i <= 1; i++) {
GuardedAlternative alternative = alternatives()->at(i);
ZoneList<Guard*>* guards = alternative.guards();
if (guards != NULL && guards->length() > 0) {
Guard* guard = guards->at(0);
if (guard->op() == Guard::GEQ) {
min_repetition = guard->value();
break;
}
}
}
budget = loop_node()->ComputeFirstCharacterSet(budget);
if (budget >= 0) {
ZoneList<CharacterRange>* character_set =
loop_node()->first_character_set();
if (body_can_be_zero_length() || min_repetition == 0) {
budget = continue_node()->ComputeFirstCharacterSet(budget);
if (budget < 0) return budget;
ZoneList<CharacterRange>* body_set =
continue_node()->first_character_set();
ZoneList<CharacterRange>* union_set =
new ZoneList<CharacterRange>(Max(character_set->length(),
body_set->length()));
CharacterRange::Merge(character_set,
body_set,
union_set,
union_set,
union_set);
character_set = union_set;
}
set_first_character_set(character_set);
}
}
return budget;
}
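A toy mirror of the rule implemented above, with made-up inputs: when the loop can match zero times, the continuation's first characters are unioned in; otherwise the loop body alone determines the set.

#include <set>

std::set<char> LoopFirstCharsSketch(const std::set<char>& body_first,
                                    const std::set<char>& continue_first,
                                    int min_repetition,
                                    bool body_can_be_zero_length) {
  std::set<char> result = body_first;
  if (body_can_be_zero_length || min_repetition == 0) {
    result.insert(continue_first.begin(), continue_first.end());
  }
  return result;
}
// /(ab)*c/ -> LoopFirstCharsSketch({'a'}, {'c'}, 0, false) == {'a', 'c'}
// /(ab)+c/ -> LoopFirstCharsSketch({'a'}, {'c'}, 1, false) == {'a'}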
int NegativeLookaheadChoiceNode::ComputeFirstCharacterSet(int budget) {
budget--;
if (budget >= 0) {
GuardedAlternative successor = this->alternatives()->at(1);
RegExpNode* successor_node = successor.node();
budget = successor_node->ComputeFirstCharacterSet(budget);
if (budget >= 0) {
set_first_character_set(successor_node->first_character_set());
}
}
return budget;
}
// The first character set of an EndNode is unknowable. Just use the
// default implementation that fails and returns all characters as possible.
int AssertionNode::ComputeFirstCharacterSet(int budget) {
budget -= 1;
if (budget >= 0) {
switch (type_) {
case AT_END: {
set_first_character_set(new ZoneList<CharacterRange>(0));
break;
}
case AT_START:
case AT_BOUNDARY:
case AT_NON_BOUNDARY:
case AFTER_NEWLINE:
case AFTER_NONWORD_CHARACTER:
case AFTER_WORD_CHARACTER: {
ASSERT_NOT_NULL(on_success());
budget = on_success()->ComputeFirstCharacterSet(budget);
set_first_character_set(on_success()->first_character_set());
break;
}
}
}
return budget;
}
int ActionNode::ComputeFirstCharacterSet(int budget) {
if (type_ == POSITIVE_SUBMATCH_SUCCESS) return kComputeFirstCharacterSetFail;
budget--;
if (budget >= 0) {
ASSERT_NOT_NULL(on_success());
budget = on_success()->ComputeFirstCharacterSet(budget);
if (budget >= 0) {
set_first_character_set(on_success()->first_character_set());
}
}
return budget;
}
int BackReferenceNode::ComputeFirstCharacterSet(int budget) {
// We don't know anything about the first character of a backreference
// at this point.
return kComputeFirstCharacterSetFail;
}
int TextNode::ComputeFirstCharacterSet(int budget) {
budget--;
if (budget >= 0) {
ASSERT_NE(0, elements()->length());
TextElement text = elements()->at(0);
if (text.type == TextElement::ATOM) {
RegExpAtom* atom = text.data.u_atom;
ASSERT_NE(0, atom->length());
uc16 first_char = atom->data()[0];
ZoneList<CharacterRange>* range = new ZoneList<CharacterRange>(1);
range->Add(CharacterRange(first_char, first_char));
set_first_character_set(range);
} else {
ASSERT(text.type == TextElement::CHAR_CLASS);
RegExpCharacterClass* char_class = text.data.u_char_class;
if (char_class->is_negated()) {
ZoneList<CharacterRange>* ranges = char_class->ranges();
int length = ranges->length();
int new_length = length + 1;
if (length > 0) {
if (ranges->at(0).from() == 0) new_length--;
if (ranges->at(length - 1).to() == String::kMaxUC16CharCode) {
new_length--;
}
}
ZoneList<CharacterRange>* negated_ranges =
new ZoneList<CharacterRange>(new_length);
CharacterRange::Negate(ranges, negated_ranges);
set_first_character_set(negated_ranges);
} else {
set_first_character_set(char_class->ranges());
}
}
}
return budget;
}
// -------------------------------------------------------------------
// Dispatch table construction
@ -4471,7 +5090,6 @@ void DispatchTableConstructor::VisitAssertion(AssertionNode* that) {
}
static int CompareRangeByFrom(const CharacterRange* a,
const CharacterRange* b) {
return Compare<uc16>(a->from(), b->from());
@ -4606,4 +5224,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
pattern);
}
int OffsetsVector::static_offsets_vector_[
OffsetsVector::kStaticOffsetsVectorSize];
}} // namespace v8::internal

183
deps/v8/src/jsregexp.h

@ -75,13 +75,6 @@ class RegExpImpl {
int index,
Handle<JSArray> lastMatchInfo);
// Call RegExp.prototype.exec(string) in a loop.
// Used by String.prototype.match and String.prototype.replace.
// This function calls the garbage collector if necessary.
static Handle<Object> ExecGlobal(Handle<JSRegExp> regexp,
Handle<String> subject,
Handle<JSArray> lastMatchInfo);
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpPrepare(Handle<JSRegExp> re,
Handle<String> pattern,
@ -108,13 +101,23 @@ class RegExpImpl {
int index,
Handle<JSArray> lastMatchInfo);
// Offsets in the lastMatchInfo array.
// Array index in the lastMatchInfo array.
static const int kLastCaptureCount = 0;
static const int kLastSubject = 1;
static const int kLastInput = 2;
static const int kFirstCapture = 3;
static const int kLastMatchOverhead = 3;
// Direct offset into the lastMatchInfo array.
static const int kLastCaptureCountOffset =
FixedArray::kHeaderSize + kLastCaptureCount * kPointerSize;
static const int kLastSubjectOffset =
FixedArray::kHeaderSize + kLastSubject * kPointerSize;
static const int kLastInputOffset =
FixedArray::kHeaderSize + kLastInput * kPointerSize;
static const int kFirstCaptureOffset =
FixedArray::kHeaderSize + kFirstCapture * kPointerSize;
// Used to access the lastMatchInfo array.
static int GetCapture(FixedArray* array, int index) {
return Smi::cast(array->get(index + kFirstCapture))->value();
@ -174,6 +177,57 @@ class RegExpImpl {
};
// Represents the location of one element relative to the intersection of
// two sets. Corresponds to the four areas of a Venn diagram.
enum ElementInSetsRelation {
kInsideNone = 0,
kInsideFirst = 1,
kInsideSecond = 2,
kInsideBoth = 3
};
// Represents the relation of two sets.
// Sets can be either disjoint, partially or fully overlapping, or equal.
class SetRelation BASE_EMBEDDED {
public:
// The relation is represented by a bit for each set saying whether it has
// elements that are not in the other set, and a bit saying whether there
// are elements that are in both sets.
// Location of an element. Corresponds to the internal areas of
// a Venn diagram.
enum {
kInFirst = 1 << kInsideFirst,
kInSecond = 1 << kInsideSecond,
kInBoth = 1 << kInsideBoth
};
SetRelation() : bits_(0) {}
~SetRelation() {}
// Record that there are elements in a particular area of the Venn diagram.
void SetElementsInFirstSet() { bits_ |= kInFirst; }
void SetElementsInSecondSet() { bits_ |= kInSecond; }
void SetElementsInBothSets() { bits_ |= kInBoth; }
// Check the currently known relation of the sets (common functions only,
// for other combinations, use value() to get the bits and check them
// manually).
// Sets are completely disjoint.
bool Disjoint() { return (bits_ & kInBoth) == 0; }
// Sets are equal.
bool Equals() { return (bits_ & (kInFirst | kInSecond)) == 0; }
// First set contains second.
bool Contains() { return (bits_ & kInSecond) == 0; }
// Second set contains first.
bool ContainedIn() { return (bits_ & kInFirst) == 0; }
bool NonTrivialIntersection() {
return (bits_ == (kInFirst | kInSecond | kInBoth));
}
int value() { return bits_; }
private:
int bits_;
};
class CharacterRange {
public:
CharacterRange() : from_(0), to_(0) { }
@ -205,7 +259,39 @@ class CharacterRange {
Vector<const uc16> overlay,
ZoneList<CharacterRange>** included,
ZoneList<CharacterRange>** excluded);
// Whether a range list is in canonical form: Ranges ordered by from value,
// and ranges non-overlapping and non-adjacent.
static bool IsCanonical(ZoneList<CharacterRange>* ranges);
// Convert range list to canonical form. The characters covered by the ranges
// will still be the same, but no character is in more than one range, and
// adjacent ranges are merged. The resulting list may be shorter than the
// original, but cannot be longer.
static void Canonicalize(ZoneList<CharacterRange>* ranges);
// Check how the set of characters defined by a CharacterRange list relates
// to the set of word characters. List must be in canonical form.
static SetRelation WordCharacterRelation(ZoneList<CharacterRange>* ranges);
// Takes two character range lists (representing character sets) in canonical
// form and merges them.
// The characters that are only covered by the first set are added to
// first_set_only_out. The characters that are only in the second set are
// added to second_set_only_out, and the characters that are in both are
// added to both_sets_out.
// The pointers to first_set_only_out, second_set_only_out and both_sets_out
// should be to empty lists, but they need not be distinct, and may be NULL.
// If NULL, the characters are dropped, and if two arguments are the same
// pointer, the result is the union of the two sets that would be created
// if the pointers had been distinct.
// This way, the Merge function can compute all the usual set operations:
// union (all three out-sets are equal), intersection (only both_sets_out is
// non-NULL), and set difference (only first_set is non-NULL).
static void Merge(ZoneList<CharacterRange>* first_set,
ZoneList<CharacterRange>* second_set,
ZoneList<CharacterRange>* first_set_only_out,
ZoneList<CharacterRange>* second_set_only_out,
ZoneList<CharacterRange>* both_sets_out);
// Negate the contents of a character range in canonical form.
static void Negate(ZoneList<CharacterRange>* src,
ZoneList<CharacterRange>* dst);
static const int kRangeCanonicalizeMax = 0x346;
static const int kStartMarker = (1 << 24);
static const int kPayloadMask = (1 << 24) - 1;
@ -479,7 +565,7 @@ class QuickCheckDetails {
class RegExpNode: public ZoneObject {
public:
RegExpNode() : trace_count_(0) { }
RegExpNode() : first_character_set_(NULL), trace_count_(0) { }
virtual ~RegExpNode();
virtual void Accept(NodeVisitor* visitor) = 0;
// Generates a goto to this node or actually generates the code at this point.
@ -530,8 +616,29 @@ class RegExpNode: public ZoneObject {
SiblingList* siblings() { return &siblings_; }
void set_siblings(SiblingList* other) { siblings_ = *other; }
// Return the set of possible next characters recognized by the regexp
// (or a safe subset, potentially the set of all characters).
ZoneList<CharacterRange>* FirstCharacterSet();
// Compute (if possible within the budget of traversed nodes) the
// possible first characters of the input matched by this node and
// its continuation. Returns the remaining budget after the computation.
// If the budget is spent, the result is negative, and the cached
// first_character_set_ value isn't set.
virtual int ComputeFirstCharacterSet(int budget);
// Get and set the cached first character set value.
ZoneList<CharacterRange>* first_character_set() {
return first_character_set_;
}
void set_first_character_set(ZoneList<CharacterRange>* character_set) {
first_character_set_ = character_set;
}
protected:
enum LimitResult { DONE, CONTINUE };
static const int kComputeFirstCharacterSetFail = -1;
LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
// Returns a sibling of this node whose interests and assumptions
@ -552,9 +659,11 @@ class RegExpNode: public ZoneObject {
virtual RegExpNode* Clone() = 0;
private:
static const int kFirstCharBudget = 10;
Label label_;
NodeInfo info_;
SiblingList siblings_;
ZoneList<CharacterRange>* first_character_set_;
// This variable keeps track of how many times code has been generated for
// this node (in different traces). We don't keep track of where the
// generated code is located unless the code is generated at the start of
@ -645,7 +754,7 @@ class ActionNode: public SeqRegExpNode {
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
virtual ActionNode* Clone() { return new ActionNode(*this); }
virtual int ComputeFirstCharacterSet(int budget);
private:
union {
struct {
@ -711,7 +820,7 @@ class TextNode: public SeqRegExpNode {
return result;
}
void CalculateOffsets();
virtual int ComputeFirstCharacterSet(int budget);
private:
enum TextEmitPassType {
NON_ASCII_MATCH, // Check for characters that can't match.
@ -741,7 +850,12 @@ class AssertionNode: public SeqRegExpNode {
AT_START,
AT_BOUNDARY,
AT_NON_BOUNDARY,
AFTER_NEWLINE
AFTER_NEWLINE,
// Types not directly expressible in regexp syntax.
// Used for modifying a boundary node if its following character is
// known to be word and/or non-word.
AFTER_NONWORD_CHARACTER,
AFTER_WORD_CHARACTER
};
static AssertionNode* AtEnd(RegExpNode* on_success) {
return new AssertionNode(AT_END, on_success);
@ -765,8 +879,10 @@ class AssertionNode: public SeqRegExpNode {
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
virtual int ComputeFirstCharacterSet(int budget);
virtual AssertionNode* Clone() { return new AssertionNode(*this); }
AssertionNodeType type() { return type_; }
void set_type(AssertionNodeType type) { type_ = type; }
private:
AssertionNode(AssertionNodeType t, RegExpNode* on_success)
: SeqRegExpNode(on_success), type_(t) { }
@ -794,7 +910,7 @@ class BackReferenceNode: public SeqRegExpNode {
return;
}
virtual BackReferenceNode* Clone() { return new BackReferenceNode(*this); }
virtual int ComputeFirstCharacterSet(int budget);
private:
int start_reg_;
int end_reg_;
@ -816,7 +932,6 @@ class EndNode: public RegExpNode {
UNREACHABLE();
}
virtual EndNode* Clone() { return new EndNode(*this); }
private:
Action action_;
};
@ -950,6 +1065,7 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
// characters, but on a negative lookahead the negative branch did not take
// part in that calculation (EatsAtLeast) so the assumptions don't hold.
virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
virtual int ComputeFirstCharacterSet(int budget);
};
@ -968,6 +1084,7 @@ class LoopChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
virtual int ComputeFirstCharacterSet(int budget);
virtual LoopChoiceNode* Clone() { return new LoopChoiceNode(*this); }
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
@ -1123,7 +1240,7 @@ class Trace {
void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
void set_stop_node(RegExpNode* node) { stop_node_ = node; }
void set_loop_label(Label* label) { loop_label_ = label; }
void set_characters_preloaded(int cpre) { characters_preloaded_ = cpre; }
void set_characters_preloaded(int count) { characters_preloaded_ = count; }
void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
void set_flush_budget(int to) { flush_budget_ = to; }
void set_quick_check_performed(QuickCheckDetails* d) {
@ -1283,6 +1400,40 @@ class RegExpEngine: public AllStatic {
};
class OffsetsVector {
public:
inline OffsetsVector(int num_registers)
: offsets_vector_length_(num_registers) {
if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
vector_ = NewArray<int>(offsets_vector_length_);
} else {
vector_ = static_offsets_vector_;
}
}
inline ~OffsetsVector() {
if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
DeleteArray(vector_);
vector_ = NULL;
}
}
inline int* vector() { return vector_; }
inline int length() { return offsets_vector_length_; }
static const int kStaticOffsetsVectorSize = 50;
private:
static Address static_offsets_vector_address() {
return reinterpret_cast<Address>(&static_offsets_vector_);
}
int* vector_;
int offsets_vector_length_;
static int static_offsets_vector_[kStaticOffsetsVectorSize];
friend class ExternalReference;
};
} } // namespace v8::internal
#endif // V8_JSREGEXP_H_

3
deps/v8/src/jump-target.h

@ -112,7 +112,8 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
// code after the branch.
// code after the branch. The arg is a result that is live both at
// the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);

13
deps/v8/src/macros.py

@ -92,12 +92,13 @@ macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro FLOOR(arg) = %Math_floor(arg);
macro FLOOR(arg) = $floor(arg);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInt32(arg));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
@ -117,6 +118,14 @@ macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
macro DAY(time) = ($floor(time / 86400000));
macro MONTH_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).month);
macro DATE_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).date);
macro YEAR_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).year);
macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
macro MS_FROM_TIME(time) = (Modulo(time, 1000));
# Last input and last subject of regexp matches.
macro LAST_SUBJECT(array) = ((array)[1]);

245
deps/v8/src/mark-compact.cc

@ -116,6 +116,8 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
compact_on_next_gc_ = false;
if (FLAG_never_compact) compacting_collection_ = false;
if (!Heap::map_space()->MapPointersEncodable())
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef DEBUG
@ -789,7 +791,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// back pointers, reversing them all at once. This allows us to find
// those maps with map transitions that need to be nulled, and only
// scan the descriptor arrays of those maps, not all maps.
// All of these actions are carried out only on maps of JSObects
// All of these actions are carried out only on maps of JSObjects
// and related subtypes.
while (map_iterator.has_next()) {
Map* map = reinterpret_cast<Map*>(map_iterator.next());
@ -1166,7 +1168,7 @@ void MarkCompactCollector::DeallocateCodeBlock(Address start,
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes) {
// Objects in map space are frequently assumed to have size Map::kSize and a
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
@ -1240,6 +1242,225 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
}
class MapIterator : public HeapObjectIterator {
public:
MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
explicit MapIterator(Address start)
: HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
private:
static int SizeCallback(HeapObject* unused) {
USE(unused);
return Map::kSize;
}
};
class MapCompact {
public:
explicit MapCompact(int live_maps)
: live_maps_(live_maps),
to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
map_to_evacuate_it_(to_evacuate_start_),
first_map_to_evacuate_(
reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
}
void CompactMaps() {
// As we know the number of maps to evacuate beforehand,
// we stop when there are no more vacant maps.
for (Map* next_vacant_map = NextVacantMap();
next_vacant_map;
next_vacant_map = NextVacantMap()) {
EvacuateMap(next_vacant_map, NextMapToEvacuate());
}
#ifdef DEBUG
CheckNoMapsToEvacuate();
#endif
}
void UpdateMapPointersInRoots() {
Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
}
void FinishMapSpace() {
// Iterate through to space and finish move.
MapIterator it;
HeapObject* o = it.next();
for (; o != first_map_to_evacuate_; o = it.next()) {
Map* map = reinterpret_cast<Map*>(o);
ASSERT(!map->IsMarked());
ASSERT(!map->IsOverflowed());
ASSERT(map->IsMap());
Heap::UpdateRSet(map);
}
}
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
ASSERT(space != Heap::map_space());
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
}
}
void UpdateMapPointersInNewSpace() {
NewSpace* space = Heap::new_space();
UpdateMapPointersInRange(space->bottom(), space->top());
}
void UpdateMapPointersInLargeObjectSpace() {
LargeObjectIterator it(Heap::lo_space());
while (true) {
if (!it.has_next()) break;
UpdateMapPointersInObject(it.next());
}
}
void Finish() {
Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
}
private:
int live_maps_;
Address to_evacuate_start_;
MapIterator vacant_map_it_;
MapIterator map_to_evacuate_it_;
Map* first_map_to_evacuate_;
// Helper class for updating map pointers in HeapObjects.
class MapUpdatingVisitor: public ObjectVisitor {
public:
void VisitPointer(Object** p) {
UpdateMapPointer(p);
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) UpdateMapPointer(p);
}
private:
void UpdateMapPointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
// Moved maps are tagged with an overflowed map word. They are the only
// objects whose map word is overflowed, as marking is already complete.
MapWord map_word = old_map->map_word();
if (!map_word.IsOverflowed()) return;
*p = GetForwardedMap(map_word);
}
};
static MapUpdatingVisitor map_updating_visitor_;
static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
while (true) {
ASSERT(it->has_next());
HeapObject* next = it->next();
if (next == last)
return NULL;
ASSERT(!next->IsOverflowed());
ASSERT(!next->IsMarked());
ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
if (next->IsMap() == live)
return reinterpret_cast<Map*>(next);
}
}
Map* NextVacantMap() {
Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
return map;
}
Map* NextMapToEvacuate() {
Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
ASSERT(map != NULL);
ASSERT(map->IsMap());
return map;
}
static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
ASSERT(FreeListNode::IsFreeListNode(vacant_map));
ASSERT(map_to_evacuate->IsMap());
memcpy(
reinterpret_cast<void*>(vacant_map->address()),
reinterpret_cast<void*>(map_to_evacuate->address()),
Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
forwarding_map_word.SetOverflow();
map_to_evacuate->set_map_word(forwarding_map_word);
ASSERT(map_to_evacuate->map_word().IsOverflowed());
ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
}
static Map* GetForwardedMap(MapWord map_word) {
ASSERT(map_word.IsOverflowed());
map_word.ClearOverflow();
Map* new_map = map_word.ToMap();
ASSERT_MAP_ALIGNED(new_map->address());
return new_map;
}
static int UpdateMapPointersInObject(HeapObject* obj) {
ASSERT(!obj->IsMarked());
Map* map = obj->map();
ASSERT(Heap::map_space()->Contains(map));
MapWord map_word = map->map_word();
ASSERT(!map_word.IsMarked());
if (map_word.IsOverflowed()) {
Map* new_map = GetForwardedMap(map_word);
ASSERT(Heap::map_space()->Contains(new_map));
obj->set_map(new_map);
#ifdef DEBUG
if (FLAG_gc_verbose) {
PrintF("update %p : %p -> %p\n", obj->address(),
map, new_map);
}
#endif
}
int size = obj->SizeFromMap(map);
obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
return size;
}
static void UpdateMapPointersInRange(Address start, Address end) {
HeapObject* object;
int size;
for (Address current = start; current < end; current += size) {
object = HeapObject::FromAddress(current);
size = UpdateMapPointersInObject(object);
ASSERT(size > 0);
}
}
#ifdef DEBUG
void CheckNoMapsToEvacuate() {
if (!FLAG_enable_slow_asserts)
return;
while (map_to_evacuate_it_.has_next())
ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
}
#endif
};
MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
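A toy, self-contained sketch of the evacuation idea MapCompact implements: live entries at or above the compacted top are copied into vacant slots below it, and the old slot records where the entry went, standing in for the overflowed forwarding map word. Plain ints stand in for maps; -1 marks a free slot.

#include <cstddef>
#include <vector>

std::vector<int> CompactSketch(std::vector<int> space,
                               std::vector<int>* forwarding) {
  std::size_t live = 0;
  for (std::size_t i = 0; i < space.size(); i++) {
    if (space[i] != -1) live++;
  }
  forwarding->assign(space.size(), -1);
  std::size_t vacant = 0;       // next free slot below the compacted top
  std::size_t evacuate = live;  // next entry at or above the compacted top
  while (true) {
    while (vacant < live && space[vacant] != -1) vacant++;
    while (evacuate < space.size() && space[evacuate] == -1) evacuate++;
    if (vacant >= live || evacuate >= space.size()) break;
    space[vacant] = space[evacuate];                     // EvacuateMap
    (*forwarding)[evacuate] = static_cast<int>(vacant);  // forwarding record
    space[evacuate] = -1;
    vacant++;
    evacuate++;
  }
  space.resize(live);
  return space;
}
// CompactSketch({10, -1, 20, -1, 30, 40}, &fwd) returns {10, 30, 20, 40}
// with fwd == {-1, -1, -1, -1, 1, 3}.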
void MarkCompactCollector::SweepSpaces() {
ASSERT(state_ == SWEEP_SPACES);
ASSERT(!IsCompacting());
@ -1254,6 +1475,26 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
int live_maps = Heap::map_space()->Size() / Map::kSize;
ASSERT(live_map_objects_ == live_maps);
if (Heap::map_space()->NeedsCompaction(live_maps)) {
MapCompact map_compact(live_maps);
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();
map_compact.FinishMapSpace();
PagedSpaces spaces;
while (PagedSpace* space = spaces.next()) {
if (space == Heap::map_space()) continue;
map_compact.UpdateMapPointersInPagedSpace(space);
}
map_compact.UpdateMapPointersInNewSpace();
map_compact.UpdateMapPointersInLargeObjectSpace();
map_compact.Finish();
}
}

10
deps/v8/src/mark-compact.h

@ -92,7 +92,15 @@ class MarkCompactCollector: public AllStatic {
static bool HasCompacted() { return compacting_collection_; }
// True after the Prepare phase if the compaction is taking place.
static bool IsCompacting() { return compacting_collection_; }
static bool IsCompacting() {
#ifdef DEBUG
// For the purposes of asserts we don't want this to keep returning true
// after the collection is completed.
return state_ != IDLE && compacting_collection_;
#else
return compacting_collection_;
#endif
}
// The count of the number of objects left marked at the end of the last
// completed full GC (expected to be zero).

8
deps/v8/src/math.js

@ -84,7 +84,7 @@ function MathCeil(x) {
// ECMA 262 - 15.8.2.7
function MathCos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %_Math_cos(x);
return %Math_cos(x);
}
// ECMA 262 - 15.8.2.8
@ -98,12 +98,12 @@ function MathFloor(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
// It's more common to call this with a positive number that's out
// of range than negative numbers; check the upper bound first.
if (x <= 0x7FFFFFFF && x > 0) {
if (x < 0x80000000 && x > 0) {
// Numbers in the range [0, 2^31) can be floored by converting
// them to an unsigned 32-bit value using the shift operator.
// We avoid doing so for -0, because the result of Math.floor(-0)
// has to be -0, which wouldn't be the case with the shift.
return x << 0;
return TO_UINT32(x);
} else {
return %Math_floor(x);
}
@ -176,7 +176,7 @@ function MathRound(x) {
// ECMA 262 - 15.8.2.16
function MathSin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
return %_Math_sin(x);
return %Math_sin(x);
}
// ECMA 262 - 15.8.2.17

4
deps/v8/src/messages.js

@ -178,7 +178,9 @@ function FormatMessage(message) {
result_not_primitive: "Result of %0 must be a primitive, was %1",
invalid_json: "String '%0' is not valid JSON",
circular_structure: "Converting circular structure to JSON",
object_keys_non_object: "Object.keys called on non-object"
object_keys_non_object: "Object.keys called on non-object",
object_get_prototype_non_object: "Object.getPrototypeOf called on non-object",
array_indexof_not_defined: "Array.getIndexOf: Argument undefined"
};
}
var format = kMessages[message.type];

1
deps/v8/src/mksnapshot.cc

@ -151,6 +151,7 @@ int main(int argc, char** argv) {
}
i::Serializer::Enable();
Persistent<Context> context = v8::Context::New();
ASSERT(!context.IsEmpty());
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {

1
deps/v8/src/objects-debug.cc

@ -587,7 +587,6 @@ static const char* TypeToString(InstanceType type) {
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
case PROXY_TYPE: return "PROXY";
case SMI_TYPE: return "SMI";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE

15
deps/v8/src/objects-inl.h

@ -150,8 +150,12 @@ bool Object::IsString() {
bool Object::IsSymbol() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
return (type & (kIsNotStringMask | kIsSymbolMask)) ==
(kStringTag | kSymbolTag);
// Because the symbol tag is non-zero and no non-string types have the
// symbol bit set, we can test for symbols with a very simple test
// operation.
ASSERT(kSymbolTag != 0);
ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
return (type & kIsSymbolMask) != 0;
}
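A small standalone check of why the single mask test is equivalent to the old two-constant comparison under the new bit layout. The constants are the ones introduced in this patch; the loop covers all 8-bit instance types:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kIsNotStringMask = 0x80;
  const uint32_t kStringTag = 0x0;
  const uint32_t kIsSymbolMask = 0x40;
  const uint32_t kSymbolTag = 0x40;
  for (uint32_t type = 0; type < 0x100; type++) {
    bool old_test = (type & (kIsNotStringMask | kIsSymbolMask)) ==
                    (kStringTag | kSymbolTag);
    bool new_test = (type & kIsSymbolMask) != 0;
    // The tests only disagree for a non-string type with the symbol bit
    // set, which the ASSERTs above rule out.
    if ((type & kIsNotStringMask) == 0 || (type & kIsSymbolMask) == 0) {
      assert(old_test == new_test);
    }
  }
  return 0;
}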
@ -226,7 +230,8 @@ StringShape::StringShape(InstanceType t)
bool StringShape::IsSymbol() {
ASSERT(valid());
return (type_ & kIsSymbolMask) == kSymbolTag;
ASSERT(kSymbolTag != 0);
return (type_ & kIsSymbolMask) != 0;
}
@ -336,8 +341,8 @@ bool Object::IsExternalArray() {
return false;
InstanceType instance_type =
HeapObject::cast(this)->map()->instance_type();
return (instance_type >= EXTERNAL_BYTE_ARRAY_TYPE &&
instance_type <= EXTERNAL_FLOAT_ARRAY_TYPE);
return (instance_type >= FIRST_EXTERNAL_ARRAY_TYPE &&
instance_type <= LAST_EXTERNAL_ARRAY_TYPE);
}

67
deps/v8/src/objects.cc

@ -6834,43 +6834,36 @@ void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
template<typename Shape, typename Key>
Object* HashTable<Shape, Key>::Allocate(
int at_least_space_for) {
Object* HashTable<Shape, Key>::Allocate(int at_least_space_for) {
int capacity = RoundUpToPowerOf2(at_least_space_for);
if (capacity < 4) capacity = 4; // Guarantee min capacity.
if (capacity < 4) {
capacity = 4; // Guarantee min capacity.
} else if (capacity > HashTable::kMaxCapacity) {
return Failure::OutOfMemoryException();
}
Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity));
if (!obj->IsFailure()) {
HashTable::cast(obj)->SetNumberOfElements(0);
HashTable::cast(obj)->SetNumberOfDeletedElements(0);
HashTable::cast(obj)->SetCapacity(capacity);
}
return obj;
}
// Find entry for key otherwise return -1.
// Find entry for key otherwise return kNotFound.
template<typename Shape, typename Key>
int HashTable<Shape, Key>::FindEntry(Key key) {
uint32_t nof = NumberOfElements();
if (nof == 0) return kNotFound; // Bail out if empty.
uint32_t capacity = Capacity();
uint32_t hash = Shape::Hash(key);
uint32_t entry = GetProbe(hash, 0, capacity);
uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
while (true) {
Object* element = KeyAt(entry);
uint32_t passed_elements = 0;
if (!element->IsNull()) {
if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
if (++passed_elements == nof) return kNotFound;
}
for (uint32_t i = 1; !element->IsUndefined(); i++) {
entry = GetProbe(hash, i, capacity);
element = KeyAt(entry);
if (!element->IsNull()) {
if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
if (++passed_elements == nof) return kNotFound;
}
if (element->IsUndefined()) break; // Empty entry.
if (!element->IsNull() && Shape::IsMatch(key, element)) return entry;
entry = NextProbe(entry, count++, capacity);
}
return kNotFound;
}
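For reference, a standalone sketch of the probe sequence FirstProbe/NextProbe generate: start at hash & (capacity - 1) and add 1, 2, 3, ... on each collision (triangular probing). With a power-of-two capacity this visits every slot exactly once. The seed hash below is arbitrary:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t capacity = 8;                 // must be a power of two
  uint32_t entry = 0x1234u & (capacity - 1);   // FirstProbe
  for (uint32_t count = 1; count <= capacity; count++) {
    std::printf("%u ", entry);
    entry = (entry + count) & (capacity - 1);  // NextProbe
  }
  std::printf("\n");  // prints: 4 5 7 2 6 3 1 0 (every slot exactly once)
  return 0;
}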
@ -6880,8 +6873,12 @@ template<typename Shape, typename Key>
Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
// Make sure 50% is free
if (nof + (nof >> 1) <= capacity) return this;
int nod = NumberOfDeletedElements();
// Return if:
// 50% is still free after adding n elements and
// at most 50% of the free elements are deleted elements.
if ((nof + (nof >> 1) <= capacity) &&
(nod <= (capacity - nof) >> 1)) return this;
Object* obj = Allocate(nof * 2);
if (obj->IsFailure()) return obj;
@ -6908,21 +6905,23 @@ Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
}
}
table->SetNumberOfElements(NumberOfElements());
table->SetNumberOfDeletedElements(0);
return table;
}
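A minimal sketch of the growth test above with concrete numbers; the helper name is made up, and `elements` already includes the n elements about to be added, matching `nof` above:

bool NeedsGrowthSketch(int capacity, int elements, int deleted) {
  bool half_free_after_add = (elements + (elements >> 1)) <= capacity;
  bool few_deleted = deleted <= ((capacity - elements) >> 1);
  return !(half_free_after_add && few_deleted);
}
// NeedsGrowthSketch(16, 10, 3) == false (15 <= 16 and 3 <= 3), but
// NeedsGrowthSketch(16, 10, 4) == true: too many of the free slots are
// tombstones, so the table is rebuilt even though it is not half full.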
template<typename Shape, typename Key>
uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
uint32_t capacity = Capacity();
uint32_t entry = GetProbe(hash, 0, capacity);
uint32_t entry = FirstProbe(hash, capacity);
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
while (true) {
Object* element = KeyAt(entry);
for (uint32_t i = 1; !(element->IsUndefined() || element->IsNull()); i++) {
entry = GetProbe(hash, i, capacity);
element = KeyAt(entry);
if (element->IsUndefined() || element->IsNull()) break;
entry = NextProbe(entry, count++, capacity);
}
return entry;
}
@ -7001,6 +7000,10 @@ int Dictionary<NumberDictionaryShape, uint32_t>::NumberOfEnumElements();
template
int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
template
int HashTable<NumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
// Collates undefined and nonexistent elements below limit from position
// zero of the elements. The object stays in Dictionary mode.
Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
@ -7703,7 +7706,7 @@ void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
}
// Update the number of elements.
SetNumberOfElements(NumberOfElements() - removed_entries);
ElementsRemoved(removed_entries);
}

145
deps/v8/src/objects.h

@ -204,14 +204,13 @@ enum PropertyNormalizationMode {
// instance_type is JS_OBJECT_TYPE.
//
// The names of the string instance types are intended to systematically
// mirror their encoding in the instance_type field of the map. The length
// (SHORT, MEDIUM, or LONG) is always mentioned. The default encoding is
// considered TWO_BYTE. It is not mentioned in the name. ASCII encoding is
// mentioned explicitly in the name. Likewise, the default representation is
// considered sequential. It is not mentioned in the name. The other
// representations (eg, CONS, EXTERNAL) are explicitly mentioned.
// Finally, the string is either a SYMBOL_TYPE (if it is a symbol) or a
// STRING_TYPE (if it is not a symbol).
// mirror their encoding in the instance_type field of the map. The default
// encoding is considered TWO_BYTE. It is not mentioned in the name. ASCII
// encoding is mentioned explicitly in the name. Likewise, the default
// representation is considered sequential. It is not mentioned in the
// name. The other representations (eg, CONS, EXTERNAL) are explicitly
// mentioned. Finally, the string is either a SYMBOL_TYPE (if it is a
// symbol) or a STRING_TYPE (if it is not a symbol).
//
// NOTE: The following things depend on the string types having
// instance_types that are less than those of all other types:
@ -237,11 +236,11 @@ enum PropertyNormalizationMode {
V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
\
V(MAP_TYPE) \
V(HEAP_NUMBER_TYPE) \
V(FIXED_ARRAY_TYPE) \
V(CODE_TYPE) \
V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
V(ODDBALL_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
V(PROXY_TYPE) \
V(BYTE_ARRAY_TYPE) \
V(PIXEL_ARRAY_TYPE) \
@ -257,6 +256,7 @@ enum PropertyNormalizationMode {
V(EXTERNAL_FLOAT_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(ACCESSOR_INFO_TYPE) \
V(ACCESS_CHECK_INFO_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
@ -383,11 +383,12 @@ const uint32_t kIsNotStringMask = 0x80;
const uint32_t kStringTag = 0x0;
const uint32_t kNotStringTag = 0x80;
// If bit 7 is clear, bit 5 indicates that the string is a symbol (if set) or
// not (if cleared).
const uint32_t kIsSymbolMask = 0x20;
// Bit 6 indicates that the object is a symbol (if set) or not (if cleared).
// There are not enough types that the non-string types (with bit 7 set) can
// have bit 6 set too.
const uint32_t kIsSymbolMask = 0x40;
const uint32_t kNotSymbolTag = 0x0;
const uint32_t kSymbolTag = 0x20;
const uint32_t kSymbolTag = 0x40;
// If bit 7 is clear then bit 2 indicates whether the string consists of
// two-byte characters or one-byte characters.
@ -418,6 +419,7 @@ const uint32_t kShortcutTypeTag = kConsStringTag;
enum InstanceType {
// String types.
SYMBOL_TYPE = kSymbolTag | kSeqStringTag,
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kSymbolTag | kConsStringTag,
@ -433,56 +435,66 @@ enum InstanceType {
EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
MAP_TYPE = kNotStringTag,
HEAP_NUMBER_TYPE,
FIXED_ARRAY_TYPE,
// Objects allocated in their own spaces (never in new space).
MAP_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE
CODE_TYPE,
ODDBALL_TYPE,
JS_GLOBAL_PROPERTY_CELL_TYPE,
// "Data", objects that cannot contain non-map-word pointers to heap
// objects.
HEAP_NUMBER_TYPE,
PROXY_TYPE,
BYTE_ARRAY_TYPE,
PIXEL_ARRAY_TYPE,
EXTERNAL_BYTE_ARRAY_TYPE,
EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
EXTERNAL_SHORT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
EXTERNAL_INT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
EXTERNAL_FLOAT_ARRAY_TYPE,
FILLER_TYPE,
SMI_TYPE,
EXTERNAL_FLOAT_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
ACCESSOR_INFO_TYPE,
ACCESS_CHECK_INFO_TYPE,
INTERCEPTOR_INFO_TYPE,
SHARED_FUNCTION_INFO_TYPE,
CALL_HANDLER_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
SCRIPT_TYPE,
#ifdef ENABLE_DEBUGGER_SUPPORT
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
#endif
SCRIPT_TYPE,
JS_VALUE_TYPE,
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GLOBAL_OBJECT_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
JS_REGEXP_TYPE,
JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE
JS_FUNCTION_TYPE,
// Pseudo-types
FIRST_NONSTRING_TYPE = MAP_TYPE,
FIRST_TYPE = 0x0,
INVALID_TYPE = FIRST_TYPE - 1,
LAST_TYPE = JS_FUNCTION_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NONSTRING_TYPE = MAP_TYPE,
// Boundaries for testing for an external array.
FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_FLOAT_ARRAY_TYPE,
// Boundary for promotion to old data space/old pointer space.
LAST_DATA_TYPE = FILLER_TYPE,
// Boundaries for testing the type is a JavaScript "object". Note that
// function objects are not counted as objects, even though they are
// implemented as such; only values whose typeof is "object" are included.
@ -1497,6 +1509,10 @@ class JSObject: public HeapObject {
#endif
Object* SlowReverseLookup(Object* value);
// Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
// Also maximal value of JSArray's length property.
static const uint32_t kMaxElementCount = 0xffffffffu;
static const uint32_t kMaxGap = 1024;
static const int kMaxFastElementsLength = 5000;
static const int kInitialMaxFastElementArray = 100000;
@ -1623,8 +1639,14 @@ class FixedArray: public Array {
// Casting.
static inline FixedArray* cast(Object* obj);
// Align data at kPointerSize, even if Array.kHeaderSize isn't aligned.
static const int kHeaderSize = POINTER_SIZE_ALIGN(Array::kHeaderSize);
static const int kHeaderSize = Array::kAlignedSize;
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
// consumption.
static const int kMaxSize = 512 * MB;
// Maximally allowed length of a FixedArray.
static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
// Dispatched behavior.
int FixedArraySize() { return SizeFor(length()); }
@ -1875,6 +1897,11 @@ class HashTable: public FixedArray {
return Smi::cast(get(kNumberOfElementsIndex))->value();
}
// Returns the number of deleted elements in the hash table.
int NumberOfDeletedElements() {
return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
}
// Returns the capacity of the hash table.
int Capacity() {
return Smi::cast(get(kCapacityIndex))->value();
@ -1886,8 +1913,14 @@ class HashTable: public FixedArray {
// ElementRemoved should be called whenever an element is removed from
// a hash table.
void ElementRemoved() { SetNumberOfElements(NumberOfElements() - 1); }
void ElementsRemoved(int n) { SetNumberOfElements(NumberOfElements() - n); }
void ElementRemoved() {
SetNumberOfElements(NumberOfElements() - 1);
SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
}
void ElementsRemoved(int n) {
SetNumberOfElements(NumberOfElements() - n);
SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
}
// Returns a new HashTable object. Might return Failure.
static Object* Allocate(int at_least_space_for);
@ -1914,8 +1947,9 @@ class HashTable: public FixedArray {
}
static const int kNumberOfElementsIndex = 0;
static const int kCapacityIndex = 1;
static const int kPrefixStartIndex = 2;
static const int kNumberOfDeletedElementsIndex = 1;
static const int kCapacityIndex = 2;
static const int kPrefixStartIndex = 3;
static const int kElementsStartIndex =
kPrefixStartIndex + Shape::kPrefixSize;
static const int kEntrySize = Shape::kEntrySize;
@ -1925,6 +1959,12 @@ class HashTable: public FixedArray {
// Constant used for denoting a absent entry.
static const int kNotFound = -1;
// Maximal capacity of HashTable. Based on maximal length of underlying
// FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
// cannot overflow.
static const int kMaxCapacity =
(FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
// Find entry for key otherwise return -1.
int FindEntry(Key key);
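Rough overflow argument for kMaxCapacity (assuming EntryToIndex(entry) is kElementsStartIndex + entry * kEntrySize, as elsewhere in this class): any probed entry is below Capacity() <= kMaxCapacity, so EntryToIndex stays below kElementsStartIndex + kMaxCapacity * kEntrySize, which by the definition above cannot exceed FixedArray::kMaxLength and therefore fits comfortably in an int.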
@ -1944,12 +1984,18 @@ class HashTable: public FixedArray {
fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
}
// Update the number of deleted elements in the hash table.
void SetNumberOfDeletedElements(int nod) {
fast_set(this, kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
}
// Sets the capacity of the hash table.
void SetCapacity(int capacity) {
// To scale a computed hash code to fit within the hash table, we
// use bit-wise AND with a mask, so the capacity must be positive
// and non-zero.
ASSERT(capacity > 0);
ASSERT(capacity <= kMaxCapacity);
fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
}
@ -1960,6 +2006,14 @@ class HashTable: public FixedArray {
return (hash + GetProbeOffset(number)) & (size - 1);
}
static uint32_t FirstProbe(uint32_t hash, uint32_t size) {
return hash & (size - 1);
}
static uint32_t NextProbe(uint32_t last, uint32_t number, uint32_t size) {
return (last + number) & (size - 1);
}
// Ensure enough space for n additional elements.
Object* EnsureCapacity(int n, Key key);
};
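A minimal standalone sketch (not from the patch) of the probe sequence that FirstProbe/NextProbe implement; like the class itself, it assumes the capacity is a power of two:

#include <stdint.h>

// Walks 'steps' probes of the sequence hash, hash+1, hash+1+2, ... (mod size).
// Since size is a power of two, the bitwise AND wraps the index cheaply, and
// the triangular increments visit every slot before repeating.
uint32_t ProbeSequenceSketch(uint32_t hash, uint32_t size, uint32_t steps) {
  uint32_t entry = hash & (size - 1);        // FirstProbe
  for (uint32_t i = 1; i < steps; i++) {
    entry = (entry + i) & (size - 1);        // NextProbe
  }
  return entry;
}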
@ -2289,6 +2343,11 @@ class ByteArray: public Array {
static const int kHeaderSize = Array::kHeaderSize;
static const int kAlignedSize = Array::kAlignedSize;
// Maximal memory consumption for a single ByteArray.
static const int kMaxSize = 512 * MB;
// Maximal length of a single ByteArray.
static const int kMaxLength = kMaxSize - kHeaderSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
};
@ -3575,6 +3634,14 @@ class JSRegExp: public JSObject {
static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
// Offsets directly into the data fixed array.
static const int kDataTagOffset =
FixedArray::kHeaderSize + kTagIndex * kPointerSize;
static const int kDataAsciiCodeOffset =
FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
static const int kIrregexpCaptureCountOffset =
FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
};
@ -3998,6 +4065,12 @@ class SeqAsciiString: public SeqString {
static const int kHeaderSize = String::kSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
// Maximal memory usage for a single sequential ASCII string.
static const int kMaxSize = 512 * MB;
// Maximal length of a single sequential ASCII string.
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize);
// Support for StringInputBuffer.
inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset,
@ -4044,6 +4117,12 @@ class SeqTwoByteString: public SeqString {
static const int kHeaderSize = String::kSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
// Maximal memory usage for a single sequential two-byte string.
static const int kMaxSize = 512 * MB;
// Maximal length of a single sequential two-byte string.
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
// Support for StringInputBuffer.
inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset_ptr,
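A worked example of the new string limits (the exact header size is not shown in the hunk, so the figures are approximate): with kMaxSize = 512 MB = 536,870,912 bytes, a sequential two-byte string can hold roughly (536,870,912 - kHeaderSize) / 2 ≈ 268 million characters, and the sequential ASCII variant roughly twice that; String::kMaxLength separately caps the length of concatenated results.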

27
deps/v8/src/parser.cc

@ -371,7 +371,7 @@ class RegExpBuilder: public ZoneObject {
void AddAtom(RegExpTree* tree);
void AddAssertion(RegExpTree* tree);
void NewAlternative(); // '|'
void AddQuantifierToAtom(int min, int max, bool is_greedy);
void AddQuantifierToAtom(int min, int max, RegExpQuantifier::Type type);
RegExpTree* ToRegExp();
private:
void FlushCharacters();
@ -503,7 +503,9 @@ RegExpTree* RegExpBuilder::ToRegExp() {
}
void RegExpBuilder::AddQuantifierToAtom(int min, int max, bool is_greedy) {
void RegExpBuilder::AddQuantifierToAtom(int min,
int max,
RegExpQuantifier::Type type) {
if (pending_empty_) {
pending_empty_ = false;
return;
@ -543,7 +545,7 @@ void RegExpBuilder::AddQuantifierToAtom(int min, int max, bool is_greedy) {
UNREACHABLE();
return;
}
terms_.Add(new RegExpQuantifier(min, max, is_greedy, atom));
terms_.Add(new RegExpQuantifier(min, max, type, atom));
LAST(ADD_TERM);
}
@ -3332,7 +3334,7 @@ Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
ArrayLiteral* array_literal = expression->AsArrayLiteral();
ASSERT(array_literal != NULL && array_literal->is_simple());
result->set(kTypeSlot, Smi::FromInt(ARRAY_LITERAL));
result->set(kElementsSlot, *array_literal->literals());
result->set(kElementsSlot, *array_literal->constant_elements());
}
return result;
}
@ -3596,7 +3598,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
top_scope_->NewUnresolved(function_name, inside_with());
fproxy->BindTo(fvar);
body.Add(new ExpressionStatement(
new Assignment(Token::INIT_VAR, fproxy,
new Assignment(Token::INIT_CONST, fproxy,
NEW(ThisFunction()),
RelocInfo::kNoPosition)));
}
@ -4278,12 +4280,16 @@ RegExpTree* RegExpParser::ParseDisjunction() {
default:
continue;
}
bool is_greedy = true;
RegExpQuantifier::Type type = RegExpQuantifier::GREEDY;
if (current() == '?') {
is_greedy = false;
type = RegExpQuantifier::NON_GREEDY;
Advance();
} else if (FLAG_regexp_possessive_quantifier && current() == '+') {
// FLAG_regexp_possessive_quantifier is a debug-only flag.
type = RegExpQuantifier::POSSESSIVE;
Advance();
}
builder->AddQuantifierToAtom(min, max, is_greedy);
builder->AddQuantifierToAtom(min, max, type);
}
}
@ -4705,6 +4711,11 @@ unsigned* ScriptDataImpl::Data() {
}
bool ScriptDataImpl::HasError() {
return has_error();
}
ScriptDataImpl* PreParse(Handle<String> source,
unibrow::CharacterStream* stream,
v8::Extension* extension) {

1
deps/v8/src/parser.h

@ -91,6 +91,7 @@ class ScriptDataImpl : public ScriptData {
virtual ~ScriptDataImpl();
virtual int Length();
virtual unsigned* Data();
virtual bool HasError();
FunctionEntry GetFunctionEnd(int start);
bool SanityCheck();

24
deps/v8/src/platform-freebsd.cc

@ -73,6 +73,12 @@ double ceiling(double x) {
}
double OS::nan_value() {
// NAN from math.h is defined in C99 and not in POSIX.
return NAN;
}
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@ -95,6 +101,24 @@ int OS::ActivationFrameAlignment() {
}
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
double OS::LocalTimeOffset() {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
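Illustrative numbers for the new LocalTimeOffset (not from the diff): in US Pacific time during DST, tm_gmtoff is -25200 seconds and tm_isdst > 0, so the function returns -25200 * 1000 - 3600 * 1000 = -28,800,000 ms, i.e. the standard-time offset of UTC-8 with the DST hour stripped back out. The same implementation is repeated below for the Linux, Mac OS X and OpenBSD ports.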
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in

24
deps/v8/src/platform-linux.cc

@ -73,6 +73,12 @@ double ceiling(double x) {
}
double OS::nan_value() {
// NAN from math.h is defined in C99 and not in POSIX.
return NAN;
}
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@ -159,6 +165,24 @@ int OS::ActivationFrameAlignment() {
}
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
double OS::LocalTimeOffset() {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in

24
deps/v8/src/platform-macos.cc

@ -86,6 +86,12 @@ double ceiling(double x) {
}
double OS::nan_value() {
// NAN from math.h is defined in C99 and not in POSIX.
return NAN;
}
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@ -259,6 +265,24 @@ int OS::ActivationFrameAlignment() {
}
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
double OS::LocalTimeOffset() {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
int OS::StackWalk(Vector<StackFrame> frames) {
// If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
if (backtrace == NULL)

24
deps/v8/src/platform-openbsd.cc

@ -72,6 +72,12 @@ double ceiling(double x) {
}
double OS::nan_value() {
// NAN from math.h is defined in C99 and not in POSIX.
return NAN;
}
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@ -94,6 +100,24 @@ int OS::ActivationFrameAlignment() {
}
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
double OS::LocalTimeOffset() {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in

25
deps/v8/src/platform-posix.cc

@ -61,13 +61,6 @@ double modulo(double x, double y) {
return fmod(x, y);
}
double OS::nan_value() {
// NAN from math.h is defined in C99 and not in POSIX.
return NAN;
}
// ----------------------------------------------------------------------------
// POSIX date/time support.
//
@ -99,15 +92,6 @@ int64_t OS::Ticks() {
}
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return t->tm_zone;
}
double OS::DaylightSavingsOffset(double time) {
if (isnan(time)) return nan_value();
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@ -117,15 +101,6 @@ double OS::DaylightSavingsOffset(double time) {
}
double OS::LocalTimeOffset() {
time_t tv = time(NULL);
struct tm* t = localtime(&tv);
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
// ----------------------------------------------------------------------------
// POSIX stdio support.
//

686
deps/v8/src/platform-solaris.cc

@ -0,0 +1,686 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Platform specific code for Solaris 10 goes here. For the POSIX compatible
// parts the implementation is in platform-posix.cc.
#include <sys/stack.h> // for stack alignment
#include <unistd.h> // getpagesize()
#include <sys/mman.h> // mmap()
#include <unistd.h> // usleep()
#include <execinfo.h> // backtrace(), backtrace_symbols()
#include <pthread.h>
#include <sched.h> // for sched_yield
#include <semaphore.h>
#include <time.h>
#include <sys/time.h> // gettimeofday(), timeradd()
#include <errno.h>
#include <ieeefp.h> // finite()
#include <signal.h> // sigemptyset(), etc
#undef MAP_TYPE
#include "v8.h"
#include "platform.h"
namespace v8 {
namespace internal {
int isfinite(double x) {
return finite(x) && !isnand(x);
}
} } // namespace v8::internal
// Test for infinity - usually defined in math.h
int isinf(double x) {
fpclass_t fpc = fpclass(x);
return (fpc == FP_NINF || fpc == FP_PINF);
}
// Test if x is less than y and neither is NaN - usually defined in math.h
int isless(double x, double y) {
return isnan(x) || isnan(y) ? 0 : x < y;
}
// Test if x is greater than y and neither is NaN - usually defined in math.h
int isgreater(double x, double y) {
return isnan(x) || isnan(y) ? 0 : x > y;
}
// Classify floating point number - usually defined in math.h
int fpclassify(double x) {
// Use the Solaris-specific fpclass() for classification.
fpclass_t fpc = fpclass(x);
switch (fpc) {
case FP_PNORM:
case FP_NNORM:
return FP_NORMAL;
case FP_PZERO:
case FP_NZERO:
return FP_ZERO;
case FP_PDENORM:
case FP_NDENORM:
return FP_SUBNORMAL;
case FP_PINF:
case FP_NINF:
return FP_INFINITE;
default:
// All cases should be covered by the code above.
ASSERT(fpc == FP_QNAN || fpc == FP_SNAN);
return FP_NAN;
}
}
int signbit(double x) {
// We need to take care of the special case of both positive
// and negative versions of zero.
if (x == 0)
return fpclass(x) == FP_NZERO;
else
return x < 0;
}
namespace v8 {
namespace internal {
// 0 is never a valid thread id on Solaris since the main thread is 1 and
// subsequent threads have their ids incremented from there
static const pthread_t kNoThread = (pthread_t) 0;
// TODO: Test to see if ceil() is correct on Solaris.
double ceiling(double x) {
return ceil(x);
}
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
// to an unsigned. Going directly will cause an overflow and the seed to be
// set to all ones. The seed will be identical for different instances that
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
}
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // Solaris runs on a lot of things.
}
double OS::nan_value() {
static double NAN = __builtin_nan("0x0");
return NAN;
}
int OS::ActivationFrameAlignment() {
return STACK_ALIGN;
}
const char* OS::LocalTimezone(double time) {
if (isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time/msPerSecond));
struct tm* t = localtime(&tv);
if (NULL == t) return "";
return tzname[0]; // the location of the timezone string on Solaris
}
double OS::LocalTimeOffset() {
int days, hours, minutes;
time_t tv = time(NULL);
// on Solaris, struct tm does not contain a tm_gmtoff field...
struct tm* loc = localtime(&tv);
struct tm* utc = gmtime(&tv);
// Calculate the UTC offset.
days = loc->tm_yday - utc->tm_yday;
hours = ((days < -1 ? 24 : 1 < days ? -24 : days * 24) +
loc->tm_hour - utc->tm_hour);
minutes = hours * 60 + loc->tm_min - utc->tm_min;
// don't include any daylight savings offset in local time
if (loc->tm_isdst > 0) minutes -= 60;
// the result is in milliseconds
return static_cast<double>(minutes * 60 * msPerSecond);
}
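Worked example with made-up values: if local time is 14:30 and UTC is 22:30 on the same day-of-year, then days = 0, hours = 14 - 22 = -8 and minutes = -480; with tm_isdst <= 0 the result is -480 * 60 * 1000 = -28,800,000 ms, matching what the tm_gmtoff-based ports above would report for the same zone.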
// We keep the lowest and highest addresses mapped as a quick way of
// determining that pointers are outside the heap (used mostly in assertions
// and verification). The estimate is conservative, ie, not all addresses in
// 'allocated' space are actually allocated to our heap. The range is
// [lowest, highest), inclusive on the low end and exclusive on the high end.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
static void UpdateAllocatedSpaceLimits(void* address, int size) {
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
}
bool OS::IsOutsideAllocatedSpace(void* address) {
return address < lowest_ever_allocated || address >= highest_ever_allocated;
}
size_t OS::AllocateAlignment() {
return (size_t)getpagesize();
}
void* OS::Allocate(const size_t requested,
size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, getpagesize());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) {
LOG(StringEvent("OS::Allocate", "mmap failed"));
return NULL;
}
*allocated = msize;
UpdateAllocatedSpaceLimits(mbase, msize);
return mbase;
}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
int result = munmap(address, size);
USE(result);
ASSERT(result == 0);
}
#ifdef ENABLE_HEAP_PROTECTION
void OS::Protect(void* address, size_t size) {
// TODO(1240712): mprotect has a return value which is ignored here.
mprotect(address, size, PROT_READ);
}
void OS::Unprotect(void* address, size_t size, bool is_executable) {
// TODO(1240712): mprotect has a return value which is ignored here.
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
mprotect(address, size, prot);
}
#endif
void OS::Sleep(int milliseconds) {
useconds_t ms = static_cast<useconds_t>(milliseconds);
usleep(1000 * ms);
}
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination
abort();
}
void OS::DebugBreak() {
asm("int $3");
}
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
private:
FILE* file_;
void* memory_;
int size_;
};
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
if (file == NULL) return NULL;
int result = fwrite(initial, size, 1, file);
if (result < 1) {
fclose(file);
return NULL;
}
void* memory =
mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
return new PosixMemoryMappedFile(file, memory, size);
}
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
if (memory_) munmap(memory_, size_);
fclose(file_);
}
void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
UNIMPLEMENTED();
#endif
}
int OS::StackWalk(Vector<OS::StackFrame> frames) {
int frames_size = frames.length();
void** addresses = NewArray<void*>(frames_size);
int frames_count = backtrace(addresses, frames_size);
char** symbols;
symbols = backtrace_symbols(addresses, frames_count);
if (symbols == NULL) {
DeleteArray(addresses);
return kStackWalkError;
}
for (int i = 0; i < frames_count; i++) {
frames[i].address = addresses[i];
// Format a text representation of the frame based on the information
// available.
SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
"%s",
symbols[i]);
// Make sure line termination is in place.
frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
}
DeleteArray(addresses);
free(symbols);
return frames_count;
}
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory(size_t size) {
address_ = mmap(NULL, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
size_ = size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
if (0 == munmap(address(), size())) address_ = MAP_FAILED;
}
}
bool VirtualMemory::IsReserved() {
return address_ != MAP_FAILED;
}
bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
if (MAP_FAILED == mmap(address, size, prot,
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd, kMmapFdOffset)) {
return false;
}
UpdateAllocatedSpaceLimits(address, size);
return true;
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
return mmap(address, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
kMmapFd, kMmapFdOffset) != MAP_FAILED;
}
class ThreadHandle::PlatformData : public Malloced {
public:
explicit PlatformData(ThreadHandle::Kind kind) {
Initialize(kind);
}
void Initialize(ThreadHandle::Kind kind) {
switch (kind) {
case ThreadHandle::SELF: thread_ = pthread_self(); break;
case ThreadHandle::INVALID: thread_ = kNoThread; break;
}
}
pthread_t thread_; // Thread handle for pthread.
};
ThreadHandle::ThreadHandle(Kind kind) {
data_ = new PlatformData(kind);
}
void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
data_->Initialize(kind);
}
ThreadHandle::~ThreadHandle() {
delete data_;
}
bool ThreadHandle::IsSelf() const {
return pthread_equal(data_->thread_, pthread_self());
}
bool ThreadHandle::IsValid() const {
return data_->thread_ != kNoThread;
}
Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
}
Thread::~Thread() {
}
static void* ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
// This is also initialized by the first argument to pthread_create() but we
// don't know which thread will run first (the original thread or the new
// one) so we initialize it here too.
thread->thread_handle_data()->thread_ = pthread_self();
ASSERT(thread->IsValid());
thread->Run();
return NULL;
}
void Thread::Start() {
pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
ASSERT(IsValid());
}
void Thread::Join() {
pthread_join(thread_handle_data()->thread_, NULL);
}
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
pthread_key_t key;
int result = pthread_key_create(&key, NULL);
USE(result);
ASSERT(result == 0);
return static_cast<LocalStorageKey>(key);
}
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
int result = pthread_key_delete(pthread_key);
USE(result);
ASSERT(result == 0);
}
void* Thread::GetThreadLocal(LocalStorageKey key) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
return pthread_getspecific(pthread_key);
}
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
pthread_setspecific(pthread_key, value);
}
void Thread::YieldCPU() {
sched_yield();
}
class SolarisMutex : public Mutex {
public:
SolarisMutex() {
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&mutex_, &attr);
}
~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
int Lock() { return pthread_mutex_lock(&mutex_); }
int Unlock() { return pthread_mutex_unlock(&mutex_); }
private:
pthread_mutex_t mutex_;
};
Mutex* OS::CreateMutex() {
return new SolarisMutex();
}
class SolarisSemaphore : public Semaphore {
public:
explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
virtual void Wait();
virtual bool Wait(int timeout);
virtual void Signal() { sem_post(&sem_); }
private:
sem_t sem_;
};
void SolarisSemaphore::Wait() {
while (true) {
int result = sem_wait(&sem_);
if (result == 0) return; // Successfully got semaphore.
CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
}
}
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
(ts)->tv_sec = (tv)->tv_sec; \
(ts)->tv_nsec = (tv)->tv_usec * 1000; \
} while (false)
#endif
#ifndef timeradd
#define timeradd(a, b, result) \
do { \
(result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
(result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
if ((result)->tv_usec >= 1000000) { \
++(result)->tv_sec; \
(result)->tv_usec -= 1000000; \
} \
} while (0)
#endif
bool SolarisSemaphore::Wait(int timeout) {
const long kOneSecondMicros = 1000000; // NOLINT
// Split timeout into second and microsecond parts.
struct timeval delta;
delta.tv_usec = timeout % kOneSecondMicros;
delta.tv_sec = timeout / kOneSecondMicros;
struct timeval current_time;
// Get the current time.
if (gettimeofday(&current_time, NULL) == -1) {
return false;
}
// Calculate time for end of timeout.
struct timeval end_time;
timeradd(&current_time, &delta, &end_time);
struct timespec ts;
TIMEVAL_TO_TIMESPEC(&end_time, &ts);
// Wait for semaphore signalled or timeout.
while (true) {
int result = sem_timedwait(&sem_, &ts);
if (result == 0) return true; // Successfully got semaphore.
if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
}
}
Semaphore* OS::CreateSemaphore(int count) {
return new SolarisSemaphore(count);
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static Sampler* active_sampler_ = NULL;
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
if (active_sampler_ == NULL) return;
TickSample sample;
// We always sample the VM state.
sample.state = Logger::state();
active_sampler_->Tick(&sample);
}
class Sampler::PlatformData : public Malloced {
public:
PlatformData() {
signal_handler_installed_ = false;
}
bool signal_handler_installed_;
struct sigaction old_signal_handler_;
struct itimerval old_timer_value_;
};
Sampler::Sampler(int interval, bool profiling)
: interval_(interval), profiling_(profiling), active_(false) {
data_ = new PlatformData();
}
Sampler::~Sampler() {
delete data_;
}
void Sampler::Start() {
// There can only be one active sampler at a time on POSIX
// platforms.
if (active_sampler_ != NULL) return;
// Request profiling signals.
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_SIGINFO;
if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
data_->signal_handler_installed_ = true;
// Set the itimer to generate a tick for each interval.
itimerval itimer;
itimer.it_interval.tv_sec = interval_ / 1000;
itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
// Set this sampler as the active sampler.
active_sampler_ = this;
active_ = true;
}
void Sampler::Stop() {
// Restore old signal handler
if (data_->signal_handler_installed_) {
setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
sigaction(SIGPROF, &data_->old_signal_handler_, 0);
data_->signal_handler_installed_ = false;
}
// This sampler is no longer the active sampler.
active_sampler_ = NULL;
active_ = false;
}
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal

14
deps/v8/src/platform.h

@ -46,6 +46,20 @@
#define V8_INFINITY INFINITY
#ifdef __sun
namespace v8 {
namespace internal {
int isfinite(double x);
} }
int isinf(double x);
int isless(double x, double y);
int isgreater(double x, double y);
int fpclassify(double x);
int signbit(double x);
#endif // __sun
// Windows specific stuff.
#ifdef WIN32

12
deps/v8/src/regexp-delay.js

@ -136,13 +136,7 @@ function CompileRegExp(pattern, flags) {
function DoRegExpExec(regexp, string, index) {
return %RegExpExec(regexp, string, index, lastMatchInfo);
}
function DoRegExpExecGlobal(regexp, string) {
// Returns an array of arrays of substring indices.
return %RegExpExecGlobal(regexp, string, lastMatchInfo);
return %_RegExpExec(regexp, string, index, lastMatchInfo);
}
@ -170,7 +164,7 @@ function RegExpExec(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
if (matchIndices == null) {
if (this.global) this.lastIndex = 0;
@ -227,7 +221,7 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
if (matchIndices == null) {
if (this.global) this.lastIndex = 0;

9
deps/v8/src/regexp-macro-assembler-tracer.cc

@ -307,18 +307,11 @@ void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
uc16 type,
int cp_offset,
bool check_offset,
Label* on_no_match) {
bool supported = assembler_->CheckSpecialCharacterClass(type,
cp_offset,
check_offset,
on_no_match);
PrintF(" CheckSpecialCharacterClass(type='%c', offset=%d, "
"check_offset=%s, label[%08x]): %s;\n",
PrintF(" CheckSpecialCharacterClass(type='%c', label[%08x]): %s;\n",
type,
cp_offset,
check_offset ? "true" : "false",
on_no_match,
supported ? "true" : "false");
return supported;

2
deps/v8/src/regexp-macro-assembler-tracer.h

@ -69,8 +69,6 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
uc16 and_with,
Label* on_not_equal);
virtual bool CheckSpecialCharacterClass(uc16 type,
int cp_offset,
bool check_offset,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);

17
deps/v8/src/regexp-macro-assembler.cc

@ -143,17 +143,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
input_end,
offsets_vector,
previous_index == 0);
if (res == SUCCESS) {
// Capture values are relative to start_offset only.
// Convert them to be relative to start of string.
for (int i = 0; i < offsets_vector_length; i++) {
if (offsets_vector[i] >= 0) {
offsets_vector[i] += previous_index;
}
}
}
return res;
}
@ -167,7 +156,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
int* output,
bool at_start) {
typedef int (*matcher)(String*, int, const byte*,
const byte*, int*, int, Address);
const byte*, int*, int, Address, int);
matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
int at_start_val = at_start ? 1 : 0;
@ -176,6 +165,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
RegExpStack stack;
Address stack_base = RegExpStack::stack_base();
int direct_call = 0;
int result = CALL_GENERATED_REGEXP_CODE(matcher_func,
input,
start_offset,
@ -183,7 +173,8 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
input_end,
output,
at_start_val,
stack_base);
stack_base,
direct_call);
ASSERT(result <= SUCCESS);
ASSERT(result >= RETRY);

2
deps/v8/src/regexp-macro-assembler.h

@ -123,8 +123,6 @@ class RegExpMacroAssembler {
// not have custom support.
// May clobber the current loaded character.
virtual bool CheckSpecialCharacterClass(uc16 type,
int cp_offset,
bool check_offset,
Label* on_no_match) {
return false;
}

12
deps/v8/src/regexp-stack.h

@ -98,12 +98,24 @@ class RegExpStack {
void Free();
};
// Address of allocated memory.
static Address memory_address() {
return reinterpret_cast<Address>(&thread_local_.memory_);
}
// Address of size of allocated memory.
static Address memory_size_address() {
return reinterpret_cast<Address>(&thread_local_.memory_size_);
}
// Resets the buffer if it has grown beyond the default/minimum size.
// After this, the buffer is either the default size, or it is empty, so
// you have to call EnsureCapacity before using it again.
static void Reset();
static ThreadLocal thread_local_;
friend class ExternalReference;
};
}} // namespace v8::internal

308
deps/v8/src/runtime.cc

@ -559,6 +559,73 @@ static Object* Runtime_IsConstructCall(Arguments args) {
}
// Recursively traverses hidden prototypes if property is not found
static void GetOwnPropertyImplementation(JSObject* obj,
String* name,
LookupResult* result) {
obj->LocalLookupRealNamedProperty(name, result);
if (!result->IsProperty()) {
Object* proto = obj->GetPrototype();
if (proto->IsJSObject() &&
JSObject::cast(proto)->map()->is_hidden_prototype())
GetOwnPropertyImplementation(JSObject::cast(proto),
name, result);
}
}
// Returns an array with the property description:
// if args[1] is not a property on args[0]
// returns undefined
// if args[1] is a data property on args[0]
// [false, value, Writeable, Enumerable, Configurable]
// if args[1] is an accessor on args[0]
// [true, GetFunction, SetFunction, Enumerable, Configurable]
static Object* Runtime_GetOwnProperty(Arguments args) {
HandleScope scope;
Handle<FixedArray> elms = Factory::NewFixedArray(5);
Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
LookupResult result;
CONVERT_CHECKED(JSObject, obj, args[0]);
CONVERT_CHECKED(String, name, args[1]);
// Use recursive implementation to also traverse hidden prototypes
GetOwnPropertyImplementation(obj, name, &result);
if (!result.IsProperty())
return Heap::undefined_value();
if (result.type() == CALLBACKS) {
Object* structure = result.GetCallbackObject();
if (structure->IsProxy()) {
// Property that is internally implemented as a callback.
Object* value = obj->GetPropertyWithCallback(
obj, structure, name, result.holder());
elms->set(0, Heap::false_value());
elms->set(1, value);
elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
} else if (structure->IsFixedArray()) {
// __defineGetter__/__defineSetter__ callback.
elms->set(0, Heap::true_value());
elms->set(1, FixedArray::cast(structure)->get(0));
elms->set(2, FixedArray::cast(structure)->get(1));
} else {
// TODO(ricow): Handle API callbacks.
return Heap::undefined_value();
}
} else {
elms->set(0, Heap::false_value());
elms->set(1, result.GetLazyValue());
elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
}
elms->set(3, Heap::ToBoolean(!result.IsDontEnum()));
elms->set(4, Heap::ToBoolean(!result.IsReadOnly()));
return *desc;
}
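Illustrative result shapes (not asserted by the patch itself): for a plain data property such as the "x" of an object built from {x: 1}, the returned array would be [false, 1, true, true, true]; for an accessor installed via __defineGetter__/__defineSetter__ it would be [true, getter, setter, enumerable, configurable], with the flags derived from the LookupResult queries above.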
static Object* Runtime_RegExpCompile(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
@ -1158,6 +1225,7 @@ static Object* Runtime_RegExpExec(Arguments args) {
RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
Counters::regexp_entry_runtime.Increment();
Handle<Object> result = RegExpImpl::Exec(regexp,
subject,
index,
@ -1384,6 +1452,17 @@ static Object* CharCodeAt(String* subject, Object* index) {
}
static Object* CharFromCode(Object* char_code) {
uint32_t code;
if (Array::IndexFromObject(char_code, &code)) {
if (code <= 0xffff) {
return Heap::LookupSingleCharacterStringFromCode(code);
}
}
return Heap::empty_string();
}
static Object* Runtime_StringCharCodeAt(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@ -1394,16 +1473,20 @@ static Object* Runtime_StringCharCodeAt(Arguments args) {
}
static Object* Runtime_StringCharAt(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
CONVERT_CHECKED(String, subject, args[0]);
Object* index = args[1];
return CharFromCode(CharCodeAt(subject, index));
}
static Object* Runtime_CharFromCode(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
uint32_t code;
if (Array::IndexFromObject(args[0], &code)) {
if (code <= 0xffff) {
return Heap::LookupSingleCharacterStringFromCode(code);
}
}
return Heap::empty_string();
return CharFromCode(args[0]);
}
// Forward declarations.
@ -1509,7 +1592,7 @@ class ReplacementStringBuilder {
void IncrementCharacterCount(int by) {
if (character_count_ > Smi::kMaxValue - by) {
if (character_count_ > String::kMaxLength - by) {
V8::FatalProcessOutOfMemory("String.replace result too large.");
}
character_count_ += by;
@ -2473,6 +2556,7 @@ static Object* Runtime_SubString(Arguments args) {
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
Counters::sub_string_runtime.Increment();
return value->SubString(start, end);
}
@ -2724,7 +2808,6 @@ static Object* Runtime_GetProperty(Arguments args) {
}
// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
static Object* Runtime_KeyedGetProperty(Arguments args) {
NoHandleAllocation ha;
@ -2776,6 +2859,13 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
// If value is the hole do the general lookup.
}
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
// Fast case for string indexing using [] with a smi index.
HandleScope scope;
Handle<String> str = args.at<String>(0);
int index = Smi::cast(args[1])->value();
Handle<Object> result = GetCharAt(str, index);
return *result;
}
// Fall back to GetObjectProperty.
@ -3362,6 +3452,7 @@ static Object* Runtime_URIEscape(Arguments args) {
escaped_length += 3;
}
// We don't allow strings that are longer than a maximal length.
ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
if (escaped_length > String::kMaxLength) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
@ -3908,20 +3999,19 @@ static inline void StringBuilderConcatHelper(String* special,
static Object* Runtime_StringBuilderConcat(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
ASSERT(args.length() == 3);
CONVERT_CHECKED(JSArray, array, args[0]);
CONVERT_CHECKED(String, special, args[1]);
if (!args[1]->IsSmi()) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
int array_length = Smi::cast(args[1])->value();
CONVERT_CHECKED(String, special, args[2]);
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
int special_length = special->length();
Object* smi_array_length = array->length();
if (!smi_array_length->IsSmi()) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
int array_length = Smi::cast(smi_array_length)->value();
if (!array->HasFastElements()) {
return Top::Throw(Heap::illegal_argument_symbol());
}
@ -3939,6 +4029,7 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
bool ascii = special->IsAsciiRepresentation();
int position = 0;
int increment = 0;
for (int i = 0; i < array_length; i++) {
Object* elt = fixed_array->get(i);
if (elt->IsSmi()) {
@ -3951,10 +4042,10 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
if (pos + len > special_length) {
return Top::Throw(Heap::illegal_argument_symbol());
}
position += len;
increment = len;
} else {
// Position and length encoded in two smis.
position += (-len);
increment = (-len);
// Get the position and check that it is also a smi.
i++;
if (i >= array_length) {
@ -3968,17 +4059,18 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
position += element_length;
increment = element_length;
if (ascii && !element->IsAsciiRepresentation()) {
ascii = false;
}
} else {
return Top::Throw(Heap::illegal_argument_symbol());
}
if (position > String::kMaxLength) {
if (increment > String::kMaxLength - position) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
position += increment;
}
int length = position;
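Reasoning sketch for the reordered check: position is kept at or below String::kMaxLength on every iteration, so String::kMaxLength - position can never underflow, whereas the old form added the element length into position first and compared afterwards, letting a large element wrap the 32-bit counter before the test. The same compare-before-add pattern appears in IncrementCharacterCount above and in the Array::concat changes below.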
@ -4192,6 +4284,8 @@ static Object* Runtime_StringCompare(Arguments args) {
CONVERT_CHECKED(String, x, args[0]);
CONVERT_CHECKED(String, y, args[1]);
Counters::string_compare_runtime.Increment();
// A few fast case tests before we flatten.
if (x == y) return Smi::FromInt(EQUAL);
if (y->length() == 0) {
@ -5227,51 +5321,31 @@ static Object* Runtime_CompileString(Arguments args) {
}
static Handle<JSFunction> GetBuiltinFunction(String* name) {
LookupResult result;
Top::global_context()->builtins()->LocalLookup(name, &result);
return Handle<JSFunction>(JSFunction::cast(result.GetValue()));
static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
ASSERT(args.length() == 3);
if (!args[0]->IsJSFunction()) {
return MakePair(Top::ThrowIllegalOperation(), NULL);
}
static Object* CompileDirectEval(Handle<String> source) {
// Compute the eval context.
HandleScope scope;
Handle<JSFunction> callee = args.at<JSFunction>(0);
Handle<Object> receiver; // Will be overwritten.
// Compute the calling context.
Handle<Context> context = Handle<Context>(Top::context());
#ifdef DEBUG
// Make sure Top::context() agrees with the old code that traversed
// the stack frames to compute the context.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
Handle<Context> context(Context::cast(frame->context()));
bool is_global = context->IsGlobalContext();
// Compile source string in the current context.
Handle<JSFunction> boilerplate = Compiler::CompileEval(
source,
context,
is_global,
Compiler::DONT_VALIDATE_JSON);
if (boilerplate.is_null()) return Failure::Exception();
Handle<JSFunction> fun =
Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
return *fun;
}
static Object* Runtime_ResolvePossiblyDirectEval(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;
CONVERT_ARG_CHECKED(JSFunction, callee, 0);
Handle<Object> receiver;
ASSERT(Context::cast(frame->context()) == *context);
#endif
// Find where the 'eval' symbol is bound. It is unaliased only if
// it is bound in the global context.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
Handle<Context> context(Context::cast(frame->context()));
int index;
PropertyAttributes attributes;
while (!context.is_null()) {
int index = -1;
PropertyAttributes attributes = ABSENT;
while (true) {
receiver = context->Lookup(Factory::eval_symbol(), FOLLOW_PROTOTYPE_CHAIN,
&index, &attributes);
// Stop search when eval is found or when the global context is
@ -5290,46 +5364,42 @@ static Object* Runtime_ResolvePossiblyDirectEval(Arguments args) {
Handle<Object> name = Factory::eval_symbol();
Handle<Object> reference_error =
Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
return Top::Throw(*reference_error);
return MakePair(Top::Throw(*reference_error), NULL);
}
if (context->IsGlobalContext()) {
// 'eval' is bound in the global context, but it may have been overwritten.
// Compare it to the builtin 'GlobalEval' function to make sure.
Handle<JSFunction> global_eval =
GetBuiltinFunction(Heap::global_eval_symbol());
if (global_eval.is_identical_to(callee)) {
// A direct eval call.
if (args[1]->IsString()) {
CONVERT_ARG_CHECKED(String, source, 1);
// A normal eval call on a string. Compile it and return the
// compiled function bound in the local context.
Object* compiled_source = CompileDirectEval(source);
if (compiled_source->IsFailure()) return compiled_source;
receiver = Handle<Object>(frame->receiver());
callee = Handle<JSFunction>(JSFunction::cast(compiled_source));
} else {
// An eval call that is not called on a string. Global eval
// deals better with this.
receiver = Handle<Object>(Top::global_context()->global());
}
} else {
// 'eval' is overwritten. Just call the function with the given arguments.
receiver = Handle<Object>(Top::global_context()->global());
}
} else {
if (!context->IsGlobalContext()) {
// 'eval' is not bound in the global context. Just call the function
// with the given arguments. This is not necessarily the global eval.
if (receiver->IsContext()) {
context = Handle<Context>::cast(receiver);
receiver = Handle<Object>(context->get(index));
} else if (receiver->IsJSContextExtensionObject()) {
receiver = Handle<JSObject>(Top::context()->global()->global_receiver());
}
return MakePair(*callee, *receiver);
}
Handle<FixedArray> call = Factory::NewFixedArray(2);
call->set(0, *callee);
call->set(1, *receiver);
return *call;
// 'eval' is bound in the global context, but it may have been overwritten.
// Compare it to the builtin 'GlobalEval' function to make sure.
if (*callee != Top::global_context()->global_eval_fun() ||
!args[1]->IsString()) {
return MakePair(*callee, Top::context()->global()->global_receiver());
}
// Deal with a normal eval call with a string argument. Compile it
// and return the compiled function bound in the local context.
Handle<String> source = args.at<String>(1);
Handle<JSFunction> boilerplate = Compiler::CompileEval(
source,
Handle<Context>(Top::context()),
Top::context()->IsGlobalContext(),
Compiler::DONT_VALIDATE_JSON);
if (boilerplate.is_null()) return MakePair(Failure::Exception(), NULL);
callee = Factory::NewFunctionFromBoilerplate(
boilerplate,
Handle<Context>(Top::context()),
NOT_TENURED);
return MakePair(*callee, args[2]);
}
@ -5386,11 +5456,11 @@ class ArrayConcatVisitor {
uint32_t index_limit,
bool fast_elements) :
storage_(storage), index_limit_(index_limit),
fast_elements_(fast_elements), index_offset_(0) { }
index_offset_(0), fast_elements_(fast_elements) { }
void visit(uint32_t i, Handle<Object> elm) {
uint32_t index = i + index_offset_;
if (index >= index_limit_) return;
if (i >= index_limit_ - index_offset_) return;
uint32_t index = index_offset_ + i;
if (fast_elements_) {
ASSERT(index < static_cast<uint32_t>(storage_->length()));
@ -5406,14 +5476,23 @@ class ArrayConcatVisitor {
}
void increase_index_offset(uint32_t delta) {
if (index_limit_ - index_offset_ < delta) {
index_offset_ = index_limit_;
} else {
index_offset_ += delta;
}
}
Handle<FixedArray> storage() { return storage_; }
private:
Handle<FixedArray> storage_;
// Limit on the accepted indices. Elements with indices larger than the
// limit are ignored by the visitor.
uint32_t index_limit_;
bool fast_elements_;
// Index after last seen index. Always less than or equal to index_limit_.
uint32_t index_offset_;
bool fast_elements_;
};
@ -5585,6 +5664,11 @@ static uint32_t IterateElements(Handle<JSObject> receiver,
*
* If an ArrayConcatVisitor object is given, the visitor is called with
* parameters, element's index + visitor_index_offset and the element.
*
* The returned number of elements is an upper bound on the actual number
* of elements added. If the same element occurs in more than one object
* in the array's prototype chain, it will be counted more than once, but
* will only occur once in the result.
*/
static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array,
ArrayConcatVisitor* visitor) {
@ -5607,8 +5691,14 @@ static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array,
uint32_t nof_elements = 0;
for (int i = objects.length() - 1; i >= 0; i--) {
Handle<JSObject> obj = objects[i];
nof_elements +=
uint32_t encountered_elements =
IterateElements(Handle<JSObject>::cast(obj), range, visitor);
if (encountered_elements > JSObject::kMaxElementCount - nof_elements) {
nof_elements = JSObject::kMaxElementCount;
} else {
nof_elements += encountered_elements;
}
}
return nof_elements;
@ -5625,10 +5715,12 @@ static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array,
* elements. If an argument is not an Array object, the function
* visits the object as if it were a one-element array.
*
* If the result array index overflows 32-bit integer, the rounded
* If the result array index overflows 32-bit unsigned integer, the rounded
* non-negative number is used as new length. For example, if one
* array length is 2^32 - 1, second array length is 1, the
* concatenated array length is 0.
* TODO(lrn) Change length behavior to ECMAScript 5 specification (length
* is one more than the last array index to get a value assigned).
*/
static uint32_t IterateArguments(Handle<JSArray> arguments,
ArrayConcatVisitor* visitor) {
@ -5644,18 +5736,25 @@ static uint32_t IterateArguments(Handle<JSArray> arguments,
IterateArrayAndPrototypeElements(array, visitor);
// Total elements of array and its prototype chain can be more than
// the array length, but ArrayConcat can only concatenate at most
// the array length number of elements.
visited_elements += (nof_elements > len) ? len : nof_elements;
// the array length number of elements. We use the length as an estimate
// for the actual number of elements added.
uint32_t added_elements = (nof_elements > len) ? len : nof_elements;
if (JSArray::kMaxElementCount - visited_elements < added_elements) {
visited_elements = JSArray::kMaxElementCount;
} else {
visited_elements += added_elements;
}
if (visitor) visitor->increase_index_offset(len);
} else {
if (visitor) {
visitor->visit(0, obj);
visitor->increase_index_offset(1);
}
if (visited_elements < JSArray::kMaxElementCount) {
visited_elements++;
}
}
}
return visited_elements;
}
@ -5663,6 +5762,8 @@ static uint32_t IterateArguments(Handle<JSArray> arguments,
/**
* Array::concat implementation.
* See ECMAScript 262, 15.4.4.4.
* TODO(lrn): Fix non-compliance for very large concatenations and update to
* follow the ECMAScript 5 specification.
*/
static Object* Runtime_ArrayConcat(Arguments args) {
ASSERT(args.length() == 1);
@ -5679,12 +5780,18 @@ static Object* Runtime_ArrayConcat(Arguments args) {
{ AssertNoAllocation nogc;
for (uint32_t i = 0; i < num_of_args; i++) {
Object* obj = arguments->GetElement(i);
uint32_t length_estimate;
if (obj->IsJSArray()) {
result_length +=
length_estimate =
static_cast<uint32_t>(JSArray::cast(obj)->length()->Number());
} else {
result_length++;
length_estimate = 1;
}
if (JSObject::kMaxElementCount - result_length < length_estimate) {
result_length = JSObject::kMaxElementCount;
break;
}
result_length += length_estimate;
}
}
@ -5718,7 +5825,8 @@ static Object* Runtime_ArrayConcat(Arguments args) {
IterateArguments(arguments, &visitor);
result->set_length(*len);
result->set_elements(*storage);
// Please note the storage might have changed in the visitor.
result->set_elements(*visitor.storage());
return *result;
}

7
deps/v8/src/runtime.h

@ -61,6 +61,8 @@ namespace internal {
\
F(IsConstructCall, 0, 1) \
\
F(GetOwnProperty, 2, 1) \
\
/* Utilities */ \
F(GetCalledFunction, 0, 1) \
F(GetFunctionDelegate, 1, 1) \
@ -103,7 +105,7 @@ namespace internal {
F(NumberUnaryMinus, 1, 1) \
\
F(StringAdd, 2, 1) \
F(StringBuilderConcat, 2, 1) \
F(StringBuilderConcat, 3, 1) \
\
/* Bit operations */ \
F(NumberOr, 2, 1) \
@ -146,6 +148,7 @@ namespace internal {
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
F(StringCharAt, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
@ -202,7 +205,7 @@ namespace internal {
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
F(ResolvePossiblyDirectEval, 2, 1) \
F(ResolvePossiblyDirectEval, 3, 2) \
\
F(SetProperty, -1 /* 3 or 4 */, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \

50
deps/v8/src/runtime.js

@ -114,30 +114,33 @@ function STRICT_EQUALS(x) {
// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
// the result when either (or both) the operands are NaN.
function COMPARE(x, ncr) {
// Fast case for numbers and strings.
if (IS_NUMBER(this) && IS_NUMBER(x)) {
return %NumberCompare(this, x, ncr);
}
if (IS_STRING(this) && IS_STRING(x)) {
return %StringCompare(this, x);
}
var left;
// If one of the operands is undefined, it will convert to NaN and
// thus the result should be as if one of the operands was NaN.
if (IS_UNDEFINED(this) || IS_UNDEFINED(x)) {
// Fast cases for string, numbers and undefined compares.
if (IS_STRING(this)) {
if (IS_STRING(x)) return %_StringCompare(this, x);
if (IS_UNDEFINED(x)) return ncr;
left = this;
} else if (IS_NUMBER(this)) {
if (IS_NUMBER(x)) return %NumberCompare(this, x, ncr);
if (IS_UNDEFINED(x)) return ncr;
left = this;
} else if (IS_UNDEFINED(this)) {
return ncr;
} else {
if (IS_UNDEFINED(x)) return ncr;
left = %ToPrimitive(this, NUMBER_HINT);
}
// Default implementation.
var a = %ToPrimitive(this, NUMBER_HINT);
var b = %ToPrimitive(x, NUMBER_HINT);
if (IS_STRING(a) && IS_STRING(b)) {
return %StringCompare(a, b);
var right = %ToPrimitive(x, NUMBER_HINT);
if (IS_STRING(left) && IS_STRING(right)) {
return %_StringCompare(left, right);
} else {
var a_number = %ToNumber(a);
var b_number = %ToNumber(b);
if (NUMBER_IS_NAN(a_number) || NUMBER_IS_NAN(b_number)) return ncr;
return %NumberCompare(a_number, b_number, ncr);
var left_number = %ToNumber(left);
var right_number = %ToNumber(right);
if (NUMBER_IS_NAN(left_number) || NUMBER_IS_NAN(right_number)) return ncr;
return %NumberCompare(left_number, right_number, ncr);
}
}
@ -474,6 +477,17 @@ function TO_STRING() {
}
// Specialized version of String.charAt. It assumes string as
// the receiver type and that the index is a number.
function STRING_CHAR_AT(pos) {
var char_code = %_FastCharCodeAt(this, pos);
if (!%_IsSmi(char_code)) {
return %StringCharAt(this, pos);
}
return %CharFromCode(char_code);
}
/* -------------------------------------
- - - C o n v e r s i o n s - - -
-------------------------------------

2
deps/v8/src/scopes.cc

@ -236,7 +236,7 @@ Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
Variable* Scope::DeclareGlobal(Handle<String> name) {
ASSERT(is_global_scope());
return variables_.Declare(this, name, Variable::DYNAMIC, true,
return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL, true,
Variable::NORMAL);
}

Some files were not shown because too many files changed in this diff
