Upgrade V8 to 2.2.3.1

v0.7.4-release
Ryan Dahl 15 years ago
parent commit 41ef1717e0
  1. deps/v8/ChangeLog (23)
  2. deps/v8/SConstruct (37)
  3. deps/v8/include/v8-profiler.h (176)
  4. deps/v8/include/v8.h (27)
  5. deps/v8/samples/lineprocessor.cc (42)
  6. deps/v8/src/SConscript (10)
  7. deps/v8/src/api.cc (171)
  8. deps/v8/src/arm/codegen-arm-inl.h (8)
  9. deps/v8/src/arm/codegen-arm.cc (1667)
  10. deps/v8/src/arm/codegen-arm.h (98)
  11. deps/v8/src/arm/constants-arm.h (21)
  12. deps/v8/src/arm/debug-arm.cc (15)
  13. deps/v8/src/arm/disasm-arm.cc (61)
  14. deps/v8/src/arm/full-codegen-arm.cc (4)
  15. deps/v8/src/arm/ic-arm.cc (23)
  16. deps/v8/src/arm/macro-assembler-arm.cc (59)
  17. deps/v8/src/arm/macro-assembler-arm.h (25)
  18. deps/v8/src/arm/regexp-macro-assembler-arm.cc (51)
  19. deps/v8/src/arm/regexp-macro-assembler-arm.h (16)
  20. deps/v8/src/arm/register-allocator-arm-inl.h (3)
  21. deps/v8/src/arm/register-allocator-arm.h (3)
  22. deps/v8/src/arm/simulator-arm.cc (143)
  23. deps/v8/src/arm/virtual-frame-arm.cc (350)
  24. deps/v8/src/arm/virtual-frame-arm.h (351)
  25. deps/v8/src/array.js (106)
  26. deps/v8/src/assembler.cc (10)
  27. deps/v8/src/assembler.h (3)
  28. deps/v8/src/ast.cc (134)
  29. deps/v8/src/ast.h (72)
  30. deps/v8/src/bootstrapper.cc (118)
  31. deps/v8/src/builtins.cc (176)
  32. deps/v8/src/builtins.h (4)
  33. deps/v8/src/circular-queue-inl.h (3)
  34. deps/v8/src/circular-queue.cc (6)
  35. deps/v8/src/circular-queue.h (2)
  36. deps/v8/src/code-stubs.cc (2)
  37. deps/v8/src/codegen.cc (3)
  38. deps/v8/src/codegen.h (13)
  39. deps/v8/src/compiler.cc (80)
  40. deps/v8/src/compiler.h (5)
  41. deps/v8/src/contexts.h (2)
  42. deps/v8/src/conversions.cc (532)
  43. deps/v8/src/conversions.h (3)
  44. deps/v8/src/cpu-profiler-inl.h (54)
  45. deps/v8/src/cpu-profiler.cc (297)
  46. deps/v8/src/cpu-profiler.h (152)
  47. deps/v8/src/d8-posix.cc (18)
  48. deps/v8/src/d8.h (1)
  49. deps/v8/src/data-flow.cc (864)
  50. deps/v8/src/data-flow.h (59)
  51. deps/v8/src/date.js (23)
  52. deps/v8/src/debug-debugger.js (48)
  53. deps/v8/src/debug.cc (17)
  54. deps/v8/src/debug.h (11)
  55. deps/v8/src/execution.cc (21)
  56. deps/v8/src/execution.h (20)
  57. deps/v8/src/flag-definitions.h (2)
  58. deps/v8/src/flow-graph.cc (731)
  59. deps/v8/src/flow-graph.h (369)
  60. deps/v8/src/frames.cc (53)
  61. deps/v8/src/frames.h (20)
  62. deps/v8/src/globals.h (7)
  63. deps/v8/src/handles.cc (4)
  64. deps/v8/src/heap-inl.h (28)
  65. deps/v8/src/heap.cc (100)
  66. deps/v8/src/heap.h (54)
  67. deps/v8/src/ia32/assembler-ia32.cc (4)
  68. deps/v8/src/ia32/codegen-ia32.cc (665)
  69. deps/v8/src/ia32/codegen-ia32.h (25)
  70. deps/v8/src/ia32/debug-ia32.cc (50)
  71. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (2)
  72. deps/v8/src/ia32/virtual-frame-ia32.cc (19)
  73. deps/v8/src/ia32/virtual-frame-ia32.h (4)
  74. deps/v8/src/ic.cc (27)
  75. deps/v8/src/jump-target-light.cc (17)
  76. deps/v8/src/liveedit-debugger.js (134)
  77. deps/v8/src/liveedit.cc (367)
  78. deps/v8/src/liveedit.h (21)
  79. deps/v8/src/log-inl.h (97)
  80. deps/v8/src/log.cc (48)
  81. deps/v8/src/log.h (56)
  82. deps/v8/src/mark-compact.cc (409)
  83. deps/v8/src/mark-compact.h (28)
  84. deps/v8/src/math.js (2)
  85. deps/v8/src/messages.js (24)
  86. deps/v8/src/mips/codegen-mips.cc (2)
  87. deps/v8/src/mips/codegen-mips.h (2)
  88. deps/v8/src/mips/debug-mips.cc (16)
  89. deps/v8/src/mirror-debugger.js (26)
  90. deps/v8/src/objects.h (27)
  91. deps/v8/src/platform-freebsd.cc (2)
  92. deps/v8/src/platform-linux.cc (41)
  93. deps/v8/src/platform-macos.cc (27)
  94. deps/v8/src/platform-openbsd.cc (2)
  95. deps/v8/src/platform-solaris.cc (2)
  96. deps/v8/src/platform-win32.cc (33)
  97. deps/v8/src/platform.h (8)
  98. deps/v8/src/profile-generator-inl.h (54)
  99. deps/v8/src/profile-generator.cc (247)
  100. deps/v8/src/profile-generator.h (104)

deps/v8/ChangeLog (23)

@ -1,3 +1,26 @@
2010-04-14: Version 2.2.3
Added stack command and mem command to ARM simulator debugger.
Fixed scons snapshot and ARM build, and Windows X64 build issues.
Performance improvements on all platforms.
2010-04-12: Version 2.2.2
Introduced new profiler API.
Fixed random number generator to produce full 32 random bits.
2010-04-06: Version 2.2.1
Debugger improvements.
Fixed minor bugs.
2010-03-29: Version 2.2.0
Fixed a few minor bugs.

deps/v8/SConstruct (37)

@ -1,4 +1,4 @@
# Copyright 2008 the V8 project authors. All rights reserved.
# Copyright 2010 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@ -52,9 +52,10 @@ else:
GCC_EXTRA_CCFLAGS = []
GCC_DTOA_EXTRA_CCFLAGS = []
ANDROID_FLAGS = ['-march=armv5te',
'-mtune=xscale',
'-msoft-float',
ANDROID_FLAGS = ['-march=armv7-a',
'-mtune=cortex-a8',
'-mfloat-abi=softfp',
'-mfpu=vfp',
'-fpic',
'-mthumb-interwork',
'-funwind-tables',
@ -69,6 +70,8 @@ ANDROID_FLAGS = ['-march=armv5te',
'-fomit-frame-pointer',
'-fno-strict-aliasing',
'-finline-limit=64',
'-DCAN_USE_VFP_INSTRUCTIONS=1',
'-DCAN_USE_ARMV7_INSTRUCTIONS=1',
'-MD']
ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
@ -102,8 +105,17 @@ LIBRARY_FLAGS = {
'mode:debug': {
'CPPDEFINES': ['V8_ENABLE_CHECKS']
},
'vmstate:on': {
'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING'],
},
'protectheap:on': {
'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING', 'ENABLE_HEAP_PROTECTION'],
},
'profilingsupport:on': {
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
'CPPDEFINES': ['ENABLE_VMSTATE_TRACKING', 'ENABLE_LOGGING_AND_PROFILING'],
},
'cppprofilesprocessor:on': {
'CPPDEFINES': ['ENABLE_CPP_PROFILES_PROCESSOR'],
},
'debuggersupport:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
@ -668,11 +680,26 @@ SIMPLE_OPTIONS = {
'default': 'static',
'help': 'the type of library to produce'
},
'vmstate': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable VM state tracking'
},
'protectheap': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable heap protection'
},
'profilingsupport': {
'values': ['on', 'off'],
'default': 'on',
'help': 'enable profiling of JavaScript code'
},
'cppprofilesprocessor': {
'values': ['on', 'off'],
'default': 'on',
'help': 'enable C++ profiles processor'
},
'debuggersupport': {
'values': ['on', 'off'],
'default': 'on',
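
The new options act on the build only through the CPPDEFINES they add. A hedged sketch of how such a define gates code at compile time; the function below is hypothetical, only the define name comes from the hunk above:

#include <cstdio>

void MaybeRecordTick() {
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
  std::printf("tick goes to the C++ profiles processor\n");
#else
  // Compiled out; the option defaults to 'on' above, so this branch
  // is reached only when scons is run with cppprofilesprocessor=off.
#endif
}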

deps/v8/include/v8-profiler.h (176)

@ -0,0 +1,176 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_V8_PROFILER_H_
#define V8_V8_PROFILER_H_
#include "v8.h"
#ifdef _WIN32
// Setup for Windows DLL export/import. See v8.h in this directory for
// information on how to build/use V8 as a DLL.
#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
build configuration to ensure that at most one of these is set
#endif
#ifdef BUILDING_V8_SHARED
#define V8EXPORT __declspec(dllexport)
#elif USING_V8_SHARED
#define V8EXPORT __declspec(dllimport)
#else
#define V8EXPORT
#endif
#else // _WIN32
// Setup for Linux shared library export. See v8.h in this directory for
// information on how to build/use V8 as shared library.
#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
#define V8EXPORT __attribute__ ((visibility("default")))
#else // defined(__GNUC__) && (__GNUC__ >= 4)
#define V8EXPORT
#endif // defined(__GNUC__) && (__GNUC__ >= 4)
#endif // _WIN32
/**
* Profiler support for the V8 JavaScript engine.
*/
namespace v8 {
/**
* CpuProfileNode represents a node in a call graph.
*/
class V8EXPORT CpuProfileNode {
public:
/** Returns function name (empty string for anonymous functions.) */
Handle<String> GetFunctionName() const;
/** Returns resource name for script from where the function originates. */
Handle<String> GetScriptResourceName() const;
/**
* Returns the number, 1-based, of the line where the function originates.
* kNoLineNumberInfo if no line number information is available.
*/
int GetLineNumber() const;
/**
* Returns total (self + children) execution time of the function,
* in milliseconds, estimated by samples count.
*/
double GetTotalTime() const;
/**
* Returns self execution time of the function, in milliseconds,
* estimated by samples count.
*/
double GetSelfTime() const;
/** Returns the count of samples where function exists. */
double GetTotalSamplesCount() const;
/** Returns the count of samples where function was currently executing. */
double GetSelfSamplesCount() const;
/** Returns function entry UID. */
unsigned GetCallUid() const;
/** Returns child nodes count of the node. */
int GetChildrenCount() const;
/** Retrieves a child node by index. */
const CpuProfileNode* GetChild(int index) const;
static const int kNoLineNumberInfo = 0;
};
/**
* CpuProfile contains a CPU profile in a form of two call trees:
* - top-down (from main() down to functions that do all the work);
* - bottom-up call graph (in backward direction).
*/
class V8EXPORT CpuProfile {
public:
/** Returns CPU profile UID (assigned by the profiler.) */
unsigned GetUid() const;
/** Returns CPU profile title. */
Handle<String> GetTitle() const;
/** Returns the root node of the bottom up call tree. */
const CpuProfileNode* GetBottomUpRoot() const;
/** Returns the root node of the top down call tree. */
const CpuProfileNode* GetTopDownRoot() const;
};
/**
* Interface for controlling CPU profiling.
*/
class V8EXPORT CpuProfiler {
public:
/**
* Returns the number of profiles collected (doesn't include
* profiles that are being collected at the moment of call.)
*/
static int GetProfilesCount();
/** Returns a profile by index. */
static const CpuProfile* GetProfile(int index);
/** Returns a profile by uid. */
static const CpuProfile* FindProfile(unsigned uid);
/**
* Starts collecting CPU profile. Title may be an empty string. It
* is allowed to have several profiles being collected at
* once. Attempts to start collecting several profiles with the same
* title are silently ignored.
*/
static void StartProfiling(Handle<String> title);
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
static const CpuProfile* StopProfiling(Handle<String> title);
};
} // namespace v8
#undef V8EXPORT
#endif // V8_V8_PROFILER_H_
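
The header above only declares the new API. A minimal usage sketch, assuming a script already compiled in the current context, with error handling omitted; the helper names are illustrative:

#include <v8.h>
#include <v8-profiler.h>
#include <cstdio>

// Walk the top-down call tree, printing each node's self time.
static void PrintTree(const v8::CpuProfileNode* node, int depth) {
  v8::String::AsciiValue name(node->GetFunctionName());
  std::printf("%*s%s  self=%.1fms\n", depth * 2, "", *name,
              node->GetSelfTime());
  for (int i = 0; i < node->GetChildrenCount(); i++)
    PrintTree(node->GetChild(i), depth + 1);
}

void ProfileRun(v8::Handle<v8::Script> script) {
  v8::HandleScope scope;
  v8::Handle<v8::String> title = v8::String::New("run");
  v8::CpuProfiler::StartProfiling(title);
  script->Run();  // samples are collected while this executes
  const v8::CpuProfile* profile = v8::CpuProfiler::StopProfiling(title);
  PrintTree(profile->GetTopDownRoot(), 0);
}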

deps/v8/include/v8.h (27)

@ -855,22 +855,27 @@ class V8EXPORT String : public Primitive {
* \param start The starting position within the string at which
* copying begins.
* \param length The number of bytes to copy from the string.
* \param nchars The number of characters written.
* \param nchars_ref The number of characters written, can be NULL.
* \return The number of bytes copied to the buffer
* excluding the NULL terminator.
*/
int Write(uint16_t* buffer, int start = 0, int length = -1) const; // UTF-16
int WriteAscii(char* buffer, int start = 0, int length = -1) const; // ASCII
enum WriteHints {
NO_HINTS = 0,
HINT_MANY_WRITES_EXPECTED = 1
};
int Write(uint16_t* buffer,
int start = 0,
int length = -1,
WriteHints hints = NO_HINTS) const; // UTF-16
int WriteAscii(char* buffer,
int start = 0,
int length = -1,
WriteHints hints = NO_HINTS) const; // ASCII
int WriteUtf8(char* buffer,
int length = -1,
int* nchars = NULL) const; // UTF-8
/**
* Flatten internal memory. Operations on the string tend to run faster
* after flattening especially if the string is a concatenation of many
* others.
*/
void Flatten();
int* nchars_ref = NULL,
WriteHints hints = NO_HINTS) const; // UTF-8
/**
* A zero length string.
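
A short sketch of the new hint parameter in action (the wrapper function and buffer handling are illustrative, not from the commit): HINT_MANY_WRITES_EXPECTED asks V8 to flatten a cons string once so repeated writes do not re-walk the tree.

#include <v8.h>

int DumpUtf8(v8::Handle<v8::String> str, char* buf, int capacity) {
  int nchars = 0;  // receives the number of characters written
  // Returns the number of bytes copied, excluding the NUL terminator.
  return str->WriteUtf8(buf, capacity, &nchars,
                        v8::String::HINT_MANY_WRITES_EXPECTED);
}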

deps/v8/samples/lineprocessor.cc (42)

@ -25,8 +25,20 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This controls whether this sample is compiled with debugger support.
// You may trace its usages in source text to see what parts of program
// are responsible for debugging support.
// Note that V8 itself should be compiled with enabled debugger support
// to have it all working.
#define SUPPORT_DEBUGGING
#include <v8.h>
#ifdef SUPPORT_DEBUGGING
#include <v8-debug.h>
#endif
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
@ -103,8 +115,9 @@ v8::Handle<v8::Value> ReadLine(const v8::Arguments& args);
bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
bool report_exceptions);
v8::Persistent<v8::Context> debug_message_context;
#ifdef SUPPORT_DEBUGGING
v8::Persistent<v8::Context> debug_message_context;
void DispatchDebugMessages() {
// We are in some random thread. We should already have v8::Locker acquired
@ -122,6 +135,7 @@ void DispatchDebugMessages() {
v8::Debug::ProcessDebugMessages();
}
#endif
int RunMain(int argc, char* argv[]) {
@ -132,9 +146,12 @@ int RunMain(int argc, char* argv[]) {
v8::Handle<v8::Value> script_name(NULL);
int script_param_counter = 0;
#ifdef SUPPORT_DEBUGGING
int port_number = -1;
bool wait_for_connection = false;
bool support_callback = false;
#endif
MainCycleType cycle_type = CycleInCpp;
for (int i = 1; i < argc; i++) {
@ -143,17 +160,19 @@ int RunMain(int argc, char* argv[]) {
// Ignore any -f flags for compatibility with the other stand-
// alone JavaScript engines.
continue;
} else if (strcmp(str, "--callback") == 0) {
support_callback = true;
} else if (strcmp(str, "--wait-for-connection") == 0) {
wait_for_connection = true;
} else if (strcmp(str, "--main-cycle-in-cpp") == 0) {
cycle_type = CycleInCpp;
} else if (strcmp(str, "--main-cycle-in-js") == 0) {
cycle_type = CycleInJs;
#ifdef SUPPORT_DEBUGGING
} else if (strcmp(str, "--callback") == 0) {
support_callback = true;
} else if (strcmp(str, "--wait-for-connection") == 0) {
wait_for_connection = true;
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
port_number = atoi(argv[i + 1]); // NOLINT
i++;
#endif
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
@ -197,12 +216,12 @@ int RunMain(int argc, char* argv[]) {
// Create a new execution environment containing the built-in
// functions
v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
debug_message_context = v8::Persistent<v8::Context>::New(context);
// Enter the newly created execution environment.
v8::Context::Scope context_scope(context);
#ifdef SUPPORT_DEBUGGING
debug_message_context = v8::Persistent<v8::Context>::New(context);
v8::Locker locker;
if (support_callback) {
@ -210,10 +229,9 @@ int RunMain(int argc, char* argv[]) {
}
if (port_number != -1) {
const char* auto_break_param = "--debugger_auto_break";
v8::V8::SetFlagsFromString(auto_break_param, strlen(auto_break_param));
v8::Debug::EnableAgent("lineprocessor", port_number, wait_for_connection);
}
#endif
bool report_exceptions = true;
@ -254,7 +272,9 @@ int RunMain(int argc, char* argv[]) {
bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
bool report_exceptions) {
#ifdef SUPPORT_DEBUGGING
v8::Locker lock;
#endif
v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
v8::Handle<v8::Value> process_val =
@ -407,7 +427,9 @@ v8::Handle<v8::String> ReadLine() {
char* res;
{
#ifdef SUPPORT_DEBUGGING
v8::Unlocker unlocker;
#endif
res = fgets(buffer, kBufferSize, stdin);
}
if (res == NULL) {

deps/v8/src/SConscript (10)

@ -111,6 +111,7 @@ SOURCES = {
variables.cc
version.cc
virtual-frame.cc
vm-state.cc
zone.cc
"""),
'arch:arm': Split("""
@ -305,7 +306,12 @@ def ConfigureObjectFiles():
source_objs = context.ConfigureObject(env, source_files)
non_snapshot_files = [dtoa_obj, source_objs]
# Create snapshot if necessary.
# Create snapshot if necessary. For cross compilation you should either
# do without snapshots and take the performance hit or you should build a
# host VM with the simulator=arm and snapshot=on options and then take the
# resulting snapshot.cc file from obj/release and put it in the src
# directory. Then rebuild the VM with the cross compiler and specify
# snapshot=nobuild on the scons command line.
empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc')
mksnapshot_env = env.Copy()
mksnapshot_env.Replace(**context.flags['mksnapshot'])
@ -315,7 +321,7 @@ def ConfigureObjectFiles():
if context.build_snapshot:
snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
else:
snapshot_cc = Command('snapshot.cc', [], [])
snapshot_cc = 'snapshot.cc'
snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
else:
snapshot_obj = empty_snapshot_obj

deps/v8/src/api.cc (171)

@ -36,6 +36,7 @@
#include "global-handles.h"
#include "messages.h"
#include "platform.h"
#include "profile-generator-inl.h"
#include "serialize.h"
#include "snapshot.h"
#include "top.h"
@ -43,6 +44,7 @@
#include "v8threads.h"
#include "version.h"
#include "../include/v8-profiler.h"
#define LOG_API(expr) LOG(ApiEntryCall(expr))
@ -2639,12 +2641,20 @@ int String::Utf8Length() const {
}
int String::WriteUtf8(char* buffer, int capacity, int *ncharsRef) const {
int String::WriteUtf8(char* buffer,
int capacity,
int* nchars_ref,
WriteHints hints) const {
if (IsDeadCheck("v8::String::WriteUtf8()")) return 0;
LOG_API("String::WriteUtf8");
ENTER_V8;
i::Handle<i::String> str = Utils::OpenHandle(this);
StringTracker::RecordWrite(str);
if (hints & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlatten();
}
write_input_buffer.Reset(0, *str);
int len = str->length();
// Encode the first K - 3 bytes directly into the buffer since we
@ -2679,23 +2689,28 @@ int String::WriteUtf8(char* buffer, int capacity, int *ncharsRef) const {
}
}
}
if (ncharsRef) *ncharsRef = nchars;
if (nchars_ref != NULL) *nchars_ref = nchars;
if (i == len && (capacity == -1 || pos < capacity))
buffer[pos++] = '\0';
return pos;
}
int String::WriteAscii(char* buffer, int start, int length) const {
int String::WriteAscii(char* buffer,
int start,
int length,
WriteHints hints) const {
if (IsDeadCheck("v8::String::WriteAscii()")) return 0;
LOG_API("String::WriteAscii");
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
StringTracker::RecordWrite(str);
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlatten();
if (hints & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlatten();
}
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
@ -2713,13 +2728,21 @@ int String::WriteAscii(char* buffer, int start, int length) const {
}
int String::Write(uint16_t* buffer, int start, int length) const {
int String::Write(uint16_t* buffer,
int start,
int length,
WriteHints hints) const {
if (IsDeadCheck("v8::String::Write()")) return 0;
LOG_API("String::Write");
ENTER_V8;
ASSERT(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(this);
StringTracker::RecordWrite(str);
if (hints & HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
// using StringInputBuffer or Get(i) to access the characters.
str->TryFlatten();
}
int end = length;
if ( (length == -1) || (length > str->length() - start) )
end = str->length() - start;
@ -2731,13 +2754,6 @@ int String::Write(uint16_t* buffer, int start, int length) const {
}
void v8::String::Flatten() {
EnsureInitialized("v8::String::Flatten()");
i::Handle<i::String> str = Utils::OpenHandle(this);
i::FlattenString(str);
}
bool v8::String::IsExternal() const {
EnsureInitialized("v8::String::IsExternal()");
i::Handle<i::String> str = Utils::OpenHandle(this);
@ -2866,6 +2882,7 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
void v8::Object::SetPointerInInternalField(int index, void* value) {
ENTER_V8;
i::Object* as_object = reinterpret_cast<i::Object*>(value);
if (as_object->IsSmi()) {
Utils::OpenHandle(this)->SetInternalField(index, as_object);
@ -3430,6 +3447,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
}
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
EXCEPTION_PREAMBLE();
ENTER_V8;
i::Handle<i::JSObject> result = i::Copy(paragon_handle);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(Local<Object>());
@ -4008,6 +4026,131 @@ Local<Context> Debug::GetDebugContext() {
#endif // ENABLE_DEBUGGER_SUPPORT
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
Handle<String> CpuProfileNode::GetFunctionName() const {
IsDeadCheck("v8::CpuProfileNode::GetFunctionName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
const i::CodeEntry* entry = node->entry();
if (!entry->has_name_prefix()) {
return Handle<String>(ToApi<String>(
i::Factory::LookupAsciiSymbol(entry->name())));
} else {
return Handle<String>(ToApi<String>(i::Factory::NewConsString(
i::Factory::LookupAsciiSymbol(entry->name_prefix()),
i::Factory::LookupAsciiSymbol(entry->name()))));
}
}
Handle<String> CpuProfileNode::GetScriptResourceName() const {
IsDeadCheck("v8::CpuProfileNode::GetScriptResourceName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
node->entry()->resource_name())));
}
int CpuProfileNode::GetLineNumber() const {
IsDeadCheck("v8::CpuProfileNode::GetLineNumber");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number();
}
double CpuProfileNode::GetTotalSamplesCount() const {
IsDeadCheck("v8::CpuProfileNode::GetTotalSamplesCount");
return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks();
}
double CpuProfileNode::GetSelfSamplesCount() const {
IsDeadCheck("v8::CpuProfileNode::GetSelfSamplesCount");
return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
}
unsigned CpuProfileNode::GetCallUid() const {
IsDeadCheck("v8::CpuProfileNode::GetCallUid");
return reinterpret_cast<const i::ProfileNode*>(this)->entry()->call_uid();
}
int CpuProfileNode::GetChildrenCount() const {
IsDeadCheck("v8::CpuProfileNode::GetChildrenCount");
return reinterpret_cast<const i::ProfileNode*>(this)->children()->length();
}
const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
IsDeadCheck("v8::CpuProfileNode::GetChild");
const i::ProfileNode* child =
reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
return reinterpret_cast<const CpuProfileNode*>(child);
}
unsigned CpuProfile::GetUid() const {
IsDeadCheck("v8::CpuProfile::GetUid");
return reinterpret_cast<const i::CpuProfile*>(this)->uid();
}
Handle<String> CpuProfile::GetTitle() const {
IsDeadCheck("v8::CpuProfile::GetTitle");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
profile->title())));
}
const CpuProfileNode* CpuProfile::GetBottomUpRoot() const {
IsDeadCheck("v8::CpuProfile::GetBottomUpRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root());
}
const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
IsDeadCheck("v8::CpuProfile::GetTopDownRoot");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
}
int CpuProfiler::GetProfilesCount() {
IsDeadCheck("v8::CpuProfiler::GetProfilesCount");
return i::CpuProfiler::GetProfilesCount();
}
const CpuProfile* CpuProfiler::GetProfile(int index) {
IsDeadCheck("v8::CpuProfiler::GetProfile");
return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::GetProfile(index));
}
const CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
IsDeadCheck("v8::CpuProfiler::FindProfile");
return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::FindProfile(uid));
}
void CpuProfiler::StartProfiling(Handle<String> title) {
IsDeadCheck("v8::CpuProfiler::StartProfiling");
i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title));
}
const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
IsDeadCheck("v8::CpuProfiler::StopProfiling");
return reinterpret_cast<const CpuProfile*>(
i::CpuProfiler::StopProfiling(*Utils::OpenHandle(*title)));
}
#endif // ENABLE_CPP_PROFILES_PROCESSOR
namespace internal {

deps/v8/src/arm/codegen-arm-inl.h (8)

@ -29,6 +29,8 @@
#ifndef V8_ARM_CODEGEN_ARM_INL_H_
#define V8_ARM_CODEGEN_ARM_INL_H_
#include "virtual-frame-arm.h"
namespace v8 {
namespace internal {
@ -43,6 +45,7 @@ void CodeGenerator::LoadConditionAndSpill(Expression* expression,
void CodeGenerator::LoadAndSpill(Expression* expression) {
ASSERT(VirtualFrame::SpilledScope::is_spilled());
Load(expression);
}
@ -57,11 +60,6 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
}
void Reference::GetValueAndSpill() {
GetValue();
}
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }

deps/v8/src/arm/codegen-arm.cc (1667)

File diff suppressed because it is too large

deps/v8/src/arm/codegen-arm.h (98)

@ -28,6 +28,8 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
#include "ic-inl.h"
namespace v8 {
namespace internal {
@ -90,10 +92,6 @@ class Reference BASE_EMBEDDED {
// If the reference is not consumed, it is left in place under its value.
void GetValue();
// Generate code to pop a reference, push the value of the reference,
// and then spill the stack frame.
inline void GetValueAndSpill();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The value is stored in the location specified
@ -312,6 +310,9 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int known_rhs = kUnknownIntValue);
void VirtualFrameBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int known_rhs = kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
@ -322,6 +323,11 @@ class CodeGenerator: public AstVisitor {
bool reversed,
OverwriteMode mode);
void VirtualFrameSmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
OverwriteMode mode);
void CallWithArguments(ZoneList<Expression*>* arguments,
CallFunctionFlags flags,
int position);
@ -387,7 +393,7 @@ class CodeGenerator: public AstVisitor {
void GenerateLog(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
@ -401,9 +407,14 @@ class CodeGenerator: public AstVisitor {
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
// Fast call to math functions.
void GenerateMathPow(ZoneList<Expression*>* args);
void GenerateMathSin(ZoneList<Expression*>* args);
@ -470,37 +481,68 @@ class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
Register lhs,
Register rhs,
int constant_rhs = CodeGenerator::kUnknownIntValue)
: op_(op),
mode_(mode),
lhs_(lhs),
rhs_(rhs),
constant_rhs_(constant_rhs),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
runtime_operands_type_(BinaryOpIC::DEFAULT),
name_(NULL) { }
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
lhs_(LhsRegister(RegisterBits::decode(key))),
rhs_(RhsRegister(RegisterBits::decode(key))),
constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
runtime_operands_type_(type_info),
name_(NULL) { }
private:
Token::Value op_;
OverwriteMode mode_;
Register lhs_;
Register rhs_;
int constant_rhs_;
bool specialized_on_rhs_;
BinaryOpIC::TypeInfo runtime_operands_type_;
char* name_;
static const int kMaxKnownRhs = 0x40000000;
static const int kKnownRhsKeyBits = 6;
// Minor key encoding in 16 bits.
// Minor key encoding in 17 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 6> {};
class KnownIntBits: public BitField<int, 8, 8> {};
class TypeInfoBits: public BitField<int, 8, 2> {};
class RegisterBits: public BitField<bool, 10, 1> {};
class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
(lhs_.is(r1) && rhs_.is(r0)));
// Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| KnownIntBits::encode(MinorKeyForKnownInt());
| KnownIntBits::encode(MinorKeyForKnownInt())
| TypeInfoBits::encode(runtime_operands_type_)
| RegisterBits::encode(lhs_.is(r0));
}
void Generate(MacroAssembler* masm);
void HandleNonSmiBitwiseOp(MacroAssembler* masm);
void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
Register rhs,
const Builtins::JavaScript& builtin);
void GenerateTypeTransition(MacroAssembler* masm);
static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
@ -524,9 +566,45 @@ class GenericBinaryOpStub : public CodeStub {
key++;
d >>= 1;
}
ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
return key;
}
int KnownBitsForMinorKey(int key) {
if (!key) return 0;
if (key <= 11) return key - 1;
int d = 1;
while (key != 12) {
key--;
d <<= 1;
}
return d;
}
Register LhsRegister(bool lhs_is_r0) {
return lhs_is_r0 ? r0 : r1;
}
Register RhsRegister(bool lhs_is_r0) {
return lhs_is_r0 ? r1 : r0;
}
bool ShouldGenerateSmiCode() {
return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS;
}
bool ShouldGenerateFPCode() {
return runtime_operands_type_ != BinaryOpIC::STRINGS;
}
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(runtime_operands_type_);
}
const char* GetName();
#ifdef DEBUG
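
The hunk shows the decoder KnownBitsForMinorKey in full but cuts off the head of the matching encoder, so the round-trip check below reconstructs MinorKeyForKnownInt as an assumption: constants 0..10 map to keys 1..11, and larger powers of two map to keys 12 and up by shift count.

#include <cassert>

static const int kKnownRhsKeyBits = 6;

// Decoder, as in the diff above.
int KnownBitsForMinorKey(int key) {
  if (!key) return 0;
  if (key <= 11) return key - 1;
  int d = 1;
  while (key != 12) { key--; d <<= 1; }
  return d;
}

// Encoder head reconstructed (assumption); the loop tail matches the hunk.
int MinorKeyForKnownInt(int value) {
  if (value <= 10) return value + 1;
  // Values above 10 must be powers of two to be encodable.
  int key = 12;
  int d = value;
  while ((d & 1) == 0) { key++; d >>= 1; }
  assert(key >= 0 && key < (1 << kKnownRhsKeyBits));
  return key;
}

int main() {
  for (int v = 0; v <= 10; v++)
    assert(KnownBitsForMinorKey(MinorKeyForKnownInt(v)) == v);
  for (int v = 16; v <= (1 << 20); v <<= 1)
    assert(KnownBitsForMinorKey(MinorKeyForKnownInt(v)) == v);
  return 0;
}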

deps/v8/src/arm/constants-arm.h (21)

@ -151,24 +151,19 @@ enum Opcode {
};
// Some special instructions encoded as a TEQ with S=0 (bit 20).
enum Opcode9Bits {
// The bits for bit 7-4 for some type 0 miscellaneous instructions.
enum MiscInstructionsBits74 {
// With bits 22-21 01.
BX = 1,
BXJ = 2,
BLX = 3,
BKPT = 7
};
BKPT = 7,
// Some special instructions encoded as a CMN with S=0 (bit 20).
enum Opcode11Bits {
// With bits 22-21 11.
CLZ = 1
};
// S
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum Shift {
no_shift = -1,
@ -310,6 +305,12 @@ class Instr {
// as well as multiplications).
inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
// Test for miscellaneous instructions encodings of type 0 instructions.
inline bool IsMiscType0() const { return (Bit(24) == 1)
&& (Bit(23) == 0)
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
// Special accessors that test for existence of a value.
inline bool HasS() const { return SField() == 1; }
inline bool HasB() const { return BField() == 1; }
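
A standalone check of the new IsMiscType0 predicate over a raw instruction word; the sample encodings are ordinary ARM instructions, chosen here only for illustration:

#include <cassert>
#include <cstdint>

static bool Bit(uint32_t instr, int n) { return ((instr >> n) & 1) != 0; }

// Same test as Instr::IsMiscType0 above: bit 24 set, bits 23, 20 and 7 clear.
static bool IsMiscType0(uint32_t instr) {
  return Bit(instr, 24) && !Bit(instr, 23) && !Bit(instr, 20) && !Bit(instr, 7);
}

int main() {
  uint32_t bx_lr = 0xE12FFF1Eu;   // bx lr: in the miscellaneous group
  assert(IsMiscType0(bx_lr));
  uint32_t add_rr = 0xE0800001u;  // add r0, r0, r1: bit 24 clear
  assert(!IsMiscType0(add_rr));
  return 0;
}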

deps/v8/src/arm/debug-arm.cc (15)

@ -216,8 +216,23 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on arm");
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on arm");
}
#undef __
void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code) {
UNREACHABLE();
}
const int Debug::kFrameDropperFrameSize = -1;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal

deps/v8/src/arm/disasm-arm.cc (61)

@ -449,6 +449,14 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->ShiftAmountField());
return 8;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
(instr->Bits(19, 8) << 4) +
instr->Bits(3, 0));
return 15;
}
// 'off8: 8-bit offset for extra load and store instructions
ASSERT(STRING_STARTS_WITH(format, "off8"));
@ -650,6 +658,34 @@ void Decoder::DecodeType01(Instr* instr) {
}
return;
}
} else if ((type == 0) && instr->IsMiscType0()) {
if (instr->Bits(22, 21) == 1) {
switch (instr->Bits(7, 4)) {
case BX:
Format(instr, "bx'cond 'rm");
break;
case BLX:
Format(instr, "blx'cond 'rm");
break;
case BKPT:
Format(instr, "bkpt 'off0to3and8to19");
break;
default:
Unknown(instr); // not used by V8
break;
}
} else if (instr->Bits(22, 21) == 3) {
switch (instr->Bits(7, 4)) {
case CLZ:
Format(instr, "clz'cond 'rd, 'rm");
break;
default:
Unknown(instr); // not used by V8
break;
}
} else {
Unknown(instr); // not used by V8
}
} else {
switch (instr->OpcodeField()) {
case AND: {
@ -696,17 +732,9 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "teq'cond 'rn, 'shift_op");
} else {
switch (instr->Bits(7, 4)) {
case BX:
Format(instr, "bx'cond 'rm");
break;
case BLX:
Format(instr, "blx'cond 'rm");
break;
default:
Unknown(instr); // not used by V8
break;
}
// Other instructions matching this pattern are handled in the
// miscellaneous instructions part above.
UNREACHABLE();
}
break;
}
@ -722,14 +750,9 @@ void Decoder::DecodeType01(Instr* instr) {
if (instr->HasS()) {
Format(instr, "cmn'cond 'rn, 'shift_op");
} else {
switch (instr->Bits(7, 4)) {
case CLZ:
Format(instr, "clz'cond 'rd, 'rm");
break;
default:
Unknown(instr); // not used by V8
break;
}
// Other instructions matching this pattern are handled in the
// miscellaneous instructions part above.
UNREACHABLE();
}
break;
}
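
The 'off0to3and8to19 format directive above splices the 16-bit BKPT comment immediate out of two instruction fields. A small check of that bit layout (the concrete encoding is chosen for illustration):

#include <cassert>
#include <cstdint>

static uint32_t Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
}

// Reassemble the immediate from bits 19..8 and 3..0, as the decoder does.
static uint32_t BkptImmediate(uint32_t instr) {
  return (Bits(instr, 19, 8) << 4) + Bits(instr, 3, 0);
}

int main() {
  // bkpt 0xABCD: 0xABC sits in bits 19..8 and 0xD in bits 3..0.
  assert(BkptImmediate(0xE12ABC7Du) == 0xABCD);
  return 0;
}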

deps/v8/src/arm/full-codegen-arm.cc (4)

@ -1013,7 +1013,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
Expression::Context context) {
__ pop(r1);
GenericBinaryOpStub stub(op, NO_OVERWRITE);
GenericBinaryOpStub stub(op, NO_OVERWRITE, r1, r0);
__ CallStub(&stub);
Apply(context, r0);
}
@ -1609,7 +1609,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(r1, Operand(expr->op() == Token::INC
? Smi::FromInt(1)
: Smi::FromInt(-1)));
GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
__ CallStub(&stub);
__ bind(&done);

deps/v8/src/arm/ic-arm.cc (23)

@ -706,6 +706,29 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- sp[4] : receiver
// -----------------------------------
Label miss, index_ok;
// Get the key and receiver object from the stack.
__ ldm(ia, sp, r0.bit() | r1.bit());
// Check that the receiver isn't a smi.
__ BranchOnSmi(r1, &miss);
// Check that the receiver is a string.
Condition is_string = masm->IsObjectStringType(r1, r2);
__ b(NegateCondition(is_string), &miss);
// Check if key is a smi or a heap number.
__ BranchOnSmi(r0, &index_ok);
__ CheckMap(r0, r2, Factory::heap_number_map(), &miss, false);
__ bind(&index_ok);
// Duplicate receiver and key since they are expected on the stack after
// the KeyedLoadIC call.
__ stm(db_w, sp, r0.bit() | r1.bit());
__ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_JS);
__ bind(&miss);
GenerateGeneric(masm);
}

deps/v8/src/arm/macro-assembler-arm.cc (59)

@ -180,6 +180,19 @@ void MacroAssembler::Drop(int count, Condition cond) {
}
void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
if (scratch.is(no_reg)) {
eor(reg1, reg1, Operand(reg2));
eor(reg2, reg2, Operand(reg1));
eor(reg1, reg1, Operand(reg2));
} else {
mov(scratch, reg1);
mov(reg1, reg2);
mov(reg2, scratch);
}
}
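
The scratch-free branch of Swap is the classic three-XOR exchange. A host-side illustration (not generated code, and only valid for two distinct locations, which always holds for two different machine registers):

#include <cassert>
#include <cstdint>

static void XorSwap(uint32_t& a, uint32_t& b) {
  a ^= b;  // a now holds a ^ b
  b ^= a;  // b now holds the original a
  a ^= b;  // a now holds the original b
}

int main() {
  uint32_t r0 = 0xDEADBEEFu, r1 = 0x12345678u;
  XorSwap(r0, r1);
  assert(r0 == 0x12345678u && r1 == 0xDEADBEEFu);
  return 0;
}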
void MacroAssembler::Call(Label* target) {
bl(target);
}
@ -190,6 +203,13 @@ void MacroAssembler::Move(Register dst, Handle<Object> value) {
}
void MacroAssembler::Move(Register dst, Register src) {
if (!dst.is(src)) {
mov(dst, src);
}
}
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
@ -1537,6 +1557,45 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
}
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frameAlignment = OS::ActivationFrameAlignment();
// Up to four simple arguments are passed in registers r0..r3.
int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
if (frameAlignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
mov(scratch, sp);
sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
ASSERT(IsPowerOf2(frameAlignment));
and_(sp, sp, Operand(-frameAlignment));
str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
mov(ip, Operand(function));
CallCFunction(ip, num_arguments);
}
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
Call(function);
int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
if (OS::ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
}
}
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),

deps/v8/src/arm/macro-assembler-arm.h (25)

@ -70,8 +70,15 @@ class MacroAssembler: public Assembler {
// from the stack, clobbering only the sp register.
void Drop(int count, Condition cond = al);
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// May do nothing if the registers are identical.
void Move(Register dst, Register src);
// Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets);
// Load an object from the root table.
@ -366,6 +373,24 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
// are word sized.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_arguments, Register scratch);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
// garbage collection, since that might move the code and invalidate the
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
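
The PrepareCallCFunction contract above boils down to a little pointer arithmetic. A host-side sketch of the same computation for the aligned case (kPointerSize fixed at 4 for ARM32; the function name is hypothetical):

#include <cassert>
#include <cstdint>

// Where sp ends up: the first four args ride in r0..r3, the rest occupy
// sp[0], sp[4], ..., and one extra word holds the saved original sp.
static uintptr_t AlignedSpFor(uintptr_t sp, int num_arguments,
                              int frame_alignment) {
  const int kPointerSize = 4;  // ARM32
  int stack_passed = (num_arguments <= 4) ? 0 : num_arguments - 4;
  sp -= (stack_passed + 1) * kPointerSize;
  assert((frame_alignment & (frame_alignment - 1)) == 0);  // power of two
  return sp & ~static_cast<uintptr_t>(frame_alignment - 1);
}

int main() {
  // Six arguments: two spill to the stack, plus the saved-sp word,
  // then round down to 8-byte alignment.
  assert(AlignedSpFor(0x1000, 6, 8) == 0xFF0);
  return 0;
}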

deps/v8/src/arm/regexp-macro-assembler-arm.cc (51)

@ -163,7 +163,7 @@ void RegExpMacroAssemblerARM::Backtrack() {
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(r0);
__ add(pc, r0, Operand(r5));
__ add(pc, r0, Operand(code_pointer()));
}
@ -338,7 +338,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
} else {
ASSERT(mode_ == UC16);
int argument_count = 3;
FrameAlign(argument_count, r2);
__ PrepareCallCFunction(argument_count, r2);
// r0 - offset of start of capture
// r1 - length of capture
@ -360,7 +360,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
ExternalReference function =
ExternalReference::re_case_insensitive_compare_uc16();
CallCFunction(function, argument_count);
__ CallCFunction(function, argument_count);
// Check if function returned non-zero for success or zero for failure.
__ cmp(r0, Operand(0));
@ -770,12 +770,12 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer())
static const int num_arguments = 2;
FrameAlign(num_arguments, r0);
__ PrepareCallCFunction(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());
__ add(r1, frame_pointer(), Operand(kStackHighEnd));
ExternalReference grow_stack =
ExternalReference::re_grow_stack();
CallCFunction(grow_stack, num_arguments);
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ cmp(r0, Operand(0));
@ -800,7 +800,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
LOG(RegExpCodeCreateEvent(*code, *source));
PROFILE(RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
}
@ -971,7 +971,7 @@ void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
static const int num_arguments = 3;
FrameAlign(num_arguments, scratch);
__ PrepareCallCFunction(num_arguments, scratch);
// RegExp code frame pointer.
__ mov(r2, frame_pointer());
// Code* of self.
@ -1183,47 +1183,12 @@ int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
}
void RegExpMacroAssemblerARM::FrameAlign(int num_arguments, Register scratch) {
int frameAlignment = OS::ActivationFrameAlignment();
// Up to four simple arguments are passed in registers r0..r3.
int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
if (frameAlignment != 0) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
__ mov(scratch, sp);
__ sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
ASSERT(IsPowerOf2(frameAlignment));
__ and_(sp, sp, Operand(-frameAlignment));
__ str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
__ sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
}
void RegExpMacroAssemblerARM::CallCFunction(ExternalReference function,
int num_arguments) {
__ mov(r5, Operand(function));
// Just call directly. The function called cannot cause a GC, or
// allow preemption, so the return address in the link register
// stays correct.
__ Call(r5);
int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
if (OS::ActivationFrameAlignment() > kIntSize) {
__ ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
__ add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
}
__ mov(code_pointer(), Operand(masm_->CodeObject()));
}
void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
ExternalReference function,
int num_arguments) {
// Must pass all arguments in registers. The stub pushes on the stack.
ASSERT(num_arguments <= 4);
__ mov(r5, Operand(function));
__ mov(code_pointer(), Operand(function));
RegExpCEntryStub stub;
__ CallStub(&stub);
if (OS::ActivationFrameAlignment() != 0) {

deps/v8/src/arm/regexp-macro-assembler-arm.h (16)

@ -206,22 +206,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// and increments it by a word size.
inline void Pop(Register target);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
// are word sized.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
inline void FrameAlign(int num_arguments, Register scratch);
// Calls a C function and cleans up the space for arguments allocated
// by FrameAlign. The called function is not allowed to trigger a garbage
// collection.
inline void CallCFunction(ExternalReference function,
int num_arguments);
// Calls a C function and cleans up the frame alignment done by
// by FrameAlign. The called function *is* allowed to trigger a garbage
// collection, but may not take more than four arguments (no arguments

deps/v8/src/arm/register-allocator-arm-inl.h (3)

@ -92,9 +92,6 @@ Register RegisterAllocator::ToRegister(int num) {
void RegisterAllocator::Initialize() {
Reset();
// The non-reserved r1 and lr registers are live on JS function entry.
Use(r1); // JS function.
Use(lr); // Return address.
}

deps/v8/src/arm/register-allocator-arm.h (3)

@ -33,7 +33,8 @@ namespace internal {
class RegisterAllocatorConstants : public AllStatic {
public:
static const int kNumRegisters = 12;
// No registers are currently managed by the register allocator on ARM.
static const int kNumRegisters = 0;
static const int kInvalidRegister = -1;
};

deps/v8/src/arm/simulator-arm.cc (143)

@ -150,7 +150,11 @@ bool Debugger::GetValue(const char* desc, int32_t* value) {
*value = GetRegisterValue(regnum);
return true;
} else {
return SScanF(desc, "%i", value) == 1;
if (strncmp(desc, "0x", 2) == 0) {
return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
} else {
return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
}
}
return false;
}
@ -231,6 +235,7 @@ void Debugger::Debug() {
char cmd[COMMAND_SIZE + 1];
char arg1[ARG_SIZE + 1];
char arg2[ARG_SIZE + 1];
char* argv[3] = { cmd, arg1, arg2 };
// make sure to have a proper terminating character if reaching the limit
cmd[COMMAND_SIZE] = 0;
@ -258,7 +263,7 @@ void Debugger::Debug() {
} else {
// Use sscanf to parse the individual parts of the command line. At the
// moment no command expects more than two parameters.
int args = SScanF(line,
int argc = SScanF(line,
"%" XSTR(COMMAND_SIZE) "s "
"%" XSTR(ARG_SIZE) "s "
"%" XSTR(ARG_SIZE) "s",
@ -271,7 +276,7 @@ void Debugger::Debug() {
// Leave the debugger shell.
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
if (args == 2) {
if (argc == 2) {
int32_t value;
float svalue;
double dvalue;
@ -296,7 +301,7 @@ void Debugger::Debug() {
}
} else if ((strcmp(cmd, "po") == 0)
|| (strcmp(cmd, "printobject") == 0)) {
if (args == 2) {
if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
@ -313,6 +318,37 @@ void Debugger::Debug() {
} else {
PrintF("printobject <value>\n");
}
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
int32_t* cur = NULL;
int32_t* end = NULL;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
} else { // "mem"
int32_t value;
if (!GetValue(arg1, &value)) {
PrintF("%s unrecognized\n", arg1);
continue;
}
cur = reinterpret_cast<int32_t*>(value);
next_arg++;
}
int32_t words;
if (argc == next_arg) {
words = 10;
} else if (argc == next_arg + 1) {
if (!GetValue(argv[next_arg], &words)) {
words = 10;
}
}
end = cur + words;
while (cur < end) {
PrintF(" 0x%08x: 0x%08x %10d\n", cur, *cur, *cur);
cur++;
}
} else if (strcmp(cmd, "disasm") == 0) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@ -322,10 +358,10 @@ void Debugger::Debug() {
byte* cur = NULL;
byte* end = NULL;
if (args == 1) {
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instr::kInstrSize);
} else if (args == 2) {
} else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
@ -351,7 +387,7 @@ void Debugger::Debug() {
v8::internal::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
if (args == 2) {
if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
if (!SetBreakpoint(reinterpret_cast<Instr*>(value))) {
@ -401,6 +437,10 @@ void Debugger::Debug() {
PrintF(" print an object from a register (alias 'po')\n");
PrintF("flags\n");
PrintF(" print flags\n");
PrintF("stack [<words>]\n");
PrintF(" dump stack content, default dump 10 words)\n");
PrintF("mem <address> [<words>]\n");
PrintF(" dump memory content, default dump 10 words)\n");
PrintF("disasm [<instructions>]\n");
PrintF("disasm [[<address>] <instructions>]\n");
PrintF(" disassemble code, default is 10 instructions from pc\n");
@ -414,7 +454,7 @@ void Debugger::Debug() {
PrintF(" ignore the stop instruction at the current location");
PrintF(" from now on\n");
PrintF("trace (alias 't')\n");
PrintF(" toogle the tracing of all executed statements");
PrintF(" toogle the tracing of all executed statements\n");
} else {
PrintF("Unknown command: %s\n", cmd);
}
@ -1465,6 +1505,50 @@ void Simulator::DecodeType01(Instr* instr) {
}
return;
}
} else if ((type == 0) && instr->IsMiscType0()) {
if (instr->Bits(22, 21) == 1) {
int rm = instr->RmField();
switch (instr->Bits(7, 4)) {
case BX:
set_pc(get_register(rm));
break;
case BLX: {
uint32_t old_pc = get_pc();
set_pc(get_register(rm));
set_register(lr, old_pc + Instr::kInstrSize);
break;
}
case BKPT:
v8::internal::OS::DebugBreak();
break;
default:
UNIMPLEMENTED();
}
} else if (instr->Bits(22, 21) == 3) {
int rm = instr->RmField();
int rd = instr->RdField();
switch (instr->Bits(7, 4)) {
case CLZ: {
uint32_t bits = get_register(rm);
int leading_zeros = 0;
if (bits == 0) {
leading_zeros = 32;
} else {
while ((bits & 0x80000000u) == 0) {
bits <<= 1;
leading_zeros++;
}
}
set_register(rd, leading_zeros);
break;
}
default:
UNIMPLEMENTED();
}
} else {
PrintF("%08x\n", instr->InstructionBits());
UNIMPLEMENTED();
}
} else {
int rd = instr->RdField();
int rn = instr->RnField();
@ -1582,21 +1666,9 @@ void Simulator::DecodeType01(Instr* instr) {
SetNZFlags(alu_out);
SetCFlag(shifter_carry_out);
} else {
ASSERT(type == 0);
int rm = instr->RmField();
switch (instr->Bits(7, 4)) {
case BX:
set_pc(get_register(rm));
break;
case BLX: {
uint32_t old_pc = get_pc();
set_pc(get_register(rm));
set_register(lr, old_pc + Instr::kInstrSize);
break;
}
default:
UNIMPLEMENTED();
}
// Other instructions matching this pattern are handled in the
// miscellaneous instructions part above.
UNREACHABLE();
}
break;
}
@ -1624,27 +1696,9 @@ void Simulator::DecodeType01(Instr* instr) {
SetCFlag(!CarryFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
} else {
ASSERT(type == 0);
int rm = instr->RmField();
int rd = instr->RdField();
switch (instr->Bits(7, 4)) {
case CLZ: {
uint32_t bits = get_register(rm);
int leading_zeros = 0;
if (bits == 0) {
leading_zeros = 32;
} else {
while ((bits & 0x80000000u) == 0) {
bits <<= 1;
leading_zeros++;
}
}
set_register(rd, leading_zeros);
break;
}
default:
UNIMPLEMENTED();
}
// Other instructions matching this pattern are handled in the
// miscellaneous instructions part above.
UNREACHABLE();
}
break;
}
@ -1798,6 +1852,7 @@ void Simulator::DecodeType3(Instr* instr) {
break;
}
case 3: {
// UBFX.
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
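
The simulator's new CLZ case earlier in this hunk counts leading zeros with a shift loop. The same algorithm as a standalone function, a sketch for intuition only (real ARMv5+ hardware has a dedicated CLZ instruction):

#include <cassert>
#include <cstdint>

static int CountLeadingZeros(uint32_t bits) {
  if (bits == 0) return 32;  // CLZ of zero is architecturally 32
  int leading_zeros = 0;
  while ((bits & 0x80000000u) == 0) {  // shift until the top bit is set
    bits <<= 1;
    leading_zeros++;
  }
  return leading_zeros;
}

int main() {
  assert(CountLeadingZeros(0) == 32);
  assert(CountLeadingZeros(1) == 31);
  assert(CountLeadingZeros(0x80000000u) == 0);
  return 0;
}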

deps/v8/src/arm/virtual-frame-arm.cc (350)

@ -37,34 +37,126 @@ namespace internal {
#define __ ACCESS_MASM(masm())
void VirtualFrame::SyncElementBelowStackPointer(int index) {
UNREACHABLE();
void VirtualFrame::PopToR1R0() {
VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is in r0 and r1.
where_to_go.top_of_stack_state_ = R0_R1_TOS;
MergeTo(&where_to_go);
// Pop the two registers off the stack so they are detached from the frame.
element_count_ -= 2;
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::SyncElementByPushing(int index) {
UNREACHABLE();
void VirtualFrame::PopToR1() {
VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is only in r1.
where_to_go.top_of_stack_state_ = R1_TOS;
MergeTo(&where_to_go);
// Pop the register off the stack so it is detached from the frame.
element_count_ -= 1;
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::MergeTo(VirtualFrame* expected) {
// ARM frames are currently always in memory.
ASSERT(Equals(expected));
}
void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
UNREACHABLE();
}
void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
UNREACHABLE();
void VirtualFrame::PopToR0() {
VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack only in r0.
where_to_go.top_of_stack_state_ = R0_TOS;
MergeTo(&where_to_go);
// Pop the register off the stack so it is detached from the frame.
element_count_ -= 1;
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
UNREACHABLE();
void VirtualFrame::MergeTo(VirtualFrame* expected) {
if (Equals(expected)) return;
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
switch (CASE_NUMBER(top_of_stack_state_, expected->top_of_stack_state_)) {
case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
__ pop(r0);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
__ pop(r1);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
__ pop(r0);
__ pop(r1);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
__ pop(r1);
__ pop(r1);
break;
case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
__ push(r0);
break;
case CASE_NUMBER(R0_TOS, R0_TOS):
break;
case CASE_NUMBER(R0_TOS, R1_TOS):
__ mov(r1, r0);
break;
case CASE_NUMBER(R0_TOS, R0_R1_TOS):
__ pop(r1);
break;
case CASE_NUMBER(R0_TOS, R1_R0_TOS):
__ mov(r1, r0);
__ pop(r0);
break;
case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
__ push(r1);
break;
case CASE_NUMBER(R1_TOS, R0_TOS):
__ mov(r0, r1);
break;
case CASE_NUMBER(R1_TOS, R1_TOS):
break;
case CASE_NUMBER(R1_TOS, R0_R1_TOS):
__ mov(r0, r1);
__ pop(r1);
break;
case CASE_NUMBER(R1_TOS, R1_R0_TOS):
__ pop(r0);
break;
case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
__ push(r1);
__ push(r0);
break;
case CASE_NUMBER(R0_R1_TOS, R0_TOS):
__ push(r1);
break;
case CASE_NUMBER(R0_R1_TOS, R1_TOS):
__ push(r1);
__ mov(r1, r0);
break;
case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
break;
case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
__ Swap(r0, r1, ip);
break;
case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
__ push(r0);
__ push(r1);
break;
case CASE_NUMBER(R1_R0_TOS, R0_TOS):
__ push(r0);
__ mov(r0, r1);
break;
case CASE_NUMBER(R1_R0_TOS, R1_TOS):
__ push(r0);
break;
case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
__ Swap(r0, r1, ip);
break;
case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
break;
default:
UNREACHABLE();
#undef CASE_NUMBER
}
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
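MergeTo handles all 25 ordered pairs of top-of-stack states with one switch by folding the pair into a single integer. A minimal sketch of that CASE_NUMBER idiom, with illustrative names:

enum State { A, B, C, STATE_COUNT };

static int Transition(State from, State to) {
#define CASE_NUMBER(a, b) ((a) * STATE_COUNT + (b))
  switch (CASE_NUMBER(from, to)) {
    case CASE_NUMBER(A, B): return 1;   // One case per ordered pair.
    case CASE_NUMBER(B, A): return -1;
    default: return 0;                  // All other pairs.
  }
#undef CASE_NUMBER
}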
@ -92,8 +184,6 @@ void VirtualFrame::Enter() {
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
cgen()->allocator()->Unuse(r1);
cgen()->allocator()->Unuse(lr);
}
@ -152,37 +242,11 @@ void VirtualFrame::AllocateStackSlots() {
void VirtualFrame::SaveContextRegister() {
UNIMPLEMENTED();
}
void VirtualFrame::RestoreContextRegister() {
UNIMPLEMENTED();
}
void VirtualFrame::PushReceiverSlotAddress() {
UNIMPLEMENTED();
}
int VirtualFrame::InvalidateFrameSlotAt(int index) {
UNIMPLEMENTED();
return kIllegalIndex;
}
void VirtualFrame::TakeFrameSlotAt(int index) {
UNIMPLEMENTED();
}
void VirtualFrame::StoreToFrameSlotAt(int index) {
UNIMPLEMENTED();
}
void VirtualFrame::PushTryHandler(HandlerType type) {
// Grow the expression stack by handler size less one (the return
// address in lr is already counted by a call instruction).
@ -191,6 +255,20 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
}
void VirtualFrame::CallJSFunction(int arg_count) {
// InvokeFunction requires function in r1.
EmitPop(r1);
// +1 for receiver.
Forget(arg_count + 1);
ASSERT(cgen()->HasValidEntryRegisters());
ParameterCount count(arg_count);
__ InvokeFunction(r1, count, CALL_FUNCTION);
// Restore the context.
__ ldr(cp, Context());
}
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
@ -247,52 +325,192 @@ void VirtualFrame::CallCodeObject(Handle<Code> code,
}
// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
const bool VirtualFrame::kR0InUse[TOS_STATES] =
{ false, true, false, true, true };
const bool VirtualFrame::kR1InUse[TOS_STATES] =
{ false, false, true, true, true };
const int VirtualFrame::kVirtualElements[TOS_STATES] =
{ 0, 1, 1, 2, 2 };
const Register VirtualFrame::kTopRegister[TOS_STATES] =
{ r0, r0, r1, r1, r0 };
const Register VirtualFrame::kBottomRegister[TOS_STATES] =
{ r0, r0, r1, r0, r1 };
const Register VirtualFrame::kAllocatedRegisters[
VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
// Popping is done by the transition implied by kStateAfterPop. Of course if
// there were no stack slots allocated to registers then the physical SP must
// be adjusted.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
{ NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
// Pushing is done by the transition implied by kStateAfterPush. Of course if
// the maximum number of registers was already allocated to the top of stack
// slots then one register must be physically pushed onto the stack.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
{ R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
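Taken together, these tables define a small state machine over where the top of stack lives. A sketch that replays two pushes and a pop through the table values shown above (standalone, not compiled against V8):

#include <assert.h>

enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS,
                  TOS_STATES };

static const TopOfStack kAfterPush[TOS_STATES] =
    { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
static const TopOfStack kAfterPop[TOS_STATES] =
    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };

int main() {
  TopOfStack s = NO_TOS_REGISTERS;
  s = kAfterPush[s];            // Push: the new top lives in r0.
  assert(s == R0_TOS);
  s = kAfterPush[s];            // Push: new top in r1, old top stays in r0.
  assert(s == R1_R0_TOS);
  s = kAfterPop[s];             // Pop discards r1; r0 remains the top.
  assert(s == R0_TOS);
  return 0;
}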
bool VirtualFrame::SpilledScope::is_spilled_ = false;
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);
int num_virtual_elements = (element_count() - 1) - stack_pointer_;
// Emit code to lower the stack pointer if necessary.
if (num_virtual_elements < count) {
int num_dropped = count - num_virtual_elements;
stack_pointer_ -= num_dropped;
__ add(sp, sp, Operand(num_dropped * kPointerSize));
}
// Discard elements from the virtual frame and free any registers.
int num_virtual_elements = kVirtualElements[top_of_stack_state_];
while (num_virtual_elements > 0) {
Pop();
num_virtual_elements--;
count--;
if (count == 0) return;
}
if (count == 0) return;
__ add(sp, sp, Operand(count * kPointerSize));
element_count_ -= count;
}
Result VirtualFrame::Pop() {
UNIMPLEMENTED();
return Result();
void VirtualFrame::Pop() {
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
__ add(sp, sp, Operand(kPointerSize));
} else {
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
element_count_--;
}
void VirtualFrame::EmitPop(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_--;
ASSERT(!is_used(reg));
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
__ pop(reg);
} else {
__ mov(reg, kTopRegister[top_of_stack_state_]);
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
element_count_--;
}
Register VirtualFrame::Peek() {
AssertIsNotSpilled();
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
Register answer = kTopRegister[top_of_stack_state_];
__ pop(answer);
return answer;
} else {
return kTopRegister[top_of_stack_state_];
}
}
Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||
but_not_to_this_one.is(no_reg));
AssertIsNotSpilled();
element_count_--;
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (but_not_to_this_one.is(r0)) {
__ pop(r1);
return r1;
} else {
__ pop(r0);
return r0;
}
} else {
Register answer = kTopRegister[top_of_stack_state_];
ASSERT(!answer.is(but_not_to_this_one));
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
return answer;
}
}
void VirtualFrame::EnsureOneFreeTOSRegister() {
if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
__ push(kBottomRegister[top_of_stack_state_]);
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
}
void VirtualFrame::EmitPush(Register reg) {
ASSERT(stack_pointer_ == element_count() - 1);
element_count_++;
stack_pointer_++;
__ push(reg);
if (SpilledScope::is_spilled()) {
__ push(reg);
return;
}
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (reg.is(r0)) {
top_of_stack_state_ = R0_TOS;
return;
}
if (reg.is(r1)) {
top_of_stack_state_ = R1_TOS;
return;
}
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
Register dest = kTopRegister[top_of_stack_state_];
__ Move(dest, reg);
}
Register VirtualFrame::GetTOSRegister() {
if (SpilledScope::is_spilled()) return r0;
EnsureOneFreeTOSRegister();
return kTopRegister[kStateAfterPush[top_of_stack_state_]];
}
void VirtualFrame::EmitPush(MemOperand operand) {
element_count_++;
if (SpilledScope::is_spilled()) {
__ ldr(r0, operand);
__ push(r0);
return;
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
__ ldr(kTopRegister[top_of_stack_state_], operand);
}
void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
ASSERT(stack_pointer_ == element_count() - 1);
ASSERT(SpilledScope::is_spilled());
Adjust(count);
__ stm(db_w, sp, src_regs);
}
void VirtualFrame::SpillAll() {
switch (top_of_stack_state_) {
case R1_R0_TOS:
masm()->push(r0);
// Fall through.
case R1_TOS:
masm()->push(r1);
top_of_stack_state_ = NO_TOS_REGISTERS;
break;
case R0_R1_TOS:
masm()->push(r1);
// Fall through.
case R0_TOS:
masm()->push(r0);
top_of_stack_state_ = NO_TOS_REGISTERS;
// Fall through.
case NO_TOS_REGISTERS:
break;
}
ASSERT(register_allocation_map_ == 0); // Not yet implemented.
}
#undef __
} } // namespace v8::internal

351
deps/v8/src/arm/virtual-frame-arm.h

@ -45,14 +45,69 @@ namespace internal {
class VirtualFrame : public ZoneObject {
public:
class RegisterAllocationScope;
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
// generator's current frame, but no attempt is made to require it
// to stay spilled. It is intended as documentation while the code
// generator is being transformed.
// The constructor spills the code generator's current frame, and keeps
// it spilled.
class SpilledScope BASE_EMBEDDED {
public:
SpilledScope() {}
explicit SpilledScope(VirtualFrame* frame)
: old_is_spilled_(is_spilled_) {
if (frame != NULL) {
if (!is_spilled_) {
frame->SpillAll();
} else {
frame->AssertIsSpilled();
}
}
is_spilled_ = true;
}
~SpilledScope() {
is_spilled_ = old_is_spilled_;
}
static bool is_spilled() { return is_spilled_; }
private:
static bool is_spilled_;
int old_is_spilled_;
SpilledScope() { }
friend class RegisterAllocationScope;
};
class RegisterAllocationScope BASE_EMBEDDED {
public:
// A utility class to introduce a scope where the virtual frame
// is not spilled, ie. where register allocation occurs. Eventually
// when RegisterAllocationScope is ubiquitous it can be removed
// along with the (by then unused) SpilledScope class.
explicit RegisterAllocationScope(CodeGenerator* cgen)
: cgen_(cgen),
old_is_spilled_(SpilledScope::is_spilled_) {
SpilledScope::is_spilled_ = false;
if (old_is_spilled_) {
VirtualFrame* frame = cgen->frame();
if (frame != NULL) {
frame->AssertIsSpilled();
}
}
}
~RegisterAllocationScope() {
SpilledScope::is_spilled_ = old_is_spilled_;
if (old_is_spilled_) {
VirtualFrame* frame = cgen_->frame();
if (frame != NULL) {
frame->SpillAll();
}
}
}
private:
CodeGenerator* cgen_;
bool old_is_spilled_;
RegisterAllocationScope() { }
};
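The two scopes are a matched RAII pair around the static is_spilled_ flag. A hedged usage sketch (EmitSomething and the surrounding objects are assumptions for illustration, not V8 code):

void EmitSomething(CodeGenerator* cgen) {
  // Allow r0/r1 to hold top-of-stack values inside this scope.
  VirtualFrame::RegisterAllocationScope allocate(cgen);
  {
    // Force everything to memory, e.g. around code that clobbers r0/r1.
    VirtualFrame::SpilledScope spilled(cgen->frame());
    // ... emit code that requires a spilled frame ...
  }  // Destructor restores the previous value of the flag.
}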
// An illegal index into the virtual frame.
@ -75,27 +130,38 @@ class VirtualFrame : public ZoneObject {
return element_count() - expression_base_index();
}
int register_location(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
return register_locations_[num];
}
int register_location(Register reg) {
return register_locations_[RegisterAllocator::ToNumber(reg)];
}
void set_register_location(Register reg, int index) {
register_locations_[RegisterAllocator::ToNumber(reg)] = index;
}
bool is_used(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
return register_locations_[num] != kIllegalIndex;
switch (num) {
case 0: { // r0.
return kR0InUse[top_of_stack_state_];
}
case 1: { // r1.
return kR1InUse[top_of_stack_state_];
}
case 2:
case 3:
case 4:
case 5:
case 6: { // r2 to r6.
ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
ASSERT(num >= kFirstAllocatedRegister);
if ((register_allocation_map_ &
(1 << (num - kFirstAllocatedRegister))) == 0) {
return false;
} else {
return true;
}
}
default: {
ASSERT(num < kFirstAllocatedRegister ||
num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
return false;
}
}
}
bool is_used(Register reg) {
return register_locations_[RegisterAllocator::ToNumber(reg)]
!= kIllegalIndex;
return is_used(RegisterAllocator::ToNumber(reg));
}
// Add extra in-memory elements to the top of the frame to match an actual
@ -104,39 +170,35 @@ class VirtualFrame : public ZoneObject {
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (eg,
// the frame after a runtime call). No code is emitted.
// the frame after a runtime call). No code is emitted except to bring the
// frame to a spilled state.
void Forget(int count) {
ASSERT(count >= 0);
ASSERT(stack_pointer_ == element_count() - 1);
stack_pointer_ -= count;
// On ARM, all elements are in memory, so there is no extra bookkeeping
// (registers, copies, etc.) beyond dropping the elements.
SpillAll();
element_count_ -= count;
}
// Forget count elements from the top of the frame and adjust the stack
// pointer downward. This is used, for example, before merging frames at
// break, continue, and return targets.
void ForgetElements(int count);
// Spill all values from the frame to memory.
inline void SpillAll();
void SpillAll();
void AssertIsSpilled() {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
ASSERT(register_allocation_map_ == 0);
}
void AssertIsNotSpilled() {
ASSERT(!SpilledScope::is_spilled());
}
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg) {
if (is_used(reg)) SpillElementAt(register_location(reg));
UNIMPLEMENTED();
}
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references).
// (ie, they all have frame-external references). Unimplemented.
Register SpillAnyRegister();
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
// code. It is guaranteed that no code will be generated.
void PrepareMergeTo(VirtualFrame* expected);
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
@ -147,10 +209,7 @@ class VirtualFrame : public ZoneObject {
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
if (is_used(i)) cgen_allocator->Unuse(i);
}
AssertIsSpilled();
}
// (Re)attach a frame to its code generator. This informs the register
@ -158,10 +217,7 @@ class VirtualFrame : public ZoneObject {
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator() {
RegisterAllocator* cgen_allocator = cgen()->allocator();
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
if (is_used(i)) cgen_allocator->Unuse(i);
}
AssertIsSpilled();
}
// Emit code for the physical JS entry and exit frame sequences. After
@ -184,23 +240,17 @@ class VirtualFrame : public ZoneObject {
void AllocateStackSlots();
// The current top of the expression stack as an assembly operand.
MemOperand Top() { return MemOperand(sp, 0); }
MemOperand Top() {
AssertIsSpilled();
return MemOperand(sp, 0);
}
// An element of the expression stack as an assembly operand.
MemOperand ElementAt(int index) {
AssertIsSpilled();
return MemOperand(sp, index * kPointerSize);
}
// Random-access store to a frame-top relative frame element. The result
// becomes owned by the frame and is invalidated.
void SetElementAt(int index, Result* value);
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
Result temp(value);
SetElementAt(index, &temp);
}
// A frame-allocated local as an assembly operand.
MemOperand LocalAt(int index) {
ASSERT(0 <= index);
@ -208,13 +258,6 @@ class VirtualFrame : public ZoneObject {
return MemOperand(fp, kLocal0Offset - index * kPointerSize);
}
// Push the value of a local frame slot on top of the frame and invalidate
// the local slot. The slot should be written to before trying to read
// from it again.
void TakeLocalAt(int index) {
TakeFrameSlotAt(local0_index() + index);
}
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
@ -224,13 +267,6 @@ class VirtualFrame : public ZoneObject {
// The context frame slot.
MemOperand Context() { return MemOperand(fp, kContextOffset); }
// Save the value of the esi register to the context frame slot.
void SaveContextRegister();
// Restore the esi register from the value of the context frame
// slot.
void RestoreContextRegister();
// A parameter as an assembly operand.
MemOperand ParameterAt(int index) {
// Index -1 corresponds to the receiver.
@ -239,19 +275,6 @@ class VirtualFrame : public ZoneObject {
return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
}
// Push the value of a parameter frame slot on top of the frame and
// invalidate the parameter slot. The slot should be written to before
// trying to read from it again.
void TakeParameterAt(int index) {
TakeFrameSlotAt(param0_index() + index);
}
// Store the top value on the virtual frame into a parameter frame slot.
// The value is left in place on top of the frame.
void StoreToParameterAt(int index) {
StoreToFrameSlotAt(param0_index() + index);
}
// The receiver frame slot.
MemOperand Receiver() { return ParameterAt(-1); }
@ -261,11 +284,15 @@ class VirtualFrame : public ZoneObject {
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
void CallStub(CodeStub* stub, int arg_count) {
Forget(arg_count);
if (arg_count != 0) Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
masm()->CallStub(stub);
}
// Call JS function from top of the stack with arguments
// taken from the stack.
void CallJSFunction(int arg_count);
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
void CallRuntime(Runtime::Function* f, int arg_count);
@ -296,34 +323,49 @@ class VirtualFrame : public ZoneObject {
// Drop one element.
void Drop() { Drop(1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
Result Pop();
// Pop an element from the top of the expression stack. Discards
// the result.
void Pop();
// Pop an element from the top of the expression stack. The register
// will be one normally used for the top of stack register allocation
// so you can't hold on to it if you push on the stack.
Register PopToRegister(Register but_not_to_this_one = no_reg);
// Look at the top of the stack. The register returned is aliased and
// must be copied to a scratch register before modification.
Register Peek();
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);
// Takes the top two elements and puts them in r0 (top element) and r1
// (second element).
void PopToR1R0();
// Takes the top element and puts it in r1.
void PopToR1();
// Takes the top element and puts it in r0.
void PopToR0();
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg);
void EmitPush(MemOperand operand);
// Get a register which is free and which must be immediately used to
// push on the top of the stack.
Register GetTOSRegister();
// Push multiple registers on the stack and the virtual frame.
// Registers are selected by setting bits in src_regs and
// are pushed in decreasing order: r15 .. r0.
void EmitPushMultiple(int count, int src_regs);
// Push an element on the virtual frame.
inline void Push(Handle<Object> value);
inline void Push(Smi* value);
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
// the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
inline void Nip(int num_dropped);
inline void SetTypeForLocalAt(int index, TypeInfo info);
inline void SetTypeForParamAt(int index, TypeInfo info);
static Register scratch0() { return r7; }
static Register scratch1() { return r9; }
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@ -333,16 +375,40 @@ class VirtualFrame : public ZoneObject {
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
// 5 states for the top of stack, which can be in memory or in r0 and r1.
enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS,
TOS_STATES};
static const int kMaxTOSRegisters = 2;
static const bool kR0InUse[TOS_STATES];
static const bool kR1InUse[TOS_STATES];
static const int kVirtualElements[TOS_STATES];
static const TopOfStack kStateAfterPop[TOS_STATES];
static const TopOfStack kStateAfterPush[TOS_STATES];
static const Register kTopRegister[TOS_STATES];
static const Register kBottomRegister[TOS_STATES];
// We allocate up to 5 locals in registers.
static const int kNumberOfAllocatedRegisters = 5;
// r2 to r6 are allocated to locals.
static const int kFirstAllocatedRegister = 2;
static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
static Register AllocatedRegister(int r) {
ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
return kAllocatedRegisters[r];
}
// The number of elements on the stack frame.
int element_count_;
TopOfStack top_of_stack_state_:3;
int register_allocation_map_:kNumberOfAllocatedRegisters;
// The index of the element that is at the processor's stack pointer
// (the sp register).
int stack_pointer_;
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
int register_locations_[RegisterAllocator::kNumRegisters];
// The index of the element that is at the processor's stack pointer
// (the sp register). For now since everything is in memory it is given
// by the number of elements on the not-very-virtual stack frame.
int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
int parameter_count() { return cgen()->scope()->num_parameters(); }
@ -380,80 +446,15 @@ class VirtualFrame : public ZoneObject {
return (frame_pointer() - index) * kPointerSize;
}
// Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing the register's external reference count and
// of updating the index of the register's location in the frame.
void Use(Register reg, int index) {
ASSERT(!is_used(reg));
set_register_location(reg, index);
cgen()->allocator()->Use(reg);
}
// Record that a register reference has been dropped from the frame. This
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
void Unuse(Register reg) {
ASSERT(is_used(reg));
set_register_location(reg, kIllegalIndex);
cgen()->allocator()->Unuse(reg);
}
// Spill the element at a particular index---write it to memory if
// necessary, free any associated register, and forget its value if
// constant.
void SpillElementAt(int index);
// Sync the element at a particular index. If it is a register or
// constant that disagrees with the value on the stack, write it to memory.
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
// Sync a single unsynced element that lies beneath or at the stack pointer.
void SyncElementBelowStackPointer(int index);
// Sync a single unsynced element that lies just above the stack pointer.
void SyncElementByPushing(int index);
// Push the value of a frame slot (typically a local or parameter) on
// top of the frame and invalidate the slot.
void TakeFrameSlotAt(int index);
// Store the value on top of the frame to a frame slot (typically a local
// or parameter).
void StoreToFrameSlotAt(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
// Then drop dropped_args elements from the virtual frame, to match
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
// Move frame elements currently in registers or constants, that
// should be in memory in the expected frame, to memory.
void MergeMoveRegistersToMemory(VirtualFrame* expected);
// Make the register-to-register moves necessary to
// merge this frame with the expected frame.
// Register to memory moves must already have been made,
// and memory to register moves must follow this call.
// This is because some new memory-to-register moves are
// created in order to break cycles of register moves.
// Used in the implementation of MergeTo().
void MergeMoveRegistersToRegisters(VirtualFrame* expected);
// Make the memory-to-register and constant-to-register moves
// needed to make this frame equal the expected frame.
// Called after all register-to-memory and register-to-register
// moves have been made. After this function returns, the frames
// should be equal.
void MergeMoveMemoryToRegisters(VirtualFrame* expected);
// Invalidates a frame slot (puts an invalid frame element in it).
// Copies on the frame are correctly handled, and if this slot was
// the backing store of copies, the index of the new backing store
// is returned. Otherwise, returns kIllegalIndex.
// Register counts are correctly updated.
int InvalidateFrameSlotAt(int index);
// If all top-of-stack registers are in use then the lowest one is pushed
// onto the physical stack and made free.
void EnsureOneFreeTOSRegister();
inline bool Equals(VirtualFrame* other);

106
deps/v8/src/array.js

@ -644,16 +644,62 @@ function ArraySort(comparefn) {
// In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency.
var custom_compare = IS_FUNCTION(comparefn);
var global_receiver;
function InsertionSortWithFunc(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
var order = %_CallFunction(global_receiver, tmp, element, comparefn);
if (order > 0) {
a[j + 1] = tmp;
} else {
break;
}
}
a[j + 1] = element;
}
}
function QuickSortWithFunc(a, from, to) {
// Insertion sort is faster for short arrays.
if (to - from <= 22) {
InsertionSortWithFunc(a, from, to);
return;
}
var pivot_index = $floor($random() * (to - from)) + from;
var pivot = a[pivot_index];
// Issue 95: Keep the pivot element out of the comparisons to avoid
// infinite recursion if comparefn(pivot, pivot) != 0.
a[pivot_index] = a[from];
a[from] = pivot;
var low_end = from; // Upper bound of the elements lower than pivot.
var high_start = to; // Lower bound of the elements greater than pivot.
// From low_end to i are elements equal to pivot.
// From i to high_start are elements that haven't been compared yet.
for (var i = from + 1; i < high_start; ) {
var element = a[i];
var order = %_CallFunction(global_receiver, element, pivot, comparefn);
if (order < 0) {
a[i] = a[low_end];
a[low_end] = element;
i++;
low_end++;
} else if (order > 0) {
high_start--;
a[i] = a[high_start];
a[high_start] = element;
} else { // order == 0
i++;
}
}
QuickSortWithFunc(a, from, low_end);
QuickSortWithFunc(a, high_start, to);
}
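QuickSortWithFunc keeps three regions: below low_end is less than the pivot, [low_end, i) equals it, and [high_start, to) is greater. The same three-way (Dutch-flag) partition in a generic, hedged C++ sketch (illustrative only; V8's version is the JavaScript above):

#include <utility>
#include <vector>

// Partition v[from, to) around pivot; on return, equal elements occupy
// [result.first, result.second) with smaller/larger ones on either side.
static std::pair<int, int> ThreeWayPartition(std::vector<int>& v,
                                             int from, int to, int pivot) {
  int low_end = from;      // Upper bound of elements lower than pivot.
  int high_start = to;     // Lower bound of elements greater than pivot.
  for (int i = from; i < high_start; ) {
    if (v[i] < pivot) {
      std::swap(v[i++], v[low_end++]);
    } else if (v[i] > pivot) {
      std::swap(v[i], v[--high_start]);
    } else {
      i++;                 // Equal to pivot: stays in the middle region.
    }
  }
  return std::make_pair(low_end, high_start);
}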
function Compare(x,y) {
// Assume the comparefn, if any, is a consistent comparison function.
// If it isn't, we are allowed arbitrary behavior by ECMA 15.4.4.11.
if (x === y) return 0;
if (custom_compare) {
// Don't call directly to avoid exposing the builtin's global object.
return comparefn.call(null, x, y);
}
if (%_IsSmi(x) && %_IsSmi(y)) {
return %SmiLexicographicCompare(x, y);
}
@ -666,33 +712,17 @@ function ArraySort(comparefn) {
function InsertionSort(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
// Pre-convert the element to a string for comparison if we know
// it will happen on each compare anyway.
var key =
(custom_compare || %_IsSmi(element)) ? element : ToString(element);
// place element in a[from..i[
// binary search
var min = from;
var max = i;
// The search interval is a[min..max[
while (min < max) {
var mid = min + ((max - min) >> 1);
var order = Compare(a[mid], key);
if (order == 0) {
min = max = mid;
break;
}
if (order < 0) {
min = mid + 1;
var key = %_IsSmi(element) ? element : ToString(element);
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
var order = Compare(tmp, key);
if (order > 0) {
a[j + 1] = tmp;
} else {
max = mid;
break;
}
}
// place element at position min==max.
for (var j = i; j > min; j--) {
a[j] = a[j - 1];
}
a[min] = element;
a[j + 1] = element;
}
}
@ -706,8 +736,7 @@ function ArraySort(comparefn) {
var pivot = a[pivot_index];
// Pre-convert the element to a string for comparison if we know
// it will happen on each compare anyway.
var pivot_key =
(custom_compare || %_IsSmi(pivot)) ? pivot : ToString(pivot);
var pivot_key = %_IsSmi(pivot) ? pivot : ToString(pivot);
// Issue 95: Keep the pivot element out of the comparisons to avoid
// infinite recursion if comparefn(pivot, pivot) != 0.
a[pivot_index] = a[from];
@ -736,8 +765,6 @@ function ArraySort(comparefn) {
QuickSort(a, high_start, to);
}
var length;
// Copies elements in the range 0..length from obj's prototype chain
// to obj itself, if obj has holes. Returns one more than the maximal index
// of a prototype property.
@ -855,7 +882,7 @@ function ArraySort(comparefn) {
return first_undefined;
}
length = TO_UINT32(this.length);
var length = TO_UINT32(this.length);
if (length < 2) return this;
var is_array = IS_ARRAY(this);
@ -880,7 +907,12 @@ function ArraySort(comparefn) {
num_non_undefined = SafeRemoveArrayHoles(this);
}
QuickSort(this, 0, num_non_undefined);
if (IS_FUNCTION(comparefn)) {
global_receiver = %GetGlobalReceiver();
QuickSortWithFunc(this, 0, num_non_undefined);
} else {
QuickSort(this, 0, num_non_undefined);
}
if (!is_array && (num_non_undefined + 1 < max_prototype_element)) {
// For compatibility with JSC, we shadow any elements in the prototype

10
deps/v8/src/assembler.cc

@ -574,8 +574,14 @@ ExternalReference ExternalReference::perform_gc_function() {
}
ExternalReference ExternalReference::random_positive_smi_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi)));
}
ExternalReference ExternalReference::fill_heap_number_with_random_function() {
return
ExternalReference(Redirect(FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
}
ExternalReference ExternalReference::random_uint32_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(V8::Random)));
}

3
deps/v8/src/assembler.h

@ -398,7 +398,8 @@ class ExternalReference BASE_EMBEDDED {
// ExternalReferenceTable in serialize.cc manually.
static ExternalReference perform_gc_function();
static ExternalReference random_positive_smi_function();
static ExternalReference fill_heap_number_with_random_function();
static ExternalReference random_uint32_function();
static ExternalReference transcendental_cache_array_address();
// Static data in the keyed lookup cache.

134
deps/v8/src/ast.cc

@ -47,11 +47,8 @@ Call Call::sentinel_(NULL, NULL, 0);
// ----------------------------------------------------------------------------
// All the Accept member functions for each syntax tree node type.
#define DECL_ACCEPT(type) \
void type::Accept(AstVisitor* v) { \
if (v->CheckStackOverflow()) return; \
v->Visit##type(this); \
}
#define DECL_ACCEPT(type) \
void type::Accept(AstVisitor* v) { v->Visit##type(this); }
AST_NODE_LIST(DECL_ACCEPT)
#undef DECL_ACCEPT
@ -241,6 +238,13 @@ bool Expression::GuaranteedSmiResult() {
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
bool AstVisitor::CheckStackOverflow() {
if (stack_overflow_) return true;
StackLimitCheck check;
if (!check.HasOverflowed()) return false;
return (stack_overflow_ = true);
}
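CheckStackOverflow is a sticky latch: the first time the stack probe trips, the flag is set and stays set, so Visit quietly stops dispatching. The idiom in isolation (OverflowGuard and NearStackLimit are illustrative stand-ins, not V8 names):

class OverflowGuard {
 public:
  bool Check() {
    if (tripped_) return true;          // Once tripped, always tripped.
    if (!NearStackLimit()) return false;
    return (tripped_ = true);           // Set the latch and report it.
  }
 private:
  static bool NearStackLimit() { return false; }  // Stub for the real probe.
  bool tripped_ = false;
};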
void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
@ -749,117 +753,6 @@ bool CompareOperation::IsCritical() {
}
static inline void MarkIfNotLive(Expression* expr, List<AstNode*>* stack) {
if (!expr->is_live()) {
expr->mark_as_live();
stack->Add(expr);
}
}
// Overloaded functions for marking children of live code as live.
void VariableProxy::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
// A reference to a stack-allocated variable depends on all the
// definitions reaching it.
BitVector* defs = reaching_definitions();
if (defs != NULL) {
ASSERT(var()->IsStackAllocated());
// The first variable_count definitions are the initial parameter and
// local declarations.
for (int i = variable_count; i < defs->length(); i++) {
if (defs->Contains(i)) {
MarkIfNotLive(body_definitions->at(i - variable_count), stack);
}
}
}
}
void Literal::ProcessNonLiveChildren(List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
// Leaf node, no children.
}
void Assignment::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
Property* prop = target()->AsProperty();
VariableProxy* proxy = target()->AsVariableProxy();
if (prop != NULL) {
if (!prop->key()->IsPropertyName()) MarkIfNotLive(prop->key(), stack);
MarkIfNotLive(prop->obj(), stack);
} else if (proxy == NULL) {
// Must be a reference error.
ASSERT(!target()->IsValidLeftHandSide());
MarkIfNotLive(target(), stack);
} else if (is_compound()) {
// A variable assignment so lhs is an operand to the operation.
MarkIfNotLive(target(), stack);
}
MarkIfNotLive(value(), stack);
}
void Property::ProcessNonLiveChildren(List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
if (!key()->IsPropertyName()) MarkIfNotLive(key(), stack);
MarkIfNotLive(obj(), stack);
}
void Call::ProcessNonLiveChildren(List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
ZoneList<Expression*>* args = arguments();
for (int i = args->length() - 1; i >= 0; i--) {
MarkIfNotLive(args->at(i), stack);
}
MarkIfNotLive(expression(), stack);
}
void UnaryOperation::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
MarkIfNotLive(expression(), stack);
}
void CountOperation::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
MarkIfNotLive(expression(), stack);
}
void BinaryOperation::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
MarkIfNotLive(right(), stack);
MarkIfNotLive(left(), stack);
}
void CompareOperation::ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
MarkIfNotLive(right(), stack);
MarkIfNotLive(left(), stack);
}
// Implementation of a copy visitor. The visitor create a deep copy
// of ast nodes. Nodes that do not require a deep copy are copied
// with the default copy constructor.
@ -963,13 +856,11 @@ UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression)
: Expression(other), op_(other->op_), expression_(expression) {}
BinaryOperation::BinaryOperation(BinaryOperation* other,
BinaryOperation::BinaryOperation(Expression* other,
Token::Value op,
Expression* left,
Expression* right)
: Expression(other),
op_(other->op_),
left_(left),
right_(right) {}
: Expression(other), op_(op), left_(left), right_(right) {}
CountOperation::CountOperation(CountOperation* other, Expression* expression)
@ -1221,6 +1112,7 @@ void CopyAstVisitor::VisitCountOperation(CountOperation* expr) {
void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) {
expr_ = new BinaryOperation(expr,
expr->op(),
DeepCopyExpr(expr->left()),
DeepCopyExpr(expr->right()));
}

72
deps/v8/src/ast.h

@ -294,19 +294,6 @@ class Expression: public AstNode {
bitfields_ |= NumBitOpsField::encode(num_bit_ops);
}
// Functions used for dead-code elimination. Predicate is true if the
// expression is not dead code.
int is_live() const { return LiveField::decode(bitfields_); }
void mark_as_live() { bitfields_ |= LiveField::encode(true); }
// Mark non-live children as live and push them on a stack for further
// processing.
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
}
private:
static const int kMaxNumBitOps = (1 << 5) - 1;
@ -319,7 +306,6 @@ class Expression: public AstNode {
class ToInt32Field : public BitField<bool, 2, 1> {};
class NumBitOpsField : public BitField<int, 3, 5> {};
class LoopConditionField: public BitField<bool, 8, 1> {};
class LiveField: public BitField<bool, 9, 1> {};
};
@ -907,10 +893,6 @@ class Literal: public Expression {
virtual bool IsTrivial() { return true; }
virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
@ -1118,10 +1100,6 @@ class VariableProxy: public Expression {
virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
void SetIsPrimitive(bool value) { is_primitive_ = value; }
@ -1260,10 +1238,6 @@ class Property: public Expression {
virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
Expression* obj() const { return obj_; }
Expression* key() const { return key_; }
@ -1299,10 +1273,6 @@ class Call: public Expression {
virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
@ -1382,10 +1352,6 @@ class UnaryOperation: public Expression {
virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
Token::Value op() const { return op_; }
Expression* expression() const { return expression_; }
@ -1403,7 +1369,13 @@ class BinaryOperation: public Expression {
ASSERT(Token::IsBinaryOp(op));
}
BinaryOperation(BinaryOperation* other, Expression* left, Expression* right);
// Construct a binary operation with a given operator and left and right
// subexpressions. The rest of the expression state is copied from
// another expression.
BinaryOperation(Expression* other,
Token::Value op,
Expression* left,
Expression* right);
virtual void Accept(AstVisitor* v);
@ -1412,10 +1384,6 @@ class BinaryOperation: public Expression {
virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
// True iff the result can be safely overwritten (to avoid allocation).
// False for operations that can return one of their operands.
@ -1473,10 +1441,6 @@ class CountOperation: public Expression {
virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
bool is_prefix() const { return is_prefix_; }
bool is_postfix() const { return !is_prefix_; }
@ -1510,10 +1474,6 @@ class CompareOperation: public Expression {
virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
Token::Value op() const { return op_; }
Expression* left() const { return left_; }
@ -1568,10 +1528,6 @@ class Assignment: public Expression {
virtual bool IsPrimitive();
virtual bool IsCritical();
virtual void ProcessNonLiveChildren(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
@ -2110,29 +2066,23 @@ class AstVisitor BASE_EMBEDDED {
AstVisitor() : stack_overflow_(false) { }
virtual ~AstVisitor() { }
// Dispatch
void Visit(AstNode* node) { node->Accept(this); }
// Stack overflow check and dynamic dispatch.
void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
// Iteration
// Iteration left-to-right.
virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
// Stack overflow tracking support.
bool HasStackOverflow() const { return stack_overflow_; }
bool CheckStackOverflow() {
if (stack_overflow_) return true;
StackLimitCheck check;
if (!check.HasOverflowed()) return false;
return (stack_overflow_ = true);
}
bool CheckStackOverflow();
// If a stack-overflow exception is encountered when visiting a
// node, calling SetStackOverflow will make sure that the visitor
// bails out without visiting more nodes.
void SetStackOverflow() { stack_overflow_ = true; }
// Individual nodes
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;

118
deps/v8/src/bootstrapper.cc

@ -723,8 +723,68 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
Top::initial_object_prototype(), Builtins::Illegal,
true);
global_context()->set_regexp_function(*regexp_fun);
ASSERT(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map());
ASSERT_EQ(0, initial_map->inobject_properties());
Handle<DescriptorArray> descriptors = Factory::NewDescriptorArray(5);
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
int enum_index = 0;
{
// ECMA-262, section 15.10.7.1.
FieldDescriptor field(Heap::source_symbol(),
JSRegExp::kSourceFieldIndex,
final,
enum_index++);
descriptors->Set(0, &field);
}
{
// ECMA-262, section 15.10.7.2.
FieldDescriptor field(Heap::global_symbol(),
JSRegExp::kGlobalFieldIndex,
final,
enum_index++);
descriptors->Set(1, &field);
}
{
// ECMA-262, section 15.10.7.3.
FieldDescriptor field(Heap::ignore_case_symbol(),
JSRegExp::kIgnoreCaseFieldIndex,
final,
enum_index++);
descriptors->Set(2, &field);
}
{
// ECMA-262, section 15.10.7.4.
FieldDescriptor field(Heap::multiline_symbol(),
JSRegExp::kMultilineFieldIndex,
final,
enum_index++);
descriptors->Set(3, &field);
}
{
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
FieldDescriptor field(Heap::last_index_symbol(),
JSRegExp::kLastIndexFieldIndex,
writable,
enum_index++);
descriptors->Set(4, &field);
}
descriptors->SetNextEnumerationIndex(enum_index);
descriptors->Sort();
initial_map->set_inobject_properties(5);
initial_map->set_pre_allocated_property_fields(5);
initial_map->set_unused_property_fields(0);
initial_map->set_instance_size(
initial_map->instance_size() + 5 * kPointerSize);
initial_map->set_instance_descriptors(*descriptors);
}
{ // -- J S O N
@ -1177,6 +1237,62 @@ bool Genesis::InstallNatives() {
apply->shared()->set_length(2);
}
// Create a constructor for RegExp results (a variant of Array that
// predefines the two properties index and match).
{
// RegExpResult initial map.
// Find global.Array.prototype to inherit from.
Handle<JSFunction> array_constructor(global_context()->array_function());
Handle<JSObject> array_prototype(
JSObject::cast(array_constructor->instance_prototype()));
// Add initial map.
Handle<Map> initial_map =
Factory::NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
initial_map->set_constructor(*array_constructor);
// Set prototype on map.
initial_map->set_non_instance_prototype(false);
initial_map->set_prototype(*array_prototype);
// Update map with length accessor from Array and add "index" and "input".
Handle<Map> array_map(global_context()->js_array_map());
Handle<DescriptorArray> array_descriptors(
array_map->instance_descriptors());
ASSERT_EQ(1, array_descriptors->number_of_descriptors());
Handle<DescriptorArray> reresult_descriptors =
Factory::NewDescriptorArray(3);
reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
int enum_index = 0;
{
FieldDescriptor index_field(Heap::index_symbol(),
JSRegExpResult::kIndexIndex,
NONE,
enum_index++);
reresult_descriptors->Set(1, &index_field);
}
{
FieldDescriptor input_field(Heap::input_symbol(),
JSRegExpResult::kInputIndex,
NONE,
enum_index++);
reresult_descriptors->Set(2, &input_field);
}
reresult_descriptors->Sort();
initial_map->set_inobject_properties(2);
initial_map->set_pre_allocated_property_fields(2);
initial_map->set_unused_property_fields(0);
initial_map->set_instance_descriptors(*reresult_descriptors);
global_context()->set_regexp_result_map(*initial_map);
}
#ifdef DEBUG
builtins->Verify();
#endif

176
deps/v8/src/builtins.cc

@ -268,9 +268,10 @@ static void CopyElements(AssertNoAllocation* no_gc,
int src_index,
int len) {
ASSERT(dst != src); // Use MoveElements instead.
memcpy(dst->data_start() + dst_index,
src->data_start() + src_index,
len * kPointerSize);
ASSERT(len > 0);
CopyWords(dst->data_start() + dst_index,
src->data_start() + src_index,
len);
WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
if (mode == UPDATE_WRITE_BARRIER) {
Heap::RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
@ -299,6 +300,73 @@ static void FillWithHoles(FixedArray* dst, int from, int to) {
}
static FixedArray* LeftTrimFixedArray(FixedArray* elms) {
// For now this trick is only applied to fixed arrays in new space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
// In old space we do not use this trick to avoid dealing with
// remembered sets.
ASSERT(Heap::new_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
Object** former_start = HeapObject::RawField(elms, 0);
const int len = elms->length();
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
former_start[0] = Heap::raw_unchecked_one_pointer_filler_map();
former_start[1] = Heap::fixed_array_map();
former_start[2] = reinterpret_cast<Object*>(len - 1);
ASSERT_EQ(elms->address() + kPointerSize, (elms + kPointerSize)->address());
return elms + kPointerSize;
}
static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
// For now this trick is only applied to fixed arrays in new space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
// In old space we do not use this trick to avoid dealing with
// remembered sets.
ASSERT(Heap::new_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
Object** former_start = HeapObject::RawField(elms, 0);
const int len = elms->length();
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
if (to_trim == 1) {
former_start[0] = Heap::raw_unchecked_one_pointer_filler_map();
} else if (to_trim == 2) {
former_start[0] = Heap::raw_unchecked_two_pointer_filler_map();
} else {
former_start[0] = Heap::raw_unchecked_byte_array_map();
ByteArray* as_byte_array = reinterpret_cast<ByteArray*>(elms);
as_byte_array->set_length(ByteArray::LengthFor(to_trim * kPointerSize));
}
former_start[to_trim] = Heap::fixed_array_map();
former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);
ASSERT_EQ(elms->address() + to_trim * kPointerSize,
(elms + to_trim * kPointerSize)->address());
return elms + to_trim * kPointerSize;
}
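LeftTrimFixedArray never copies the surviving elements: it overwrites the first to_trim words with a filler object and writes a fresh map and length immediately before the remaining payload. A toy model of that word rewrite on a plain array (stand-in constants, not real heap maps):

#include <assert.h>
#include <stdint.h>

static const intptr_t kFillerMap = -1;  // Stands in for the filler maps.
static const intptr_t kArrayMap = -2;   // Stands in for fixed_array_map.

// words = [map, length, e0, e1, ...]; returns the new object start.
static intptr_t* LeftTrim(intptr_t* words, intptr_t to_trim) {
  intptr_t len = words[1];
  words[0] = kFillerMap;                // Filler keeps the heap iterable.
  words[to_trim] = kArrayMap;           // New map slot.
  words[to_trim + 1] = len - to_trim;   // New, shorter length.
  return words + to_trim;               // Start moved forward, no copying.
}

int main() {
  intptr_t a[] = { kArrayMap, 3, 10, 11, 12 };    // A fixed array [10, 11, 12].
  intptr_t* t = LeftTrim(a, 1);
  assert(t[1] == 2 && t[2] == 11 && t[3] == 12);  // Now [11, 12].
  return 0;
}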
static bool ArrayPrototypeHasNoElements() {
// This method depends on non writability of Object and Array prototype
// fields.
@ -390,7 +458,9 @@ BUILTIN(ArrayPush) {
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
CopyElements(&no_gc, new_elms, 0, elms, 0, len);
if (len > 0) {
CopyElements(&no_gc, new_elms, 0, elms, 0, len);
}
FillWithHoles(new_elms, new_length, capacity);
elms = new_elms;
@ -443,38 +513,6 @@ BUILTIN(ArrayPop) {
}
static FixedArray* LeftTrimFixedArray(FixedArray* elms) {
// For now this trick is only applied to fixed arrays in new space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
// In old space we do not use this trick to avoid dealing with
// remembered sets.
ASSERT(Heap::new_space()->Contains(elms));
Object** former_map =
HeapObject::RawField(elms, FixedArray::kMapOffset);
Object** former_length =
HeapObject::RawField(elms, FixedArray::kLengthOffset);
Object** former_first =
HeapObject::RawField(elms, FixedArray::kHeaderSize);
// Check that we don't forget to copy all the bits.
STATIC_ASSERT(FixedArray::kMapOffset + 2 * kPointerSize
== FixedArray::kHeaderSize);
int len = elms->length();
*former_first = reinterpret_cast<Object*>(len - 1);
*former_length = Heap::fixed_array_map();
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
*former_map = Heap::raw_unchecked_one_pointer_filler_map();
ASSERT(elms->address() + kPointerSize == (elms + kPointerSize)->address());
return elms + kPointerSize;
}
BUILTIN(ArrayShift) {
Object* receiver = *args.receiver();
FixedArray* elms = NULL;
@ -537,7 +575,9 @@ BUILTIN(ArrayUnshift) {
FixedArray* new_elms = FixedArray::cast(obj);
AssertNoAllocation no_gc;
CopyElements(&no_gc, new_elms, to_add, elms, 0, len);
if (len > 0) {
CopyElements(&no_gc, new_elms, to_add, elms, 0, len);
}
FillWithHoles(new_elms, new_length, capacity);
elms = new_elms;
@ -713,12 +753,27 @@ BUILTIN(ArraySplice) {
if (item_count < actual_delete_count) {
// Shrink the array.
AssertNoAllocation no_gc;
MoveElements(&no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
FillWithHoles(elms, new_length, len);
const bool trim_array = Heap::new_space()->Contains(elms) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
if (actual_start > 0) {
Object** start = elms->data_start();
memmove(start + delta, start, actual_start * kPointerSize);
}
elms = LeftTrimFixedArray(elms, delta);
array->set_elements(elms, SKIP_WRITE_BARRIER);
} else {
AssertNoAllocation no_gc;
MoveElements(&no_gc,
elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
FillWithHoles(elms, new_length, len);
}
} else if (item_count > actual_delete_count) {
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
@ -734,11 +789,16 @@ BUILTIN(ArraySplice) {
AssertNoAllocation no_gc;
// Copy the part before actual_start as is.
CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start);
CopyElements(&no_gc,
new_elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
(len - actual_delete_count - actual_start));
if (actual_start > 0) {
CopyElements(&no_gc, new_elms, 0, elms, 0, actual_start);
}
const int to_copy = len - actual_delete_count - actual_start;
if (to_copy > 0) {
CopyElements(&no_gc,
new_elms, actual_start + item_count,
elms, actual_start + actual_delete_count,
to_copy);
}
FillWithHoles(new_elms, new_length, capacity);
elms = new_elms;
@ -812,10 +872,12 @@ BUILTIN(ArrayConcat) {
int start_pos = 0;
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
FixedArray* elms = FixedArray::cast(array->elements());
int len = Smi::cast(array->length())->value();
CopyElements(&no_gc, result_elms, start_pos, elms, 0, len);
start_pos += len;
if (len > 0) {
FixedArray* elms = FixedArray::cast(array->elements());
CopyElements(&no_gc, result_elms, start_pos, elms, 0, len);
start_pos += len;
}
}
ASSERT(start_pos == result_len);
@ -1330,6 +1392,14 @@ static void Generate_Return_DebugBreak(MacroAssembler* masm) {
static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
Debug::GenerateStubNoRegistersDebugBreak(masm);
}
static void Generate_PlainReturn_LiveEdit(MacroAssembler* masm) {
Debug::GeneratePlainReturnLiveEdit(masm);
}
static void Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
Debug::GenerateFrameDropperLiveEdit(masm);
}
#endif
Object* Builtins::builtins_[builtin_count] = { NULL, };
@ -1431,8 +1501,8 @@ void Builtins::Setup(bool create_heap_objects) {
}
}
// Log the event and add the code to the builtins array.
LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), functions[i].s_name));
PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), functions[i].s_name));
builtins_[i] = code;
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_builtin_code) {

4
deps/v8/src/builtins.h

@ -126,7 +126,9 @@ enum BuiltinExtraArguments {
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK) \
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK)
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif

3
deps/v8/src/circular-queue-inl.h

@ -38,7 +38,8 @@ template<typename Record>
CircularQueue<Record>::CircularQueue(int desired_buffer_size_in_bytes)
: buffer_(NewArray<Record>(desired_buffer_size_in_bytes / sizeof(Record))),
buffer_end_(buffer_ + desired_buffer_size_in_bytes / sizeof(Record)),
enqueue_semaphore_(OS::CreateSemaphore((buffer_end_ - buffer_) - 1)),
enqueue_semaphore_(
OS::CreateSemaphore(static_cast<int>(buffer_end_ - buffer_) - 1)),
enqueue_pos_(buffer_),
dequeue_pos_(buffer_) {
// To be able to distinguish between a full and an empty queue

6
deps/v8/src/circular-queue.cc

@ -58,8 +58,10 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
// updates of positions by different processor cores.
const int positions_size =
RoundUp(1, kProcessorCacheLineSize) +
RoundUp(sizeof(ProducerPosition), kProcessorCacheLineSize) +
RoundUp(sizeof(ConsumerPosition), kProcessorCacheLineSize);
RoundUp(static_cast<int>(sizeof(ProducerPosition)),
kProcessorCacheLineSize) +
RoundUp(static_cast<int>(sizeof(ConsumerPosition)),
kProcessorCacheLineSize);
positions_ = NewArray<byte>(positions_size);
producer_pos_ = reinterpret_cast<ProducerPosition*>(

2
deps/v8/src/circular-queue.h

@ -119,6 +119,8 @@ class SamplingCircularQueue {
byte* positions_;
ProducerPosition* producer_pos_;
ConsumerPosition* consumer_pos_;
DISALLOW_COPY_AND_ASSIGN(SamplingCircularQueue);
};

2
deps/v8/src/code-stubs.cc

@ -64,7 +64,7 @@ void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
OPROFILE(CreateNativeCodeRegion(GetName(),
code->instruction_start(),
code->instruction_size()));
LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
Counters::total_stubs_code_size.Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER

3
deps/v8/src/codegen.cc

@ -235,7 +235,7 @@ Handle<Code> CodeGenerator::MakeCode(CompilationInfo* info) {
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL);
if (!Logger::is_logging()) return false;
if (!Logger::is_logging() && !CpuProfiler::is_profiling()) return false;
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
static Vector<const char> kRegexp = CStrVector("regexp");
@ -454,7 +454,6 @@ const char* GenericUnaryOpStub::GetName() {
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
switch (type_) {
case READ_LENGTH: GenerateReadLength(masm); break;
case READ_ELEMENT: GenerateReadElement(masm); break;
case NEW_OBJECT: GenerateNewObject(masm); break;
}

13
deps/v8/src/codegen.h

@ -104,6 +104,7 @@ namespace internal {
F(IsNonNegativeSmi, 1, 1) \
F(IsArray, 1, 1) \
F(IsRegExp, 1, 1) \
F(CallFunction, -1 /* receiver + n args + function */, 1) \
F(IsConstructCall, 0, 1) \
F(ArgumentsLength, 0, 1) \
F(Arguments, 1, 1) \
@ -114,7 +115,7 @@ namespace internal {
F(CharFromCode, 1, 1) \
F(ObjectEquals, 2, 1) \
F(Log, 3, 1) \
F(RandomPositiveSmi, 0, 1) \
F(RandomHeapNumber, 0, 1) \
F(IsObject, 1, 1) \
F(IsFunction, 1, 1) \
F(IsUndetectableObject, 1, 1) \
@ -122,6 +123,7 @@ namespace internal {
F(SubString, 3, 1) \
F(StringCompare, 2, 1) \
F(RegExpExec, 4, 1) \
F(RegExpConstructResult, 3, 1) \
F(NumberToString, 1, 1) \
F(MathPow, 2, 1) \
F(MathSin, 1, 1) \
@ -229,7 +231,12 @@ class DeferredCode: public ZoneObject {
Label entry_label_;
Label exit_label_;
int registers_[RegisterAllocator::kNumRegisters];
// C++ doesn't allow zero length arrays, so we make the array length 1 even
// if we don't need it.
static const int kRegistersArrayLength =
(RegisterAllocator::kNumRegisters == 0) ?
1 : RegisterAllocator::kNumRegisters;
int registers_[kRegistersArrayLength];
#ifdef DEBUG
const char* comment_;
@ -498,7 +505,6 @@ class JSConstructEntryStub : public JSEntryStub {
class ArgumentsAccessStub: public CodeStub {
public:
enum Type {
READ_LENGTH,
READ_ELEMENT,
NEW_OBJECT
};
@ -512,7 +518,6 @@ class ArgumentsAccessStub: public CodeStub {
int MinorKey() { return type_; }
void Generate(MacroAssembler* masm);
void GenerateReadLength(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewObject(MacroAssembler* masm);

80
deps/v8/src/compiler.cc

@ -90,33 +90,13 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
}
if (FLAG_use_flow_graph) {
int variable_count =
function->num_parameters() + function->scope()->num_stack_slots();
FlowGraphBuilder builder(variable_count);
builder.Build(function);
if (!builder.HasStackOverflow()) {
if (variable_count > 0) {
ReachingDefinitions rd(builder.postorder(),
builder.body_definitions(),
variable_count);
rd.Compute();
TypeAnalyzer ta(builder.postorder(),
builder.body_definitions(),
variable_count,
function->num_parameters());
ta.Compute();
MarkLiveCode(builder.preorder(),
builder.body_definitions(),
variable_count);
}
}
FlowGraphBuilder builder;
FlowGraph* graph = builder.Build(function);
USE(graph);
#ifdef DEBUG
if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
builder.graph()->PrintText(function, builder.postorder());
graph->PrintAsText(function->name());
}
#endif
}
@@ -237,14 +217,18 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global,
}
if (script->name()->IsString()) {
LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
*code, String::cast(script->name())));
PROFILE(CodeCreateEvent(
is_eval ? Logger::EVAL_TAG :
Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*code, String::cast(script->name())));
OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
code->instruction_start(),
code->instruction_size()));
} else {
LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
*code, ""));
PROFILE(CodeCreateEvent(
is_eval ? Logger::EVAL_TAG :
Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*code, ""));
OPROFILE(CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
code->instruction_start(),
code->instruction_size()));
@@ -499,33 +483,13 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
}
if (FLAG_use_flow_graph) {
int variable_count =
literal->num_parameters() + literal->scope()->num_stack_slots();
FlowGraphBuilder builder(variable_count);
builder.Build(literal);
if (!builder.HasStackOverflow()) {
if (variable_count > 0) {
ReachingDefinitions rd(builder.postorder(),
builder.body_definitions(),
variable_count);
rd.Compute();
TypeAnalyzer ta(builder.postorder(),
builder.body_definitions(),
variable_count,
literal->num_parameters());
ta.Compute();
MarkLiveCode(builder.preorder(),
builder.body_definitions(),
variable_count);
}
}
FlowGraphBuilder builder;
FlowGraph* graph = builder.Build(literal);
USE(graph);
#ifdef DEBUG
if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
builder.graph()->PrintText(literal, builder.postorder());
graph->PrintAsText(literal->name());
}
#endif
}
@@ -625,20 +589,24 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// Log the code generation. If source information is available
// include script name and line number. Check explicitly whether
// logging is enabled as finding the line number is not free.
if (Logger::is_logging() || OProfileAgent::is_enabled()) {
if (Logger::is_logging()
|| OProfileAgent::is_enabled()
|| CpuProfiler::is_profiling()) {
Handle<String> func_name(name->length() > 0 ? *name : *inferred_name);
if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, start_position) + 1;
USE(line_num);
LOG(CodeCreateEvent(tag, *code, *func_name,
String::cast(script->name()), line_num));
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code, *func_name,
String::cast(script->name()), line_num));
OPROFILE(CreateNativeCodeRegion(*func_name,
String::cast(script->name()),
line_num,
code->instruction_start(),
code->instruction_size()));
} else {
LOG(CodeCreateEvent(tag, *code, *func_name));
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code, *func_name));
OPROFILE(CreateNativeCodeRegion(*func_name,
code->instruction_start(),
code->instruction_size()));

5
deps/v8/src/compiler.h

@@ -138,10 +138,7 @@ class CompilationInfo BASE_EMBEDDED {
// There should always be a function literal, but it may be set after
// construction (for lazy compilation).
FunctionLiteral* function() { return function_; }
void set_function(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
}
void set_function(FunctionLiteral* literal) { function_ = literal; }
// Simple accessors.
bool is_eval() { return is_eval_; }

2
deps/v8/src/contexts.h

@@ -76,6 +76,7 @@ enum ContextLookupFlags {
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
@@ -175,6 +176,7 @@ class Context: public FixedArray {
SECURITY_TOKEN_INDEX,
ARGUMENTS_BOILERPLATE_INDEX,
JS_ARRAY_MAP_INDEX,
REGEXP_RESULT_MAP_INDEX,
FUNCTION_MAP_INDEX,
FUNCTION_INSTANCE_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,

532
deps/v8/src/conversions.cc

@@ -48,51 +48,6 @@ int HexValue(uc32 c) {
return -1;
}
// Provide a common interface for getting a character at a certain
// index from a char* or a String object.
static inline int GetChar(const char* str, int index) {
ASSERT(index >= 0 && index < StrLength(str));
return str[index];
}
static inline int GetChar(String* str, int index) {
return str->Get(index);
}
static inline int GetLength(const char* str) {
return StrLength(str);
}
static inline int GetLength(String* str) {
return str->length();
}
static inline const char* GetCString(const char* str, int index) {
return str + index;
}
static inline const char* GetCString(String* str, int index) {
int length = str->length();
char* result = NewArray<char>(length + 1);
for (int i = index; i < length; i++) {
uc16 c = str->Get(i);
if (c <= 127) {
result[i - index] = static_cast<char>(c);
} else {
result[i - index] = 127; // Force number parsing to fail.
}
}
result[length - index] = '\0';
return result;
}
namespace {
// C++-style iterator adaptor for StringInputBuffer
@@ -134,15 +89,6 @@ void StringInputBufferIterator::operator++() {
}
static inline void ReleaseCString(const char* original, const char* str) {
}
static inline void ReleaseCString(String* original, const char* str) {
DeleteArray(const_cast<char *>(str));
}
template <class Iterator, class EndMark>
static bool SubStringEquals(Iterator* current,
EndMark end,
@@ -168,179 +114,309 @@ extern "C" double gay_strtod(const char* s00, const char** se);
// we don't need to preserve all the digits.
const int kMaxSignificantDigits = 772;
// Parse an int from a string starting a given index and in a given
// radix. The string can be either a char* or a String*.
template <class S>
static int InternalStringToInt(S* s, int i, int radix, double* value) {
int len = GetLength(s);
// Set up limits for computing the value.
ASSERT(2 <= radix && radix <= 36);
int lim_0 = '0' + (radix < 10 ? radix : 10);
int lim_a = 'a' + (radix - 10);
int lim_A = 'A' + (radix - 10);
static const double JUNK_STRING_VALUE = OS::nan_value();
// NOTE: The code for computing the value may seem a bit complex at
// first glance. It is structured to use 32-bit multiply-and-add
// loops as long as possible to avoid losing precision.
double v = 0.0;
int j;
for (j = i; j < len;) {
// Parse the longest part of the string starting at index j
// possible while keeping the multiplier, and thus the part
// itself, within 32 bits.
uint32_t part = 0, multiplier = 1;
int k;
for (k = j; k < len; k++) {
int c = GetChar(s, k);
if (c >= '0' && c < lim_0) {
c = c - '0';
} else if (c >= 'a' && c < lim_a) {
c = c - 'a' + 10;
} else if (c >= 'A' && c < lim_A) {
c = c - 'A' + 10;
} else {
break;
}
// Returns true if a nonspace character was found and false if the end was reached.
template <class Iterator, class EndMark>
static inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
while (*current != end) {
if (!Scanner::kIsWhiteSpace.get(**current)) return true;
++*current;
}
return false;
}
// Update the value of the part as long as the multiplier fits
// in 32 bits. When we can't guarantee that the next iteration
// will not overflow the multiplier, we stop parsing the part
// by leaving the loop.
static const uint32_t kMaximumMultiplier = 0xffffffffU / 36;
uint32_t m = multiplier * radix;
if (m > kMaximumMultiplier) break;
part = part * radix + c;
multiplier = m;
ASSERT(multiplier > part);
}
// Compute the number of part digits. If no digits were parsed,
// we're done parsing the entire string.
int digits = k - j;
if (digits == 0) break;
static bool isDigit(int x, int radix) {
return (x >= '0' && x <= '9' && x < '0' + radix)
|| (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
|| (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
}
// Update the value and skip the part in the string.
ASSERT(multiplier ==
pow(static_cast<double>(radix), static_cast<double>(digits)));
v = v * multiplier + part;
j = k;
}
// If the resulting value is larger than 2^53 the value does not fit
// in the mantissa of the double and there is a loss of precision.
// When the value is larger than 2^53 the rounding depends on the
// code generation. If the code generator spills the double value
// it uses 64 bits and if it does not it uses 80 bits.
//
// If there is a potential for overflow we resort to strtod for
// radix 10 numbers to get higher precision. For numbers in another
// radix we live with the loss of precision.
static const double kPreciseConversionLimit = 9007199254740992.0;
if (radix == 10 && v > kPreciseConversionLimit) {
const char* cstr = GetCString(s, i);
const char* end;
v = gay_strtod(cstr, &end);
ReleaseCString(s, cstr);
}
*value = v;
return j;
static double SignedZero(bool sign) {
return sign ? -0.0 : 0.0;
}
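SignedZero exists because IEEE 754 keeps -0.0 distinct from 0.0 even though the two compare equal, and parsing "-0" must preserve the sign. A quick self-contained demonstration:

#include <cstdio>

static double SignedZero(bool sign) { return sign ? -0.0 : 0.0; }

int main() {
  double pz = SignedZero(false);
  double nz = SignedZero(true);
  std::printf("%d\n", pz == nz);               // 1: the zeros compare equal
  std::printf("%g %g\n", 1.0 / pz, 1.0 / nz);  // inf -inf: the signs differ
  return 0;
}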
int StringToInt(String* str, int index, int radix, double* value) {
return InternalStringToInt(str, index, radix, value);
}
// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
template <int radix_log_2, class Iterator, class EndMark>
static double InternalStringToIntDouble(Iterator current,
EndMark end,
bool sign,
bool allow_trailing_junk) {
ASSERT(current != end);
// Skip leading 0s.
while (*current == '0') {
++current;
if (current == end) return SignedZero(sign);
}
int StringToInt(const char* str, int index, int radix, double* value) {
return InternalStringToInt(const_cast<char*>(str), index, radix, value);
}
int64_t number = 0;
int exponent = 0;
const int radix = (1 << radix_log_2);
do {
int digit;
if (*current >= '0' && *current <= '9' && *current < '0' + radix) {
digit = static_cast<char>(*current) - '0';
} else if (radix > 10 && *current >= 'a' && *current < 'a' + radix - 10) {
digit = static_cast<char>(*current) - 'a' + 10;
} else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
digit = static_cast<char>(*current) - 'A' + 10;
} else {
if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
break;
} else {
return JUNK_STRING_VALUE;
}
}
static const double JUNK_STRING_VALUE = OS::nan_value();
number = number * radix + digit;
int overflow = static_cast<int>(number >> 53);
if (overflow != 0) {
// Overflow occurred. Need to determine which direction to round the
// result.
int overflow_bits_count = 1;
while (overflow > 1) {
overflow_bits_count++;
overflow >>= 1;
}
int dropped_bits_mask = ((1 << overflow_bits_count) - 1);
int dropped_bits = static_cast<int>(number) & dropped_bits_mask;
number >>= overflow_bits_count;
exponent = overflow_bits_count;
// Returns true if a nonspace character was found and false if the end was reached.
template <class Iterator, class EndMark>
static inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
while (*current != end) {
if (!Scanner::kIsWhiteSpace.get(**current)) return true;
++*current;
bool zero_tail = true;
while (true) {
++current;
if (current == end || !isDigit(*current, radix)) break;
zero_tail = zero_tail && *current == '0';
exponent += radix_log_2;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return JUNK_STRING_VALUE;
}
int middle_value = (1 << (overflow_bits_count - 1));
if (dropped_bits > middle_value) {
number++; // Rounding up.
} else if (dropped_bits == middle_value) {
// Round to even for consistency with decimals: the half-way case rounds
// up if the significant part is odd and down otherwise.
if ((number & 1) != 0 || !zero_tail) {
number++; // Rounding up.
}
}
// Rounding up may cause overflow.
if ((number & ((int64_t)1 << 53)) != 0) {
exponent++;
number >>= 1;
}
break;
}
++current;
} while (current != end);
ASSERT(number < ((int64_t)1 << 53));
ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
if (exponent == 0) {
if (sign) {
if (number == 0) return -0.0;
number = -number;
}
return static_cast<double>(number);
}
return false;
ASSERT(number != 0);
// The double could be constructed faster from number (mantissa), exponent
// and sign. Assuming it's a rare case, simpler code is used.
return static_cast<double>(sign ? -number : number) * pow(2.0, exponent);
}
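The overflow branch above rounds the bits that no longer fit into the 53-bit significand half-to-even. A miniature of that rounding with a 4-bit significand instead of 53 (a sketch; the streaming zero_tail handling is omitted):

#include <cassert>
#include <cstdint>

// Round number down to kBits significant bits, ties to even; the shifted-out
// amount is returned through *exponent as a power of two.
static uint64_t RoundToBits(uint64_t number, int kBits, int* exponent) {
  int overflow_bits = 0;
  while ((number >> (kBits + overflow_bits)) != 0) overflow_bits++;
  *exponent = overflow_bits;
  if (overflow_bits == 0) return number;
  uint64_t dropped = number & ((uint64_t(1) << overflow_bits) - 1);
  number >>= overflow_bits;
  uint64_t middle = uint64_t(1) << (overflow_bits - 1);
  // Above the midpoint: round up. Exactly at it: round to an even significand.
  if (dropped > middle || (dropped == middle && (number & 1) != 0)) number++;
  if ((number >> kBits) != 0) { number >>= 1; (*exponent)++; }  // carry out
  return number;
}

int main() {
  int e;
  assert(RoundToBits(19, 4, &e) == 10 && e == 1);  // 19 -> 20: tie, 10 is even
  assert(RoundToBits(17, 4, &e) == 8 && e == 1);   // 17 -> 16: tie, 8 is even
  assert(RoundToBits(31, 4, &e) == 8 && e == 2);   // 31 -> 32: rounding carries
  return 0;
}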
template <class Iterator, class EndMark>
static double InternalHexadecimalStringToDouble(Iterator current,
EndMark end,
char* buffer,
bool allow_trailing_junk) {
ASSERT(current != end);
static double InternalStringToInt(Iterator current, EndMark end, int radix) {
const bool allow_trailing_junk = true;
const double empty_string_val = JUNK_STRING_VALUE;
const int max_hex_significant_digits = 52 / 4 + 2;
// We reuse the buffer of InternalStringToDouble. Since hexadecimal
// numbers may have far fewer digits than decimal, the buffer won't overflow.
ASSERT(max_hex_significant_digits < kMaxSignificantDigits);
if (!AdvanceToNonspace(&current, end)) return empty_string_val;
int significant_digits = 0;
int insignificant_digits = 0;
bool sign = false;
bool leading_zero = false;
// A double has a 53-bit significand (once the hidden bit has been added).
// Halfway cases thus have at most 54 bits. Therefore 54/4 + 1 digits are
// sufficient to represent halfway cases. By adding another digit we can keep
// track of dropped digits.
int buffer_pos = 0;
bool nonzero_digit_dropped = false;
// Skip leading 0s.
while (*current == '0') {
leading_zero = true;
if (*current == '+') {
// Ignore leading sign; skip following spaces.
++current;
if (current == end) return 0;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
} else if (*current == '-') {
++current;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
sign = true;
}
int begin_pos = buffer_pos;
while ((*current >= '0' && *current <= '9')
|| (*current >= 'a' && *current <= 'f')
|| (*current >= 'A' && *current <= 'F')) {
if (significant_digits <= max_hex_significant_digits) {
buffer[buffer_pos++] = static_cast<char>(*current);
significant_digits++;
if (radix == 0) {
// Radix detection.
if (*current == '0') {
++current;
if (current == end) return SignedZero(sign);
if (*current == 'x' || *current == 'X') {
radix = 16;
++current;
if (current == end) return JUNK_STRING_VALUE;
} else {
radix = 8;
leading_zero = true;
}
} else {
insignificant_digits++;
nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
radix = 10;
}
} else if (radix == 16) {
if (*current == '0') {
// Allow "0x" prefix.
++current;
if (current == end) return SignedZero(sign);
if (*current == 'x' || *current == 'X') {
++current;
if (current == end) return JUNK_STRING_VALUE;
} else {
leading_zero = true;
}
}
}
if (radix < 2 || radix > 36) return JUNK_STRING_VALUE;
// Skip leading zeros.
while (*current == '0') {
leading_zero = true;
++current;
if (current == end) break;
if (current == end) return SignedZero(sign);
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
if (!leading_zero && !isDigit(*current, radix)) {
return JUNK_STRING_VALUE;
}
if (significant_digits == 0) {
return leading_zero ? 0 : JUNK_STRING_VALUE;
if (IsPowerOf2(radix)) {
switch (radix) {
case 2:
return InternalStringToIntDouble<1>(
current, end, sign, allow_trailing_junk);
case 4:
return InternalStringToIntDouble<2>(
current, end, sign, allow_trailing_junk);
case 8:
return InternalStringToIntDouble<3>(
current, end, sign, allow_trailing_junk);
case 16:
return InternalStringToIntDouble<4>(
current, end, sign, allow_trailing_junk);
case 32:
return InternalStringToIntDouble<5>(
current, end, sign, allow_trailing_junk);
default:
UNREACHABLE();
}
}
if (nonzero_digit_dropped) {
ASSERT(insignificant_digits > 0);
insignificant_digits--;
buffer[buffer_pos++] = '1';
if (radix == 10) {
// Parsing with strtod.
const int kMaxSignificantDigits = 309; // Doubles are less than 1.8e308.
// The buffer may contain up to kMaxSignificantDigits + 1 digits and a zero
// end.
const int kBufferSize = kMaxSignificantDigits + 2;
char buffer[kBufferSize];
int buffer_pos = 0;
while (*current >= '0' && *current <= '9') {
if (buffer_pos <= kMaxSignificantDigits) {
// If the number has more than kMaxSignificantDigits it will be parsed
// as infinity.
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = static_cast<char>(*current);
}
++current;
if (current == end) break;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return JUNK_STRING_VALUE;
}
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = '\0';
return sign ? -gay_strtod(buffer, NULL) : gay_strtod(buffer, NULL);
}
buffer[buffer_pos] = '\0';
// The following code causes accumulating rounding error for numbers greater
// than ~2^56. It's explicitly allowed in the spec: "if R is not 2, 4, 8, 10,
// 16, or 32, then mathInt may be an implementation-dependent approximation to
// the mathematical integer value" (15.1.2.2).
int lim_0 = '0' + (radix < 10 ? radix : 10);
int lim_a = 'a' + (radix - 10);
int lim_A = 'A' + (radix - 10);
// NOTE: The code for computing the value may seem a bit complex at
// first glance. It is structured to use 32-bit multiply-and-add
// loops as long as possible to avoid losing precision.
double v = 0.0;
bool done = false;
do {
// Parse the longest part of the string starting at index j
// possible while keeping the multiplier, and thus the part
// itself, within 32 bits.
unsigned int part = 0, multiplier = 1;
while (true) {
int d;
if (*current >= '0' && *current < lim_0) {
d = *current - '0';
} else if (*current >= 'a' && *current < lim_a) {
d = *current - 'a' + 10;
} else if (*current >= 'A' && *current < lim_A) {
d = *current - 'A' + 10;
} else {
done = true;
break;
}
// Update the value of the part as long as the multiplier fits
// in 32 bits. When we can't guarantee that the next iteration
// will not overflow the multiplier, we stop parsing the part
// by leaving the loop.
const unsigned int kMaximumMultiplier = 0xffffffffU / 36;
uint32_t m = multiplier * radix;
if (m > kMaximumMultiplier) break;
part = part * radix + d;
multiplier = m;
ASSERT(multiplier > part);
++current;
if (current == end) {
done = true;
break;
}
}
double result;
StringToInt(buffer, begin_pos, 16, &result);
if (insignificant_digits > 0) {
// Multiplying by a power of 2 doesn't cause a loss of precision.
result *= pow(16.0, insignificant_digits);
// Update the value and skip the part in the string.
v = v * multiplier + part;
} while (!done);
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
return JUNK_STRING_VALUE;
}
return result;
return sign ? -v : v;
}
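The loop above is the 32-bit multiply-and-add scheme the NOTE describes: digits accumulate in a 32-bit part/multiplier pair and are folded into the double only when one more digit could overflow 32 bits, so most digits cost integer arithmetic rather than double operations. A stripped-down decimal-only sketch of the same idea:

#include <cstdint>

// Accumulate up to 8 decimal digits in 32-bit integers per double
// multiply-add (multiplier stays <= 10^8 under the overflow guard).
static double ParseDecimal(const char* p, const char* end) {
  double v = 0.0;
  while (p != end) {
    uint32_t part = 0, multiplier = 1;
    const uint32_t kMaxMultiplier = 0xffffffffU / 10;
    while (p != end && *p >= '0' && *p <= '9') {
      uint32_t m = multiplier * 10;
      if (m > kMaxMultiplier) break;  // one more digit might not fit 32 bits
      part = part * 10 + static_cast<uint32_t>(*p - '0');
      multiplier = m;
      ++p;
    }
    if (multiplier == 1) break;  // no digit consumed: non-digit reached
    v = v * multiplier + part;   // fold the 32-bit chunk into the double
  }
  return v;
}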
@@ -377,8 +453,9 @@ static double InternalStringToDouble(Iterator current,
int significant_digits = 0;
int insignificant_digits = 0;
bool nonzero_digit_dropped = false;
bool fractional_part = false;
double signed_zero = 0.0;
bool sign = false;
if (*current == '+') {
// Ignore leading sign; skip following spaces.
@@ -388,7 +465,7 @@ static double InternalStringToDouble(Iterator current,
buffer[buffer_pos++] = '-';
++current;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
signed_zero = -0.0;
sign = true;
}
static const char kInfinitySymbol[] = "Infinity";
@@ -408,26 +485,28 @@ static double InternalStringToDouble(Iterator current,
bool leading_zero = false;
if (*current == '0') {
++current;
if (current == end) return signed_zero;
if (current == end) return SignedZero(sign);
leading_zero = true;
// It could be hexadecimal value.
if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
++current;
if (current == end) return JUNK_STRING_VALUE; // "0x".
if (current == end || !isDigit(*current, 16)) {
return JUNK_STRING_VALUE; // "0x".
}
double result = InternalHexadecimalStringToDouble(current,
end,
buffer + buffer_pos,
allow_trailing_junk);
return (buffer_pos > 0 && buffer[0] == '-') ? -result : result;
bool sign = (buffer_pos > 0 && buffer[0] == '-');
return InternalStringToIntDouble<4>(current,
end,
sign,
allow_trailing_junk);
}
// Ignore leading zeros in the integer part.
while (*current == '0') {
++current;
if (current == end) return signed_zero;
if (current == end) return SignedZero(sign);
}
}
@@ -454,8 +533,6 @@ static double InternalStringToDouble(Iterator current,
}
if (*current == '.') {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = '.';
++current;
if (current == end) {
if (significant_digits == 0 && !leading_zero) {
@@ -471,11 +548,15 @@ static double InternalStringToDouble(Iterator current,
// leading zeros (if any).
while (*current == '0') {
++current;
if (current == end) return signed_zero;
if (current == end) return SignedZero(sign);
exponent--; // Move this 0 into the exponent.
}
}
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = '.';
fractional_part = true;
// There is the fractional part.
while (*current >= '0' && *current <= '9') {
if (significant_digits < kMaxSignificantDigits) {
@@ -557,22 +638,13 @@ static double InternalStringToDouble(Iterator current,
exponent += insignificant_digits;
if (octal) {
buffer[buffer_pos] = '\0';
// ALLOW_OCTALS is set and there is no '8' or '9' in insignificant
// digits. Check significant digits now.
char sign = '+';
const char* s = buffer;
if (*s == '-' || *s == '+') sign = *s++;
bool sign = buffer[0] == '-';
int start_pos = (sign ? 1 : 0);
double result;
s += StringToInt(s, 0, 8, &result);
if (!allow_trailing_junk && *s != '\0') return JUNK_STRING_VALUE;
if (sign == '-') result = -result;
if (insignificant_digits > 0) {
result *= pow(8.0, insignificant_digits);
}
return result;
return InternalStringToIntDouble<3>(buffer + start_pos,
buffer + buffer_pos,
sign,
allow_trailing_junk);
}
if (nonzero_digit_dropped) {
@@ -580,6 +652,11 @@ static double InternalStringToDouble(Iterator current,
buffer[buffer_pos++] = '1';
}
// If the number has no more than kMaxDigitsInInt digits and doesn't have a
// fractional part, it can be parsed faster (without checks for
// spaces, overflow, etc.).
const int kMaxDigitsInInt = 9 * sizeof(int) / 4; // NOLINT
if (exponent != 0) {
ASSERT(buffer_pos < kBufferSize);
buffer[buffer_pos++] = 'e';
@@ -597,6 +674,16 @@ static double InternalStringToDouble(Iterator current,
}
ASSERT(exponent == 0);
buffer_pos += exp_digits;
} else if (!fractional_part && significant_digits <= kMaxDigitsInInt) {
if (significant_digits == 0) return SignedZero(sign);
ASSERT(buffer_pos > 0);
int num = 0;
int start_pos = (buffer[0] == '-' ? 1 : 0);
for (int i = start_pos; i < buffer_pos; i++) {
ASSERT(buffer[i] >= '0' && buffer[i] <= '9');
num = 10 * num + (buffer[i] - '0');
}
return static_cast<double>(start_pos == 0 ? num : -num);
}
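The branch above is the new integer fast path: kMaxDigitsInInt works out to 9 for a 32-bit int (9 * sizeof(int) / 4), and any 9-digit decimal value (at most 999999999) fits an int exactly, so the buffer can be folded with plain integer arithmetic instead of going through gay_strtod. Its core, pulled out as a sketch:

// Sketch: fold at most 9 decimal digits; 999999999 < 2^31 - 1, so no
// overflow checks are needed. The caller guarantees digits only.
static double FastPathToDouble(const char* digits, int length, bool negative) {
  int num = 0;
  for (int i = 0; i < length; i++) {
    num = 10 * num + (digits[i] - '0');
  }
  return static_cast<double>(negative ? -num : num);
}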
ASSERT(buffer_pos < kBufferSize);
@@ -625,6 +712,25 @@ double StringToDouble(String* str, int flags, double empty_string_val) {
}
double StringToInt(String* str, int radix) {
StringShape shape(str);
if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
return InternalStringToInt(begin, end, radix);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
return InternalStringToInt(begin, end, radix);
} else {
StringInputBuffer buffer(str);
return InternalStringToInt(StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(),
radix);
}
}
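The dispatch above instantiates one template, InternalStringToInt, for three iterator types: char* for flat ASCII strings, uc16* for flat two-byte strings, and the buffered StringInputBufferIterator for everything else, so the common flat cases pay no per-character abstraction cost. The shape of the pattern, reduced to a sketch with hypothetical names:

#include <cstddef>

// One parsing routine, specialized per string representation.
// Assumes the range contains valid digits for the radix.
template <class Iterator, class EndMark>
static double ParseDigits(Iterator current, EndMark end, int radix) {
  double v = 0.0;
  for (; current != end; ++current) v = v * radix + (*current - '0');
  return v;
}

double FromAscii(const char* chars, size_t n, int radix) {
  return ParseDigits(chars, chars + n, radix);  // plain pointer iterator
}

double FromTwoByte(const unsigned short* chars, size_t n, int radix) {
  return ParseDigits(chars, chars + n, radix);  // same code, wider chars
}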
double StringToDouble(const char* str, int flags, double empty_string_val) {
const char* end = str + StrLength(str);

3
deps/v8/src/conversions.h

@@ -100,8 +100,7 @@ double StringToDouble(const char* str, int flags, double empty_string_val = 0);
double StringToDouble(String* str, int flags, double empty_string_val = 0);
// Converts a string into an integer.
int StringToInt(String* str, int index, int radix, double* value);
int StringToInt(const char* str, int index, int radix, double* value);
double StringToInt(String* str, int radix);
// Converts a double to a string value according to ECMA-262 9.8.1.
// The buffer should be large enough for any floating point number.

54
deps/v8/src/cpu-profiler-inl.h

@@ -28,23 +28,71 @@
#ifndef V8_CPU_PROFILER_INL_H_
#define V8_CPU_PROFILER_INL_H_
#include "cpu-profiler.h"
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "cpu-profiler.h"
namespace v8 {
namespace internal {
void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->AddCode(start, entry, size);
}
void CodeMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->MoveCode(from, to);
}
void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->DeleteCode(start);
}
void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->AddAlias(alias, start);
}
TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
TickSampleEventRecord* result =
reinterpret_cast<TickSampleEventRecord*>(value);
result->filler = 1;
ASSERT(result->filler != SamplingCircularQueue::kClear);
// Init the required fields only.
result->sample.pc = NULL;
result->sample.frames_count = 0;
return result;
}
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
TickSampleEventRecord* evt =
reinterpret_cast<TickSampleEventRecord*>(ticks_buffer_.Enqueue());
TickSampleEventRecord::init(ticks_buffer_.Enqueue());
evt->order = enqueue_order_; // No increment!
return &evt->sample;
}
bool ProfilerEventsProcessor::FilterOutCodeCreateEvent(
Logger::LogEventsAndTags tag) {
// In browser mode, leave only callbacks and non-native JS entries.
// We filter out regular expressions as currently we can't tell
// whether they originate from native scripts, so let's not confuse people by
// showing them weird regexes they didn't write.
return FLAG_prof_browser_mode
&& (tag != Logger::CALLBACK_TAG
&& tag != Logger::FUNCTION_TAG
&& tag != Logger::LAZY_COMPILE_TAG
&& tag != Logger::SCRIPT_TAG);
}
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // V8_CPU_PROFILER_INL_H_
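TickSampleEventRecord::init above stamps a nonzero filler into the record's first machine word so that a partially written sample can never look like a cleared queue cell, and it initializes only the fields the consumer checks, keeping the sampling path cheap. The invariant, reduced to a sketch (kClear's value of 0 is an assumption here, mirroring the ASSERT above):

// Sketch: the first word of a live record must differ from the queue's
// cleared-cell marker so the consumer can tell live cells from empty ones.
static const int kClear = 0;  // assumed cleared-cell marker

struct Record {
  int filler;      // first word: never kClear after Init()
  unsigned order;  // may wrap around to any value, including kClear
};

static Record* Init(void* cell) {
  Record* r = reinterpret_cast<Record*>(cell);
  r->filler = 1;   // 1 != kClear, so the cell now reads as live
  return r;
}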

297
deps/v8/src/cpu-profiler.cc

@@ -29,10 +29,15 @@
#include "cpu-profiler-inl.h"
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#include "log-inl.h"
#include "../include/v8-profiler.h"
namespace v8 {
namespace internal {
static const int kEventsBufferSize = 256*KB;
static const int kTickSamplesBufferChunkSize = 64*KB;
static const int kTickSamplesBufferChunksCount = 16;
@@ -48,12 +53,29 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
enqueue_order_(0) { }
void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix,
String* name,
Address start) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->type = CodeEventRecord::CODE_CREATION;
rec->order = ++enqueue_order_;
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, prefix, name);
rec->size = 1;
events_buffer_.Enqueue(evt_rec);
}
void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
String* name,
String* resource_name,
int line_number,
Address start,
unsigned size) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->type = CodeEventRecord::CODE_CREATION;
@@ -69,6 +91,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
const char* name,
Address start,
unsigned size) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->type = CodeEventRecord::CODE_CREATION;
@@ -84,6 +107,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
int args_count,
Address start,
unsigned size) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->type = CodeEventRecord::CODE_CREATION;
@@ -138,6 +162,24 @@ void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
}
void ProfilerEventsProcessor::RegExpCodeCreateEvent(
Logger::LogEventsAndTags tag,
const char* prefix,
String* name,
Address start,
unsigned size) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->type = CodeEventRecord::CODE_CREATION;
rec->order = ++enqueue_order_;
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, prefix, name);
rec->size = size;
events_buffer_.Enqueue(evt_rec);
}
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
if (!events_buffer_.IsEmpty()) {
CodeEventsContainer record;
@@ -163,7 +205,7 @@ bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
while (true) {
const TickSampleEventRecord* rec =
reinterpret_cast<TickSampleEventRecord*>(ticks_buffer_.StartDequeue());
TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
if (rec == NULL) return false;
if (rec->order == dequeue_order) {
generator_->RecordTickSample(rec->sample);
@@ -196,4 +238,255 @@ void ProfilerEventsProcessor::Run() {
}
CpuProfiler* CpuProfiler::singleton_ = NULL;
void CpuProfiler::StartProfiling(const char* title) {
ASSERT(singleton_ != NULL);
singleton_->StartCollectingProfile(title);
}
void CpuProfiler::StartProfiling(String* title) {
ASSERT(singleton_ != NULL);
singleton_->StartCollectingProfile(title);
}
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
ASSERT(singleton_ != NULL);
return singleton_->StopCollectingProfile(title);
}
CpuProfile* CpuProfiler::StopProfiling(String* title) {
ASSERT(singleton_ != NULL);
return singleton_->StopCollectingProfile(title);
}
int CpuProfiler::GetProfilesCount() {
ASSERT(singleton_ != NULL);
return singleton_->profiles_->profiles()->length();
}
CpuProfile* CpuProfiler::GetProfile(int index) {
ASSERT(singleton_ != NULL);
return singleton_->profiles_->profiles()->at(index);
}
CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
ASSERT(singleton_ != NULL);
return singleton_->profiles_->GetProfile(uid);
}
TickSample* CpuProfiler::TickSampleEvent() {
if (CpuProfiler::is_profiling()) {
return singleton_->processor_->TickSampleEvent();
} else {
return NULL;
}
}
void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
singleton_->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, const char* comment) {
singleton_->processor_->CodeCreateEvent(
tag, comment, code->address(), code->ExecutableSize());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name) {
singleton_->processor_->CodeCreateEvent(
tag,
name,
Heap::empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name,
String* source, int line) {
singleton_->processor_->CodeCreateEvent(
tag,
name,
source,
line,
code->address(),
code->ExecutableSize());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count) {
singleton_->processor_->CodeCreateEvent(
tag,
args_count,
code->address(),
code->ExecutableSize());
}
void CpuProfiler::CodeMoveEvent(Address from, Address to) {
singleton_->processor_->CodeMoveEvent(from, to);
}
void CpuProfiler::CodeDeleteEvent(Address from) {
singleton_->processor_->CodeDeleteEvent(from);
}
void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
singleton_->processor_->FunctionCreateEvent(
function->address(), function->code()->address());
}
void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
singleton_->processor_->FunctionMoveEvent(from, to);
}
void CpuProfiler::FunctionDeleteEvent(Address from) {
singleton_->processor_->FunctionDeleteEvent(from);
}
void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
singleton_->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "get ", name, entry_point);
}
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
singleton_->processor_->RegExpCodeCreateEvent(
Logger::REG_EXP_TAG,
"RegExp: ",
source,
code->address(),
code->ExecutableSize());
}
void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
singleton_->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "set ", name, entry_point);
}
CpuProfiler::CpuProfiler()
: profiles_(new CpuProfilesCollection()),
next_profile_uid_(1),
generator_(NULL),
processor_(NULL) {
}
CpuProfiler::~CpuProfiler() {
delete profiles_;
}
void CpuProfiler::StartCollectingProfile(const char* title) {
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
}
}
void CpuProfiler::StartCollectingProfile(String* title) {
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
}
}
void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
// Disable logging when using the new implementation.
saved_logging_nesting_ = Logger::logging_nesting_;
Logger::logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
processor_->Start();
// Enumerate stuff we already have in the heap.
if (Heap::HasBeenSetup()) {
Logger::LogCodeObjects();
Logger::LogCompiledFunctions();
Logger::LogFunctionObjects();
Logger::LogAccessorCallbacks();
}
// Enable stack sampling.
reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
}
}
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
StopProcessorIfLastProfile();
CpuProfile* result = profiles_->StopProfiling(title);
if (result != NULL) {
result->Print();
}
return result;
}
CpuProfile* CpuProfiler::StopCollectingProfile(String* title) {
StopProcessorIfLastProfile();
return profiles_->StopProfiling(title);
}
void CpuProfiler::StopProcessorIfLastProfile() {
if (profiles_->is_last_profile()) {
reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
processor_->Stop();
processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
generator_ = NULL;
Logger::logging_nesting_ = saved_logging_nesting_;
}
}
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR
namespace v8 {
namespace internal {
void CpuProfiler::Setup() {
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
if (singleton_ == NULL) {
singleton_ = new CpuProfiler();
}
#endif
}
void CpuProfiler::TearDown() {
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
if (singleton_ != NULL) {
delete singleton_;
}
singleton_ = NULL;
#endif
}
} } // namespace v8::internal
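The start/stop pairs above are effectively reference-counted through the profiles collection: StartCollectingProfile spins up the generator, the processor thread, and the tick sampler only for the first concurrent profile, and StopProcessorIfLastProfile tears them down with the last one. A minimal embedder-side sketch using the static API from this file (RunWorkload is hypothetical):

namespace i = v8::internal;

void MeasureWorkload() {
  i::CpuProfiler::StartProfiling("workload");  // first profile starts the sampler
  RunWorkload();                               // hypothetical workload
  i::CpuProfile* profile = i::CpuProfiler::StopProfiling("workload");
  if (profile != NULL) {
    // Inspect the result; profiles also stay reachable via
    // GetProfile()/FindProfile() on the collection.
  }
}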

152
deps/v8/src/cpu-profiler.h

@@ -28,12 +28,20 @@
#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#include "circular-queue.h"
#include "profile-generator.h"
namespace v8 {
namespace internal {
// Forward declarations.
class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class ProfileGenerator;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
@@ -63,9 +71,7 @@ class CodeCreateEventRecord : public CodeEventRecord {
CodeEntry* entry;
unsigned size;
INLINE(void UpdateCodeMap(CodeMap* code_map)) {
code_map->AddCode(start, entry, size);
}
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -74,9 +80,7 @@ class CodeMoveEventRecord : public CodeEventRecord {
Address from;
Address to;
INLINE(void UpdateCodeMap(CodeMap* code_map)) {
code_map->MoveCode(from, to);
}
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -84,9 +88,7 @@ class CodeDeleteEventRecord : public CodeEventRecord {
public:
Address start;
INLINE(void UpdateCodeMap(CodeMap* code_map)) {
code_map->DeleteCode(start);
}
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -95,33 +97,29 @@ class CodeAliasEventRecord : public CodeEventRecord {
Address alias;
Address start;
INLINE(void UpdateCodeMap(CodeMap* code_map)) {
code_map->AddAlias(alias, start);
}
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
class TickSampleEventRecord {
class TickSampleEventRecord BASE_EMBEDDED {
public:
// In memory, the first machine word of a TickSampleEventRecord will be the
// first entry of TickSample, that is -- a program counter field.
// TickSample is put first, because 'order' can become equal to
// SamplingCircularQueue::kClear, while program counter can't.
TickSample sample;
// The first machine word of a TickSampleEventRecord must not ever
// become equal to SamplingCircularQueue::kClear. As both order and
// TickSample's first field are not reliable in this sense (order
// can overflow, TickSample can have all fields reset), we are
// forced to use an artificial filler field.
int filler;
unsigned order;
TickSample sample;
#if defined(__GNUC__) && (__GNUC__ < 4)
// Added to avoid 'all member functions in class are private' warning.
INLINE(unsigned get_order() const) { return order; }
// Added to avoid 'class only defines private constructors and
// has no friends' warning.
friend class TickSampleEventRecordFriend;
#endif
private:
// Disable instantiation.
TickSampleEventRecord();
static TickSampleEventRecord* cast(void* value) {
return reinterpret_cast<TickSampleEventRecord*>(value);
}
DISALLOW_COPY_AND_ASSIGN(TickSampleEventRecord);
INLINE(static TickSampleEventRecord* init(void* value));
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TickSampleEventRecord);
};
@@ -138,6 +136,9 @@ class ProfilerEventsProcessor : public Thread {
INLINE(bool running()) { return running_; }
// Events adding methods. Called by VM threads.
void CallbackCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
String* name,
String* resource_name, int line_number,
@@ -153,6 +154,9 @@ class ProfilerEventsProcessor : public Thread {
void FunctionCreateEvent(Address alias, Address start);
void FunctionMoveEvent(Address from, Address to);
void FunctionDeleteEvent(Address from);
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start, unsigned size);
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@@ -172,6 +176,8 @@ class ProfilerEventsProcessor : public Thread {
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
ProfileGenerator* generator_;
bool running_;
CircularQueue<CodeEventsContainer> events_buffer_;
@@ -179,7 +185,93 @@ class ProfilerEventsProcessor : public Thread {
unsigned enqueue_order_;
};
} } // namespace v8::internal
#define PROFILE(Call) \
LOG(Call); \
do { \
if (v8::internal::CpuProfiler::is_profiling()) { \
v8::internal::CpuProfiler::Call; \
} \
} while (false)
#else
#define PROFILE(Call) LOG(Call)
#endif // ENABLE_CPP_PROFILES_PROCESSOR
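So with the processor compiled in, PROFILE first logs and then, only while a profile is actually being collected, forwards the same event to CpuProfiler; without it, PROFILE degenerates to plain LOG. The call site changed in code-stubs.cc above therefore expands to roughly:

// Expansion sketch of PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()))
// under ENABLE_CPP_PROFILES_PROCESSOR:
LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
do {
  if (v8::internal::CpuProfiler::is_profiling()) {
    v8::internal::CpuProfiler::CodeCreateEvent(Logger::STUB_TAG,
                                               code, GetName());
  }
} while (false);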
namespace v8 {
namespace internal {
class CpuProfiler {
public:
static void Setup();
static void TearDown();
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
static void StartProfiling(const char* title);
static void StartProfiling(String* title);
static CpuProfile* StopProfiling(const char* title);
static CpuProfile* StopProfiling(String* title);
static int GetProfilesCount();
static CpuProfile* GetProfile(int index);
static CpuProfile* FindProfile(unsigned uid);
// Invoked from stack sampler (thread or signal handler).
static TickSample* TickSampleEvent();
// Must be called via the PROFILE macro, otherwise it will crash when
// profiling is not enabled.
static void CallbackEvent(String* name, Address entry_point);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, const char* comment);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name,
String* source, int line);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
static void CodeMoveEvent(Address from, Address to);
static void CodeDeleteEvent(Address from);
static void FunctionCreateEvent(JSFunction* function);
static void FunctionMoveEvent(Address from, Address to);
static void FunctionDeleteEvent(Address from);
static void GetterCallbackEvent(String* name, Address entry_point);
static void RegExpCodeCreateEvent(Code* code, String* source);
static void SetterCallbackEvent(String* name, Address entry_point);
static INLINE(bool is_profiling()) {
return singleton_ != NULL && singleton_->processor_ != NULL;
}
private:
CpuProfiler();
~CpuProfiler();
void StartCollectingProfile(const char* title);
void StartCollectingProfile(String* title);
void StartProcessorIfNotStarted();
CpuProfile* StopCollectingProfile(const char* title);
CpuProfile* StopCollectingProfile(String* title);
void StopProcessorIfLastProfile();
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
static CpuProfiler* singleton_;
#else
static INLINE(bool is_profiling()) { return false; }
#endif // ENABLE_CPP_PROFILES_PROCESSOR
private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};
} } // namespace v8::internal
#endif // V8_CPU_PROFILER_H_

18
deps/v8/src/d8-posix.cc

@@ -663,10 +663,28 @@ Handle<Value> Shell::SetEnvironment(const Arguments& args) {
}
Handle<Value> Shell::UnsetEnvironment(const Arguments& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
return ThrowException(String::New(message));
}
String::Utf8Value var(args[0]);
if (*var == NULL) {
const char* message =
"os.setenv(): String conversion of variable name failed.";
return ThrowException(String::New(message));
}
unsetenv(*var);
return v8::Undefined();
}
void Shell::AddOSMethods(Handle<ObjectTemplate> os_templ) {
os_templ->Set(String::New("system"), FunctionTemplate::New(System));
os_templ->Set(String::New("chdir"), FunctionTemplate::New(ChangeDirectory));
os_templ->Set(String::New("setenv"), FunctionTemplate::New(SetEnvironment));
os_templ->Set(String::New("unsetenv"),
FunctionTemplate::New(UnsetEnvironment));
os_templ->Set(String::New("umask"), FunctionTemplate::New(SetUMask));
os_templ->Set(String::New("mkdirp"), FunctionTemplate::New(MakeDirectory));
os_templ->Set(String::New("rmdir"), FunctionTemplate::New(RemoveDirectory));

1
deps/v8/src/d8.h

@@ -175,6 +175,7 @@ class Shell: public i::AllStatic {
static Handle<Value> System(const Arguments& args);
static Handle<Value> ChangeDirectory(const Arguments& args);
static Handle<Value> SetEnvironment(const Arguments& args);
static Handle<Value> UnsetEnvironment(const Arguments& args);
static Handle<Value> SetUMask(const Arguments& args);
static Handle<Value> MakeDirectory(const Arguments& args);
static Handle<Value> RemoveDirectory(const Arguments& args);

864
deps/v8/src/data-flow.cc

@@ -28,7 +28,6 @@
#include "v8.h"
#include "data-flow.h"
#include "flow-graph.h"
#include "scopes.h"
namespace v8 {
@@ -621,21 +620,34 @@ void AssignedVariablesAnalyzer::VisitCatchExtensionObject(
void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) {
ASSERT(av_.IsEmpty());
if (expr->target()->AsProperty() != NULL) {
// Visit receiver and key of property store and rhs.
Visit(expr->target()->AsProperty()->obj());
ProcessExpression(expr->target()->AsProperty()->key());
ProcessExpression(expr->value());
// There are three kinds of assignments: variable assignments, property
// assignments, and reference errors (invalid left-hand sides).
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
MarkIfTrivial(expr->value());
Visit(expr->value());
if (expr->is_compound()) {
// Left-hand side occurs also as an rvalue.
MarkIfTrivial(expr->target());
ProcessExpression(expr->target());
}
RecordAssignedVar(var);
} else if (prop != NULL) {
MarkIfTrivial(expr->value());
Visit(expr->value());
if (!prop->key()->IsPropertyName()) {
MarkIfTrivial(prop->key());
ProcessExpression(prop->key());
}
MarkIfTrivial(prop->obj());
ProcessExpression(prop->obj());
// If we have a variable as a receiver in a property store, check if
// we can mark it as trivial.
MarkIfTrivial(expr->target()->AsProperty()->obj());
} else {
Visit(expr->target());
ProcessExpression(expr->value());
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
if (var != NULL) RecordAssignedVar(var);
}
}
@@ -648,12 +660,12 @@ void AssignedVariablesAnalyzer::VisitThrow(Throw* expr) {
void AssignedVariablesAnalyzer::VisitProperty(Property* expr) {
ASSERT(av_.IsEmpty());
Visit(expr->obj());
ProcessExpression(expr->key());
// In case we have a variable as a receiver, check if we can mark
// it as trivial.
if (!expr->key()->IsPropertyName()) {
MarkIfTrivial(expr->key());
Visit(expr->key());
}
MarkIfTrivial(expr->obj());
ProcessExpression(expr->obj());
}
@@ -713,25 +725,19 @@ void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
ASSERT(av_.IsEmpty());
Visit(expr->left());
ProcessExpression(expr->right());
// In case we have a variable on the left side, check if we can mark
// it as trivial.
MarkIfTrivial(expr->right());
Visit(expr->right());
MarkIfTrivial(expr->left());
ProcessExpression(expr->left());
}
void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
ASSERT(av_.IsEmpty());
Visit(expr->left());
ProcessExpression(expr->right());
// In case we have a variable on the left side, check if we can mark
// it as trivial.
MarkIfTrivial(expr->right());
Visit(expr->right());
MarkIfTrivial(expr->left());
ProcessExpression(expr->left());
}
@@ -746,802 +752,4 @@ void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) {
}
int ReachingDefinitions::IndexFor(Variable* var, int variable_count) {
// Parameters are numbered left-to-right from the beginning of the bit
// set. Stack-allocated locals are allocated right-to-left from the end.
ASSERT(var != NULL && var->IsStackAllocated());
Slot* slot = var->slot();
if (slot->type() == Slot::PARAMETER) {
return slot->index();
} else {
return (variable_count - 1) - slot->index();
}
}
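For example, with variable_count == 5 and two parameters: parameter slots 0 and 1 map to bit indices 0 and 1 from the front, while stack-local slots 0, 1, 2 map to indices 4, 3, 2 from the back, so the two ranges can never collide.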
void Node::InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark) {
ASSERT(!IsMarkedWith(mark));
rd_.Initialize(definition_count);
MarkWith(mark);
worklist->Insert(this);
}
void BlockNode::InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark) {
ASSERT(!IsMarkedWith(mark));
int instruction_count = instructions_.length();
int variable_count = variables->length();
rd_.Initialize(definition_count);
// The RD_in set for the entry node has a definition for each parameter
// and local.
if (predecessor_ == NULL) {
for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i);
}
for (int i = 0; i < instruction_count; i++) {
Expression* expr = instructions_[i]->AsExpression();
if (expr == NULL) continue;
Variable* var = expr->AssignedVariable();
if (var == NULL || !var->IsStackAllocated()) continue;
// All definitions of this variable are killed.
BitVector* def_set =
variables->at(ReachingDefinitions::IndexFor(var, variable_count));
rd_.kill()->Union(*def_set);
// All previously generated definitions are not generated.
rd_.gen()->Subtract(*def_set);
// This one is generated.
rd_.gen()->Add(expr->num());
}
// Add all blocks except the entry node to the worklist.
if (predecessor_ != NULL) {
MarkWith(mark);
worklist->Insert(this);
}
}
void ExitNode::ComputeRDOut(BitVector* result) {
// Should not be the predecessor of any node.
UNREACHABLE();
}
void BlockNode::ComputeRDOut(BitVector* result) {
// All definitions reaching this block ...
*result = *rd_.rd_in();
// ... except those killed by the block ...
result->Subtract(*rd_.kill());
// ... but including those generated by the block.
result->Union(*rd_.gen());
}
void BranchNode::ComputeRDOut(BitVector* result) {
// Branch nodes don't kill or generate definitions.
*result = *rd_.rd_in();
}
void JoinNode::ComputeRDOut(BitVector* result) {
// Join nodes don't kill or generate definitions.
*result = *rd_.rd_in();
}
void ExitNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
// The exit node has no successors so we can just update in place. New
// RD_in is the union over all predecessors.
int definition_count = rd_.rd_in()->length();
rd_.rd_in()->Clear();
BitVector temp(definition_count);
for (int i = 0, len = predecessors_.length(); i < len; i++) {
// Because ComputeRDOut always overwrites temp and its value is
// always read out before calling ComputeRDOut again, we do not
// have to clear it on each iteration of the loop.
predecessors_[i]->ComputeRDOut(&temp);
rd_.rd_in()->Union(temp);
}
}
void BlockNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
// The entry block has no predecessor. Its RD_in does not change.
if (predecessor_ == NULL) return;
BitVector new_rd_in(rd_.rd_in()->length());
predecessor_->ComputeRDOut(&new_rd_in);
if (rd_.rd_in()->Equals(new_rd_in)) return;
// Update RD_in.
*rd_.rd_in() = new_rd_in;
// Add the successor to the worklist if not already present.
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
worklist->Insert(successor_);
}
}
void BranchNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
BitVector new_rd_in(rd_.rd_in()->length());
predecessor_->ComputeRDOut(&new_rd_in);
if (rd_.rd_in()->Equals(new_rd_in)) return;
// Update RD_in.
*rd_.rd_in() = new_rd_in;
// Add the successors to the worklist if not already present.
if (!successor0_->IsMarkedWith(mark)) {
successor0_->MarkWith(mark);
worklist->Insert(successor0_);
}
if (!successor1_->IsMarkedWith(mark)) {
successor1_->MarkWith(mark);
worklist->Insert(successor1_);
}
}
void JoinNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
int definition_count = rd_.rd_in()->length();
BitVector new_rd_in(definition_count);
// New RD_in is the union over all predecessors.
BitVector temp(definition_count);
for (int i = 0, len = predecessors_.length(); i < len; i++) {
predecessors_[i]->ComputeRDOut(&temp);
new_rd_in.Union(temp);
}
if (rd_.rd_in()->Equals(new_rd_in)) return;
// Update RD_in.
*rd_.rd_in() = new_rd_in;
// Add the successor to the worklist if not already present.
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
worklist->Insert(successor_);
}
}
void Node::PropagateReachingDefinitions(List<BitVector*>* variables) {
// Nothing to do.
}
void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) {
// Propagate RD_in from the start of the block to all the variable
// references.
int variable_count = variables->length();
BitVector rd = *rd_.rd_in();
for (int i = 0, len = instructions_.length(); i < len; i++) {
Expression* expr = instructions_[i]->AsExpression();
if (expr == NULL) continue;
// Look for a variable reference to record its reaching definitions.
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy == NULL) {
// Not a VariableProxy? Maybe it's a count operation.
CountOperation* count_operation = expr->AsCountOperation();
if (count_operation != NULL) {
proxy = count_operation->expression()->AsVariableProxy();
}
}
if (proxy == NULL) {
// OK, maybe it's a compound assignment.
Assignment* assignment = expr->AsAssignment();
if (assignment != NULL && assignment->is_compound()) {
proxy = assignment->target()->AsVariableProxy();
}
}
if (proxy != NULL &&
proxy->var()->IsStackAllocated() &&
!proxy->var()->is_this()) {
// All definitions for this variable.
BitVector* definitions =
variables->at(ReachingDefinitions::IndexFor(proxy->var(),
variable_count));
BitVector* reaching_definitions = new BitVector(*definitions);
// Intersected with all definitions (of any variable) reaching this
// instruction.
reaching_definitions->Intersect(rd);
proxy->set_reaching_definitions(reaching_definitions);
}
// It may instead (or also) be a definition. If so update the running
// value of reaching definitions for the block.
Variable* var = expr->AssignedVariable();
if (var == NULL || !var->IsStackAllocated()) continue;
// All definitions of this variable are killed.
BitVector* def_set =
variables->at(ReachingDefinitions::IndexFor(var, variable_count));
rd.Subtract(*def_set);
// This definition is generated.
rd.Add(expr->num());
}
}
void ReachingDefinitions::Compute() {
// The definitions in the body plus an implicit definition for each
// variable at function entry.
int definition_count = body_definitions_->length() + variable_count_;
int node_count = postorder_->length();
// Step 1: For each stack-allocated variable, identify the set of all its
// definitions.
List<BitVector*> variables;
for (int i = 0; i < variable_count_; i++) {
// Add the initial definition for each variable.
BitVector* initial = new BitVector(definition_count);
initial->Add(i);
variables.Add(initial);
}
for (int i = 0, len = body_definitions_->length(); i < len; i++) {
// Account for each definition in the body as a definition of the
// defined variable.
Variable* var = body_definitions_->at(i)->AssignedVariable();
variables[IndexFor(var, variable_count_)]->Add(i + variable_count_);
}
// Step 2: Compute KILL and GEN for each block node, initialize RD_in for
// all nodes, and mark and add all nodes to the worklist in reverse
// postorder. All nodes should currently have the same mark.
bool mark = postorder_->at(0)->IsMarkedWith(false); // Negation of current.
WorkList<Node> worklist(node_count);
for (int i = node_count - 1; i >= 0; i--) {
postorder_->at(i)->InitializeReachingDefinitions(definition_count,
&variables,
&worklist,
mark);
}
// Step 3: Until the worklist is empty, remove an item, compute and update
// its rd_in based on its predecessors' rd_out. If rd_in has changed, add
// all necessary successors to the worklist.
while (!worklist.is_empty()) {
Node* node = worklist.Remove();
node->MarkWith(!mark);
node->UpdateRDIn(&worklist, mark);
}
// Step 4: Based on RD_in for block nodes, propagate reaching definitions
// to all variable uses in the block.
for (int i = 0; i < node_count; i++) {
postorder_->at(i)->PropagateReachingDefinitions(&variables);
}
}
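Concretely, steps 2 and 3 iterate the classic transfer function RD_out(B) = (RD_in(B) - KILL(B)) ∪ GEN(B) to a fixed point. A hand-worked example with two variables x and y (implicit entry definitions d0 and d1) and an entry block B0 that assigns x (definition d2): KILL(B0) = {d0, d2} (every definition of x), GEN(B0) = {d2}, RD_in(B0) = {d0, d1}, so RD_out(B0) = ({d0, d1} - {d0, d2}) ∪ {d2} = {d1, d2}, and a use of x in B0's successor is reached only by d2.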
bool TypeAnalyzer::IsPrimitiveDef(int def_num) {
if (def_num < param_count_) return false;
if (def_num < variable_count_) return true;
return body_definitions_->at(def_num - variable_count_)->IsPrimitive();
}
void TypeAnalyzer::Compute() {
bool changed;
int count = 0;
do {
changed = false;
if (FLAG_print_graph_text) {
PrintF("TypeAnalyzer::Compute - iteration %d\n", count++);
}
for (int i = postorder_->length() - 1; i >= 0; --i) {
Node* node = postorder_->at(i);
if (node->IsBlockNode()) {
BlockNode* block = BlockNode::cast(node);
for (int j = 0; j < block->instructions()->length(); j++) {
Expression* expr = block->instructions()->at(j)->AsExpression();
if (expr != NULL) {
// For variable uses: Compute new type from reaching definitions.
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->reaching_definitions() != NULL) {
BitVector* rd = proxy->reaching_definitions();
bool prim_type = true;
// TODO(fsc): A sparse set representation of reaching
// definitions would speed up iterating here.
for (int k = 0; k < rd->length(); k++) {
if (rd->Contains(k) && !IsPrimitiveDef(k)) {
prim_type = false;
break;
}
}
// Set the changed flag if new type information was computed.
if (prim_type != proxy->IsPrimitive()) {
changed = true;
proxy->SetIsPrimitive(prim_type);
}
}
}
}
}
}
} while (changed);
}
void Node::MarkCriticalInstructions(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
}
void BlockNode::MarkCriticalInstructions(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count) {
for (int i = instructions_.length() - 1; i >= 0; i--) {
// Only expressions can appear in the flow graph for now.
Expression* expr = instructions_[i]->AsExpression();
if (expr != NULL && !expr->is_live() &&
(expr->is_loop_condition() || expr->IsCritical())) {
expr->mark_as_live();
expr->ProcessNonLiveChildren(stack, body_definitions, variable_count);
}
}
}
void MarkLiveCode(ZoneList<Node*>* nodes,
ZoneList<Expression*>* body_definitions,
int variable_count) {
List<AstNode*> stack(20);
// Mark the critical AST nodes as live; mark their dependencies and
// add them to the marking stack.
for (int i = nodes->length() - 1; i >= 0; i--) {
nodes->at(i)->MarkCriticalInstructions(&stack, body_definitions,
variable_count);
}
// Continue marking dependencies until no more.
while (!stack.is_empty()) {
// Only expressions can appear in the flow graph for now.
Expression* expr = stack.RemoveLast()->AsExpression();
if (expr != NULL) {
expr->ProcessNonLiveChildren(&stack, body_definitions, variable_count);
}
}
}
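The same two-phase shape, as a generic self-contained sketch (hypothetical
Item type; in the code above the roots are the critical instructions and live
loop conditions, and the children are their non-live subexpressions):

#include <vector>

struct Item {  // Hypothetical stand-in for an AST node.
  bool live = false;
  std::vector<Item*> children;  // Dependencies that must stay live.
};

static void MarkReachable(const std::vector<Item*>& roots) {
  std::vector<Item*> stack(roots.begin(), roots.end());
  while (!stack.empty()) {
    Item* item = stack.back();  // Pop, mark, push unmarked children.
    stack.pop_back();
    if (item->live) continue;
    item->live = true;
    for (Item* child : item->children) {
      if (!child->live) stack.push_back(child);
    }
  }
}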
#ifdef DEBUG
// Print a textual representation of an instruction in a flow graph. Using
// the AstVisitor is overkill because there is no recursion here. It is
// only used for printing in debug mode.
class TextInstructionPrinter: public AstVisitor {
public:
TextInstructionPrinter() : number_(0) {}
int NextNumber() { return number_; }
void AssignNumber(AstNode* node) { node->set_num(number_++); }
private:
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
int number_;
DISALLOW_COPY_AND_ASSIGN(TextInstructionPrinter);
};
void TextInstructionPrinter::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void TextInstructionPrinter::VisitBlock(Block* stmt) {
PrintF("Block");
}
void TextInstructionPrinter::VisitExpressionStatement(
ExpressionStatement* stmt) {
PrintF("ExpressionStatement");
}
void TextInstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
PrintF("EmptyStatement");
}
void TextInstructionPrinter::VisitIfStatement(IfStatement* stmt) {
PrintF("IfStatement");
}
void TextInstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
UNREACHABLE();
}
void TextInstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
UNREACHABLE();
}
void TextInstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
PrintF("return @%d", stmt->expression()->num());
}
void TextInstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
PrintF("WithEnterStatement");
}
void TextInstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
PrintF("WithExitStatement");
}
void TextInstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
void TextInstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
PrintF("DoWhileStatement");
}
void TextInstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
PrintF("WhileStatement");
}
void TextInstructionPrinter::VisitForStatement(ForStatement* stmt) {
PrintF("ForStatement");
}
void TextInstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
PrintF("ForInStatement");
}
void TextInstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
UNREACHABLE();
}
void TextInstructionPrinter::VisitTryFinallyStatement(
TryFinallyStatement* stmt) {
UNREACHABLE();
}
void TextInstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
PrintF("DebuggerStatement");
}
void TextInstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
PrintF("FunctionLiteral");
}
void TextInstructionPrinter::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
PrintF("SharedFunctionInfoLiteral");
}
void TextInstructionPrinter::VisitConditional(Conditional* expr) {
PrintF("Conditional");
}
void TextInstructionPrinter::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void TextInstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
Variable* var = expr->AsVariable();
if (var != NULL) {
PrintF("%s", *var->name()->ToCString());
if (var->IsStackAllocated() && expr->reaching_definitions() != NULL) {
expr->reaching_definitions()->Print();
}
} else {
ASSERT(expr->AsProperty() != NULL);
VisitProperty(expr->AsProperty());
}
}
void TextInstructionPrinter::VisitLiteral(Literal* expr) {
expr->handle()->ShortPrint();
}
void TextInstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
PrintF("RegExpLiteral");
}
void TextInstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
PrintF("ObjectLiteral");
}
void TextInstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
PrintF("ArrayLiteral");
}
void TextInstructionPrinter::VisitCatchExtensionObject(
CatchExtensionObject* expr) {
PrintF("CatchExtensionObject");
}
void TextInstructionPrinter::VisitAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
if (var == NULL && prop == NULL) {
// Throw reference error.
Visit(expr->target());
return;
}
// Print the left-hand side.
if (var != NULL) {
PrintF("%s", *var->name()->ToCString());
} else if (prop != NULL) {
PrintF("@%d", prop->obj()->num());
if (prop->key()->IsPropertyName()) {
PrintF(".");
ASSERT(prop->key()->AsLiteral() != NULL);
prop->key()->AsLiteral()->handle()->Print();
} else {
PrintF("[@%d]", prop->key()->num());
}
}
// Print the operation.
if (expr->is_compound()) {
PrintF(" = ");
// Print the left-hand side again when compound.
if (var != NULL) {
PrintF("@%d", expr->target()->num());
} else {
PrintF("@%d", prop->obj()->num());
if (prop->key()->IsPropertyName()) {
PrintF(".");
ASSERT(prop->key()->AsLiteral() != NULL);
prop->key()->AsLiteral()->handle()->Print();
} else {
PrintF("[@%d]", prop->key()->num());
}
}
// Print the corresponding binary operator.
PrintF(" %s ", Token::String(expr->binary_op()));
} else {
PrintF(" %s ", Token::String(expr->op()));
}
// Print the right-hand side.
PrintF("@%d", expr->value()->num());
if (expr->num() != AstNode::kNoNumber) {
PrintF(" ;; D%d", expr->num());
}
}
void TextInstructionPrinter::VisitThrow(Throw* expr) {
PrintF("throw @%d", expr->exception()->num());
}
void TextInstructionPrinter::VisitProperty(Property* expr) {
if (expr->key()->IsPropertyName()) {
PrintF("@%d.", expr->obj()->num());
ASSERT(expr->key()->AsLiteral() != NULL);
expr->key()->AsLiteral()->handle()->Print();
} else {
PrintF("@%d[@%d]", expr->obj()->num(), expr->key()->num());
}
}
void TextInstructionPrinter::VisitCall(Call* expr) {
PrintF("@%d(", expr->expression()->num());
ZoneList<Expression*>* arguments = expr->arguments();
for (int i = 0, len = arguments->length(); i < len; i++) {
if (i != 0) PrintF(", ");
PrintF("@%d", arguments->at(i)->num());
}
PrintF(")");
}
void TextInstructionPrinter::VisitCallNew(CallNew* expr) {
PrintF("new @%d(", expr->expression()->num());
ZoneList<Expression*>* arguments = expr->arguments();
for (int i = 0, len = arguments->length(); i < len; i++) {
if (i != 0) PrintF(", ");
PrintF("@%d", arguments->at(i)->num());
}
PrintF(")");
}
void TextInstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
PrintF("%s(", *expr->name()->ToCString());
ZoneList<Expression*>* arguments = expr->arguments();
for (int i = 0, len = arguments->length(); i < len; i++) {
if (i != 0) PrintF(", ");
PrintF("@%d", arguments->at(i)->num());
}
PrintF(")");
}
void TextInstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
}
void TextInstructionPrinter::VisitCountOperation(CountOperation* expr) {
if (expr->is_prefix()) {
PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
} else {
PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
}
if (expr->num() != AstNode::kNoNumber) {
PrintF(" ;; D%d", expr->num());
}
}
void TextInstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
ASSERT(expr->op() != Token::COMMA);
ASSERT(expr->op() != Token::OR);
ASSERT(expr->op() != Token::AND);
PrintF("@%d %s @%d",
expr->left()->num(),
Token::String(expr->op()),
expr->right()->num());
}
void TextInstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
PrintF("@%d %s @%d",
expr->left()->num(),
Token::String(expr->op()),
expr->right()->num());
}
void TextInstructionPrinter::VisitThisFunction(ThisFunction* expr) {
PrintF("ThisFunction");
}
static int node_count = 0;
static int instruction_count = 0;
void Node::AssignNodeNumber() {
set_number(node_count++);
}
void Node::PrintReachingDefinitions() {
if (rd_.rd_in() != NULL) {
ASSERT(rd_.kill() != NULL && rd_.gen() != NULL);
PrintF("RD_in = ");
rd_.rd_in()->Print();
PrintF("\n");
PrintF("RD_kill = ");
rd_.kill()->Print();
PrintF("\n");
PrintF("RD_gen = ");
rd_.gen()->Print();
PrintF("\n");
}
}
void ExitNode::PrintText() {
PrintReachingDefinitions();
PrintF("L%d: Exit\n\n", number());
}
void BlockNode::PrintText() {
PrintReachingDefinitions();
// Print the instructions in the block.
PrintF("L%d: Block\n", number());
TextInstructionPrinter printer;
for (int i = 0, len = instructions_.length(); i < len; i++) {
AstNode* instr = instructions_[i];
// Print a star next to dead instructions.
if (instr->AsExpression() != NULL && instr->AsExpression()->is_live()) {
PrintF(" ");
} else {
PrintF("* ");
}
PrintF("%d ", printer.NextNumber());
printer.Visit(instr);
printer.AssignNumber(instr);
PrintF("\n");
}
PrintF("goto L%d\n\n", successor_->number());
}
void BranchNode::PrintText() {
PrintReachingDefinitions();
PrintF("L%d: Branch\n", number());
PrintF("goto (L%d, L%d)\n\n", successor0_->number(), successor1_->number());
}
void JoinNode::PrintText() {
PrintReachingDefinitions();
PrintF("L%d: Join(", number());
for (int i = 0, len = predecessors_.length(); i < len; i++) {
if (i != 0) PrintF(", ");
PrintF("L%d", predecessors_[i]->number());
}
PrintF(")\ngoto L%d\n\n", successor_->number());
}
void FlowGraph::PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder) {
PrintF("\n========\n");
PrintF("name = %s\n", *fun->name()->ToCString());
// Number nodes and instructions in reverse postorder.
node_count = 0;
instruction_count = 0;
for (int i = postorder->length() - 1; i >= 0; i--) {
postorder->at(i)->AssignNodeNumber();
}
// Print basic blocks in reverse postorder.
for (int i = postorder->length() - 1; i >= 0; i--) {
postorder->at(i)->PrintText();
}
}
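A hypothetical fragment of the resulting trace for a two-block graph, with the
shape inferred from the printers above (not captured from a real run):

L1: Block
  0 x
  1 x = @0 ;; D3
goto L0

L0: Exit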
#endif // DEBUG
} } // namespace v8::internal

59
deps/v8/src/data-flow.h

@@ -272,65 +272,6 @@ class AssignedVariablesAnalyzer : public AstVisitor {
};
class ReachingDefinitions BASE_EMBEDDED {
public:
ReachingDefinitions(ZoneList<Node*>* postorder,
ZoneList<Expression*>* body_definitions,
int variable_count)
: postorder_(postorder),
body_definitions_(body_definitions),
variable_count_(variable_count) {
}
static int IndexFor(Variable* var, int variable_count);
void Compute();
private:
// A (postorder) list of flow-graph nodes in the body.
ZoneList<Node*>* postorder_;
// A list of all the definitions in the body.
ZoneList<Expression*>* body_definitions_;
int variable_count_;
DISALLOW_COPY_AND_ASSIGN(ReachingDefinitions);
};
class TypeAnalyzer BASE_EMBEDDED {
public:
TypeAnalyzer(ZoneList<Node*>* postorder,
ZoneList<Expression*>* body_definitions,
int variable_count,
int param_count)
: postorder_(postorder),
body_definitions_(body_definitions),
variable_count_(variable_count),
param_count_(param_count) {}
void Compute();
private:
// Get the primitivity of the definition numbered def_num. Definitions
// are numbered by the flow graph builder.
bool IsPrimitiveDef(int def_num);
ZoneList<Node*>* postorder_;
ZoneList<Expression*>* body_definitions_;
int variable_count_;
int param_count_;
DISALLOW_COPY_AND_ASSIGN(TypeAnalyzer);
};
void MarkLiveCode(ZoneList<Node*>* nodes,
ZoneList<Expression*>* body_definitions,
int variable_count);
} } // namespace v8::internal

23
deps/v8/src/date.js

@@ -588,6 +588,20 @@ function TimeString(time) {
function LocalTimezoneString(time) {
var old_timezone = timezone_cache_timezone;
var timezone = LocalTimezone(time);
if (old_timezone && timezone != old_timezone) {
// If the timezone string has changed from the one that we cached,
// the local time offset may now be wrong. So we need to update it
// and try again.
local_time_offset = %DateLocalTimeOffset();
// We also need to invalidate the DST cache as the new timezone may have
// different DST times.
var dst_cache = DST_offset_cache;
dst_cache.start = 0;
dst_cache.end = -1;
}
var timezoneOffset =
(DaylightSavingsOffset(time) + local_time_offset) / msPerMinute;
var sign = (timezoneOffset >= 0) ? 1 : -1;
@@ -595,7 +609,7 @@ function LocalTimezoneString(time) {
var min = FLOOR((sign * timezoneOffset)%60);
var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
TwoDigitString(hours) + TwoDigitString(min);
return gmt + ' (' + LocalTimezone(time) + ')';
return gmt + ' (' + timezone + ')';
}
@@ -654,7 +668,8 @@ function DateNow() {
function DateToString() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t);
var time_zone_string = LocalTimezoneString(t); // May update local offset.
return DatePrintString(LocalTimeNoCheck(t)) + time_zone_string;
}
@@ -670,8 +685,8 @@ function DateToDateString() {
function DateToTimeString() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return kInvalidDate;
var lt = LocalTimeNoCheck(t);
return TimeString(lt) + LocalTimezoneString(lt);
var time_zone_string = LocalTimezoneString(t); // May update local offset.
return TimeString(LocalTimeNoCheck(t)) + time_zone_string;
}

48
deps/v8/src/debug-debugger.js

@@ -124,6 +124,12 @@ BreakPoint.prototype.source_position = function() {
};
BreakPoint.prototype.updateSourcePosition = function(new_position, script) {
this.source_position_ = new_position;
// TODO(635): also update line and column.
};
BreakPoint.prototype.hit_count = function() {
return this.hit_count_;
};
@@ -327,7 +333,7 @@ ScriptBreakPoint.prototype.matchesScript = function(script) {
if (this.type_ == Debug.ScriptBreakPointType.ScriptId) {
return this.script_id_ == script.id;
} else { // this.type_ == Debug.ScriptBreakPointType.ScriptName
return this.script_name_ == script.name &&
return this.script_name_ == script.nameOrSourceURL() &&
script.line_offset <= this.line_ &&
this.line_ < script.line_offset + script.lineCount();
}
@@ -474,6 +480,11 @@ Debug.disassembleConstructor = function(f) {
return %DebugDisassembleConstructor(f);
};
Debug.ExecuteInDebugContext = function(f, without_debugger) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
return %ExecuteInDebugContext(f, !!without_debugger);
};
Debug.sourcePosition = function(f) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
return %FunctionGetScriptSourcePosition(f);
@@ -1966,13 +1977,6 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response)
return response.failed('Missing arguments');
}
var script_id = request.arguments.script_id;
var change_pos = parseInt(request.arguments.change_pos);
var change_len = parseInt(request.arguments.change_len);
var new_string = request.arguments.new_string;
if (!IS_STRING(new_string)) {
response.failed('Argument "new_string" is not a string value');
return;
}
var scripts = %DebugGetLoadedScripts();
@@ -1987,15 +1991,37 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response)
return;
}
// A function that calls the LiveEdit API entry point matching the
// supplied arguments.
var invocation;
var change_log = new Array();
if (IS_STRING(request.arguments.new_source)) {
var new_source = request.arguments.new_source;
invocation = function() {
return Debug.LiveEdit.SetScriptSource(the_script, new_source, change_log);
}
} else {
var change_pos = parseInt(request.arguments.change_pos);
var change_len = parseInt(request.arguments.change_len);
var new_string = request.arguments.new_string;
if (!IS_STRING(new_string)) {
response.failed('Argument "new_string" is not a string value');
return;
}
invocation = function() {
return Debug.LiveEditChangeScript(the_script, change_pos, change_len,
new_string, change_log);
}
}
try {
Debug.LiveEditChangeScript(the_script, change_pos, change_len, new_string,
change_log);
invocation();
} catch (e) {
if (e instanceof Debug.LiveEditChangeScript.Failure) {
// Treat it as a "success" so that the body with change_log is still
// sent back; change_log will have its "failure" field set.
change_log.push( { failure: true } );
change_log.push( { failure: true, message: e.toString() } );
} else {
throw e;
}

17
deps/v8/src/debug.cc

@@ -814,6 +814,8 @@ Object* Debug::Break(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 0);
thread_local_.frames_are_dropped_ = false;
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it;
JavaScriptFrame* frame = it.frame();
@@ -890,8 +892,13 @@
PrepareStep(step_action, step_count);
}
// Install jump to the call address which was overwritten.
SetAfterBreakTarget(frame);
if (thread_local_.frames_are_dropped_) {
// We must have been calling an IC stub; do not return there anymore.
Code* plain_return = Builtins::builtin(Builtins::PlainReturn_LiveEdit);
thread_local_.after_break_target_ = plain_return->entry();
} else {
SetAfterBreakTarget(frame);
}
return Heap::undefined_value();
}
@@ -1655,6 +1662,12 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
}
void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id) {
thread_local_.frames_are_dropped_ = true;
thread_local_.break_frame_id_ = new_break_frame_id;
}
bool Debug::IsDebugGlobal(GlobalObject* global) {
return IsLoaded() && global == Debug::debug_context()->global();
}

11
deps/v8/src/debug.h

@@ -377,10 +377,18 @@ class Debug {
static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
// Called from stub-cache.cc.
static void GenerateCallICDebugBreak(MacroAssembler* masm);
static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id);
static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code);
static const int kFrameDropperFrameSize;
private:
static bool CompileDebuggerScript(int index);
static void ClearOneShot();
@@ -446,6 +454,9 @@ class Debug {
// Storage location for jump when exiting debug break calls.
Address after_break_target_;
// Indicates that LiveEdit has patched the stack.
bool frames_are_dropped_;
// Top debugger entry.
EnterDebugger* debugger_entry_;

21
deps/v8/src/execution.cc

@@ -221,8 +221,8 @@ bool StackGuard::IsStackOverflow() {
void StackGuard::EnableInterrupts() {
ExecutionAccess access;
if (IsSet(access)) {
set_limits(kInterruptLimit, access);
if (has_pending_interrupts(access)) {
set_interrupt_limits(access);
}
}
@@ -249,11 +249,6 @@ void StackGuard::DisableInterrupts() {
}
bool StackGuard::IsSet(const ExecutionAccess& lock) {
return thread_local_.interrupt_flags_ != 0;
}
bool StackGuard::IsInterrupted() {
ExecutionAccess access;
return thread_local_.interrupt_flags_ & INTERRUPT;
@@ -263,7 +258,7 @@ bool StackGuard::IsInterrupted() {
void StackGuard::Interrupt() {
ExecutionAccess access;
thread_local_.interrupt_flags_ |= INTERRUPT;
set_limits(kInterruptLimit, access);
set_interrupt_limits(access);
}
@@ -276,7 +271,7 @@ bool StackGuard::IsPreempted() {
void StackGuard::Preempt() {
ExecutionAccess access;
thread_local_.interrupt_flags_ |= PREEMPT;
set_limits(kInterruptLimit, access);
set_interrupt_limits(access);
}
@@ -289,7 +284,7 @@ bool StackGuard::IsTerminateExecution() {
void StackGuard::TerminateExecution() {
ExecutionAccess access;
thread_local_.interrupt_flags_ |= TERMINATE;
set_limits(kInterruptLimit, access);
set_interrupt_limits(access);
}
@@ -303,7 +298,7 @@ bool StackGuard::IsDebugBreak() {
void StackGuard::DebugBreak() {
ExecutionAccess access;
thread_local_.interrupt_flags_ |= DEBUGBREAK;
set_limits(kInterruptLimit, access);
set_interrupt_limits(access);
}
@@ -317,7 +312,7 @@ void StackGuard::DebugCommand() {
if (FLAG_debugger_auto_break) {
ExecutionAccess access;
thread_local_.interrupt_flags_ |= DEBUGCOMMAND;
set_limits(kInterruptLimit, access);
set_interrupt_limits(access);
}
}
#endif
@@ -325,7 +320,7 @@ void StackGuard::DebugCommand() {
void StackGuard::Continue(InterruptFlag after_what) {
ExecutionAccess access;
thread_local_.interrupt_flags_ &= ~static_cast<int>(after_what);
if (thread_local_.interrupt_flags_ == 0) {
if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
reset_limits(access);
}
}

20
deps/v8/src/execution.h

@@ -199,12 +199,24 @@ class StackGuard : public AllStatic {
private:
// You should hold the ExecutionAccess lock when calling this method.
static bool IsSet(const ExecutionAccess& lock);
static bool has_pending_interrupts(const ExecutionAccess& lock) {
// Sanity check: we should not ask about pending interrupts while
// interrupts are still being postponed.
ASSERT(!should_postpone_interrupts(lock));
return thread_local_.interrupt_flags_ != 0;
}
// You should hold the ExecutionAccess lock when calling this method.
static bool should_postpone_interrupts(const ExecutionAccess& lock) {
return thread_local_.postpone_interrupts_nesting_ > 0;
}
// You should hold the ExecutionAccess lock when calling this method.
static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
thread_local_.jslimit_ = value;
thread_local_.climit_ = value;
static void set_interrupt_limits(const ExecutionAccess& lock) {
// Ignore attempts to interrupt when interrupts are postponed.
if (should_postpone_interrupts(lock)) return;
thread_local_.jslimit_ = kInterruptLimit;
thread_local_.climit_ = kInterruptLimit;
Heap::SetStackLimits();
}
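A compact model of the postpone/pending interaction introduced here, using
hypothetical names rather than the V8 internals (the comments map each piece
back to the real methods):

struct GuardModel {
  int postpone_nesting = 0;   // should_postpone_interrupts()
  int interrupt_flags = 0;    // Pending interrupt bits.
  bool limits_armed = false;  // Stands in for jslimit_/climit_.

  void Request(int flag) {  // Interrupt(), Preempt(), DebugBreak(), ...
    interrupt_flags |= flag;
    if (postpone_nesting == 0) limits_armed = true;  // set_interrupt_limits()
  }
  void EnableInterrupts() {  // Deferred interrupts fire when postponing ends.
    if (postpone_nesting == 0 && interrupt_flags != 0) limits_armed = true;
  }
  void Continue(int flag) {  // Clear one source; maybe reset the limits.
    interrupt_flags &= ~flag;
    if (postpone_nesting == 0 && interrupt_flags == 0) limits_armed = false;
  }
};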

2
deps/v8/src/flag-definitions.h

@@ -391,6 +391,8 @@ DEFINE_bool(prof_auto, true,
DEFINE_bool(prof_lazy, false,
"Used with --prof, only does sampling and logging"
" when profiler is active (implies --noprof_auto).")
DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")

731
deps/v8/src/flow-graph.cc

@@ -26,232 +26,87 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "flow-graph.h"
#include "scopes.h"
namespace v8 {
namespace internal {
void FlowGraph::AppendInstruction(AstNode* instruction) {
// Add a (non-null) AstNode to the end of the graph fragment.
ASSERT(instruction != NULL);
if (exit()->IsExitNode()) return;
if (!exit()->IsBlockNode()) AppendNode(new BlockNode());
BlockNode::cast(exit())->AddInstruction(instruction);
}
void FlowGraph::AppendNode(Node* node) {
// Add a node to the end of the graph. An empty block is added to
// maintain edge-split form (no join or exit node may appear as the
// successor of a branch node).
ASSERT(node != NULL);
if (exit()->IsExitNode()) return;
if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
AppendNode(new BlockNode());
}
exit()->AddSuccessor(node);
node->AddPredecessor(exit());
exit_ = node;
}
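For example, appending a JoinNode while exit() is a BranchNode splices in an
empty BlockNode first, preserving the invariant that a branch never flows
directly into a join or exit.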
void FlowGraph::AppendGraph(FlowGraph* graph) {
// Add a flow graph fragment to the end of this one. An empty block is
// added to maintain edge-split form (no join or exit node may appear as
// the successor of a branch node).
ASSERT(graph != NULL);
if (exit()->IsExitNode()) return;
Node* node = graph->entry();
if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
AppendNode(new BlockNode());
}
exit()->AddSuccessor(node);
node->AddPredecessor(exit());
exit_ = graph->exit();
}
void FlowGraph::Split(BranchNode* branch,
FlowGraph* left,
FlowGraph* right,
JoinNode* join) {
// Add the branch node, left flowgraph, join node.
AppendNode(branch);
AppendGraph(left);
AppendNode(join);
// Splice in the right flowgraph.
right->AppendNode(join);
branch->AddSuccessor(right->entry());
right->entry()->AddPredecessor(branch);
}
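The resulting diamond, in sketch form:

        branch
        /    \
    left      right
        \    /
         join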
void FlowGraph::Loop(JoinNode* join,
FlowGraph* condition,
BranchNode* branch,
FlowGraph* body) {
// Add the join, condition and branch. Add join's predecessors in
// left-to-right order.
AppendNode(join);
body->AppendNode(join);
AppendGraph(condition);
AppendNode(branch);
// Splice in the body flowgraph.
branch->AddSuccessor(body->entry());
body->entry()->AddPredecessor(branch);
}
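The resulting loop shape, in sketch form (the false arm of the branch is the
graph's single free exit):

   join --> condition --> branch --false--> (free exit)
    ^                        |
    |                       true
    +-------- body <---------+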
void ExitNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
preorder->Add(this);
postorder->Add(this);
}
void BlockNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor_ != NULL);
void BasicBlock::BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
ZoneList<BasicBlock*>* postorder,
bool mark) {
if (mark_ == mark) return;
mark_ = mark;
preorder->Add(this);
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
successor_->Traverse(mark, preorder, postorder);
if (right_successor_ != NULL) {
right_successor_->BuildTraversalOrder(preorder, postorder, mark);
}
postorder->Add(this);
}
void BranchNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor0_ != NULL && successor1_ != NULL);
preorder->Add(this);
if (!successor1_->IsMarkedWith(mark)) {
successor1_->MarkWith(mark);
successor1_->Traverse(mark, preorder, postorder);
}
if (!successor0_->IsMarkedWith(mark)) {
successor0_->MarkWith(mark);
successor0_->Traverse(mark, preorder, postorder);
if (left_successor_ != NULL) {
left_successor_->BuildTraversalOrder(preorder, postorder, mark);
}
postorder->Add(this);
}
void JoinNode::Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) {
ASSERT(successor_ != NULL);
preorder->Add(this);
if (!successor_->IsMarkedWith(mark)) {
successor_->MarkWith(mark);
successor_->Traverse(mark, preorder, postorder);
}
postorder->Add(this);
}
FlowGraph* FlowGraphBuilder::Build(FunctionLiteral* lit) {
// Create new entry and exit nodes. These will not change during
// construction.
entry_ = new BasicBlock(NULL);
exit_ = new BasicBlock(NULL);
// Begin accumulating instructions in the entry block.
current_ = entry_;
void FlowGraphBuilder::Build(FunctionLiteral* lit) {
global_exit_ = new ExitNode();
VisitDeclarations(lit->scope()->declarations());
VisitStatements(lit->body());
if (HasStackOverflow()) return;
// The graph can end with a branch node (if the function ended with a
// loop). Maintain edge-split form (no join or exit node as the
// successor of a branch node).
if (graph_.exit()->IsBranchNode()) graph_.AppendNode(new BlockNode());
graph_.AppendNode(global_exit_);
// Build preorder and postorder traversal orders. All the nodes in
// the graph have the same mark flag. For the traversal, use that
// flag's negation. Traversal will flip all the flags.
bool mark = graph_.entry()->IsMarkedWith(false);
graph_.entry()->MarkWith(mark);
graph_.entry()->Traverse(mark, &preorder_, &postorder_);
}
// This function peels off one iteration of a for-loop. The return value
// is either a block statement containing the peeled loop or NULL in case
// there is a stack overflow.
static Statement* PeelForLoop(ForStatement* stmt) {
// Mark this for-statement as processed.
stmt->set_peel_this_loop(false);
// Create new block containing the init statement of the for-loop and
// an if-statement containing the peeled iteration and the original
// loop without the init-statement.
Block* block = new Block(NULL, 2, false);
if (stmt->init() != NULL) {
Statement* init = stmt->init();
// The init statement gets the statement position of the for-loop
// to make debugging of peeled loops possible.
init->set_statement_pos(stmt->statement_pos());
block->AddStatement(init);
// In the event of stack overflow or failure to handle a syntactic
// construct, return an invalid flow graph.
if (HasStackOverflow()) return new FlowGraph(NULL, NULL);
// If current is not the exit, add a link to the exit.
if (current_ != exit_) {
// If current already has a successor (i.e., will be a branch node) and
// if the exit already has a predecessor, insert an empty block to
// maintain edge split form.
if (current_->HasSuccessor() && exit_->HasPredecessor()) {
current_ = new BasicBlock(current_);
}
Literal* undefined = new Literal(Factory::undefined_value());
current_->AddInstruction(new ReturnStatement(undefined));
exit_->AddPredecessor(current_);
}
// Copy the condition.
CopyAstVisitor copy_visitor;
Expression* cond_copy = stmt->cond() != NULL
? copy_visitor.DeepCopyExpr(stmt->cond())
: new Literal(Factory::true_value());
if (copy_visitor.HasStackOverflow()) return NULL;
// Construct a block with the peeled body and the rest of the for-loop.
Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body());
if (copy_visitor.HasStackOverflow()) return NULL;
Statement* next_copy = stmt->next() != NULL
? copy_visitor.DeepCopyStmt(stmt->next())
: new EmptyStatement();
if (copy_visitor.HasStackOverflow()) return NULL;
FlowGraph* graph = new FlowGraph(entry_, exit_);
bool mark = !entry_->GetMark();
entry_->BuildTraversalOrder(graph->preorder(), graph->postorder(), mark);
Block* peeled_body = new Block(NULL, 3, false);
peeled_body->AddStatement(body_copy);
peeled_body->AddStatement(next_copy);
peeled_body->AddStatement(stmt);
// Remove the duplicated init statement from the for-statement.
stmt->set_init(NULL);
// Create new test at the top and add it to the newly created block.
IfStatement* test = new IfStatement(cond_copy,
peeled_body,
new EmptyStatement());
block->AddStatement(test);
return block;
}
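On concrete code the transformation looks like this (a sketch on a made-up
function, not output of the pass):

int sum_original(int n) {
  int s = 0;
  for (int i = 0; i < n; i++) s += i;
  return s;
}

int sum_peeled(int n) {  // Equivalent, with one iteration peeled.
  int s = 0;
  {
    int i = 0;                    // init, hoisted into the new block
    if (i < n) {                  // copy of the condition
      s += i;                     // copy of the body
      i++;                        // copy of the next expression
      for (; i < n; i++) s += i;  // original loop, init removed
    }
  }
  return s;
}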
void FlowGraphBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
for (int i = 0, len = stmts->length(); i < len; i++) {
stmts->at(i) = ProcessStatement(stmts->at(i));
#ifdef DEBUG
// Number the nodes in reverse postorder.
int n = 0;
for (int i = graph->postorder()->length() - 1; i >= 0; --i) {
graph->postorder()->at(i)->set_number(n++);
}
}
#endif
Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) {
if (FLAG_loop_peeling &&
stmt->AsForStatement() != NULL &&
stmt->AsForStatement()->peel_this_loop()) {
Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement());
if (tmp_stmt == NULL) {
SetStackOverflow();
} else {
stmt = tmp_stmt;
}
}
Visit(stmt);
return stmt;
return graph;
}
void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
Variable* var = decl->proxy()->AsVariable();
Slot* slot = var->slot();
// We allow only declarations that do not require code generation.
// The following all require code generation: global variables and
// functions, variables with slot type LOOKUP, declarations with
// mode CONST, and function declarations.
if (var->is_global() ||
(slot != NULL && slot->type() == Slot::LOOKUP) ||
decl->mode() == Variable::CONST ||
decl->fun() != NULL) {
// Here and in the rest of the flow graph builder we indicate an
// unsupported syntactic construct by setting the stack overflow
// flag on the visitor. This causes the visitor to bail out.
SetStackOverflow();
}
}
@@ -271,21 +126,24 @@ void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
// Build a diamond in the flow graph. First accumulate the instructions
// of the test in the current basic block.
Visit(stmt->condition());
BranchNode* branch = new BranchNode();
FlowGraph original = graph_;
graph_ = FlowGraph::Empty();
stmt->set_then_statement(ProcessStatement(stmt->then_statement()));
// Remember the branch node and accumulate the true branch as its left
// successor. This relies on the successors being added left to right.
BasicBlock* branch = current_;
current_ = new BasicBlock(branch);
Visit(stmt->then_statement());
FlowGraph left = graph_;
graph_ = FlowGraph::Empty();
stmt->set_else_statement(ProcessStatement(stmt->else_statement()));
// Construct a join node and then accumulate the false branch in a fresh
// successor of the branch node.
BasicBlock* join = new BasicBlock(current_);
current_ = new BasicBlock(branch);
Visit(stmt->else_statement());
join->AddPredecessor(current_);
if (HasStackOverflow()) return;
JoinNode* join = new JoinNode();
original.Split(branch, &left, &graph_, join);
graph_ = original;
current_ = join;
}
@@ -330,23 +188,26 @@ void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init()));
// Build a loop in the flow graph. First accumulate the instructions of
// the initializer in the current basic block.
if (stmt->init() != NULL) Visit(stmt->init());
JoinNode* join = new JoinNode();
FlowGraph original = graph_;
graph_ = FlowGraph::Empty();
// Create a new basic block for the test. This will be the join node.
BasicBlock* join = new BasicBlock(current_);
current_ = join;
if (stmt->cond() != NULL) Visit(stmt->cond());
BranchNode* branch = new BranchNode();
FlowGraph condition = graph_;
graph_ = FlowGraph::Empty();
stmt->set_body(ProcessStatement(stmt->body()));
// The current node is the branch node. Create a new basic block to begin
// the body.
BasicBlock* branch = current_;
current_ = new BasicBlock(branch);
Visit(stmt->body());
if (stmt->next() != NULL) Visit(stmt->next());
if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next()));
if (HasStackOverflow()) return;
original.Loop(join, &condition, branch, &graph_);
graph_ = original;
// Add the backward edge from the end of the body and continue with the
// false arm of the branch.
join->AddPredecessor(current_);
current_ = new BasicBlock(branch);
}
@@ -387,17 +248,18 @@ void FlowGraphBuilder::VisitConditional(Conditional* expr) {
void FlowGraphBuilder::VisitSlot(Slot* expr) {
// Slots do not appear in the AST.
UNREACHABLE();
}
void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
graph_.AppendInstruction(expr);
current_->AddInstruction(expr);
}
void FlowGraphBuilder::VisitLiteral(Literal* expr) {
graph_.AppendInstruction(expr);
current_->AddInstruction(expr);
}
@@ -422,29 +284,30 @@ void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
// There are three basic kinds of assignment: variable assignments,
// property assignments, and invalid left-hand sides (which are translated
// to "throw ReferenceError" by the parser).
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
// Left-hand side can be a variable or property (or reference error) but
// not both.
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
if (expr->is_compound()) Visit(expr->target());
Visit(expr->value());
if (var->IsStackAllocated()) {
// The first definition in the body is numbered n, where n is the
// number of parameters and stack-allocated locals.
expr->set_num(body_definitions_.length() + variable_count_);
body_definitions_.Add(expr);
if (expr->is_compound() && !expr->target()->IsTrivial()) {
Visit(expr->target());
}
if (!expr->value()->IsTrivial()) Visit(expr->value());
current_->AddInstruction(expr);
} else if (prop != NULL) {
Visit(prop->obj());
if (!prop->key()->IsPropertyName()) Visit(prop->key());
Visit(expr->value());
}
if (!prop->obj()->IsTrivial()) Visit(prop->obj());
if (!prop->key()->IsPropertyName() && !prop->key()->IsTrivial()) {
Visit(prop->key());
}
if (!expr->value()->IsTrivial()) Visit(expr->value());
current_->AddInstruction(expr);
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
} else {
Visit(expr->target());
}
}
@@ -454,23 +317,18 @@ void FlowGraphBuilder::VisitThrow(Throw* expr) {
void FlowGraphBuilder::VisitProperty(Property* expr) {
Visit(expr->obj());
if (!expr->key()->IsPropertyName()) Visit(expr->key());
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
if (!expr->obj()->IsTrivial()) Visit(expr->obj());
if (!expr->key()->IsPropertyName() && !expr->key()->IsTrivial()) {
Visit(expr->key());
}
current_->AddInstruction(expr);
}
void FlowGraphBuilder::VisitCall(Call* expr) {
Visit(expr->expression());
ZoneList<Expression*>* arguments = expr->arguments();
for (int i = 0, len = arguments->length(); i < len; i++) {
Visit(arguments->at(i));
}
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
VisitExpressions(expr->arguments());
current_->AddInstruction(expr);
}
@@ -497,8 +355,7 @@ void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
case Token::ADD:
case Token::SUB:
Visit(expr->expression());
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
current_->AddInstruction(expr);
break;
default:
@@ -509,16 +366,7 @@
void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
Visit(expr->expression());
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
if (var != NULL && var->IsStackAllocated()) {
// The first definition in the body is numbered n, where n is the number
// of parameters and stack-allocated locals.
expr->set_num(body_definitions_.length() + variable_count_);
body_definitions_.Add(expr);
}
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
current_->AddInstruction(expr);
}
@@ -534,17 +382,16 @@ void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
case Token::BIT_XOR:
case Token::BIT_AND:
case Token::SHL:
case Token::SAR:
case Token::SHR:
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
case Token::SAR:
Visit(expr->left());
Visit(expr->right());
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
if (!expr->left()->IsTrivial()) Visit(expr->left());
if (!expr->right()->IsTrivial()) Visit(expr->right());
current_->AddInstruction(expr);
break;
default:
@@ -568,10 +415,9 @@ void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
case Token::GT:
case Token::LTE:
case Token::GTE:
Visit(expr->left());
Visit(expr->right());
if (HasStackOverflow()) return;
graph_.AppendInstruction(expr);
if (!expr->left()->IsTrivial()) Visit(expr->left());
if (!expr->right()->IsTrivial()) Visit(expr->right());
current_->AddInstruction(expr);
break;
default:
@@ -585,4 +431,333 @@ void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
}
#ifdef DEBUG
// Print a textual representation of an instruction in a flow graph.
class InstructionPrinter: public AstVisitor {
public:
InstructionPrinter() {}
private:
// Overridden from the base class.
virtual void VisitExpressions(ZoneList<Expression*>* exprs);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
DISALLOW_COPY_AND_ASSIGN(InstructionPrinter);
};
static void PrintSubexpression(Expression* expr) {
if (!expr->IsTrivial()) {
PrintF("@%d", expr->num());
} else if (expr->AsLiteral() != NULL) {
expr->AsLiteral()->handle()->Print();
} else if (expr->AsVariableProxy() != NULL) {
PrintF("%s", *expr->AsVariableProxy()->name()->ToCString());
} else {
UNREACHABLE();
}
}
void InstructionPrinter::VisitExpressions(ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
if (i != 0) PrintF(", ");
PrintF("@%d", exprs->at(i)->num());
}
}
// We only define printing functions for the node types that can occur as
// instructions in a flow graph. The rest are unreachable.
void InstructionPrinter::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void InstructionPrinter::VisitBlock(Block* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitExpressionStatement(ExpressionStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitIfStatement(IfStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
PrintF("return ");
PrintSubexpression(stmt->expression());
}
void InstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitForStatement(ForStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitConditional(Conditional* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
Variable* var = expr->AsVariable();
if (var != NULL) {
PrintF("%s", *var->name()->ToCString());
} else {
ASSERT(expr->AsProperty() != NULL);
Visit(expr->AsProperty());
}
}
void InstructionPrinter::VisitLiteral(Literal* expr) {
expr->handle()->Print();
}
void InstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitCatchExtensionObject(
CatchExtensionObject* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
// Print the left-hand side.
Visit(expr->target());
if (var == NULL && prop == NULL) return; // Throw reference error.
PrintF(" = ");
// For compound assignments, print the left-hand side again and the
// corresponding binary operator.
if (expr->is_compound()) {
PrintSubexpression(expr->target());
PrintF(" %s ", Token::String(expr->binary_op()));
}
// Print the right-hand side.
PrintSubexpression(expr->value());
}
void InstructionPrinter::VisitThrow(Throw* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitProperty(Property* expr) {
PrintSubexpression(expr->obj());
if (expr->key()->IsPropertyName()) {
PrintF(".");
ASSERT(expr->key()->AsLiteral() != NULL);
expr->key()->AsLiteral()->handle()->Print();
} else {
PrintF("[");
PrintSubexpression(expr->key());
PrintF("]");
}
}
void InstructionPrinter::VisitCall(Call* expr) {
PrintF("@%d(", expr->expression()->num());
VisitExpressions(expr->arguments());
PrintF(")");
}
void InstructionPrinter::VisitCallNew(CallNew* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
}
void InstructionPrinter::VisitCountOperation(CountOperation* expr) {
if (expr->is_prefix()) {
PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
} else {
PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
}
}
void InstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
PrintSubexpression(expr->left());
PrintF(" %s ", Token::String(expr->op()));
PrintSubexpression(expr->right());
}
void InstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
PrintSubexpression(expr->left());
PrintF(" %s ", Token::String(expr->op()));
PrintSubexpression(expr->right());
}
void InstructionPrinter::VisitThisFunction(ThisFunction* expr) {
UNREACHABLE();
}
int BasicBlock::PrintAsText(int instruction_number) {
// Print a label for all blocks except the entry.
if (HasPredecessor()) {
PrintF("L%d:", number());
}
// Number and print the instructions. Since AST child nodes are visited
// before their parents, the parent nodes can refer to them by number.
InstructionPrinter printer;
for (int i = 0; i < instructions_.length(); ++i) {
PrintF("\n%d ", instruction_number);
instructions_[i]->set_num(instruction_number++);
instructions_[i]->Accept(&printer);
}
// If this is the exit, print "exit". If there is a single successor,
// print "goto L<n>" on a separate line. If there are two successors,
// print "goto (L<m>, L<n>)" on the same line as the last instruction
// in the block. There is a blank line between blocks (and after the
// last one).
if (left_successor_ == NULL) {
PrintF("\nexit\n\n");
} else if (right_successor_ == NULL) {
PrintF("\ngoto L%d\n\n", left_successor_->number());
} else {
PrintF(", goto (L%d, L%d)\n\n",
left_successor_->number(),
right_successor_->number());
}
return instruction_number;
}
void FlowGraph::PrintAsText(Handle<String> name) {
PrintF("\n==== name = \"%s\" ====\n", *name->ToCString());
// Print nodes in reverse postorder. Note that AST node numbers are used
// during printing of instructions and thus their current values are
// destroyed.
int number = 0;
for (int i = postorder_.length() - 1; i >= 0; --i) {
number = postorder_[i]->PrintAsText(number);
}
}
#endif // DEBUG
} } // namespace v8::internal

369
deps/v8/src/flow-graph.h

@@ -36,339 +36,140 @@
namespace v8 {
namespace internal {
// Flow-graph nodes.
class Node: public ZoneObject {
public:
Node() : number_(-1), mark_(false) {}
virtual ~Node() {}
virtual bool IsExitNode() { return false; }
virtual bool IsBlockNode() { return false; }
virtual bool IsBranchNode() { return false; }
virtual bool IsJoinNode() { return false; }
virtual void AddPredecessor(Node* predecessor) = 0;
virtual void AddSuccessor(Node* successor) = 0;
bool IsMarkedWith(bool mark) { return mark_ == mark; }
void MarkWith(bool mark) { mark_ = mark; }
// Perform a depth first search and record preorder and postorder
// traversal orders.
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder) = 0;
int number() { return number_; }
void set_number(int number) { number_ = number; }
// Functions used by data-flow analyses.
virtual void InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark);
virtual void ComputeRDOut(BitVector* result) = 0;
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0;
virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
// Functions used by dead-code elimination.
virtual void MarkCriticalInstructions(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
#ifdef DEBUG
void AssignNodeNumber();
void PrintReachingDefinitions();
virtual void PrintText() = 0;
#endif
protected:
ReachingDefinitionsData rd_;
private:
int number_;
bool mark_;
DISALLOW_COPY_AND_ASSIGN(Node);
};
// An exit node has arbitrarily many predecessors and no successors.
class ExitNode: public Node {
// The nodes of a flow graph are basic blocks. Basic blocks consist of
// instructions represented as pointers to AST nodes in the order that they
// would be visited by the code generator. A block can have arbitrarily many
// (even zero) predecessors and up to two successors. Blocks with multiple
// predecessors are "join nodes" and blocks with multiple successors are
// "branch nodes". A block can be both a branch and a join node.
//
// Flow graphs are in edge split form: a branch node is never the
// predecessor of a merge node. Empty basic blocks are inserted to maintain
// edge split form.
class BasicBlock: public ZoneObject {
public:
ExitNode() : predecessors_(4) {}
// Construct a basic block with a given predecessor. NULL indicates no
// predecessor or that the predecessor will be set later.
explicit BasicBlock(BasicBlock* predecessor)
: predecessors_(2),
instructions_(8),
left_successor_(NULL),
right_successor_(NULL),
mark_(false) {
if (predecessor != NULL) AddPredecessor(predecessor);
}
virtual bool IsExitNode() { return true; }
bool HasPredecessor() { return !predecessors_.is_empty(); }
bool HasSuccessor() { return left_successor_ != NULL; }
virtual void AddPredecessor(Node* predecessor) {
// Add a given basic block as a predecessor of this block. This function
// also adds this block as a successor of the given block.
void AddPredecessor(BasicBlock* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
predecessor->AddSuccessor(this);
}
virtual void AddSuccessor(Node* successor) { UNREACHABLE(); }
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
ZoneList<Node*> predecessors_;
DISALLOW_COPY_AND_ASSIGN(ExitNode);
};
// Block nodes have a single successor and predecessor and a list of
// instructions.
class BlockNode: public Node {
public:
BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {}
static BlockNode* cast(Node* node) {
ASSERT(node->IsBlockNode());
return reinterpret_cast<BlockNode*>(node);
}
virtual bool IsBlockNode() { return true; }
bool is_empty() { return instructions_.is_empty(); }
ZoneList<AstNode*>* instructions() { return &instructions_; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
}
virtual void AddSuccessor(Node* successor) {
ASSERT(successor_ == NULL && successor != NULL);
successor_ = successor;
}
// Add an instruction to the end of this block. The block must still be
// "open", i.e. it must not have a successor yet.
void AddInstruction(AstNode* instruction) {
ASSERT(!HasSuccessor() && instruction != NULL);
instructions_.Add(instruction);
}
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void InitializeReachingDefinitions(int definition_count,
List<BitVector*>* variables,
WorkList<Node>* worklist,
bool mark);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
virtual void MarkCriticalInstructions(
List<AstNode*>* stack,
ZoneList<Expression*>* body_definitions,
int variable_count);
// Perform a depth-first traversal of graph rooted at this node,
// accumulating pre- and postorder traversal orders. Visited nodes are
// marked with mark.
void BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
ZoneList<BasicBlock*>* postorder,
bool mark);
bool GetMark() { return mark_; }
#ifdef DEBUG
virtual void PrintText();
// In debug mode, blocks are numbered in reverse postorder to help with
// printing.
int number() { return number_; }
void set_number(int n) { number_ = n; }
// Print a basic block, given the number of the first instruction.
// Returns the next number after the number of the last instruction.
int PrintAsText(int instruction_number);
#endif
private:
Node* predecessor_;
Node* successor_;
ZoneList<AstNode*> instructions_;
DISALLOW_COPY_AND_ASSIGN(BlockNode);
};
// Branch nodes have a single predecessor and a pair of successors.
class BranchNode: public Node {
public:
BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}
virtual bool IsBranchNode() { return true; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor_ == NULL && predecessor != NULL);
predecessor_ = predecessor;
}
virtual void AddSuccessor(Node* successor) {
ASSERT(successor1_ == NULL && successor != NULL);
if (successor0_ == NULL) {
successor0_ = successor;
// Add a given basic block as successor to this block. This function does
// not add this block as a predecessor of the given block so as to avoid
// circularity.
void AddSuccessor(BasicBlock* successor) {
ASSERT(right_successor_ == NULL && successor != NULL);
if (HasSuccessor()) {
right_successor_ = successor;
} else {
successor1_ = successor;
left_successor_ = successor;
}
}
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
#ifdef DEBUG
virtual void PrintText();
#endif
private:
Node* predecessor_;
Node* successor0_;
Node* successor1_;
DISALLOW_COPY_AND_ASSIGN(BranchNode);
};
// Join nodes have arbitrarily many predecessors and a single successor.
class JoinNode: public Node {
public:
JoinNode() : predecessors_(2), successor_(NULL) {}
static JoinNode* cast(Node* node) {
ASSERT(node->IsJoinNode());
return reinterpret_cast<JoinNode*>(node);
}
virtual bool IsJoinNode() { return true; }
virtual void AddPredecessor(Node* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
}
virtual void AddSuccessor(Node* successor) {
ASSERT(successor_ == NULL && successor != NULL);
successor_ = successor;
}
virtual void Traverse(bool mark,
ZoneList<Node*>* preorder,
ZoneList<Node*>* postorder);
ZoneList<BasicBlock*> predecessors_;
ZoneList<AstNode*> instructions_;
BasicBlock* left_successor_;
BasicBlock* right_successor_;
virtual void ComputeRDOut(BitVector* result);
virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
// Support for graph traversal. Before traversal, all nodes in the graph
// have the same mark (true or false). Traversal marks already-visited
// nodes with the opposite mark. After traversal, all nodes again have
// the same mark. Traversal of the same graph is not reentrant.
bool mark_;
#ifdef DEBUG
virtual void PrintText();
int number_;
#endif
private:
ZoneList<Node*> predecessors_;
Node* successor_;
DISALLOW_COPY_AND_ASSIGN(JoinNode);
DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
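A sketch of how the builder (see VisitIfStatement in flow-graph.cc above)
grows a diamond with this interface, eliding zone setup; current_ stands for
the builder's current-block pointer:

BasicBlock* branch = current_;                  // Ends with the condition.
BasicBlock* then_arm = new BasicBlock(branch);  // Becomes the left successor.
// ... fill then_arm ...
BasicBlock* join = new BasicBlock(then_arm);
BasicBlock* else_arm = new BasicBlock(branch);  // Becomes the right successor.
// ... fill else_arm ...
join->AddPredecessor(else_arm);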
// Flow graphs have a single entry and single exit. The empty flowgraph is
// represented by both entry and exit being NULL.
class FlowGraph BASE_EMBEDDED {
// A flow graph has distinguished entry and exit blocks. The entry block is
// the only one with no predecessors and the exit block is the only one with
// no successors.
class FlowGraph: public ZoneObject {
public:
static FlowGraph Empty() {
FlowGraph graph;
graph.entry_ = new BlockNode();
graph.exit_ = graph.entry_;
return graph;
FlowGraph(BasicBlock* entry, BasicBlock* exit)
: entry_(entry), exit_(exit), preorder_(8), postorder_(8) {
}
bool is_empty() const {
return entry_ == exit_ && BlockNode::cast(entry_)->is_empty();
}
Node* entry() const { return entry_; }
Node* exit() const { return exit_; }
// Add a single instruction to the end of this flowgraph.
void AppendInstruction(AstNode* instruction);
// Add a single node to the end of this flow graph.
void AppendNode(Node* node);
// Add a flow graph fragment to the end of this one.
void AppendGraph(FlowGraph* graph);
// Concatenate an if-then-else flow-graph to this one. Control is split
// and merged, so the graph remains single-entry, single-exit.
void Split(BranchNode* branch,
FlowGraph* left,
FlowGraph* right,
JoinNode* merge);
// Concatenate a forward loop (e.g., while or for loop) flow-graph to this
// one. Control is split by the condition and merged back via the back
// edge at the end of the body to the beginning of the condition. The single
// (free) exit of the result graph is the right (false) arm of the branch
// node.
void Loop(JoinNode* merge,
FlowGraph* condition,
BranchNode* branch,
FlowGraph* body);
ZoneList<BasicBlock*>* preorder() { return &preorder_; }
ZoneList<BasicBlock*>* postorder() { return &postorder_; }
#ifdef DEBUG
void PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder);
void PrintAsText(Handle<String> name);
#endif
private:
FlowGraph() : entry_(NULL), exit_(NULL) {}
Node* entry_;
Node* exit_;
BasicBlock* entry_;
BasicBlock* exit_;
ZoneList<BasicBlock*> preorder_;
ZoneList<BasicBlock*> postorder_;
};
// Construct a flow graph from a function literal. Build pre- and postorder
// traversal orders as a byproduct.
// The flow graph builder walks the AST adding reachable AST nodes to the
// flow graph as instructions. It remembers the entry and exit nodes of the
// graph, and keeps a pointer to the current block being constructed.
class FlowGraphBuilder: public AstVisitor {
public:
explicit FlowGraphBuilder(int variable_count)
: graph_(FlowGraph::Empty()),
global_exit_(NULL),
preorder_(4),
postorder_(4),
variable_count_(variable_count),
body_definitions_(4) {
}
void Build(FunctionLiteral* lit);
FlowGraphBuilder() {}
FlowGraph* graph() { return &graph_; }
ZoneList<Node*>* preorder() { return &preorder_; }
ZoneList<Node*>* postorder() { return &postorder_; }
ZoneList<Expression*>* body_definitions() { return &body_definitions_; }
FlowGraph* Build(FunctionLiteral* lit);
private:
ExitNode* global_exit() { return global_exit_; }
// Helpers that allow transforming the AST during flow graph construction.
void VisitStatements(ZoneList<Statement*>* stmts);
Statement* ProcessStatement(Statement* stmt);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
FlowGraph graph_;
ExitNode* global_exit_;
ZoneList<Node*> preorder_;
ZoneList<Node*> postorder_;
// The flow graph builder collects a list of explicit definitions
// (assignments and count operations) to stack-allocated variables to use
// for reaching definitions analysis. It does not count the implicit
// definition at function entry. AST node numbers in the AST are used to
// refer into this list.
int variable_count_;
ZoneList<Expression*> body_definitions_;
BasicBlock* entry_;
BasicBlock* exit_;
BasicBlock* current_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
};
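The preorder_ and postorder_ lists above hold the two classic depth-first traversal orders of the graph. As an illustration only, here is a minimal standalone C++ sketch (with a simplified Block type standing in for V8's BasicBlock, not real V8 code) that computes both orders over a diamond-shaped, single-entry/single-exit graph:

#include <cstdio>
#include <set>
#include <vector>

struct Block {
  const char* name;
  std::vector<Block*> successors;
};

// Record a block in preorder when it is first reached and in postorder once
// all of its successors have been finished -- the two orders FlowGraph
// stores in preorder_ and postorder_.
static void Visit(Block* block, std::set<Block*>* seen,
                  std::vector<Block*>* pre, std::vector<Block*>* post) {
  if (!seen->insert(block).second) return;
  pre->push_back(block);
  for (size_t i = 0; i < block->successors.size(); i++) {
    Visit(block->successors[i], seen, pre, post);
  }
  post->push_back(block);
}

int main() {
  // Diamond: entry branches into left/right, which merge into the exit.
  Block exit_block = {"exit", {}};
  Block left = {"left", {&exit_block}};
  Block right = {"right", {&exit_block}};
  Block entry = {"entry", {&left, &right}};

  std::set<Block*> seen;
  std::vector<Block*> pre, post;
  Visit(&entry, &seen, &pre, &post);

  for (size_t i = 0; i < pre.size(); i++) std::printf("pre:  %s\n", pre[i]->name);
  for (size_t i = 0; i < post.size(); i++) std::printf("post: %s\n", post[i]->name);
  return 0;
}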

53
deps/v8/src/frames.cc

@ -382,6 +382,12 @@ void EntryFrame::ComputeCallerState(State* state) const {
}
void EntryFrame::SetCallerFp(Address caller_fp) {
const int offset = EntryFrameConstants::kCallerFPOffset;
Memory::Address_at(this->fp() + offset) = caller_fp;
}
StackFrame::Type EntryFrame::GetCallerState(State* state) const {
const int offset = EntryFrameConstants::kCallerFPOffset;
Address fp = Memory::Address_at(this->fp() + offset);
@ -414,6 +420,11 @@ void ExitFrame::ComputeCallerState(State* state) const {
}
void ExitFrame::SetCallerFp(Address caller_fp) {
Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset) = caller_fp;
}
Address ExitFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPDisplacement;
}
@ -443,6 +454,12 @@ void StandardFrame::ComputeCallerState(State* state) const {
}
void StandardFrame::SetCallerFp(Address caller_fp) {
Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset) =
caller_fp;
}
bool StandardFrame::IsExpressionInsideHandler(int n) const {
Address address = GetExpressionAddress(n);
for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
@ -767,4 +784,40 @@ int JSCallerSavedCode(int n) {
}
#define DEFINE_WRAPPER(type, field) \
class field##_Wrapper : public ZoneObject { \
public: /* NOLINT */ \
field##_Wrapper(const field& original) : frame_(original) { \
} \
field frame_; \
};
STACK_FRAME_TYPE_LIST(DEFINE_WRAPPER)
#undef DEFINE_WRAPPER
static StackFrame* AllocateFrameCopy(StackFrame* frame) {
#define FRAME_TYPE_CASE(type, field) \
case StackFrame::type: { \
field##_Wrapper* wrapper = \
new field##_Wrapper(*(reinterpret_cast<field*>(frame))); \
return &wrapper->frame_; \
}
switch (frame->type()) {
STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
default: UNREACHABLE();
}
#undef FRAME_TYPE_CASE
return NULL;
}
Vector<StackFrame*> CreateStackMap() {
ZoneList<StackFrame*> list(10);
for (StackFrameIterator it; !it.done(); it.Advance()) {
StackFrame* frame = AllocateFrameCopy(it.frame());
list.Add(frame);
}
return list.ToVector();
}
} } // namespace v8::internal
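AllocateFrameCopy above relies on the X-macro idiom: STACK_FRAME_TYPE_LIST(V) expands a caller-supplied macro once per frame type, so DEFINE_WRAPPER generates one wrapper class per type. A self-contained sketch of the same idiom, with a hypothetical two-entry type list (none of these names are V8's):

#include <cstdio>

#define DEMO_TYPE_LIST(V) \
  V(ENTRY, EntryDemo)     \
  V(EXIT, ExitDemo)

struct EntryDemo { const char* name() const { return "entry"; } };
struct ExitDemo { const char* name() const { return "exit"; } };

// Expand the list once to generate a wrapper class per entry, as
// DEFINE_WRAPPER does for stack frames.
#define DEFINE_DEMO_WRAPPER(tag, klass)              \
  class klass##_Wrapper {                            \
   public:                                           \
    explicit klass##_Wrapper(const klass& original)  \
        : wrapped_(original) {}                      \
    klass wrapped_;                                  \
  };
DEMO_TYPE_LIST(DEFINE_DEMO_WRAPPER)
#undef DEFINE_DEMO_WRAPPER

int main() {
  EntryDemo entry;
  EntryDemo_Wrapper wrapper(entry);
  std::printf("wrapped: %s\n", wrapper.wrapped_.name());
  return 0;
}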

20
deps/v8/src/frames.h

@ -114,6 +114,12 @@ class StackFrame BASE_EMBEDDED {
// by the debugger.
enum Id { NO_ID = 0 };
// Copy constructor; it breaks the connection to host iterator.
StackFrame(const StackFrame& original) {
this->state_ = original.state_;
this->iterator_ = NULL;
}
// Type testers.
bool is_entry() const { return type() == ENTRY; }
bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
@ -132,6 +138,8 @@ class StackFrame BASE_EMBEDDED {
Address pc() const { return *pc_address(); }
void set_pc(Address pc) { *pc_address() = pc; }
virtual void SetCallerFp(Address caller_fp) = 0;
Address* pc_address() const { return state_.pc_address; }
// Get the id of this stack frame.
@ -200,7 +208,8 @@ class StackFrame BASE_EMBEDDED {
friend class StackHandlerIterator;
friend class SafeStackFrameIterator;
DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrame);
private:
void operator=(const StackFrame& original);
};
@ -218,6 +227,7 @@ class EntryFrame: public StackFrame {
ASSERT(frame->is_entry());
return static_cast<EntryFrame*>(frame);
}
virtual void SetCallerFp(Address caller_fp);
protected:
explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
@ -268,6 +278,8 @@ class ExitFrame: public StackFrame {
// Garbage collection support.
virtual void Iterate(ObjectVisitor* v) const;
virtual void SetCallerFp(Address caller_fp);
static ExitFrame* cast(StackFrame* frame) {
ASSERT(frame->is_exit());
return static_cast<ExitFrame*>(frame);
@ -303,6 +315,8 @@ class StandardFrame: public StackFrame {
inline void SetExpression(int index, Object* value);
int ComputeExpressionsCount() const;
virtual void SetCallerFp(Address caller_fp);
static StandardFrame* cast(StackFrame* frame) {
ASSERT(frame->is_standard());
return static_cast<StandardFrame*>(frame);
@ -658,6 +672,10 @@ class StackFrameLocator BASE_EMBEDDED {
};
// Reads all frames on the current stack and copies them into the current
// zone memory.
Vector<StackFrame*> CreateStackMap();
} } // namespace v8::internal
#endif // V8_FRAMES_H_
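The copy constructor added to StackFrame copies the frame's state but clears iterator_, since a copy made by AllocateFrameCopy must outlive the iteration that produced it. A standalone sketch of that detach-on-copy pattern, using stand-in types rather than the real StackFrame:

#include <cassert>
#include <cstdio>

struct Iterator;  // Stand-in for StackFrameIterator; never dereferenced.

class Frame {
 public:
  explicit Frame(Iterator* host) : state_(0), iterator_(host) {}
  // Copying keeps the state but deliberately drops the back-pointer to the
  // iterator that produced this frame, as in StackFrame's copy constructor.
  Frame(const Frame& original)
      : state_(original.state_), iterator_(nullptr) {}
  Iterator* iterator() const { return iterator_; }
 private:
  int state_;
  Iterator* iterator_;
  void operator=(const Frame&);  // Assignment stays disallowed.
};

int main() {
  Iterator* host = reinterpret_cast<Iterator*>(0x1);  // Dummy non-null host.
  Frame live(host);
  Frame copy(live);  // Detached snapshot: safe to keep after iteration ends.
  assert(copy.iterator() == nullptr);
  std::printf("detached: %d\n", copy.iterator() == nullptr ? 1 : 0);
  return 0;
}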

7
deps/v8/src/globals.h

@ -112,8 +112,9 @@ typedef byte* Address;
#define V8PRIxPTR "lx"
#endif
#if defined(__APPLE__) && defined(__MACH__)
#define USING_MAC_ABI
#if (defined(__APPLE__) && defined(__MACH__)) || \
defined(__FreeBSD__) || defined(__OpenBSD__)
#define USING_BSD_ABI
#endif
// Code-point values in Unicode 4.0 are 21 bits wide.
@ -457,7 +458,7 @@ struct AccessorDescriptor {
// Logging and profiling.
// A StateTag represents a possible state of the VM. When compiled with
// ENABLE_LOGGING_AND_PROFILING, the logger maintains a stack of these.
// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these.
// Creating a VMState object enters a state by pushing on the stack, and
// destroying a VMState object leaves a state by popping the current state
// from the stack.

4
deps/v8/src/handles.cc

@ -737,7 +737,7 @@ bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
CompilationInfo info(function, 0, receiver);
bool result = CompileLazyHelper(&info, flag);
LOG(FunctionCreateEvent(*function));
PROFILE(FunctionCreateEvent(*function));
return result;
}
@ -747,7 +747,7 @@ bool CompileLazyInLoop(Handle<JSFunction> function,
ClearExceptionFlag flag) {
CompilationInfo info(function, 1, receiver);
bool result = CompileLazyHelper(&info, flag);
LOG(FunctionCreateEvent(*function));
PROFILE(FunctionCreateEvent(*function));
return result;
}

28
deps/v8/src/heap-inl.h

@ -236,19 +236,27 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
CopyWords(dst, src, byte_size / kPointerSize);
}
// Use block copying memcpy if the segment we're copying is large
// enough to justify the extra call/setup overhead.
static const int kBlockCopyLimit = 16 * kPointerSize;
if (byte_size >= kBlockCopyLimit) {
memcpy(dst, src, byte_size);
} else {
int remaining = byte_size / kPointerSize;
do {
remaining--;
void Heap::MoveBlock(Object** dst, Object** src, size_t byte_size) {
ASSERT(IsAligned<size_t>(byte_size, kPointerSize));
int size_in_words = byte_size / kPointerSize;
if ((dst < src) || (dst >= (src + size_in_words))) {
ASSERT((dst >= (src + size_in_words)) ||
((OffsetFrom(reinterpret_cast<Address>(src)) -
OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));
Object** end = src + size_in_words;
while (src != end) {
*dst++ = *src++;
} while (remaining > 0);
}
} else {
memmove(dst, src, byte_size);
}
}
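MoveBlock above word-copies when the destination cannot overlap the source's tail and falls back to memmove otherwise. A runnable standalone sketch of the same policy (Word and MoveWords are stand-ins, not the V8 declarations):

#include <cstddef>
#include <cstdio>
#include <cstring>

typedef void* Word;

// Word-by-word copy when dst cannot overlap the source's tail; memmove
// handles the overlapping case correctly.
static void MoveWords(Word* dst, Word* src, size_t byte_size) {
  size_t words = byte_size / sizeof(Word);
  if (dst < src || dst >= src + words) {  // No forward overlap possible.
    Word* end = src + words;
    while (src != end) *dst++ = *src++;
  } else {
    std::memmove(dst, src, byte_size);
  }
}

int main() {
  Word buf[8] = {0};
  buf[0] = reinterpret_cast<Word>(42);
  MoveWords(buf + 4, buf, 2 * sizeof(Word));  // Disjoint: takes the word loop.
  MoveWords(buf + 1, buf, 4 * sizeof(Word));  // Overlapping: takes memmove.
  std::printf("buf[1] = %p, buf[4] = %p\n", buf[1], buf[4]);
  return 0;
}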

100
deps/v8/src/heap.cc

@ -562,23 +562,18 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
EnsureFromSpaceIsCommitted();
// Perform mark-sweep with optional compaction.
if (collector == MARK_COMPACTOR) {
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
}
// Always perform a scavenge to make room in new space.
Scavenge();
// Update the old space promotion limits after the scavenge due to
// promotions during scavenge.
if (collector == MARK_COMPACTOR) {
int old_gen_size = PromotedSpaceSize();
old_gen_promotion_limit_ =
old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
} else {
Scavenge();
}
Counters::objs_since_last_young.Set(0);
@ -764,6 +759,17 @@ static void VerifyNonPointerSpacePointers() {
#endif
void Heap::CheckNewSpaceExpansionCriteria() {
if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
survived_since_last_expansion_ > new_space_.Capacity()) {
// Grow the size of new space if there is room to grow and enough
// data has survived scavenge since the last expansion.
new_space_.Grow();
survived_since_last_expansion_ = 0;
}
}
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@ -780,13 +786,7 @@ void Heap::Scavenge() {
// Used for updating survived_since_last_expansion_ at function end.
int survived_watermark = PromotedSpaceSize();
if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
survived_since_last_expansion_ > new_space_.Capacity()) {
// Grow the size of new space if there is room to grow and enough
// data has survived scavenge since the last expansion.
new_space_.Grow();
survived_since_last_expansion_ = 0;
}
CheckNewSpaceExpansionCriteria();
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
@ -837,15 +837,17 @@ void Heap::Scavenge() {
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
ScavengeExternalStringTable();
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
ASSERT(new_space_front == new_space_.top());
// Set age mark.
new_space_.set_age_mark(new_space_.top());
// Update how much has survived scavenge.
survived_since_last_expansion_ +=
(PromotedSpaceSize() - survived_watermark) + new_space_.Size();
IncrementYoungSurvivorsCounter(
(PromotedSpaceSize() - survived_watermark) + new_space_.Size());
LOG(ResourceEvent("scavenge", "end"));
@ -853,7 +855,22 @@ void Heap::Scavenge() {
}
void Heap::ScavengeExternalStringTable() {
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
FinalizeExternalString(String::cast(*p));
return NULL;
}
// String is still reachable.
return String::cast(first_word.ToForwardingAddress());
}
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
ExternalStringTable::Verify();
if (ExternalStringTable::new_space_strings_.is_empty()) return;
@ -864,16 +881,10 @@ void Heap::ScavengeExternalStringTable() {
for (Object** p = start; p < end; ++p) {
ASSERT(Heap::InFromSpace(*p));
MapWord first_word = HeapObject::cast(*p)->map_word();
String* target = updater_func(p);
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
FinalizeExternalString(String::cast(*p));
continue;
}
if (target == NULL) continue;
// String is still reachable.
String* target = String::cast(first_word.ToForwardingAddress());
ASSERT(target->IsExternalString());
if (Heap::InNewSpace(target)) {
@ -1487,10 +1498,9 @@ Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
}
Object* Heap::CreateOddball(Map* map,
const char* to_string,
Object* Heap::CreateOddball(const char* to_string,
Object* to_number) {
Object* result = Allocate(map, OLD_DATA_SPACE);
Object* result = Allocate(oddball_map(), OLD_DATA_SPACE);
if (result->IsFailure()) return result;
return Oddball::cast(result)->Initialize(to_string, to_number);
}
@ -1594,34 +1604,27 @@ bool Heap::CreateInitialObjects() {
Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
Oddball::cast(undefined_value())->set_to_number(nan_value());
// Assign the print strings for oddballs after creating the symbol table.
symbol = LookupAsciiSymbol("null");
if (symbol->IsFailure()) return false;
Oddball::cast(null_value())->set_to_string(String::cast(symbol));
Oddball::cast(null_value())->set_to_number(Smi::FromInt(0));
// Allocate the null_value
obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
if (obj->IsFailure()) return false;
obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1));
obj = CreateOddball("true", Smi::FromInt(1));
if (obj->IsFailure()) return false;
set_true_value(obj);
obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0));
obj = CreateOddball("false", Smi::FromInt(0));
if (obj->IsFailure()) return false;
set_false_value(obj);
obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1));
obj = CreateOddball("hole", Smi::FromInt(-1));
if (obj->IsFailure()) return false;
set_the_hole_value(obj);
obj = CreateOddball(
oddball_map(), "no_interceptor_result_sentinel", Smi::FromInt(-2));
obj = CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
if (obj->IsFailure()) return false;
set_no_interceptor_result_sentinel(obj);
obj = CreateOddball(oddball_map(), "termination_exception", Smi::FromInt(-3));
obj = CreateOddball("termination_exception", Smi::FromInt(-3));
if (obj->IsFailure()) return false;
set_termination_exception(obj);
@ -1797,11 +1800,13 @@ Object* Heap::SmiOrNumberFromDouble(double value,
}
Object* Heap::NumberToString(Object* number) {
Object* Heap::NumberToString(Object* number, bool check_number_string_cache) {
Counters::number_to_string_runtime.Increment();
Object* cached = GetNumberStringCache(number);
if (cached != undefined_value()) {
return cached;
if (check_number_string_cache) {
Object* cached = GetNumberStringCache(number);
if (cached != undefined_value()) {
return cached;
}
}
char arr[100];
@ -2313,7 +2318,8 @@ Object* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Address old_addr = code->address();
int relocation_offset = code->relocation_start() - old_addr;
size_t relocation_offset =
static_cast<size_t>(code->relocation_start() - old_addr);
Object* result;
if (new_obj_size > MaxObjectSizeInPagedSpace()) {

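Several of the heap.cc changes above introduce an updater-callback pattern: UpdateNewSpaceReferencesInExternalStringTable walks the table's slots, asks the callback for each entry's new location, and drops entries for which the callback returns NULL. A toy standalone sketch of that contract, with int* standing in for String* and a trivial updater policy of my own invention:

#include <cstdio>
#include <vector>

// The callback inspects a table slot and returns the entry's new location,
// or nullptr if the entry died and should be dropped from the table.
typedef int* (*UpdaterCallback)(int** slot);

static std::vector<int*> table;

static void UpdateTable(UpdaterCallback updater) {
  size_t kept = 0;
  for (size_t i = 0; i < table.size(); i++) {
    int* target = updater(&table[i]);
    if (target == nullptr) continue;  // Finalized entry: skip it.
    table[kept++] = target;           // Survivor: store its new location.
  }
  table.resize(kept);
}

static int survivor = 7;

// Toy policy: odd values are "dead"; even values "move" to survivor.
static int* Updater(int** slot) {
  return (**slot % 2 != 0) ? nullptr : &survivor;
}

int main() {
  int a = 2, b = 3, c = 4;
  table.push_back(&a);
  table.push_back(&b);
  table.push_back(&c);
  UpdateTable(Updater);
  std::printf("entries kept: %zu\n", table.size());  // Prints 2.
  return 0;
}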
54
deps/v8/src/heap.h

@ -149,6 +149,13 @@ class ZoneScopeInfo;
V(number_symbol, "number") \
V(Number_symbol, "Number") \
V(RegExp_symbol, "RegExp") \
V(source_symbol, "source") \
V(global_symbol, "global") \
V(ignore_case_symbol, "ignoreCase") \
V(multiline_symbol, "multiline") \
V(input_symbol, "input") \
V(index_symbol, "index") \
V(last_index_symbol, "lastIndex") \
V(object_symbol, "object") \
V(prototype_symbol, "prototype") \
V(string_symbol, "string") \
@ -195,6 +202,9 @@ class GCTracer;
class HeapStats;
typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
@ -930,7 +940,8 @@ class Heap : public AllStatic {
kRootListLength
};
static Object* NumberToString(Object* number);
static Object* NumberToString(Object* number,
bool check_number_string_cache = true);
static Map* MapForExternalArrayType(ExternalArrayType array_type);
static RootListIndex RootIndexForExternalArrayType(
@ -938,6 +949,30 @@ class Heap : public AllStatic {
static void RecordStats(HeapStats* stats);
// Copy a block of memory from src to dst. The size of the block should be
// pointer-size aligned.
static inline void CopyBlock(Object** dst, Object** src, int byte_size);
// Optimized version of memmove for blocks with pointer-size-aligned sizes
// and pointer-size-aligned addresses.
static inline void MoveBlock(Object** dst, Object** src, size_t byte_size);
// Check the new space expansion criteria and expand semispaces if they are hit.
static void CheckNewSpaceExpansionCriteria();
static inline void IncrementYoungSurvivorsCounter(int survived) {
survived_since_last_expansion_ += survived;
}
static void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
// mark or if we've already filled the bottom 1/16th of the to space,
// we try to promote this object.
static inline bool ShouldBePromoted(Address old_address, int object_size);
static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
private:
@ -1125,16 +1160,17 @@ class Heap : public AllStatic {
static void CreateFixedStubs();
static Object* CreateOddball(Map* map,
const char* to_string,
Object* to_number);
static Object* CreateOddball(const char* to_string, Object* to_number);
// Allocate empty fixed array.
static Object* AllocateEmptyFixedArray();
// Performs a minor collection in new generation.
static void Scavenge();
static void ScavengeExternalStringTable();
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
Object** pointer);
static Address DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front);
@ -1152,11 +1188,6 @@ class Heap : public AllStatic {
HeapObject* target,
int size);
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
// mark or if we've already filled the bottom 1/16th of the to space,
// we try to promote this object.
static inline bool ShouldBePromoted(Address old_address, int object_size);
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// Record the copy of an object in the NewSpace's statistics.
static void RecordCopiedObject(HeapObject* obj);
@ -1175,9 +1206,6 @@ class Heap : public AllStatic {
// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Copy memory from src to dst.
static inline void CopyBlock(Object** dst, Object** src, int byte_size);
// Initializes a function with a shared part and prototype.
// Returns the function.
// Note: this code was factored out of AllocateFunction such that

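The relocated ShouldBePromoted comment spells out the promotion rule. A hedged standalone sketch of that predicate, with explicit parameters replacing the Heap statics and made-up numbers for illustration:

#include <cstdio>

typedef unsigned long Address;

// Promote when the object already survived a scavenge (its address is below
// the age mark), or when to-space has filled past its bottom 1/16th.
static bool ShouldBePromoted(Address old_address, long object_size,
                             Address age_mark, long to_space_size,
                             long to_space_capacity) {
  if (old_address < age_mark) return true;
  return to_space_size + object_size >= to_space_capacity / 16;
}

int main() {
  // Survived once already (below the 0x1000 age mark): promote.
  std::printf("%d\n", ShouldBePromoted(0x0800, 64, 0x1000, 0, 4096));
  // Young, but to-space is past 256 of 4096 bytes: promote anyway.
  std::printf("%d\n", ShouldBePromoted(0x2000, 64, 0x1000, 300, 4096));
  return 0;
}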
4
deps/v8/src/ia32/assembler-ia32.cc

@ -123,8 +123,8 @@ void CpuFeatures::Probe() {
Code::ComputeFlags(Code::STUB),
Handle<Code>::null());
if (!code->IsCode()) return;
LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
PROFILE(CodeCreateEvent(Logger::BUILTIN_TAG,
Code::cast(code), "CpuFeatures::Probe"));
typedef uint64_t (*F0)();
F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
supported_ = probe();

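CpuFeatures::Probe compiles a stub at runtime and calls it through FUNCTION_CAST. A standalone sketch of that cast-and-call pattern, with an ordinary C++ function standing in for generated machine code (emitting real code is out of scope here, and FunctionCast below is my stand-in, not V8's macro):

#include <cstdint>
#include <cstdio>

typedef uint64_t (*F0)();  // Same shape as the probe signature in the diff.

static uint64_t FakeProbe() { return 0x1; }  // Pretend feature bitmask.

// Reinterpret an entry address as a callable C function pointer, as
// FUNCTION_CAST does for the entry of the generated stub.
template <typename F>
static F FunctionCast(uintptr_t addr) {
  return reinterpret_cast<F>(addr);
}

int main() {
  uintptr_t entry = reinterpret_cast<uintptr_t>(&FakeProbe);
  F0 probe = FunctionCast<F0>(entry);
  std::printf("supported = %llx\n",
              static_cast<unsigned long long>(probe()));
  return 0;
}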
665
deps/v8/src/ia32/codegen-ia32.cc

File diff suppressed because it is too large

25
deps/v8/src/ia32/codegen-ia32.h

@ -492,11 +492,8 @@ class CodeGenerator: public AstVisitor {
// Generate code that computes a shortcutting logical operation.
void GenerateLogicalBooleanOperation(BinaryOperation* node);
void GenericBinaryOperation(
Token::Value op,
StaticType* type,
OverwriteMode overwrite_mode,
bool no_negative_zero);
void GenericBinaryOperation(BinaryOperation* expr,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
@ -505,22 +502,19 @@ class CodeGenerator: public AstVisitor {
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result operand.
Result ConstantSmiBinaryOperation(Token::Value op,
Result ConstantSmiBinaryOperation(BinaryOperation* expr,
Result* operand,
Handle<Object> constant_operand,
StaticType* type,
bool reversed,
OverwriteMode overwrite_mode,
bool no_negative_zero);
OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results left and right.
Result LikelySmiBinaryOperation(Token::Value op,
Result LikelySmiBinaryOperation(BinaryOperation* expr,
Result* left,
Result* right,
OverwriteMode overwrite_mode,
bool no_negative_zero);
OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two untagged int32 values.
@ -620,7 +614,7 @@ class CodeGenerator: public AstVisitor {
void GenerateGetFramePointer(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
@ -634,9 +628,14 @@ class CodeGenerator: public AstVisitor {
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
// Fast call to math functions.
void GenerateMathPow(ZoneList<Expression*>* args);
void GenerateMathSin(ZoneList<Expression*>* args);

50
deps/v8/src/ia32/debug-ia32.cc

@ -206,8 +206,58 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->ret(0);
}
// FrameDropper is a code replacement for a JavaScript frame with possibly
// several frames above.
// There are no calling conventions here, because it never actually gets called;
// it only gets returned to.
// Frame structure (conforms to the InternalFrame structure):
// -- JSFunction
// -- code
// -- SMI marker
// -- context
// -- frame base
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// We do not know our frame height, but set esp based on ebp.
__ lea(esp, Operand(ebp, -4 * kPointerSize));
__ pop(edi); // function
// Skip code self-reference and marker.
__ add(Operand(esp), Immediate(2 * kPointerSize));
__ pop(esi); // Context.
__ pop(ebp);
// Get function code.
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
__ jmp(Operand(edx));
}
#undef __
void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code) {
ASSERT(bottom_js_frame->is_java_script());
Address fp = bottom_js_frame->fp();
Memory::Object_at(fp - 4 * kPointerSize) =
Memory::Object_at(fp - 2 * kPointerSize); // Move edi (function).
Memory::Object_at(fp - 3 * kPointerSize) = *code;
Memory::Object_at(fp - 2 * kPointerSize) = Smi::FromInt(StackFrame::INTERNAL);
}
const int Debug::kFrameDropperFrameSize = 5;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal
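SetUpFrameDropperFrame writes the new five-word frame at fixed fp-relative offsets. A standalone sketch that models the slots as an array and mirrors those three writes (all values here are arbitrary stand-ins, not real heap objects):

#include <cstdio>

int main() {
  const int kFrameDropperFrameSize = 5;  // Words, as in the diff.
  unsigned slot[8] = {0};
  int fp = 6;                  // Frame pointer as a slot index.

  slot[fp - 2] = 0xF00D;       // Originally holds the JSFunction.
  slot[fp - 4] = slot[fp - 2];           // Move the function down.
  slot[fp - 3] = 0xC0DE;                 // Replacement code object.
  slot[fp - 2] = 0x2;                    // INTERNAL frame marker (stand-in).

  std::printf("frame uses %d words below fp\n", kFrameDropperFrameSize);
  for (int i = fp - 4; i <= fp - 2; i++)
    std::printf("slot[fp-%d] = 0x%X\n", fp - i, slot[i]);
  return 0;
}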

2
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -832,7 +832,7 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
NULL,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
LOG(RegExpCodeCreateEvent(*code, *source));
PROFILE(RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
}

19
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -909,6 +909,25 @@ Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
}
Result VirtualFrame::CallJSFunction(int arg_count) {
Result function = Pop();
// InvokeFunction requires function in edi. Move it in there.
function.ToRegister(edi);
function.Unuse();
// +1 for receiver.
PrepareForCall(arg_count + 1, arg_count + 1);
ASSERT(cgen()->HasValidEntryRegisters());
ParameterCount count(arg_count);
__ InvokeFunction(edi, count, CALL_FUNCTION);
RestoreContextRegister();
Result result = cgen()->allocator()->Allocate(eax);
ASSERT(result.is_valid());
return result;
}
Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
PrepareForCall(arg_count, arg_count);
ASSERT(cgen()->HasValidEntryRegisters());

4
deps/v8/src/ia32/virtual-frame-ia32.h

@ -331,6 +331,10 @@ class VirtualFrame: public ZoneObject {
// arguments are consumed by the call.
Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
// Call JS function from top of the stack with arguments
// taken from the stack.
Result CallJSFunction(int arg_count);
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
Result CallRuntime(Runtime::Function* f, int arg_count);

27
deps/v8/src/ic.cc

@ -224,7 +224,8 @@ void IC::Clear(Address address) {
case Code::STORE_IC: return StoreIC::Clear(address, target);
case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::BINARY_OP_IC: return BinaryOpIC::Clear(address, target);
case Code::BINARY_OP_IC: return; // Clearing these is tricky and does not
// make any performance difference.
default: UNREACHABLE();
}
}
@ -1404,25 +1405,6 @@ void BinaryOpIC::patch(Code* code) {
}
void BinaryOpIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
// At the end of a fast case stub there should be a reference to
// a corresponding UNINITIALIZED stub, so look for the last reloc info item.
RelocInfo* rinfo = NULL;
for (RelocIterator it(target, RelocInfo::kCodeTargetMask);
!it.done(); it.next()) {
rinfo = it.rinfo();
}
ASSERT(rinfo != NULL);
Code* uninit_stub = Code::GetCodeFromTargetAddress(rinfo->target_address());
ASSERT(uninit_stub->ic_state() == UNINITIALIZED &&
uninit_stub->kind() == Code::BINARY_OP_IC);
SetTargetAtAddress(address, uninit_stub);
}
const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case DEFAULT: return "Default";
@ -1451,8 +1433,9 @@ BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
Object* right) {
// Patching is never requested for the two smis.
ASSERT(!left->IsSmi() || !right->IsSmi());
if (left->IsSmi() && right->IsSmi()) {
return GENERIC;
}
if (left->IsNumber() && right->IsNumber()) {
return HEAP_NUMBERS;

17
deps/v8/src/jump-target-light.cc

@ -77,23 +77,10 @@ DeferredCode::DeferredCode()
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
comment_ = "";
CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
#endif
// Copy the register locations from the code generator's frame.
// These are the registers that will be spilled on entry to the
// deferred code and restored on exit.
VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int loc = frame->register_location(i);
if (loc == VirtualFrame::kIllegalIndex) {
registers_[i] = kIgnore;
} else {
// Needs to be restored on exit but not saved on entry.
registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
}
}
}
} } // namespace v8::internal

134
deps/v8/src/liveedit-debugger.js

@ -180,11 +180,18 @@ Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str,
var position_patch_report;
function PatchPositions(new_info, shared_info) {
if (!shared_info) {
// TODO: explain what is happening.
// TODO(LiveEdit): explain what is happening.
return;
}
%LiveEditPatchFunctionPositions(shared_info.raw_array,
position_change_array);
var breakpoint_position_update = %LiveEditPatchFunctionPositions(
shared_info.raw_array, position_change_array);
for (var i = 0; i < breakpoint_position_update.length; i += 2) {
var new_pos = breakpoint_position_update[i];
var break_point_object = breakpoint_position_update[i + 1];
change_log.push( { breakpoint_position_update:
{ from: break_point_object.source_position(), to: new_pos } } );
break_point_object.updateSourcePosition(new_pos, script);
}
position_patch_report.push( { name: new_info.function_name } );
}
@ -389,19 +396,32 @@ Debug.LiveEditChangeScript.CheckStackActivations = function(shared_wrapper_list,
for (var i = 0; i < shared_wrapper_list.length; i++) {
shared_list[i] = shared_wrapper_list[i].info;
}
var result = %LiveEditCheckStackActivations(shared_list);
var result = %LiveEditCheckAndDropActivations(shared_list, true);
if (result[shared_list.length]) {
// Extra array element may contain error message.
throw new liveedit.Failure(result[shared_list.length]);
}
var problems = new Array();
var dropped = new Array();
for (var i = 0; i < shared_list.length; i++) {
if (result[i] == liveedit.FunctionPatchabilityStatus.FUNCTION_BLOCKED_ON_STACK) {
var shared = shared_list[i];
var shared = shared_wrapper_list[i];
if (result[i] == liveedit.FunctionPatchabilityStatus.REPLACED_ON_ACTIVE_STACK) {
dropped.push({ name: shared.function_name } );
} else if (result[i] != liveedit.FunctionPatchabilityStatus.AVAILABLE_FOR_PATCH) {
var description = {
name: shared.function_name,
start_pos: shared.start_position,
end_pos: shared.end_position
end_pos: shared.end_position,
replace_problem:
liveedit.FunctionPatchabilityStatus.SymbolName(result[i])
};
problems.push(description);
}
}
if (dropped.length > 0) {
change_log.push({ dropped_from_stack: dropped });
}
if (problems.length > 0) {
change_log.push( { functions_on_stack: problems } );
throw new liveedit.Failure("Blocked by functions on stack");
@ -410,8 +430,21 @@ Debug.LiveEditChangeScript.CheckStackActivations = function(shared_wrapper_list,
// A copy of the FunctionPatchabilityStatus enum from liveedit.h
Debug.LiveEditChangeScript.FunctionPatchabilityStatus = {
FUNCTION_AVAILABLE_FOR_PATCH: 0,
FUNCTION_BLOCKED_ON_STACK: 1
AVAILABLE_FOR_PATCH: 1,
BLOCKED_ON_ACTIVE_STACK: 2,
BLOCKED_ON_OTHER_STACK: 3,
BLOCKED_UNDER_NATIVE_CODE: 4,
REPLACED_ON_ACTIVE_STACK: 5
}
Debug.LiveEditChangeScript.FunctionPatchabilityStatus.SymbolName =
function(code) {
// 'enum' is a reserved word in JavaScript, so use a different name.
var statuses = Debug.LiveEditChangeScript.FunctionPatchabilityStatus;
for (var name in statuses) {
  if (statuses[name] == code) {
    return name;
  }
}
}
@ -429,3 +462,86 @@ Debug.LiveEditChangeScript.Failure.prototype.toString = function() {
Debug.LiveEditChangeScript.GetPcFromSourcePos = function(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
}
// A LiveEdit namespace is declared inside a single function constructor.
Debug.LiveEdit = new function() {
var LiveEdit = this;
// LiveEdit main entry point: changes a script text to a new string.
LiveEdit.SetScriptSource = function(script, new_source, change_log) {
var old_source = script.source;
var diff = FindSimpleDiff(old_source, new_source);
if (!diff) {
return;
}
Debug.LiveEditChangeScript(script, diff.change_pos, diff.old_len,
new_source.substring(diff.change_pos, diff.change_pos + diff.new_len),
change_log);
}
// Finds the difference between two strings in the form of a single chunk.
// This is a temporary solution. We should calculate a real diff instead.
function FindSimpleDiff(old_source, new_source) {
var change_pos;
var old_len;
var new_len;
// A find-range block. Whenever control leaves it, it must have set the 3
// local variables declared above.
find_range:
{
// First look from the beginning of strings.
var pos1;
{
var next_pos;
for (pos1 = 0; true; pos1 = next_pos) {
if (pos1 >= old_source.length) {
change_pos = pos1;
old_len = 0;
new_len = new_source.length - pos1;
break find_range;
}
if (pos1 >= new_source.length) {
change_pos = pos1;
old_len = old_source.length - pos1;
new_len = 0;
break find_range;
}
if (old_source[pos1] != new_source[pos1]) {
break;
}
next_pos = pos1 + 1;
}
}
// Now compare strings from the ends.
change_pos = pos1;
var pos_old;
var pos_new;
{
for (pos_old = old_source.length - 1, pos_new = new_source.length - 1;
true;
pos_old--, pos_new--) {
if (pos_old - change_pos + 1 < 0 || pos_new - change_pos + 1 < 0) {
old_len = pos_old - change_pos + 2;
new_len = pos_new - change_pos + 2;
break find_range;
}
if (old_source[pos_old] != new_source[pos_new]) {
old_len = pos_old - change_pos + 1;
new_len = pos_new - change_pos + 1;
break find_range;
}
}
}
}
if (old_len == 0 && new_len == 0) {
// no change
return;
}
return { "change_pos": change_pos, "old_len": old_len, "new_len": new_len };
}
}
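FindSimpleDiff above is a common-prefix/common-suffix scan. An equivalent standalone version, kept in C++ for consistency with the other examples here (the struct and names are mine, not V8's; unlike the JS original, it reports a zero-length chunk instead of returning nothing when the strings are equal):

#include <cstddef>
#include <cstdio>
#include <string>

struct SimpleDiff {
  std::size_t change_pos;
  std::size_t old_len;
  std::size_t new_len;
};

// Scan the common prefix, then the common suffix of what remains; whatever
// is left in the middle is the single changed chunk.
static SimpleDiff Find(const std::string& old_s, const std::string& new_s) {
  std::size_t p = 0;
  while (p < old_s.size() && p < new_s.size() && old_s[p] == new_s[p]) p++;
  std::size_t old_end = old_s.size();
  std::size_t new_end = new_s.size();
  while (old_end > p && new_end > p &&
         old_s[old_end - 1] == new_s[new_end - 1]) {
    old_end--;
    new_end--;
  }
  SimpleDiff d = {p, old_end - p, new_end - p};
  return d;
}

int main() {
  SimpleDiff d = Find("function f() { return 1; }",
                      "function f() { return 42; }");
  std::printf("pos=%zu old_len=%zu new_len=%zu\n",
              d.change_pos, d.old_len, d.new_len);  // pos=22 old_len=1 new_len=2
  return 0;
}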

367
deps/v8/src/liveedit.cc

@ -34,6 +34,7 @@
#include "scopes.h"
#include "global-handles.h"
#include "debug.h"
#include "memory.h"
namespace v8 {
namespace internal {
@ -446,6 +447,13 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
}
// Check whether the code is natural function code (not a lazy-compile
// stub).
static bool IsJSFunctionCode(Code* code) {
return code->kind() == Code::FUNCTION;
}
void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
HandleScope scope;
@ -455,15 +463,30 @@ void LiveEdit::ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
ReplaceCodeObject(shared_info->code(),
*(compile_info_wrapper.GetFunctionCode()));
if (IsJSFunctionCode(shared_info->code())) {
ReplaceCodeObject(shared_info->code(),
*(compile_info_wrapper.GetFunctionCode()));
}
if (shared_info->debug_info()->IsDebugInfo()) {
Handle<DebugInfo> debug_info(DebugInfo::cast(shared_info->debug_info()));
Handle<Code> new_original_code =
Factory::CopyCode(compile_info_wrapper.GetFunctionCode());
debug_info->set_original_code(*new_original_code);
}
shared_info->set_start_position(compile_info_wrapper.GetStartPosition());
shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
// update breakpoints, original code, constructor stub
shared_info->set_construct_stub(
Builtins::builtin(Builtins::JSConstructStubGeneric));
// update breakpoints
}
// TODO(635): Eval caches its scripts (same text -- same compiled info).
// Make sure we clear such caches.
void LiveEdit::RelinkFunctionToScript(Handle<JSArray> shared_info_array,
Handle<Script> script_handle) {
SharedInfoWrapper shared_info_wrapper(shared_info_array);
@ -535,8 +558,9 @@ class RelocInfoBuffer {
Vector<byte> GetResult() {
// Return the bytes from pos up to end of buffer.
return Vector<byte>(reloc_info_writer_.pos(),
buffer_ + buffer_size_ - reloc_info_writer_.pos());
int result_size =
static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer_.pos());
return Vector<byte>(reloc_info_writer_.pos(), result_size);
}
private:
@ -558,7 +582,8 @@ class RelocInfoBuffer {
byte* new_buffer = NewArray<byte>(new_buffer_size);
// Copy the data.
int curently_used_size = buffer_ + buffer_size_ - reloc_info_writer_.pos();
int curently_used_size =
static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
memmove(new_buffer + new_buffer_size - curently_used_size,
reloc_info_writer_.pos(), curently_used_size);
@ -621,13 +646,28 @@ static Handle<Code> PatchPositionsInCode(Handle<Code> code,
}
void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
Handle<JSArray> position_change_array) {
static Handle<Object> GetBreakPointObjectsForJS(
Handle<BreakPointInfo> break_point_info) {
if (break_point_info->break_point_objects()->IsFixedArray()) {
Handle<FixedArray> fixed_array(
FixedArray::cast(break_point_info->break_point_objects()));
Handle<Object> array = Factory::NewJSArrayWithElements(fixed_array);
return array;
} else {
return Handle<Object>(break_point_info->break_point_objects());
}
}
Handle<JSArray> LiveEdit::PatchFunctionPositions(
Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array) {
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> info = shared_info_wrapper.GetInfo();
info->set_start_position(TranslatePosition(info->start_position(),
position_change_array));
int old_function_start = info->start_position();
int new_function_start = TranslatePosition(old_function_start,
position_change_array);
info->set_start_position(new_function_start);
info->set_end_position(TranslatePosition(info->end_position(),
position_change_array));
@ -635,17 +675,24 @@ void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
TranslatePosition(info->function_token_position(),
position_change_array));
// Patch relocation info section of the code.
Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
position_change_array);
if (*patched_code != info->code()) {
// Replace all references to the code across the heap. In particular,
// some stubs may refer to this code and this code may be being executed
// on stack (it is safe to substitute the code object on stack, because
// we only change the structure of rinfo and leave instructions untouched).
ReplaceCodeObject(info->code(), *patched_code);
if (IsJSFunctionCode(info->code())) {
// Patch relocation info section of the code.
Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
position_change_array);
if (*patched_code != info->code()) {
// Replace all references to the code across the heap. In particular,
// some stubs may refer to this code and this code may be being executed
// on stack (it is safe to substitute the code object on stack, because
// we only change the structure of rinfo and leave instructions
// untouched).
ReplaceCodeObject(info->code(), *patched_code);
}
}
Handle<JSArray> result = Factory::NewJSArray(0);
int result_len = 0;
if (info->debug_info()->IsDebugInfo()) {
Handle<DebugInfo> debug_info(DebugInfo::cast(info->debug_info()));
Handle<Code> patched_orig_code =
@ -664,12 +711,288 @@ void LiveEdit::PatchFunctionPositions(Handle<JSArray> shared_info_array,
}
Handle<BreakPointInfo> info(
BreakPointInfo::cast(break_point_infos->get(i)));
int new_position = TranslatePosition(info->source_position()->value(),
int old_in_script_position = info->source_position()->value() +
old_function_start;
int new_in_script_position = TranslatePosition(old_in_script_position,
position_change_array);
info->set_source_position(Smi::FromInt(new_position));
info->set_source_position(
Smi::FromInt(new_in_script_position - new_function_start));
if (old_in_script_position != new_in_script_position) {
SetElement(result, result_len,
Handle<Smi>(Smi::FromInt(new_in_script_position)));
SetElement(result, result_len + 1,
GetBreakPointObjectsForJS(info));
result_len += 2;
}
}
}
return result;
}
// Checks an activation against a list of functions. If a function matches,
// its status in the result array is changed to the status argument's value.
static bool CheckActivation(Handle<JSArray> shared_info_array,
Handle<JSArray> result, StackFrame* frame,
LiveEdit::FunctionPatchabilityStatus status) {
if (!frame->is_java_script()) {
return false;
}
int len = Smi::cast(shared_info_array->length())->value();
for (int i = 0; i < len; i++) {
JSValue* wrapper = JSValue::cast(shared_info_array->GetElement(i));
Handle<SharedFunctionInfo> shared(
SharedFunctionInfo::cast(wrapper->value()));
if (frame->code() == shared->code()) {
SetElement(result, i, Handle<Smi>(Smi::FromInt(status)));
return true;
}
}
return false;
}
// Iterates over the handler chain and removes all elements that are inside
// the frames being dropped.
static bool FixTryCatchHandler(StackFrame* top_frame,
StackFrame* bottom_frame) {
Address* pointer_address =
&Memory::Address_at(Top::get_address_from_id(Top::k_handler_address));
while (*pointer_address < top_frame->sp()) {
pointer_address = &Memory::Address_at(*pointer_address);
}
Address* above_frame_address = pointer_address;
while (*pointer_address < bottom_frame->fp()) {
pointer_address = &Memory::Address_at(*pointer_address);
}
bool change = *above_frame_address != *pointer_address;
*above_frame_address = *pointer_address;
return change;
}
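FixTryCatchHandler treats the handler chain as a linked list threaded through the stack and splices out every handler inside the dropped range using two pointer walks. A standalone sketch of that splice, with integer "addresses" and an explicit next[] table instead of raw stack memory (the sketch assumes, like the real code, that the chain continues past the dropped range):

#include <cstdio>
#include <vector>

int main() {
  // next[addr] holds the "address" of the next, older handler; -1 ends the
  // chain. Handlers live at addresses 1, 3, 5, 8; we drop the range [2, 7).
  std::vector<int> next(10, -1);
  int head = 1;
  next[1] = 3;
  next[3] = 5;
  next[5] = 8;

  int drop_low = 2, drop_high = 7;
  int* link = &head;
  while (*link < drop_low) link = &next[*link];   // Walk to the range.
  int* above = link;                              // Last link above it.
  while (*link < drop_high) link = &next[*link];  // Walk through the range.
  *above = *link;                                 // Splice the range out.

  for (int h = head; h != -1; h = next[h]) std::printf("handler @%d\n", h);
  return 0;  // Prints the survivors: 1 and 8.
}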
// Removes the specified range of frames from the stack. There may be one or
// more frames in the range. In any case, the bottom frame is restarted rather
// than dropped, and therefore has to be a JavaScript frame.
// Returns an error message or NULL.
static const char* DropFrames(Vector<StackFrame*> frames,
int top_frame_index,
int bottom_js_frame_index) {
StackFrame* pre_top_frame = frames[top_frame_index - 1];
StackFrame* top_frame = frames[top_frame_index];
StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
ASSERT(bottom_js_frame->is_java_script());
// Check the nature of the top frame.
if (pre_top_frame->code()->is_inline_cache_stub() &&
pre_top_frame->code()->ic_state() == DEBUG_BREAK) {
// OK, we can drop inline cache calls.
} else if (pre_top_frame->code() ==
Builtins::builtin(Builtins::FrameDropper_LiveEdit)) {
// OK, we can drop our own code.
} else if (pre_top_frame->code()->kind() == Code::STUB &&
pre_top_frame->code()->major_key()) {
// Unit Test entry, it's fine, we support this case.
} else {
return "Unknown structure of stack above changing function";
}
Address unused_stack_top = top_frame->sp();
Address unused_stack_bottom = bottom_js_frame->fp()
- Debug::kFrameDropperFrameSize * kPointerSize // Size of the new frame.
+ kPointerSize; // The bigger-address end is exclusive.
if (unused_stack_top > unused_stack_bottom) {
return "Not enough space for frame dropper frame";
}
// Committing now. After this point we should return only a NULL value.
FixTryCatchHandler(pre_top_frame, bottom_js_frame);
// Make sure FixTryCatchHandler is idempotent.
ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
Handle<Code> code(Builtins::builtin(Builtins::FrameDropper_LiveEdit));
top_frame->set_pc(code->entry());
pre_top_frame->SetCallerFp(bottom_js_frame->fp());
Debug::SetUpFrameDropperFrame(bottom_js_frame, code);
for (Address a = unused_stack_top;
a < unused_stack_bottom;
a += kPointerSize) {
Memory::Object_at(a) = Smi::FromInt(0);
}
return NULL;
}
static bool IsDropableFrame(StackFrame* frame) {
return !frame->is_exit();
}
// Fills the result array with the statuses of the functions. If do_drop is
// true, modifies the stack, removing all listed functions where possible.
static const char* DropActivationsInActiveThread(
Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop) {
ZoneScope scope(DELETE_ON_EXIT);
Vector<StackFrame*> frames = CreateStackMap();
int array_len = Smi::cast(shared_info_array->length())->value();
int top_frame_index = -1;
int frame_index = 0;
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
if (frame->id() == Debug::break_frame_id()) {
top_frame_index = frame_index;
break;
}
if (CheckActivation(shared_info_array, result, frame,
LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
// We are still above break_frame. It is not a target frame,
// it is a problem.
return "Debugger mark-up on stack is not found";
}
}
if (top_frame_index == -1) {
// We haven't found the break frame, but no function is blocking us anyway.
return NULL;
}
bool target_frame_found = false;
int bottom_js_frame_index = top_frame_index;
bool c_code_found = false;
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
if (!IsDropableFrame(frame)) {
c_code_found = true;
break;
}
if (CheckActivation(shared_info_array, result, frame,
LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
target_frame_found = true;
bottom_js_frame_index = frame_index;
}
}
if (c_code_found) {
// There are C frames on the stack. Check that there are no target frames
// below them.
for (; frame_index < frames.length(); frame_index++) {
StackFrame* frame = frames[frame_index];
if (frame->is_java_script()) {
if (CheckActivation(shared_info_array, result, frame,
LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
// Cannot drop frame under C frames.
return NULL;
}
}
}
}
if (!do_drop) {
// We are in check-only mode.
return NULL;
}
if (!target_frame_found) {
// Nothing to drop.
return NULL;
}
const char* error_message = DropFrames(frames, top_frame_index,
bottom_js_frame_index);
if (error_message != NULL) {
return error_message;
}
// Adjust break_frame after some frames have been dropped.
StackFrame::Id new_id = StackFrame::NO_ID;
for (int i = bottom_js_frame_index + 1; i < frames.length(); i++) {
if (frames[i]->type() == StackFrame::JAVA_SCRIPT) {
new_id = frames[i]->id();
break;
}
}
// TODO(635): Also patch breakpoint objects in JS.
Debug::FramesHaveBeenDropped(new_id);
// Replace "blocked on active" with "replaced on active" status.
for (int i = 0; i < array_len; i++) {
if (result->GetElement(i) ==
Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
result->SetElement(i, Smi::FromInt(
LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
}
}
return NULL;
}
class InactiveThreadActivationsChecker : public ThreadVisitor {
public:
InactiveThreadActivationsChecker(Handle<JSArray> shared_info_array,
Handle<JSArray> result)
: shared_info_array_(shared_info_array), result_(result),
has_blocked_functions_(false) {
}
void VisitThread(ThreadLocalTop* top) {
for (StackFrameIterator it(top); !it.done(); it.Advance()) {
has_blocked_functions_ |= CheckActivation(
shared_info_array_, result_, it.frame(),
LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
}
}
bool HasBlockedFunctions() {
return has_blocked_functions_;
}
private:
Handle<JSArray> shared_info_array_;
Handle<JSArray> result_;
bool has_blocked_functions_;
};
Handle<JSArray> LiveEdit::CheckAndDropActivations(
Handle<JSArray> shared_info_array, bool do_drop) {
int len = Smi::cast(shared_info_array->length())->value();
Handle<JSArray> result = Factory::NewJSArray(len);
// Fill the default values.
for (int i = 0; i < len; i++) {
SetElement(result, i,
Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH)));
}
// First check inactive threads. Fail if some functions are blocked there.
InactiveThreadActivationsChecker inactive_threads_checker(shared_info_array,
result);
ThreadManager::IterateThreads(&inactive_threads_checker);
if (inactive_threads_checker.HasBlockedFunctions()) {
return result;
}
// Try to drop activations from the current stack.
const char* error_message =
DropActivationsInActiveThread(shared_info_array, result, do_drop);
if (error_message != NULL) {
// Add the error message as an extra array element.
Vector<const char> vector_message(error_message, StrLength(error_message));
Handle<String> str = Factory::NewStringFromAscii(vector_message);
SetElement(result, len, str);
}
return result;
}
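CheckAndDropActivations returns len per-function statuses plus an optional extra element holding an error message, which is exactly what the JS caller tests with result[shared_list.length]. A small standalone sketch of that result protocol (the names and the struct are illustrative, not V8 declarations):

#include <cstdio>
#include <string>
#include <vector>

enum Status {               // Mirrors a few FunctionPatchabilityStatus values.
  AVAILABLE_FOR_PATCH = 1,
  BLOCKED_ON_ACTIVE_STACK = 2,
  REPLACED_ON_ACTIVE_STACK = 5
};

struct CheckResult {
  std::vector<int> statuses;  // One entry per checked function.
  std::string error;          // Non-empty <=> the extra error element is set.
};

int main() {
  CheckResult r;
  r.statuses.push_back(AVAILABLE_FOR_PATCH);
  r.statuses.push_back(REPLACED_ON_ACTIVE_STACK);
  if (!r.error.empty()) {
    std::printf("check failed: %s\n", r.error.c_str());
  } else {
    for (size_t i = 0; i < r.statuses.size(); i++) {
      std::printf("function %zu -> status %d\n", i, r.statuses[i]);
    }
  }
  return 0;
}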

21
deps/v8/src/liveedit.h

@ -88,13 +88,26 @@ class LiveEdit : AllStatic {
static void RelinkFunctionToScript(Handle<JSArray> shared_info_array,
Handle<Script> script_handle);
static void PatchFunctionPositions(Handle<JSArray> shared_info_array,
Handle<JSArray> position_change_array);
// Returns an array of pairs (new source position, breakpoint_object/array)
// so that the JS side can update positions in breakpoint objects.
static Handle<JSArray> PatchFunctionPositions(
Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array);
// Checks the listed functions on the stack and returns an array with the
// corresponding FunctionPatchabilityStatus statuses; an extra array element
// may contain a general error message. If do_drop is true, modifies the
// current stack, restarting the lowest found frames and dropping all other
// frames above them where possible.
static Handle<JSArray> CheckAndDropActivations(
Handle<JSArray> shared_info_array, bool do_drop);
// A copy of this is in liveedit-debugger.js.
enum FunctionPatchabilityStatus {
FUNCTION_AVAILABLE_FOR_PATCH = 0,
FUNCTION_BLOCKED_ON_STACK = 1
FUNCTION_AVAILABLE_FOR_PATCH = 1,
FUNCTION_BLOCKED_ON_ACTIVE_STACK = 2,
FUNCTION_BLOCKED_ON_OTHER_STACK = 3,
FUNCTION_BLOCKED_UNDER_NATIVE_CODE = 4,
FUNCTION_REPLACED_ON_ACTIVE_STACK = 5
};
};

97
deps/v8/src/log-inl.h

@ -29,96 +29,33 @@
#define V8_LOG_INL_H_
#include "log.h"
#include "cpu-profiler.h"
namespace v8 {
namespace internal {
//
// VMState class implementation. A simple stack of VM states held by the
// logger and partially threaded through the call stack. States are pushed by
// VMState construction and popped by destruction.
//
#ifdef ENABLE_LOGGING_AND_PROFILING
inline const char* StateToString(StateTag state) {
switch (state) {
case JS:
return "JS";
case GC:
return "GC";
case COMPILER:
return "COMPILER";
case OTHER:
return "OTHER";
default:
UNREACHABLE();
return NULL;
}
}
VMState::VMState(StateTag state) : disabled_(true), external_callback_(NULL) {
if (!Logger::is_logging()) {
return;
}
disabled_ = false;
#if !defined(ENABLE_HEAP_PROTECTION)
// When not protecting the heap, there is no difference between
// EXTERNAL and OTHER. As an optimization in that case, we will not
// perform EXTERNAL->OTHER transitions through the API. We thus
// compress the two states into one.
if (state == EXTERNAL) state = OTHER;
#endif
state_ = state;
previous_ = Logger::current_state_;
Logger::current_state_ = this;
if (FLAG_log_state_changes) {
LOG(UncheckedStringEvent("Entering", StateToString(state_)));
if (previous_ != NULL) {
LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
}
}
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap && previous_ != NULL) {
if (state_ == EXTERNAL) {
// We are leaving V8.
ASSERT(previous_->state_ != EXTERNAL);
Heap::Protect();
} else if (previous_->state_ == EXTERNAL) {
// We are entering V8.
Heap::Unprotect();
Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
Script* script) {
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG)
&& script->type()->value() == Script::TYPE_NATIVE) {
switch (tag) {
case FUNCTION_TAG: return NATIVE_FUNCTION_TAG;
case LAZY_COMPILE_TAG: return NATIVE_LAZY_COMPILE_TAG;
case SCRIPT_TAG: return NATIVE_SCRIPT_TAG;
default: return tag;
}
} else {
return tag;
}
#endif
#else
return tag;
#endif // ENABLE_CPP_PROFILES_PROCESSOR
}
VMState::~VMState() {
if (disabled_) return;
Logger::current_state_ = previous_;
if (FLAG_log_state_changes) {
LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
if (previous_ != NULL) {
LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
}
}
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap && previous_ != NULL) {
if (state_ == EXTERNAL) {
// We are reentering V8.
ASSERT(previous_->state_ != EXTERNAL);
Heap::Unprotect();
} else if (previous_->state_ == EXTERNAL) {
// We are leaving V8.
Heap::Protect();
}
}
#endif
}
#endif
#endif // ENABLE_LOGGING_AND_PROFILING
} } // namespace v8::internal
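ToNativeByScript maps a few tags to NATIVE_ twins when the script is native and passes everything else through. A condensed standalone sketch, with a bool standing in for the Script::TYPE_NATIVE check:

#include <cstdio>

enum Tag {
  FUNCTION_TAG, LAZY_COMPILE_TAG, SCRIPT_TAG, STUB_TAG,
  NATIVE_FUNCTION_TAG, NATIVE_LAZY_COMPILE_TAG, NATIVE_SCRIPT_TAG
};

// When the script is native, give the tag its NATIVE_ twin; otherwise (and
// for tags without a twin) pass it through unchanged.
static Tag ToNativeByScript(Tag tag, bool script_is_native) {
  if (!script_is_native) return tag;
  switch (tag) {
    case FUNCTION_TAG: return NATIVE_FUNCTION_TAG;
    case LAZY_COMPILE_TAG: return NATIVE_LAZY_COMPILE_TAG;
    case SCRIPT_TAG: return NATIVE_SCRIPT_TAG;
    default: return tag;
  }
}

int main() {
  std::printf("script in native file -> %d\n",
              ToNativeByScript(SCRIPT_TAG, true));   // NATIVE_SCRIPT_TAG
  std::printf("stub in native file   -> %d\n",
              ToNativeByScript(STUB_TAG, true));     // STUB_TAG, unchanged
  return 0;
}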

48
deps/v8/src/log.cc

@ -143,15 +143,14 @@ bool Profiler::paused_ = false;
// StackTracer implementation
//
void StackTracer::Trace(TickSample* sample) {
if (sample->state == GC) {
sample->frames_count = 0;
return;
}
sample->function = NULL;
sample->frames_count = 0;
if (sample->state == GC) return;
const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
if (js_entry_sp == 0) {
// Not executing JS now.
sample->frames_count = 0;
return;
}
@ -163,8 +162,7 @@ void StackTracer::Trace(TickSample* sample) {
}
int i = 0;
const Address callback = Logger::current_state_ != NULL ?
Logger::current_state_->external_callback() : NULL;
const Address callback = VMState::external_callback();
if (callback != NULL) {
sample->stack[i++] = callback;
}
@ -324,8 +322,6 @@ void Profiler::Run() {
//
Ticker* Logger::ticker_ = NULL;
Profiler* Logger::profiler_ = NULL;
VMState* Logger::current_state_ = NULL;
VMState Logger::bottom_state_(EXTERNAL);
SlidingStateWindow* Logger::sliding_state_window_ = NULL;
const char** Logger::log_events_ = NULL;
CompressionHelper* Logger::compression_helper_ = NULL;
@ -1300,7 +1296,7 @@ void Logger::LogCodeObject(Object* object) {
tag = Logger::CALL_IC_TAG;
break;
}
LOG(CodeCreateEvent(tag, code_object, description));
PROFILE(CodeCreateEvent(tag, code_object, description));
}
}
@ -1334,17 +1330,20 @@ void Logger::LogCompiledFunctions() {
Handle<String> script_name(String::cast(script->name()));
int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) {
LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
shared->code(), *func_name,
*script_name, line_num + 1));
PROFILE(CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
shared->code(), *func_name,
*script_name, line_num + 1));
} else {
// Can't distinguish enum and script here, so always use Script.
LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
shared->code(), *script_name));
// Can't distinguish eval and script here, so always use Script.
PROFILE(CodeCreateEvent(
Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
shared->code(), *script_name));
}
} else {
LOG(CodeCreateEvent(
Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
PROFILE(CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
shared->code(), *func_name));
}
} else if (shared->IsApiFunction()) {
// API function.
@ -1354,10 +1353,10 @@ void Logger::LogCompiledFunctions() {
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
Address entry_point = v8::ToCData<Address>(callback_obj);
LOG(CallbackEvent(*func_name, entry_point));
PROFILE(CallbackEvent(*func_name, entry_point));
}
} else {
LOG(CodeCreateEvent(
PROFILE(CodeCreateEvent(
Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
}
}
@ -1373,7 +1372,7 @@ void Logger::LogFunctionObjects() {
if (!obj->IsJSFunction()) continue;
JSFunction* jsf = JSFunction::cast(obj);
if (!jsf->is_compiled()) continue;
LOG(FunctionCreateEvent(jsf));
PROFILE(FunctionCreateEvent(jsf));
}
}
@ -1388,11 +1387,11 @@ void Logger::LogAccessorCallbacks() {
String* name = String::cast(ai->name());
Address getter_entry = v8::ToCData<Address>(ai->getter());
if (getter_entry != 0) {
LOG(GetterCallbackEvent(name, getter_entry));
PROFILE(GetterCallbackEvent(name, getter_entry));
}
Address setter_entry = v8::ToCData<Address>(ai->setter());
if (setter_entry != 0) {
LOG(SetterCallbackEvent(name, setter_entry));
PROFILE(SetterCallbackEvent(name, setter_entry));
}
}
}
@ -1475,7 +1474,7 @@ bool Logger::Setup() {
}
}
current_state_ = &bottom_state_;
ASSERT(VMState::is_outermost_external());
ticker_ = new Ticker(kSamplingIntervalMs);
@ -1558,5 +1557,4 @@ void Logger::EnableSlidingStateWindow() {
#endif
}
} } // namespace v8::internal

56
deps/v8/src/log.h

@ -87,32 +87,7 @@ class CompressionHelper;
#define LOG(Call) ((void) 0)
#endif
class VMState BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING
public:
inline VMState(StateTag state);
inline ~VMState();
StateTag state() { return state_; }
Address external_callback() { return external_callback_; }
void set_external_callback(Address external_callback) {
external_callback_ = external_callback;
}
private:
bool disabled_;
StateTag state_;
VMState* previous_;
Address external_callback_;
#else
public:
explicit VMState(StateTag state) {}
#endif
};
#define LOG_EVENTS_AND_TAGS_LIST(V) \
#define LOG_EVENTS_AND_TAGS_LIST_NO_NATIVES(V) \
V(CODE_CREATION_EVENT, "code-creation", "cc") \
V(CODE_MOVE_EVENT, "code-move", "cm") \
V(CODE_DELETE_EVENT, "code-delete", "cd") \
@ -143,6 +118,18 @@ class VMState BASE_EMBEDDED {
V(STORE_IC_TAG, "StoreIC", "sic") \
V(STUB_TAG, "Stub", "s")
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
// Add 'NATIVE_' cases for functions and scripts, but map them to
// original tags when writing to the log.
#define LOG_EVENTS_AND_TAGS_LIST(V) \
LOG_EVENTS_AND_TAGS_LIST_NO_NATIVES(V) \
V(NATIVE_FUNCTION_TAG, "Function", "f") \
V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile", "lc") \
V(NATIVE_SCRIPT_TAG, "Script", "sc")
#else
#define LOG_EVENTS_AND_TAGS_LIST(V) LOG_EVENTS_AND_TAGS_LIST_NO_NATIVES(V)
#endif
class Logger {
public:
#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
@ -260,10 +247,6 @@ class Logger {
static void LogRuntime(Vector<const char> format, JSArray* args);
#ifdef ENABLE_LOGGING_AND_PROFILING
static StateTag state() {
return current_state_ ? current_state_->state() : OTHER;
}
static bool is_logging() {
return logging_nesting_ > 0;
}
@ -288,6 +271,9 @@ class Logger {
// Used for logging stubs found in the snapshot.
static void LogCodeObjects();
// Converts tag to a corresponding NATIVE_... if the script is native.
INLINE(static LogEventsAndTags ToNativeByScript(LogEventsAndTags, Script*));
private:
// Profiler's sampling interval (in milliseconds).
@ -347,12 +333,6 @@ class Logger {
// of samples.
static Profiler* profiler_;
// A stack of VM states.
static VMState* current_state_;
// Singleton bottom or default vm state.
static VMState bottom_state_;
// SlidingStateWindow instance keeping a sliding window of the most
// recent VM states.
static SlidingStateWindow* sliding_state_window_;
@ -378,6 +358,8 @@ class Logger {
static int logging_nesting_;
static int cpu_profiler_nesting_;
static int heap_profiler_nesting_;
friend class CpuProfiler;
#else
static bool is_logging() { return false; }
#endif
@ -390,7 +372,7 @@ class StackTracer : public AllStatic {
static void Trace(TickSample* sample);
};
} } // namespace v8::internal
#endif // V8_LOG_H_
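Much of this commit replaces LOG(...) call sites with PROFILE(...) so profiler events can be enabled independently of plain logging. A standalone sketch of that guarded-macro split (DemoLogger, DemoProfiler, and the flags are stand-ins for the real Logger/CpuProfiler plumbing):

#include <cstdio>

static bool logging_enabled = true;
static bool profiling_enabled = false;

struct DemoLogger {
  static void CodeCreateEvent(const char* what) {
    std::printf("log: %s\n", what);
  }
};
struct DemoProfiler {
  static void CodeCreateEvent(const char* what) {
    std::printf("profile: %s\n", what);
  }
};

// Each macro expands to a guarded static call; the two guards are driven
// independently, mirroring the LOG vs. PROFILE distinction.
#define DEMO_LOG(Call) \
  do { if (logging_enabled) DemoLogger::Call; } while (false)
#define DEMO_PROFILE(Call) \
  do { if (profiling_enabled) DemoProfiler::Call; } while (false)

int main() {
  DEMO_LOG(CodeCreateEvent("stub"));      // Emitted: logging is on.
  DEMO_PROFILE(CodeCreateEvent("stub"));  // Dropped: profiling is off.
  return 0;
}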

409
deps/v8/src/mark-compact.cc

@ -53,13 +53,13 @@ MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_ = 0;
int MarkCompactCollector::live_old_data_objects_ = 0;
int MarkCompactCollector::live_old_pointer_objects_ = 0;
int MarkCompactCollector::live_code_objects_ = 0;
int MarkCompactCollector::live_map_objects_ = 0;
int MarkCompactCollector::live_cell_objects_ = 0;
int MarkCompactCollector::live_lo_objects_ = 0;
int MarkCompactCollector::live_young_objects_size_ = 0;
int MarkCompactCollector::live_old_data_objects_size_ = 0;
int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
int MarkCompactCollector::live_code_objects_size_ = 0;
int MarkCompactCollector::live_map_objects_size_ = 0;
int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif
void MarkCompactCollector::CollectGarbage() {
@ -136,13 +136,13 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
#ifdef DEBUG
live_bytes_ = 0;
live_young_objects_ = 0;
live_old_pointer_objects_ = 0;
live_old_data_objects_ = 0;
live_code_objects_ = 0;
live_map_objects_ = 0;
live_cell_objects_ = 0;
live_lo_objects_ = 0;
live_young_objects_size_ = 0;
live_old_pointer_objects_size_ = 0;
live_old_data_objects_size_ = 0;
live_code_objects_size_ = 0;
live_map_objects_size_ = 0;
live_cell_objects_size_ = 0;
live_lo_objects_size_ = 0;
#endif
}
@ -742,21 +742,21 @@ static int CountMarkedCallback(HeapObject* obj) {
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
live_bytes_ += obj->Size();
if (Heap::new_space()->Contains(obj)) {
live_young_objects_++;
live_young_objects_size_ += obj->Size();
} else if (Heap::map_space()->Contains(obj)) {
ASSERT(obj->IsMap());
live_map_objects_++;
live_map_objects_size_ += obj->Size();
} else if (Heap::cell_space()->Contains(obj)) {
ASSERT(obj->IsJSGlobalPropertyCell());
live_cell_objects_++;
live_cell_objects_size_ += obj->Size();
} else if (Heap::old_pointer_space()->Contains(obj)) {
live_old_pointer_objects_++;
live_old_pointer_objects_size_ += obj->Size();
} else if (Heap::old_data_space()->Contains(obj)) {
live_old_data_objects_++;
live_old_data_objects_size_ += obj->Size();
} else if (Heap::code_space()->Contains(obj)) {
live_code_objects_++;
live_code_objects_size_ += obj->Size();
} else if (Heap::lo_space()->Contains(obj)) {
live_lo_objects_++;
live_lo_objects_size_ += obj->Size();
} else {
UNREACHABLE();
}
@ -1068,31 +1068,210 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
}
static void SweepSpace(NewSpace* space) {
// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
// The first pass migrates all live objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly into
// the first word of the object without any encoding. If the object is dead, we
// write NULL as the forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
// to encounter pointers to dead objects during traversal of the remembered set
// for map space, because remembered set bits corresponding to dead maps are
// cleared later, during map space sweeping.
static void MigrateObject(Address dst, Address src, int size) {
Heap::CopyBlock(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
size);
Memory::Address_at(src) = dst;
}
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
public:
void VisitPointer(Object** p) {
UpdatePointer(p);
}
void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) UpdatePointer(p);
}
void VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
VisitPointer(&target);
rinfo->set_target_address(Code::cast(target)->instruction_start());
}
void VisitDebugTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsPatchedReturnSequence());
Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
VisitPointer(&target);
rinfo->set_call_address(Code::cast(target)->instruction_start());
}
private:
void UpdatePointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
Address old_addr = obj->address();
if (Heap::new_space()->Contains(obj)) {
ASSERT(Heap::InFromSpace(*p));
*p = HeapObject::FromAddress(Memory::Address_at(old_addr));
}
}
};
// Visitor for updating pointers from live objects in old spaces to new space.
// It can encounter pointers to dead objects in new space when traversing map
// space (see comment for MigrateObject).
static void UpdatePointerToNewGen(HeapObject** p) {
if (!(*p)->IsHeapObject()) return;
Address old_addr = (*p)->address();
ASSERT(Heap::InFromSpace(*p));
Address new_addr = Memory::Address_at(old_addr);
// The object pointed to by *p is dead. No update is required.
if (new_addr == NULL) return;
*p = HeapObject::FromAddress(new_addr);
}
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
Address old_addr = HeapObject::cast(*p)->address();
Address new_addr = Memory::Address_at(old_addr);
return String::cast(HeapObject::FromAddress(new_addr));
}
static bool TryPromoteObject(HeapObject* object, int object_size) {
Object* result;
if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
result = Heap::lo_space()->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(), object->address(), object_size);
Heap::UpdateRSet(target);
return true;
}
} else {
OldSpace* target_space = Heap::TargetSpace(object);
ASSERT(target_space == Heap::old_pointer_space() ||
target_space == Heap::old_data_space());
result = target_space->AllocateRaw(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(), object->address(), object_size);
if (target_space == Heap::old_pointer_space()) {
Heap::UpdateRSet(target);
}
return true;
}
}
return false;
}
static void SweepNewSpace(NewSpace* space) {
Heap::CheckNewSpaceExpansionCriteria();
Address from_bottom = space->bottom();
Address from_top = space->top();
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
space->Flip();
space->ResetAllocationInfo();
int size = 0;
int survivors_size = 0;
// First pass: traverse all objects in inactive semispace, remove marks,
// migrate live objects and write forwarding addresses.
for (Address current = from_bottom; current < from_top; current += size) {
HeapObject* object = HeapObject::FromAddress(current);
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
size = object->Size();
survivors_size += size;
if (Heap::ShouldBePromoted(current, size) &&
TryPromoteObject(object, size)) {
continue;
}
// Promotion either failed or was not required.
// Copy the content of the object.
Object* target = space->AllocateRaw(size);
// Allocation cannot fail at this point: semispaces are of equal size.
ASSERT(!target->IsFailure());
MigrateObject(HeapObject::cast(target)->address(), current, size);
} else {
size = object->Size();
Memory::Address_at(current) = NULL;
}
}
// Second pass: find pointers to new space and update them.
PointersToNewGenUpdatingVisitor updating_visitor;
// Update pointers in to space.
HeapObject* object;
for (Address current = space->bottom();
current < space->top();
current += object->Size()) {
object = HeapObject::FromAddress(current);
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
} else {
// We give non-live objects a map that will correctly give their size,
// since their existing map might not be live after the collection.
int size = object->Size();
if (size >= ByteArray::kHeaderSize) {
object->set_map(Heap::raw_unchecked_byte_array_map());
ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
} else {
ASSERT(size == kPointerSize);
object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
}
ASSERT(object->Size() == size);
object->IterateBody(object->map()->instance_type(),
object->Size(),
&updating_visitor);
}
// Update roots.
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
// Update pointers in old spaces.
Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
// Update pointers from cells.
HeapObjectIterator cell_iterator(Heap::cell_space());
for (HeapObject* cell = cell_iterator.next();
cell != NULL;
cell = cell_iterator.next()) {
if (cell->IsJSGlobalPropertyCell()) {
Address value_address =
reinterpret_cast<Address>(cell) +
(JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
}
// The object is now unmarked for the call to Size() at the top of the
// loop.
}
// Update pointers from external string table.
Heap::UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
// All pointers were updated. Update auxiliary allocation info.
Heap::IncrementYoungSurvivorsCounter(survivors_size);
space->set_age_mark(space->top());
}
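As a reading aid, here is a minimal, self-contained sketch of the two-pass scheme described in the comment block earlier in this hunk: pass one copies live objects and stores a forwarding address (or NULL for dead objects) in the old object's first word; pass two rewrites outside pointers through that word. All types and names below are illustrative stand-ins, not V8's:

#include <cstdio>
#include <vector>

struct Obj {
  Obj* forward;   // plays the role of the object's first word
  int payload;
  bool live;
};

// Pass 1: migrate live objects, leaving a forwarding address behind.
static void MigratePass(std::vector<Obj>& from, std::vector<Obj>& to) {
  to.reserve(from.size());  // keep pointers into 'to' stable while pushing
  for (Obj& o : from) {
    if (o.live) {
      to.push_back(o);
      o.forward = &to.back();  // forwarding address in the first word
    } else {
      o.forward = nullptr;     // NULL marks a dead object
    }
  }
}

// Pass 2: update pointers; a NULL forwarding word means the target is dead,
// so the pointer is left alone (mirroring UpdatePointerToNewGen above).
static void UpdatePass(std::vector<Obj*>& roots) {
  for (Obj*& p : roots) {
    if (p != nullptr && p->forward != nullptr) p = p->forward;
  }
}

int main() {
  std::vector<Obj> from = {{nullptr, 1, true}, {nullptr, 2, false}};
  std::vector<Obj> to;
  std::vector<Obj*> roots = {&from[0], &from[1]};
  MigratePass(from, to);
  UpdatePass(roots);
  std::printf("%d\n", roots[0]->payload);  // 1, now read from 'to' space
}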
@ -1382,10 +1561,12 @@ class MapCompact {
ASSERT(FreeListNode::IsFreeListNode(vacant_map));
ASSERT(map_to_evacuate->IsMap());
memcpy(
reinterpret_cast<void*>(vacant_map->address()),
reinterpret_cast<void*>(map_to_evacuate->address()),
Map::kSize);
ASSERT(Map::kSize % 4 == 0);
Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
reinterpret_cast<Object**>(map_to_evacuate->address()),
Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
@ -1465,10 +1646,11 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepSpace(Heap::new_space());
SweepNewSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
int live_maps = Heap::map_space()->Size() / Map::kSize;
ASSERT(live_map_objects_ == live_maps);
int live_maps_size = Heap::map_space()->Size();
int live_maps = live_maps_size / Map::kSize;
ASSERT(live_map_objects_size_ == live_maps_size);
if (Heap::map_space()->NeedsCompaction(live_maps)) {
MapCompact map_compact(live_maps);
@ -1500,7 +1682,7 @@ int MarkCompactCollector::IterateLiveObjectsInRange(
Address start,
Address end,
HeapObjectCallback size_func) {
int live_objects = 0;
int live_objects_size = 0;
Address current = start;
while (current < end) {
uint32_t encoded_map = Memory::uint32_at(current);
@ -1509,11 +1691,12 @@ int MarkCompactCollector::IterateLiveObjectsInRange(
} else if (encoded_map == kMultiFreeEncoding) {
current += Memory::int_at(current + kIntSize);
} else {
live_objects++;
current += size_func(HeapObject::FromAddress(current));
int size = size_func(HeapObject::FromAddress(current));
current += size;
live_objects_size += size;
}
}
return live_objects;
return live_objects_size;
}
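The change above switches IterateLiveObjectsInRange from returning a live-object count to returning the total live size. A toy sketch of the same walk follows; the free-run encoding and slot layout are invented for illustration and stand in for kSingleFreeEncoding / kMultiFreeEncoding:

#include <cstdio>

struct Slot {
  bool free;  // stand-in for the free-block encodings above
  int size;   // object size, or length of the free run, in slots
};

static int LiveSize(const Slot* region, int n) {
  int live_size = 0;
  int i = 0;
  while (i < n) {
    if (region[i].free) {
      i += region[i].size;          // skip the free run
    } else {
      live_size += region[i].size;  // accumulate live size, not a count
      i += region[i].size;
    }
  }
  return live_size;
}

int main() {
  // [object of size 2][free run of 1][object of size 1]
  Slot region[4] = {{false, 2}, {false, 0}, {true, 1}, {false, 1}};
  std::printf("%d\n", LiveSize(region, 4));  // prints 3
}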
@ -1639,36 +1822,36 @@ void MarkCompactCollector::UpdatePointers() {
Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&updating_visitor);
int live_maps = IterateLiveObjects(Heap::map_space(),
&UpdatePointersInOldObject);
int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
&UpdatePointersInOldObject);
int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
int live_maps_size = IterateLiveObjects(Heap::map_space(),
&UpdatePointersInOldObject);
int live_codes = IterateLiveObjects(Heap::code_space(),
&UpdatePointersInOldObject);
int live_cells = IterateLiveObjects(Heap::cell_space(),
&UpdatePointersInOldObject);
int live_news = IterateLiveObjects(Heap::new_space(),
&UpdatePointersInNewObject);
int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
&UpdatePointersInOldObject);
int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
&UpdatePointersInOldObject);
int live_codes_size = IterateLiveObjects(Heap::code_space(),
&UpdatePointersInOldObject);
int live_cells_size = IterateLiveObjects(Heap::cell_space(),
&UpdatePointersInOldObject);
int live_news_size = IterateLiveObjects(Heap::new_space(),
&UpdatePointersInNewObject);
// Large objects do not move, the map word can be updated directly.
LargeObjectIterator it(Heap::lo_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
UpdatePointersInNewObject(obj);
USE(live_maps);
USE(live_pointer_olds);
USE(live_data_olds);
USE(live_codes);
USE(live_cells);
USE(live_news);
ASSERT(live_maps == live_map_objects_);
ASSERT(live_data_olds == live_old_data_objects_);
ASSERT(live_pointer_olds == live_old_pointer_objects_);
ASSERT(live_codes == live_code_objects_);
ASSERT(live_cells == live_cell_objects_);
ASSERT(live_news == live_young_objects_);
USE(live_maps_size);
USE(live_pointer_olds_size);
USE(live_data_olds_size);
USE(live_codes_size);
USE(live_cells_size);
USE(live_news_size);
ASSERT(live_maps_size == live_map_objects_size_);
ASSERT(live_data_olds_size == live_old_data_objects_size_);
ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
ASSERT(live_codes_size == live_code_objects_size_);
ASSERT(live_cells_size == live_cell_objects_size_);
ASSERT(live_news_size == live_young_objects_size_);
}
@ -1783,27 +1966,31 @@ void MarkCompactCollector::RelocateObjects() {
#endif
// Relocate objects; map objects must be relocated first, because relocating
// objects in other spaces relies on map objects to get object sizes.
int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
&RelocateOldPointerObject);
int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
&RelocateOldDataObject);
int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
USE(live_maps);
USE(live_data_olds);
USE(live_pointer_olds);
USE(live_codes);
USE(live_cells);
USE(live_news);
ASSERT(live_maps == live_map_objects_);
ASSERT(live_data_olds == live_old_data_objects_);
ASSERT(live_pointer_olds == live_old_pointer_objects_);
ASSERT(live_codes == live_code_objects_);
ASSERT(live_cells == live_cell_objects_);
ASSERT(live_news == live_young_objects_);
int live_maps_size = IterateLiveObjects(Heap::map_space(),
&RelocateMapObject);
int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
&RelocateOldPointerObject);
int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
&RelocateOldDataObject);
int live_codes_size = IterateLiveObjects(Heap::code_space(),
&RelocateCodeObject);
int live_cells_size = IterateLiveObjects(Heap::cell_space(),
&RelocateCellObject);
int live_news_size = IterateLiveObjects(Heap::new_space(),
&RelocateNewObject);
USE(live_maps_size);
USE(live_pointer_olds_size);
USE(live_data_olds_size);
USE(live_codes_size);
USE(live_cells_size);
USE(live_news_size);
ASSERT(live_maps_size == live_map_objects_size_);
ASSERT(live_data_olds_size == live_old_data_objects_size_);
ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
ASSERT(live_codes_size == live_code_objects_size_);
ASSERT(live_cells_size == live_cell_objects_size_);
ASSERT(live_news_size == live_young_objects_size_);
// Flip from and to spaces
Heap::new_space()->Flip();
@ -1821,6 +2008,9 @@ void MarkCompactCollector::RelocateObjects() {
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
Heap::CheckNewSpaceExpansionCriteria();
Heap::IncrementYoungSurvivorsCounter(live_news_size);
}
@ -1840,7 +2030,10 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
Address old_addr = obj->address();
if (new_addr != old_addr) {
memmove(new_addr, old_addr, Map::kSize); // copy contents
// Move contents.
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
Map::kSize);
}
#ifdef DEBUG
@ -1896,14 +2089,17 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
Address old_addr = obj->address();
if (new_addr != old_addr) {
memmove(new_addr, old_addr, obj_size); // Copy contents
// Move contents.
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
LOG(FunctionMoveEvent(old_addr, new_addr));
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
return obj_size;
@ -1940,7 +2136,10 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
Address old_addr = obj->address();
if (new_addr != old_addr) {
memmove(new_addr, old_addr, obj_size); // Copy contents.
// Move contents.
Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@ -1948,7 +2147,7 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
// May also update inline cache target.
Code::cast(copied_to)->Relocate(new_addr - old_addr);
// Notify the logger that compiled code has moved.
LOG(CodeMoveEvent(old_addr, new_addr));
PROFILE(CodeMoveEvent(old_addr, new_addr));
}
return obj_size;
@ -1976,9 +2175,9 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
#endif
// New and old addresses cannot overlap.
memcpy(reinterpret_cast<void*>(new_addr),
reinterpret_cast<void*>(old_addr),
obj_size);
Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
reinterpret_cast<Object**>(old_addr),
obj_size);
#ifdef DEBUG
if (FLAG_gc_verbose) {
@ -1988,7 +2187,7 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
LOG(FunctionMoveEvent(old_addr, new_addr));
PROFILE(FunctionMoveEvent(old_addr, new_addr));
}
return obj_size;
@ -2010,9 +2209,9 @@ void MarkCompactCollector::RebuildRSets() {
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
LOG(CodeDeleteEvent(obj->address()));
PROFILE(CodeDeleteEvent(obj->address()));
} else if (obj->IsJSFunction()) {
LOG(FunctionDeleteEvent(obj->address()));
PROFILE(FunctionDeleteEvent(obj->address()));
}
#endif
}

28
deps/v8/src/mark-compact.h

@ -407,26 +407,26 @@ class MarkCompactCollector: public AllStatic {
// Counters used for debugging the marking phase of mark-compact or
// mark-sweep collection.
// Number of live objects in Heap::to_space_.
static int live_young_objects_;
// Size of live objects in Heap::to_space_.
static int live_young_objects_size_;
// Number of live objects in Heap::old_pointer_space_.
static int live_old_pointer_objects_;
// Size of live objects in Heap::old_pointer_space_.
static int live_old_pointer_objects_size_;
// Number of live objects in Heap::old_data_space_.
static int live_old_data_objects_;
// Size of live objects in Heap::old_data_space_.
static int live_old_data_objects_size_;
// Number of live objects in Heap::code_space_.
static int live_code_objects_;
// Size of live objects in Heap::code_space_.
static int live_code_objects_size_;
// Number of live objects in Heap::map_space_.
static int live_map_objects_;
// Size of live objects in Heap::map_space_.
static int live_map_objects_size_;
// Number of live objects in Heap::cell_space_.
static int live_cell_objects_;
// Size of live objects in Heap::cell_space_.
static int live_cell_objects_size_;
// Number of live objects in Heap::lo_space_.
static int live_lo_objects_;
// Size of live objects in Heap::lo_space_.
static int live_lo_objects_size_;
// Number of live bytes in this collection.
static int live_bytes_;

2
deps/v8/src/math.js

@ -165,7 +165,7 @@ function MathPow(x, y) {
// ECMA 262 - 15.8.2.14
function MathRandom() {
return %_RandomPositiveSmi() / 0x40000000;
return %_RandomHeapNumber();
}
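For context on the replaced expression: %_RandomPositiveSmi() yielded a non-negative small integer below 2^30, and 0x40000000 is exactly 2^30, so the quotient lies in [0, 1) with at most 30 bits of precision. The new %_RandomHeapNumber() intrinsic returns a heap-allocated double directly, which presumably lifts that precision cap (this reading is inferred from the intrinsic names; the implementation is not shown in this diff).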
// ECMA 262 - 15.8.2.15

24
deps/v8/src/messages.js

@ -432,6 +432,30 @@ Script.prototype.lineCount = function() {
};
/**
* Returns the name of the script if available, or the contents of the
* sourceURL comment otherwise. See
* http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
* for details on using the //@ sourceURL comment to identify scripts that
* don't have a name.
*
* @return {?string} script name if present, value for //@ sourceURL comment
* otherwise.
*/
Script.prototype.nameOrSourceURL = function() {
if (this.name)
return this.name;
// TODO(608): the spaces in a regexp below had to be escaped as \040
// because this file is being processed by js2c whose handling of spaces
// in regexps is broken. Also, ['"] are excluded from allowed URLs to
// avoid matches against sources that invoke evals with sourceURL.
var sourceUrlPattern =
/\/\/@[\040\t]sourceURL=[\040\t]*([^\s'"]*)[\040\t]*$/m;
var match = sourceUrlPattern.exec(this.source);
return match ? match[1] : this.name;
}
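A note on the pattern itself: the /m flag makes $ match at each line end, so the regexp picks up a trailing //@ sourceURL=<url> line anywhere in the source. \040 is the octal escape for a plain space, used here to work around the js2c issue mentioned in the TODO, and quote characters are excluded from the captured URL to avoid matching string literals inside eval'ed code.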
/**
* Class for source location. A source location is a position within some
* source with the following properties:

2
deps/v8/src/mips/codegen-mips.cc

@ -890,7 +890,7 @@ void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateRandomHeapNumber(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}

2
deps/v8/src/mips/codegen-mips.h

@ -352,7 +352,7 @@ class CodeGenerator: public AstVisitor {
void GenerateLog(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);

16
deps/v8/src/mips/debug-mips.cc

@ -104,8 +104,24 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
}
void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips");
}
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
masm->Abort("LiveEdit frame dropping is not supported on mips");
}
#undef __
void Debug::SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
Handle<Code> code) {
UNREACHABLE();
}
const int Debug::kFrameDropperFrameSize = -1;
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal

26
deps/v8/src/mirror-debugger.js

@ -1728,8 +1728,7 @@ ScriptMirror.prototype.value = function() {
ScriptMirror.prototype.name = function() {
// If we have a name, we trust it more than the sourceURL from comments
return this.script_.name || this.sourceUrlFromComment_();
return this.script_.name || this.script_.nameOrSourceURL();
};
@ -1824,29 +1823,6 @@ ScriptMirror.prototype.toText = function() {
}
/**
* Returns a suggested script URL from comments in script code (if found),
* undefined otherwise. Used primarily by debuggers for identifying eval()'ed
* scripts. See
* http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt
* for details.
*
* @return {?string} value for //@ sourceURL comment
*/
ScriptMirror.prototype.sourceUrlFromComment_ = function() {
if (!('sourceUrl_' in this) && this.source()) {
// TODO(608): the spaces in a regexp below had to be escaped as \040
// because this file is being processed by js2c whose handling of spaces
// in regexps is broken.
// We're not using \s here to prevent \n from matching.
var sourceUrlPattern = /\/\/@[\040\t]sourceURL=[\040\t]*(\S+)[\040\t]*$/m;
var match = sourceUrlPattern.exec(this.source());
this.sourceUrl_ = match ? match[1] : undefined;
}
return this.sourceUrl_;
};
/**
* Mirror object for context.
* @param {Object} data The context data

27
deps/v8/src/objects.h

@ -3667,6 +3667,13 @@ class JSRegExp: public JSObject {
FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
static const int kIrregexpCaptureCountOffset =
FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
// In-object fields.
static const int kSourceFieldIndex = 0;
static const int kGlobalFieldIndex = 1;
static const int kIgnoreCaseFieldIndex = 2;
static const int kMultilineFieldIndex = 3;
static const int kLastIndexFieldIndex = 4;
};
@ -4625,6 +4632,26 @@ class JSArray: public JSObject {
};
// JSRegExpResult is just a JSArray with a specific initial map.
// This initial map adds in-object properties for "index" and "input"
// properties, as assigned by RegExp.prototype.exec, which allows
// faster creation of RegExp exec results.
// This class just holds constants used when creating the result.
// After creation the result must be treated as a JSArray in all regards.
class JSRegExpResult: public JSArray {
public:
// Offsets of object fields.
static const int kIndexOffset = JSArray::kSize;
static const int kInputOffset = kIndexOffset + kPointerSize;
static const int kSize = kInputOffset + kPointerSize;
// Indices of in-object properties.
static const int kIndexIndex = 0;
static const int kInputIndex = 1;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSRegExpResult);
};
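As a worked example of the offset chain (with values assumed for a hypothetical 32-bit build, since JSArray::kSize is not shown in this diff): if JSArray::kSize were 16 and kPointerSize 4, then kIndexOffset = 16, kInputOffset = 20, and kSize = 24; that is, the result object is an ordinary JSArray followed by exactly two in-object pointer fields.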
// An accessor must have a getter, but can have no setter.
//
// When setting a property, V8 searches accessors in prototypes.

2
deps/v8/src/platform-freebsd.cc

@ -569,7 +569,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
TickSample sample;
// We always sample the VM state.
sample.state = Logger::state();
sample.state = VMState::current_state();
// If profiling, we extract the current pc and sp.
if (active_sampler_->IsProfiling()) {

41
deps/v8/src/platform-linux.cc

@ -727,44 +727,49 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
if (signal != SIGPROF) return;
if (active_sampler_ == NULL) return;
TickSample sample;
TickSample sample_obj;
TickSample* sample = NULL;
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
sample = CpuProfiler::TickSampleEvent();
#endif
if (sample == NULL) sample = &sample_obj;
// We always sample the VM state.
sample.state = Logger::state();
sample->state = VMState::current_state();
// If profiling, we extract the current pc and sp.
if (active_sampler_->IsProfiling()) {
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
mcontext_t& mcontext = ucontext->uc_mcontext;
#if V8_HOST_ARCH_IA32
sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
#elif V8_HOST_ARCH_X64
sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
#elif V8_HOST_ARCH_ARM
// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
sample.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
sample.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
sample.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
#else
sample.pc = reinterpret_cast<Address>(mcontext.arm_pc);
sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#elif V8_HOST_ARCH_MIPS
// Implement this on MIPS.
UNIMPLEMENTED();
#endif
if (IsVmThread())
active_sampler_->SampleStack(&sample);
if (IsVmThread()) {
active_sampler_->SampleStack(sample);
}
}
active_sampler_->Tick(&sample);
active_sampler_->Tick(sample);
#endif
}

27
deps/v8/src/platform-macos.cc

@ -544,13 +544,17 @@ class Sampler::PlatformData : public Malloced {
// Sampler thread handler.
void Runner() {
// Loop until the sampler is disengaged.
while (sampler_->IsActive()) {
TickSample sample;
// Loop until the sampler is disengaged, keeping the specified sampling
// frequency.
for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
TickSample sample_obj;
TickSample* sample = NULL;
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
sample = CpuProfiler::TickSampleEvent();
#endif
if (sample == NULL) sample = &sample_obj;
// We always sample the VM state.
sample.state = Logger::state();
sample->state = VMState::current_state();
// If profiling, we record the pc and sp of the profiled thread.
if (sampler_->IsProfiling()
&& KERN_SUCCESS == thread_suspend(profiled_thread_)) {
@ -580,19 +584,16 @@ class Sampler::PlatformData : public Malloced {
flavor,
reinterpret_cast<natural_t*>(&state),
&count) == KERN_SUCCESS) {
sample.pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
sample.sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
sample.fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
sampler_->SampleStack(&sample);
sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
sampler_->SampleStack(sample);
}
thread_resume(profiled_thread_);
}
// Invoke tick handler with program counter and stack pointer.
sampler_->Tick(&sample);
// Wait until next sampling.
usleep(sampler_->interval_ * 1000);
sampler_->Tick(sample);
}
}
};

2
deps/v8/src/platform-openbsd.cc

@ -542,7 +542,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
TickSample sample;
// We always sample the VM state.
sample.state = Logger::state();
sample.state = VMState::current_state();
active_sampler_->Tick(&sample);
}

2
deps/v8/src/platform-solaris.cc

@ -533,7 +533,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
sample.fp = 0;
// We always sample the VM state.
sample.state = Logger::state();
sample.state = VMState::current_state();
active_sampler_->Tick(&sample);
}

33
deps/v8/src/platform-win32.cc

@ -1803,37 +1803,38 @@ class Sampler::PlatformData : public Malloced {
// Context used for sampling the register state of the profiled thread.
CONTEXT context;
memset(&context, 0, sizeof(context));
// Loop until the sampler is disengaged.
while (sampler_->IsActive()) {
TickSample sample;
// Loop until the sampler is disengaged, keeping the specified sampling
// frequency.
for ( ; sampler_->IsActive(); Sleep(sampler_->interval_)) {
TickSample sample_obj;
TickSample* sample = NULL;
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
sample = CpuProfiler::TickSampleEvent();
#endif
if (sample == NULL) sample = &sample_obj;
// We always sample the VM state.
sample.state = Logger::state();
sample->state = VMState::current_state();
// If profiling, we record the pc and sp of the profiled thread.
if (sampler_->IsProfiling()
&& SuspendThread(profiled_thread_) != (DWORD)-1) {
context.ContextFlags = CONTEXT_FULL;
if (GetThreadContext(profiled_thread_, &context) != 0) {
#if V8_HOST_ARCH_X64
sample.pc = reinterpret_cast<Address>(context.Rip);
sample.sp = reinterpret_cast<Address>(context.Rsp);
sample.fp = reinterpret_cast<Address>(context.Rbp);
sample->pc = reinterpret_cast<Address>(context.Rip);
sample->sp = reinterpret_cast<Address>(context.Rsp);
sample->fp = reinterpret_cast<Address>(context.Rbp);
#else
sample.pc = reinterpret_cast<Address>(context.Eip);
sample.sp = reinterpret_cast<Address>(context.Esp);
sample.fp = reinterpret_cast<Address>(context.Ebp);
sample->pc = reinterpret_cast<Address>(context.Eip);
sample->sp = reinterpret_cast<Address>(context.Esp);
sample->fp = reinterpret_cast<Address>(context.Ebp);
#endif
sampler_->SampleStack(&sample);
sampler_->SampleStack(sample);
}
ResumeThread(profiled_thread_);
}
// Invoke tick handler with program counter and stack pointer.
sampler_->Tick(&sample);
// Wait until next sampling.
Sleep(sampler_->interval_);
sampler_->Tick(sample);
}
}
};
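The macOS and Win32 changes above share one shape: the sampler thread asks the profiler for a preallocated sample slot, falls back to a stack-local one, and sleeps in the loop header rather than at the end of the body. A compilable sketch of that shape, with every helper here assumed rather than V8's real API (ProfilerSlot stands in for CpuProfiler::TickSampleEvent):

#include <chrono>
#include <cstdio>
#include <thread>

struct TickSample { const void* pc = nullptr; };

// Hypothetical stand-in: a profiler-owned slot, or nullptr when the
// profiler is not collecting.
static TickSample* ProfilerSlot() { return nullptr; }

static void SleepMs(int ms) {
  std::this_thread::sleep_for(std::chrono::milliseconds(ms));
}

struct Sampler {
  bool active = true;
  int interval_ms = 1;
  int ticks = 0;
  void Tick(TickSample*) {
    if (++ticks == 3) active = false;  // stop after a few ticks (demo only)
  }
};

int main() {
  Sampler sampler;
  // Loop until the sampler is disengaged, keeping the sampling frequency.
  for (; sampler.active; SleepMs(sampler.interval_ms)) {
    TickSample sample_obj;
    TickSample* sample = ProfilerSlot();
    if (sample == nullptr) sample = &sample_obj;  // fall back to the local
    // ...the registers of the profiled thread would be captured here...
    sampler.Tick(sample);
  }
  std::printf("%d ticks\n", sampler.ticks);
}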

8
deps/v8/src/platform.h

@ -516,18 +516,18 @@ class Socket {
class TickSample {
public:
TickSample()
: pc(NULL),
: state(OTHER),
pc(NULL),
sp(NULL),
fp(NULL),
function(NULL),
state(OTHER),
frames_count(0) {}
StateTag state; // The state of the VM.
Address pc; // Instruction pointer.
Address sp; // Stack pointer.
Address fp; // Frame pointer.
Address function; // The last called JS function.
StateTag state; // The state of the VM.
static const int kMaxFramesCount = 100;
static const int kMaxFramesCount = 64;
Address stack[kMaxFramesCount]; // Call stack.
int frames_count; // Number of captured frames.
};

54
deps/v8/src/profile-generator-inl.h

@ -28,27 +28,34 @@
#ifndef V8_PROFILE_GENERATOR_INL_H_
#define V8_PROFILE_GENERATOR_INL_H_
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#include "profile-generator.h"
namespace v8 {
namespace internal {
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
const char* name,
const char* resource_name,
int line_number)
: tag_(tag),
: call_uid_(next_call_uid_++),
tag_(tag),
name_prefix_(name_prefix),
name_(name),
resource_name_(resource_name),
line_number_(line_number) {
}
bool CodeEntry::is_js_function() {
return tag_ == Logger::FUNCTION_TAG
|| tag_ == Logger::LAZY_COMPILE_TAG
|| tag_ == Logger::SCRIPT_TAG;
bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
return tag == Logger::FUNCTION_TAG
|| tag == Logger::LAZY_COMPILE_TAG
|| tag == Logger::SCRIPT_TAG
|| tag == Logger::NATIVE_FUNCTION_TAG
|| tag == Logger::NATIVE_LAZY_COMPILE_TAG
|| tag == Logger::NATIVE_SCRIPT_TAG;
}
@ -76,6 +83,41 @@ void CodeMap::DeleteCode(Address addr) {
}
bool CpuProfilesCollection::is_last_profile() {
// Called from the VM thread, and only it can mutate the list,
// so no locking is needed here.
return current_profiles_.length() == 1;
}
const char* CpuProfilesCollection::GetFunctionName(String* name) {
return GetFunctionName(GetName(name));
}
const char* CpuProfilesCollection::GetFunctionName(const char* name) {
return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName;
}
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
switch (tag) {
case GC:
return gc_entry_;
case JS:
case COMPILER:
// DOM event handlers are reported as OTHER / EXTERNAL entries.
// To avoid confusing people, let's put all these entries into
// one bucket.
case OTHER:
case EXTERNAL:
return program_entry_;
default: return NULL;
}
}
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // V8_PROFILE_GENERATOR_INL_H_

247
deps/v8/src/profile-generator.cc

@ -25,15 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#include "v8.h"
#include "profile-generator-inl.h"
#include "../include/v8-profiler.h"
namespace v8 {
namespace internal {
const char* CodeEntry::kEmptyNamePrefix = "";
unsigned CodeEntry::next_call_uid_ = 1;
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
HashMap::Entry* map_entry =
children_.Lookup(entry, CodeEntryHash(entry), false);
@ -47,26 +54,23 @@ ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
children_.Lookup(entry, CodeEntryHash(entry), true);
if (map_entry->value == NULL) {
// New node added.
map_entry->value = new ProfileNode(entry);
ProfileNode* new_node = new ProfileNode(entry);
map_entry->value = new_node;
children_list_.Add(new_node);
}
return reinterpret_cast<ProfileNode*>(map_entry->value);
}
void ProfileNode::GetChildren(List<ProfileNode*>* children) {
for (HashMap::Entry* p = children_.Start();
p != NULL;
p = children_.Next(p)) {
children->Add(reinterpret_cast<ProfileNode*>(p->value));
}
}
void ProfileNode::Print(int indent) {
OS::Print("%4u %4u %*c %s\n",
OS::Print("%5u %5u %*c %s%s",
total_ticks_, self_ticks_,
indent, ' ',
entry_ != NULL ? entry_->name() : "");
entry_->name_prefix(),
entry_->name());
if (entry_->resource_name()[0] != '\0')
OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
OS::Print("\n");
for (HashMap::Entry* p = children_.Start();
p != NULL;
p = children_.Next(p)) {
@ -89,6 +93,12 @@ class DeleteNodesCallback {
} // namespace
ProfileTree::ProfileTree()
: root_entry_(Logger::FUNCTION_TAG, "", "(root)", "", 0),
root_(new ProfileNode(&root_entry_)) {
}
ProfileTree::~ProfileTree() {
DeleteNodesCallback cb;
TraverseBreadthFirstPostOrder(&cb);
@ -123,39 +133,46 @@ void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
namespace {
struct Position {
Position(ProfileNode* a_node, HashMap::Entry* a_p)
: node(a_node), p(a_p) { }
class Position {
public:
explicit Position(ProfileNode* node)
: node(node), child_idx_(0) { }
INLINE(ProfileNode* current_child()) {
return reinterpret_cast<ProfileNode*>(p->value);
return node->children()->at(child_idx_);
}
INLINE(bool has_current_child()) {
return child_idx_ < node->children()->length();
}
INLINE(void next_child()) { ++child_idx_; }
ProfileNode* node;
HashMap::Entry* p;
private:
int child_idx_;
};
} // namespace
// Non-recursive implementation of breadth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseBreadthFirstPostOrder(Callback* callback) {
List<Position> stack(10);
stack.Add(Position(root_, root_->children_.Start()));
stack.Add(Position(root_));
do {
Position& current = stack.last();
if (current.p != NULL) {
stack.Add(Position(current.current_child(),
current.current_child()->children_.Start()));
if (current.has_current_child()) {
stack.Add(Position(current.current_child()));
} else {
callback->AfterAllChildrenTraversed(current.node);
if (stack.length() > 1) {
Position& parent = stack[stack.length() - 2];
callback->AfterChildTraversed(parent.node, current.node);
parent.p = parent.node->children_.Next(parent.p);
parent.next_child();
// Remove child from the stack.
stack.RemoveLast();
}
}
} while (stack.length() > 1 || stack.last().p != NULL);
} while (stack.length() > 1 || stack.last().has_current_child());
}
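Note that, despite the "breadth-first" wording in the comment, an explicit stack with a per-frame child index performs a depth-first, post-order walk: a node is visited only after all of its children. A standalone sketch of the same control flow (names are illustrative, not V8's):

#include <cstdio>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> children;
};

static void PostOrder(Node* root, void (*visit)(Node*)) {
  struct Frame { Node* node; size_t child; };
  std::vector<Frame> stack;
  stack.push_back({root, 0});
  while (!stack.empty()) {
    Frame& top = stack.back();
    if (top.child < top.node->children.size()) {
      Node* next = top.node->children[top.child++];
      stack.push_back({next, 0});  // descend; 'top' may dangle after this
    } else {
      visit(top.node);  // all children handled: visit the node itself
      stack.pop_back();
    }
  }
}

int main() {
  Node c{1, {}}, d{2, {}};
  Node b{3, {&c, &d}};
  Node a{4, {&b}};
  PostOrder(&a, [](Node* n) { std::printf("%d ", n->id); });  // 1 2 3 4
}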
@ -175,7 +192,6 @@ class CalculateTotalTicksCallback {
} // namespace
// Non-recursive implementation of breadth-first tree traversal.
void ProfileTree::CalculateTotalTicks() {
CalculateTotalTicksCallback cb;
TraverseBreadthFirstPostOrder(&cb);
@ -242,8 +258,22 @@ CodeEntry* CodeMap::FindEntry(Address addr) {
}
void CodeMap::CodeTreePrinter::Call(
const Address& key, const CodeMap::CodeEntryInfo& value) {
OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
}
void CodeMap::Print() {
CodeTreePrinter printer;
tree_.ForEach(&printer);
}
CpuProfilesCollection::CpuProfilesCollection()
: function_and_resource_names_(StringsMatch) {
: function_and_resource_names_(StringsMatch),
profiles_uids_(CpuProfilesMatch),
current_profiles_semaphore_(OS::CreateSemaphore(1)) {
}
@ -262,6 +292,8 @@ static void DeleteCpuProfile(CpuProfile** profile_ptr) {
CpuProfilesCollection::~CpuProfilesCollection() {
delete current_profiles_semaphore_;
current_profiles_.Iterate(DeleteCpuProfile);
profiles_.Iterate(DeleteCpuProfile);
code_entries_.Iterate(DeleteCodeEntry);
args_count_names_.Iterate(DeleteArgsCountName);
@ -273,8 +305,63 @@ CpuProfilesCollection::~CpuProfilesCollection() {
}
void CpuProfilesCollection::AddProfile(unsigned uid) {
profiles_.Add(new CpuProfile());
bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
ASSERT(uid > 0);
current_profiles_semaphore_->Wait();
for (int i = 0; i < current_profiles_.length(); ++i) {
if (strcmp(current_profiles_[i]->title(), title) == 0) {
// Ignore attempts to start profile with the same title.
current_profiles_semaphore_->Signal();
return false;
}
}
current_profiles_.Add(new CpuProfile(title, uid));
current_profiles_semaphore_->Signal();
return true;
}
bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) {
return StartProfiling(GetName(title), uid);
}
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
const int title_len = StrLength(title);
CpuProfile* profile = NULL;
current_profiles_semaphore_->Wait();
for (int i = current_profiles_.length() - 1; i >= 0; --i) {
if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
profile = current_profiles_.Remove(i);
break;
}
}
current_profiles_semaphore_->Signal();
if (profile != NULL) {
profile->CalculateTotalTicks();
profiles_.Add(profile);
HashMap::Entry* entry =
profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
static_cast<uint32_t>(profile->uid()),
true);
ASSERT(entry->value == NULL);
entry->value = profile;
}
return profile;
}
CpuProfile* CpuProfilesCollection::StopProfiling(String* title) {
return StopProfiling(GetName(title));
}
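The start/stop pair above guards current_profiles_ with a binary semaphore because the sampler-facing AddPathToCurrentProfiles appends paths concurrently. A compact sketch of the same discipline, using std::mutex in place of OS::CreateSemaphore(1); all names below are stand-ins, not V8's:

#include <cstring>
#include <mutex>
#include <vector>

struct Profile {
  const char* title;
  unsigned uid;
};

class Profiles {
 public:
  bool Start(const char* title, unsigned uid) {
    std::lock_guard<std::mutex> lock(mu_);
    for (const Profile* p : current_) {
      if (std::strcmp(p->title, title) == 0) return false;  // already running
    }
    current_.push_back(new Profile{title, uid});
    return true;
  }
  // An empty title stops the most recently started profile, mirroring the
  // title_len == 0 case above.
  Profile* Stop(const char* title) {
    std::lock_guard<std::mutex> lock(mu_);
    for (size_t i = current_.size(); i-- > 0;) {
      if (*title == '\0' || std::strcmp(current_[i]->title, title) == 0) {
        Profile* p = current_[i];
        current_.erase(current_.begin() + i);
        return p;  // caller would now finalize ticks and file it by uid
      }
    }
    return nullptr;
  }
 private:
  std::mutex mu_;
  std::vector<Profile*> current_;  // guarded by mu_
};

int main() {
  Profiles ps;
  ps.Start("startup", 1);
  delete ps.Stop("startup");
}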
CpuProfile* CpuProfilesCollection::GetProfile(unsigned uid) {
HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
static_cast<uint32_t>(uid),
false);
return entry != NULL ? reinterpret_cast<CpuProfile*>(entry->value) : NULL;
}
@ -283,7 +370,8 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
String* resource_name,
int line_number) {
CodeEntry* entry = new CodeEntry(tag,
GetName(name),
CodeEntry::kEmptyNamePrefix,
GetFunctionName(name),
GetName(resource_name),
line_number);
code_entries_.Add(entry);
@ -293,7 +381,24 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
const char* name) {
CodeEntry* entry = new CodeEntry(tag, name, "", 0);
CodeEntry* entry = new CodeEntry(tag,
CodeEntry::kEmptyNamePrefix,
GetFunctionName(name),
"",
v8::CpuProfileNode::kNoLineNumberInfo);
code_entries_.Add(entry);
return entry;
}
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
String* name) {
CodeEntry* entry = new CodeEntry(tag,
name_prefix,
GetName(name),
"",
v8::CpuProfileNode::kNoLineNumberInfo);
code_entries_.Add(entry);
return entry;
}
@ -301,7 +406,11 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
int args_count) {
CodeEntry* entry = new CodeEntry(tag, GetName(args_count), "", 0);
CodeEntry* entry = new CodeEntry(tag,
"args_count: ",
GetName(args_count),
"",
v8::CpuProfileNode::kNoLineNumberInfo);
code_entries_.Add(entry);
return entry;
}
@ -337,48 +446,80 @@ const char* CpuProfilesCollection::GetName(int args_count) {
if (args_count_names_[args_count] == NULL) {
const int kMaximumNameLength = 32;
char* name = NewArray<char>(kMaximumNameLength);
OS::SNPrintF(Vector<char>(name, kMaximumNameLength),
"args_count: %d", args_count);
OS::SNPrintF(Vector<char>(name, kMaximumNameLength), "%d", args_count);
args_count_names_[args_count] = name;
}
return args_count_names_[args_count];
}
void CpuProfilesCollection::AddPathToCurrentProfiles(
const Vector<CodeEntry*>& path) {
// As starting / stopping profiles is rare relative to calls to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. by copying the contents of the list to a local vector.
current_profiles_semaphore_->Wait();
for (int i = 0; i < current_profiles_.length(); ++i) {
current_profiles_[i]->AddPath(path);
}
current_profiles_semaphore_->Signal();
}
const char* ProfileGenerator::kAnonymousFunctionName = "(anonymous function)";
const char* ProfileGenerator::kProgramEntryName = "(program)";
const char* ProfileGenerator::kGarbageCollectorEntryName =
"(garbage collector)";
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
: profiles_(profiles) {
: profiles_(profiles),
program_entry_(
profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
gc_entry_(
profiles->NewCodeEntry(Logger::BUILTIN_TAG,
kGarbageCollectorEntryName)) {
}
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// Allocate space for stack frames + pc + function.
ScopedVector<CodeEntry*> entries(sample.frames_count + 2);
// Allocate space for stack frames + pc + function + vm-state.
ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
// As actual number of decoded code entries may vary, initialize
// entries vector with NULL values.
CodeEntry** entry = entries.start();
*entry++ = code_map_.FindEntry(sample.pc);
memset(entry, 0, entries.length() * sizeof(*entry));
if (sample.pc != NULL) {
*entry++ = code_map_.FindEntry(sample.pc);
if (sample.function != NULL) {
*entry = code_map_.FindEntry(sample.function);
if (*entry != NULL && !(*entry)->is_js_function()) {
*entry = NULL;
} else {
CodeEntry* pc_entry = *entries.start();
if (pc_entry == NULL || pc_entry->is_js_function())
if (sample.function != NULL) {
*entry = code_map_.FindEntry(sample.function);
if (*entry != NULL && !(*entry)->is_js_function()) {
*entry = NULL;
} else {
CodeEntry* pc_entry = *entries.start();
if (pc_entry == NULL || pc_entry->is_js_function())
*entry = NULL;
}
entry++;
}
for (const Address *stack_pos = sample.stack,
*stack_end = stack_pos + sample.frames_count;
stack_pos != stack_end;
++stack_pos) {
*entry++ = code_map_.FindEntry(*stack_pos);
}
entry++;
} else {
*entry++ = NULL;
}
for (const Address *stack_pos = sample.stack,
*stack_end = stack_pos + sample.frames_count;
stack_pos != stack_end;
++stack_pos) {
*entry++ = code_map_.FindEntry(*stack_pos);
if (FLAG_prof_browser_mode) {
// Put VM state as the topmost entry.
*entry++ = EntryForVMState(sample.state);
}
profile()->AddPath(entries);
profiles_->AddPathToCurrentProfiles(entries);
}
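To make the layout concrete: for a sample with frames_count == 2, the entries vector has 2 + 3 slots, filled as [pc entry, function entry (possibly NULL), frame 0, frame 1, VM-state entry], with the last slot written only under FLAG_prof_browser_mode. Slots left NULL by the memset are expected to contribute nothing to the recorded path (the path-building code that skips them is not shown in this hunk).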
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR

104
deps/v8/src/profile-generator.h

@ -28,29 +28,44 @@
#ifndef V8_PROFILE_GENERATOR_H_
#define V8_PROFILE_GENERATOR_H_
#ifdef ENABLE_CPP_PROFILES_PROCESSOR
#include "hashmap.h"
namespace v8 {
namespace internal {
class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
INLINE(CodeEntry(Logger::LogEventsAndTags tag_,
const char* name_,
const char* resource_name_,
int line_number_));
INLINE(CodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
const char* name,
const char* resource_name,
int line_number));
INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
INLINE(const char* name_prefix() const) { return name_prefix_; }
INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; }
INLINE(const char* name() const) { return name_; }
INLINE(const char* resource_name() const) { return resource_name_; }
INLINE(int line_number() const) { return line_number_; }
INLINE(unsigned call_uid() const) { return call_uid_; }
INLINE(bool is_js_function());
INLINE(const char* name()) { return name_; }
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
static const char* kEmptyNamePrefix;
private:
const unsigned call_uid_;
Logger::LogEventsAndTags tag_;
const char* name_prefix_;
const char* name_;
const char* resource_name_;
int line_number_;
static unsigned next_call_uid_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@ -67,7 +82,7 @@ class ProfileNode {
INLINE(CodeEntry* entry() const) { return entry_; }
INLINE(unsigned total_ticks() const) { return total_ticks_; }
INLINE(unsigned self_ticks() const) { return self_ticks_; }
void GetChildren(List<ProfileNode*>* children);
INLINE(const List<ProfileNode*>* children() const) { return &children_list_; }
void Print(int indent);
@ -85,23 +100,22 @@ class ProfileNode {
unsigned self_ticks_;
// CodeEntry* -> ProfileNode*
HashMap children_;
friend class ProfileTree;
List<ProfileNode*> children_list_;
DISALLOW_COPY_AND_ASSIGN(ProfileNode);
};
class ProfileTree BASE_EMBEDDED {
class ProfileTree {
public:
ProfileTree() : root_(new ProfileNode(NULL)) { }
ProfileTree();
~ProfileTree();
void AddPathFromEnd(const Vector<CodeEntry*>& path);
void AddPathFromStart(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
ProfileNode* root() { return root_; }
ProfileNode* root() const { return root_; }
void ShortPrint();
void Print() {
@ -112,6 +126,7 @@ class ProfileTree BASE_EMBEDDED {
template <typename Callback>
void TraverseBreadthFirstPostOrder(Callback* callback);
CodeEntry root_entry_;
ProfileNode* root_;
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
@ -120,18 +135,24 @@ class ProfileTree BASE_EMBEDDED {
class CpuProfile {
public:
CpuProfile() { }
CpuProfile(const char* title, unsigned uid)
: title_(title), uid_(uid) { }
// Add pc -> ... -> main() call path to the profile.
void AddPath(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
INLINE(ProfileTree* top_down()) { return &top_down_; }
INLINE(ProfileTree* bottom_up()) { return &bottom_up_; }
INLINE(const char* title() const) { return title_; }
INLINE(unsigned uid() const) { return uid_; }
INLINE(const ProfileTree* top_down() const) { return &top_down_; }
INLINE(const ProfileTree* bottom_up() const) { return &bottom_up_; }
void ShortPrint();
void Print();
private:
const char* title_;
unsigned uid_;
ProfileTree top_down_;
ProfileTree bottom_up_;
@ -139,7 +160,7 @@ class CpuProfile {
};
class CodeMap BASE_EMBEDDED {
class CodeMap {
public:
CodeMap() { }
INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
@ -148,6 +169,8 @@ class CodeMap BASE_EMBEDDED {
void AddAlias(Address alias, Address addr);
CodeEntry* FindEntry(Address addr);
void Print();
private:
struct CodeEntryInfo {
CodeEntryInfo(CodeEntry* an_entry, unsigned a_size)
@ -167,6 +190,11 @@ class CodeMap BASE_EMBEDDED {
};
typedef SplayTree<CodeTreeConfig> CodeTree;
class CodeTreePrinter {
public:
void Call(const Address& key, const CodeEntryInfo& value);
};
CodeTree tree_;
DISALLOW_COPY_AND_ASSIGN(CodeMap);
@ -178,16 +206,27 @@ class CpuProfilesCollection {
CpuProfilesCollection();
~CpuProfilesCollection();
void AddProfile(unsigned uid);
bool StartProfiling(const char* title, unsigned uid);
bool StartProfiling(String* title, unsigned uid);
CpuProfile* StopProfiling(const char* title);
CpuProfile* StopProfiling(String* title);
INLINE(List<CpuProfile*>* profiles()) { return &profiles_; }
CpuProfile* GetProfile(unsigned uid);
inline bool is_last_profile();
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix, String* name);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
INLINE(CpuProfile* profile()) { return profiles_.last(); }
// Called from profile generator thread.
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
private:
INLINE(const char* GetFunctionName(String* name));
INLINE(const char* GetFunctionName(const char* name));
const char* GetName(String* name);
const char* GetName(int args_count);
@ -196,12 +235,22 @@ class CpuProfilesCollection {
reinterpret_cast<char*>(key2)) == 0;
}
INLINE(static bool CpuProfilesMatch(void* key1, void* key2)) {
return key1 == key2;
}
// String::Hash -> const char*
HashMap function_and_resource_names_;
// args_count -> char*
List<char*> args_count_names_;
List<CodeEntry*> code_entries_;
List<CpuProfile*> profiles_;
// uid -> CpuProfile*
HashMap profiles_uids_;
// Accessed by VM thread and profile generator thread.
List<CpuProfile*> current_profiles_;
Semaphore* current_profiles_semaphore_;
DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
};
@ -223,6 +272,12 @@ class ProfileGenerator {
return profiles_->NewCodeEntry(tag, name);
}
INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
String* name)) {
return profiles_->NewCodeEntry(tag, name_prefix, name);
}
INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
int args_count)) {
return profiles_->NewCodeEntry(tag, args_count);
@ -232,16 +287,23 @@ class ProfileGenerator {
INLINE(CodeMap* code_map()) { return &code_map_; }
static const char* kAnonymousFunctionName;
static const char* kProgramEntryName;
static const char* kGarbageCollectorEntryName;
private:
INLINE(CpuProfile* profile()) { return profiles_->profile(); }
INLINE(CodeEntry* EntryForVMState(StateTag tag));
CpuProfilesCollection* profiles_;
CodeMap code_map_;
CodeEntry* program_entry_;
CodeEntry* gc_entry_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};
} } // namespace v8::internal
#endif // ENABLE_CPP_PROFILES_PROCESSOR
#endif // V8_PROFILE_GENERATOR_H_

Some files were not shown because too many files changed in this diff
