
Upgrade V8 to 2.4.2

Tag: v0.7.4-release
Author: Ryan Dahl
Commit: 8796ed2278
Changed files (number of changed lines in each):

  1. deps/v8/ChangeLog (73)
  2. deps/v8/SConstruct (14)
  3. deps/v8/include/v8-profiler.h (32)
  4. deps/v8/include/v8.h (40)
  5. deps/v8/node_cygwin_patch.diff (918)
  6. deps/v8/src/SConscript (5)
  7. deps/v8/src/SConscript.orig (324)
  8. deps/v8/src/accessors.h (6)
  9. deps/v8/src/api.cc (55)
  10. deps/v8/src/arm/assembler-arm.cc (76)
  11. deps/v8/src/arm/assembler-arm.h (15)
  12. deps/v8/src/arm/builtins-arm.cc (29)
  13. deps/v8/src/arm/code-stubs-arm.cc (4688)
  14. deps/v8/src/arm/code-stubs-arm.h (491)
  15. deps/v8/src/arm/codegen-arm.cc (5309)
  16. deps/v8/src/arm/codegen-arm.h (522)
  17. deps/v8/src/arm/constants-arm.h (27)
  18. deps/v8/src/arm/debug-arm.cc (97)
  19. deps/v8/src/arm/disasm-arm.cc (22)
  20. deps/v8/src/arm/frames-arm.cc (73)
  21. deps/v8/src/arm/frames-arm.h (5)
  22. deps/v8/src/arm/full-codegen-arm.cc (784)
  23. deps/v8/src/arm/ic-arm.cc (111)
  24. deps/v8/src/arm/macro-assembler-arm.cc (263)
  25. deps/v8/src/arm/macro-assembler-arm.h (31)
  26. deps/v8/src/arm/regexp-macro-assembler-arm.cc (18)
  27. deps/v8/src/arm/regexp-macro-assembler-arm.h (16)
  28. deps/v8/src/arm/simulator-arm.cc (89)
  29. deps/v8/src/arm/stub-cache-arm.cc (141)
  30. deps/v8/src/array.js (15)
  31. deps/v8/src/ast-inl.h (3)
  32. deps/v8/src/ast.cc (627)
  33. deps/v8/src/ast.h (371)
  34. deps/v8/src/bootstrapper.cc (11)
  35. deps/v8/src/builtins.cc (115)
  36. deps/v8/src/builtins.h (7)
  37. deps/v8/src/char-predicates-inl.h (12)
  38. deps/v8/src/circular-queue.cc (5)
  39. deps/v8/src/code-stubs.h (622)
  40. deps/v8/src/codegen.cc (27)
  41. deps/v8/src/codegen.h (619)
  42. deps/v8/src/compilation-cache.cc (45)
  43. deps/v8/src/compilation-cache.h (4)
  44. deps/v8/src/compiler.cc (78)
  45. deps/v8/src/contexts.h (8)
  46. deps/v8/src/conversions.cc (9)
  47. deps/v8/src/conversions.h (6)
  48. deps/v8/src/cpu-profiler.cc (18)
  49. deps/v8/src/d8.cc (2)
  50. deps/v8/src/data-flow.cc (278)
  51. deps/v8/src/data-flow.h (76)
  52. deps/v8/src/date.js (16)
  53. deps/v8/src/dateparser.h (6)
  54. deps/v8/src/debug.cc (16)
  55. deps/v8/src/debug.h (16)
  56. deps/v8/src/disassembler.cc (7)
  57. deps/v8/src/execution.cc (2)
  58. deps/v8/src/flag-definitions.h (2)
  59. deps/v8/src/flags.h (2)
  60. deps/v8/src/flow-graph.cc (763)
  61. deps/v8/src/flow-graph.h (180)
  62. deps/v8/src/frames-inl.h (15)
  63. deps/v8/src/frames.cc (215)
  64. deps/v8/src/frames.h (102)
  65. deps/v8/src/full-codegen.cc (750)
  66. deps/v8/src/full-codegen.h (215)
  67. deps/v8/src/func-name-inferrer.cc (14)
  68. deps/v8/src/func-name-inferrer.h (54)
  69. deps/v8/src/globals.h (12)
  70. deps/v8/src/handles.cc (2)
  71. deps/v8/src/heap-inl.h (3)
  72. deps/v8/src/heap-profiler.cc (440)
  73. deps/v8/src/heap-profiler.h (64)
  74. deps/v8/src/heap.cc (209)
  75. deps/v8/src/heap.h (195)
  76. deps/v8/src/ia32/builtins-ia32.cc (148)
  77. deps/v8/src/ia32/code-stubs-ia32.cc (4615)
  78. deps/v8/src/ia32/code-stubs-ia32.h (376)
  79. deps/v8/src/ia32/codegen-ia32.cc (4805)
  80. deps/v8/src/ia32/codegen-ia32.h (338)
  81. deps/v8/src/ia32/debug-ia32.cc (87)
  82. deps/v8/src/ia32/frames-ia32.cc (64)
  83. deps/v8/src/ia32/full-codegen-ia32.cc (1224)
  84. deps/v8/src/ia32/ic-ia32.cc (54)
  85. deps/v8/src/ia32/macro-assembler-ia32.cc (259)
  86. deps/v8/src/ia32/macro-assembler-ia32.h (43)
  87. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (2)
  88. deps/v8/src/ia32/stub-cache-ia32.cc (27)
  89. deps/v8/src/ia32/virtual-frame-ia32.cc (4)
  90. deps/v8/src/ic-inl.h (4)
  91. deps/v8/src/ic.cc (6)
  92. deps/v8/src/ic.h (2)
  93. deps/v8/src/json.js (16)
  94. deps/v8/src/jsregexp.cc (11)
  95. deps/v8/src/jump-target-heavy.h (4)
  96. deps/v8/src/liveedit.cc (50)
  97. deps/v8/src/log.cc (4)
  98. deps/v8/src/macro-assembler.h (27)
  99. deps/v8/src/macros.py (2)
  100. deps/v8/src/mark-compact.cc (315)

73
deps/v8/ChangeLog

@@ -1,3 +1,76 @@
2010-09-08: Version 2.4.2

        Fixed GC crash bug.

        Fixed stack corruption bug.

        Fixed compilation for newer C++ compilers that found Operand(0)
        ambiguous.


2010-09-06: Version 2.4.1

        Added the ability for an embedding application to receive a callback
        when V8 allocates (V8::AddMemoryAllocationCallback) or deallocates
        (V8::RemoveMemoryAllocationCallback) from the OS.

        Fixed several JSON bugs (including issue 855).

        Fixed memory overrun crash bug triggered during V8's tick-based
        profiling.

        Performance improvements on all platforms.


2010-09-01: Version 2.4.0

        Fix bug in Object.freeze and Object.seal when Array.prototype or
        Object.prototype is changed (issue 842).

        Update Array.splice to follow Safari and Firefox when called
        with zero arguments.

        Fix a missing live register when breaking at keyed loads on ARM.

        Performance improvements on all platforms.


2010-08-25: Version 2.3.11

        Fix bug in RegExp related to copy-on-write arrays.

        Refactoring of tools/test.py script, including the introduction of
        VARIANT_FLAGS that allows specification of sets of flags with which
        all tests should be run.

        Fix a bug in the handling of debug breaks in CallIC.

        Performance improvements on all platforms.


2010-08-23: Version 2.3.10

        Fix bug in bitops on ARM.

        Build fixes for unusual compilers.

        Track high water mark for RWX memory.

        Performance improvements on all platforms.


2010-08-18: Version 2.3.9

        Fix compilation for ARMv4 on OpenBSD/FreeBSD.

        Removed specialized handling of GCC 4.4 (issue 830).

        Fixed DST cache to take into account the suspension of DST in
        Egypt during the 2010 Ramadan (issue http://crbug.com/51855).

        Performance improvements on all platforms.


2010-08-16: Version 2.3.8

        Fixed build with strict aliasing on GCC 4.4 (issue 463).

14
deps/v8/SConstruct

@@ -54,15 +54,8 @@ if ARM_TARGET_LIB:
 else:
   ARM_LINK_FLAGS = []
 
-# TODO: Sort these issues out properly but as a temporary solution for gcc 4.4
-# on linux we need these compiler flags to avoid crashes in the v8 test suite
-# and avoid dtoa.c strict aliasing issues
-if os.environ.get('GCC_VERSION') == '44':
-  GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
-  GCC_DTOA_EXTRA_CCFLAGS = []
-else:
-  GCC_EXTRA_CCFLAGS = []
-  GCC_DTOA_EXTRA_CCFLAGS = []
+GCC_EXTRA_CCFLAGS = []
+GCC_DTOA_EXTRA_CCFLAGS = []
 
 ANDROID_FLAGS = ['-march=armv7-a',
                  '-mtune=cortex-a8',

@@ -299,6 +292,7 @@ V8_EXTRA_FLAGS = {
   'gcc': {
     'all': {
       'WARNINGFLAGS': ['-Wall',
+                       '-Werror',
                        '-W',
                        '-Wno-unused-parameter',
                        '-Wnon-virtual-dtor']

@@ -672,7 +666,7 @@ SIMPLE_OPTIONS = {
     'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
   },
   'os': {
-    'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
+    'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
     'default': OS_GUESS,
     'help': 'the os to build for (' + OS_GUESS + ')'
   },

32
deps/v8/include/v8-profiler.h

@@ -260,10 +260,17 @@ class V8EXPORT HeapGraphNode {
   /**
    * Returns node id. For the same heap object, the id remains the same
-   * across all snapshots.
+   * across all snapshots. Not applicable to aggregated heap snapshots
+   * as they only contain aggregated instances.
    */
   uint64_t GetId() const;
 
+  /**
+   * Returns the number of instances. Only applicable to aggregated
+   * heap snapshots.
+   */
+  int GetInstancesCount() const;
+
   /** Returns node's own size, in bytes. */
   int GetSelfSize() const;

@@ -313,6 +320,15 @@
  */
 class V8EXPORT HeapSnapshot {
  public:
+  enum Type {
+    kFull = 0,       // Heap snapshot with all instances and references.
+    kAggregated = 1  // Snapshot doesn't contain individual heap entries,
+                     // instead they are grouped by constructor name.
+  };
+
+  /** Returns heap snapshot type. */
+  Type GetType() const;
+
   /** Returns heap snapshot UID (assigned by the profiler.) */
   unsigned GetUid() const;

@@ -322,7 +338,10 @@ class V8EXPORT HeapSnapshot {
   /** Returns the root node of the heap graph. */
   const HeapGraphNode* GetRoot() const;
 
-  /** Returns a diff between this snapshot and another one. */
+  /**
+   * Returns a diff between this snapshot and another one. Only snapshots
+   * of the same type can be compared.
+   */
   const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const;
 };

@@ -341,8 +360,13 @@ class V8EXPORT HeapProfiler {
   /** Returns a profile by uid. */
   static const HeapSnapshot* FindSnapshot(unsigned uid);
 
-  /** Takes a heap snapshot and returns it. Title may be an empty string. */
-  static const HeapSnapshot* TakeSnapshot(Handle<String> title);
+  /**
+   * Takes a heap snapshot and returns it. Title may be an empty string.
+   * See HeapSnapshot::Type for types description.
+   */
+  static const HeapSnapshot* TakeSnapshot(
+      Handle<String> title,
+      HeapSnapshot::Type type = HeapSnapshot::kFull);
 };
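An aggregated snapshot groups heap entries by constructor name instead of recording every instance, and the new GetInstancesCount() reads the per-group count. A minimal sketch of how the extended profiler API composes, assuming the embedder already holds a live HandleScope and Context (the snapshot title is illustrative):

#include <v8.h>
#include <v8-profiler.h>
#include <cstdio>

// Take an aggregated snapshot and print per-constructor instance counts.
void DumpAggregatedHeap() {
  const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
      v8::String::New("after-startup"),   // title; may be empty
      v8::HeapSnapshot::kAggregated);     // new snapshot type
  const v8::HeapGraphNode* root = snapshot->GetRoot();
  for (int i = 0; i < root->GetChildrenCount(); ++i) {
    const v8::HeapGraphNode* node = root->GetChild(i)->GetToNode();
    v8::String::AsciiValue name(node->GetName());
    // GetInstancesCount() is only valid on aggregated snapshots;
    // calling GetId() here would trip the ASSERT instead.
    std::printf("%s: %d instances\n", *name, node->GetInstancesCount());
  }
}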

40
deps/v8/include/v8.h

@@ -1763,8 +1763,6 @@ class V8EXPORT AccessorInfo {
 typedef Handle<Value> (*InvocationCallback)(const Arguments& args);
 
-typedef int (*LookupCallback)(Local<Object> self, Local<String> name);
-
 /**
  * NamedProperty[Getter|Setter] are used as interceptors on object.
  * See ObjectTemplate::SetNamedPropertyHandler.

@@ -2361,6 +2359,30 @@ typedef void* (*CreateHistogramCallback)(const char* name,
 typedef void (*AddHistogramSampleCallback)(void* histogram, int sample);
 
+// --- M e m o r y   A l l o c a t i o n   C a l l b a c k ---
+
+enum ObjectSpace {
+  kObjectSpaceNewSpace = 1 << 0,
+  kObjectSpaceOldPointerSpace = 1 << 1,
+  kObjectSpaceOldDataSpace = 1 << 2,
+  kObjectSpaceCodeSpace = 1 << 3,
+  kObjectSpaceMapSpace = 1 << 4,
+  kObjectSpaceLoSpace = 1 << 5,
+
+  kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace |
+      kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace | kObjectSpaceMapSpace |
+      kObjectSpaceLoSpace
+};
+
+enum AllocationAction {
+  kAllocationActionAllocate = 1 << 0,
+  kAllocationActionFree = 1 << 1,
+  kAllocationActionAll = kAllocationActionAllocate | kAllocationActionFree
+};
+
+typedef void (*MemoryAllocationCallback)(ObjectSpace space,
+                                         AllocationAction action,
+                                         int size);
+
 // --- F a i l e d   A c c e s s   C h e c k   C a l l b a c k ---
 typedef void (*FailedAccessCheckCallback)(Local<Object> target,
                                           AccessType type,

@@ -2580,6 +2602,20 @@ class V8EXPORT V8 {
   */
   static void SetGlobalGCEpilogueCallback(GCCallback);
 
+  /**
+   * Enables the host application to provide a mechanism to be notified
+   * and perform custom logging when V8 Allocates Executable Memory.
+   */
+  static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                          ObjectSpace space,
+                                          AllocationAction action);
+
+  /**
+   * This function removes callback which was installed by
+   * AddMemoryAllocationCallback function.
+   */
+  static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+
   /**
    * Allows the host application to group objects together. If one
    * object in the group is alive, all objects in the group are alive.
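Together these declarations let an embedder watch V8 map and unmap memory from the OS, filtered by object space. A hedged usage sketch; the logging body and the choice of code space are illustrative, while the types and enumerators are the ones declared above:

#include <v8.h>
#include <cstdio>

// Log every allocation/free V8 performs for its code space.
static void OnV8Memory(v8::ObjectSpace space,
                       v8::AllocationAction action,
                       int size) {
  const char* verb =
      (action == v8::kAllocationActionAllocate) ? "allocated" : "freed";
  std::printf("V8 %s %d bytes (space mask 0x%x)\n",
              verb, size, static_cast<int>(space));
}

int main() {
  v8::V8::AddMemoryAllocationCallback(OnV8Memory,
                                      v8::kObjectSpaceCodeSpace,
                                      v8::kAllocationActionAll);
  // ... create a context and run scripts here ...
  v8::V8::RemoveMemoryAllocationCallback(OnV8Memory);
  return 0;
}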

918
deps/v8/node_cygwin_patch.diff

@@ -1,918 +0,0 @@
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 7219e9d..b8de1b8 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -670,7 +670,7 @@ SIMPLE_OPTIONS = {
'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
},
'os': {
- 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
+ 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
'default': OS_GUESS,
'help': 'the os to build for (' + OS_GUESS + ')'
},
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 8466a0c..9ff3414 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -206,6 +206,7 @@ SOURCES = {
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
+ 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
new file mode 100644
index 0000000..34410e8
--- /dev/null
+++ b/deps/v8/src/platform-cygwin.cc
@@ -0,0 +1,858 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for Cygwin goes here. For the POSIX compatible parts
+// the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <stdlib.h>
+
+// Ubuntu Dapper requires memory pages to be marked as
+// executable. Otherwise, OS raises an exception when executing code
+// in that page.
+#include <sys/types.h> // mmap & munmap
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <fcntl.h> // open
+#include <unistd.h> // sysconf
+#ifdef __GLIBC__
+#include <execinfo.h> // backtrace, backtrace_symbols
+#endif // def __GLIBC__
+#include <strings.h> // index
+#include <errno.h>
+#include <stdarg.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+#include "top.h"
+#include "v8threads.h"
+
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on Linux since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+ return ceil(x);
+}
+
+
+void OS::Setup() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly can cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srandom(static_cast<unsigned int>(seed));
+}
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
+ // Here gcc is telling us that we are on an ARM and gcc is assuming that we
+ // have VFP3 instructions. If gcc can assume it then so can we.
+ return 1u << VFP3;
+#elif CAN_USE_ARMV7_INSTRUCTIONS
+ return 1u << ARMv7;
+#else
+ return 0; // Linux runs on anything.
+#endif
+}
+
+
+#ifdef __arm__
+bool OS::ArmCpuHasFeature(CpuFeature feature) {
+ const char* search_string = NULL;
+ const char* file_name = "/proc/cpuinfo";
+ // Simple detection of VFP at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to ARM (mid 2009), no similar
+ // facility is universally available on the ARM architectures,
+ // so it's up to individual OSes to provide such.
+ //
+ // This is written as a straight shot one pass parser
+ // and not using STL string and ifstream because,
+ // on Linux, it's reading from a (non-mmap-able)
+ // character special device.
+ switch (feature) {
+ case VFP3:
+ search_string = "vfp";
+ break;
+ case ARMv7:
+ search_string = "ARMv7";
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ FILE* f = NULL;
+ const char* what = search_string;
+
+ if (NULL == (f = fopen(file_name, "r")))
+ return false;
+
+ int k;
+ while (EOF != (k = fgetc(f))) {
+ if (k == *what) {
+ ++what;
+ while ((*what != '\0') && (*what == fgetc(f))) {
+ ++what;
+ }
+ if (*what == '\0') {
+ fclose(f);
+ return true;
+ } else {
+ what = search_string;
+ }
+ }
+ }
+ fclose(f);
+
+ // Did not find string in the proc file.
+ return false;
+}
+#endif // def __arm__
+
+
+int OS::ActivationFrameAlignment() {
+#ifdef V8_TARGET_ARCH_ARM
+ // On EABI ARM targets this is required for fp correctness in the
+ // runtime system.
+ return 8;
+#elif V8_TARGET_ARCH_MIPS
+ return 8;
+#endif
+ // With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16 byte alignment such as movdqa on x86.
+ return 16;
+}
+
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ // An x86 store acts as a release barrier.
+ *ptr = value;
+}
+
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return tzname[0]; // The location of the timezone string on Cygwin.
+}
+
+
+double OS::LocalTimeOffset() {
+ //
+ // On Cygwin, struct tm does not contain a tm_gmtoff field.
+ time_t utc = time(NULL);
+ ASSERT(utc != -1);
+ struct tm* loc = localtime(&utc);
+ ASSERT(loc != NULL);
+ return static_cast<double>((mktime(loc) - utc) * msPerSecond);
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification). The estimate is conservative, ie, not all addresses in
+// 'allocated' space are actually allocated to our heap. The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ lowest_ever_allocated = Min(lowest_ever_allocated, address);
+ highest_ever_allocated =
+ Max(highest_ever_allocated,
+ reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+ return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+ return sysconf(_SC_PAGESIZE);
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) {
+ LOG(StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ UpdateAllocatedSpaceLimits(mbase, msize);
+ return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+ // TODO(1240712): munmap has a return value which is ignored here.
+ int result = munmap(address, size);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+ // TODO(1240712): mprotect has a return value which is ignored here.
+ mprotect(address, size, PROT_READ);
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+ // TODO(1240712): mprotect has a return value which is ignored here.
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ mprotect(address, size, prot);
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+ unsigned int ms = static_cast<unsigned int>(milliseconds);
+ usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+ // Redirect to std abort to signal abnormal program termination.
+ abort();
+}
+
+
+void OS::DebugBreak() {
+// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
+// which is the architecture of generated code).
+#if (defined(__arm__) || defined(__thumb__)) && \
+ defined(CAN_USE_ARMV5_INSTRUCTIONS)
+ asm("bkpt 0");
+#elif defined(__mips__)
+ asm("break");
+#else
+ asm("int $3");
+#endif
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation we abort scanning further entries.
+ FILE* fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) return;
+
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+ // This loop will terminate once the scanning hits an EOF.
+ while (true) {
+ uintptr_t start, end;
+ char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+ int c;
+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+ // Found a read-only executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/'));
+ if (c == EOF) break; // EOF: Was unexpected, just exit.
+
+ // Process the filename if found.
+ if (c == '/') {
+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' character.
+ lib_name[strlen(lib_name) - 1] = '\0';
+ } else {
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ }
+ LOG(SharedLibraryEvent(lib_name, start, end));
+ } else {
+ // Entry not describing executable data. Skip to end of line to setup
+ // reading the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
+ }
+ }
+ free(lib_name);
+ fclose(fp);
+#endif
+}
+
+
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+ // backtrace is a glibc extension.
+#ifdef __GLIBC__
+ int frames_size = frames.length();
+ ScopedVector<void*> addresses(frames_size);
+
+ int frames_count = backtrace(addresses.start(), frames_size);
+
+ char** symbols = backtrace_symbols(addresses.start(), frames_count);
+ if (symbols == NULL) {
+ return kStackWalkError;
+ }
+
+ for (int i = 0; i < frames_count; i++) {
+ frames[i].address = addresses[i];
+ // Format a text representation of the frame based on the information
+ // available.
+ SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+ "%s",
+ symbols[i]);
+ // Make sure line termination is in place.
+ frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+ }
+
+ free(symbols);
+
+ return frames_count;
+#else // ndef __GLIBC__
+ return 0;
+#endif // ndef __GLIBC__
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = mmap(NULL, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
+ size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != MAP_FAILED;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+
+#ifdef HAS_MAP_FIXED
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANONYMOUS, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED
+ kMmapFd, kMmapFdOffset)) {
+ return false;
+ }
+#else
+ if (mprotect(address, size, prot) != 0) {
+ return false;
+ }
+#endif
+
+ UpdateAllocatedSpaceLimits(address, size);
+ return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+ explicit PlatformData(ThreadHandle::Kind kind) {
+ Initialize(kind);
+ }
+
+ void Initialize(ThreadHandle::Kind kind) {
+ switch (kind) {
+ case ThreadHandle::SELF: thread_ = pthread_self(); break;
+ case ThreadHandle::INVALID: thread_ = kNoThread; break;
+ }
+ }
+
+ pthread_t thread_; // Thread handle for pthread.
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+ data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+ data_->Initialize(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+ delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+ return pthread_equal(data_->thread_, pthread_self());
+}
+
+
+bool ThreadHandle::IsValid() const {
+ return data_->thread_ != kNoThread;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+}
+
+
+Thread::~Thread() {
+}
+
+
+static void* ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ // This is also initialized by the first argument to pthread_create() but we
+ // don't know which thread will run first (the original thread or the new
+ // one) so we initialize it here too.
+ thread->thread_handle_data()->thread_ = pthread_self();
+ ASSERT(thread->IsValid());
+ thread->Run();
+ return NULL;
+}
+
+
+void Thread::Start() {
+ pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+ ASSERT(IsValid());
+}
+
+
+void Thread::Join() {
+ pthread_join(thread_handle_data()->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+ pthread_key_t key;
+ int result = pthread_key_create(&key, NULL);
+ USE(result);
+ ASSERT(result == 0);
+ return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ int result = pthread_key_delete(pthread_key);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+ sched_yield();
+}
+
+
+class CygwinMutex : public Mutex {
+ public:
+
+ CygwinMutex() {
+ pthread_mutexattr_t attrs;
+ memset(&attrs, 0, sizeof(attrs));
+
+ int result = pthread_mutexattr_init(&attrs);
+ ASSERT(result == 0);
+ result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT(result == 0);
+ result = pthread_mutex_init(&mutex_, &attrs);
+ ASSERT(result == 0);
+ }
+
+ virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
+
+ virtual int Lock() {
+ int result = pthread_mutex_lock(&mutex_);
+ return result;
+ }
+
+ virtual int Unlock() {
+ int result = pthread_mutex_unlock(&mutex_);
+ return result;
+ }
+
+ private:
+ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+ return new CygwinMutex();
+}
+
+
+class CygwinSemaphore : public Semaphore {
+ public:
+ explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
+ virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
+
+ virtual void Wait();
+ virtual bool Wait(int timeout);
+ virtual void Signal() { sem_post(&sem_); }
+ private:
+ sem_t sem_;
+};
+
+
+void CygwinSemaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&sem_);
+ if (result == 0) return; // Successfully got semaphore.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
+ (ts)->tv_sec = (tv)->tv_sec; \
+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \
+} while (false)
+#endif
+
+
+bool CygwinSemaphore::Wait(int timeout) {
+ const long kOneSecondMicros = 1000000; // NOLINT
+
+ // Split timeout into second and nanosecond parts.
+ struct timeval delta;
+ delta.tv_usec = timeout % kOneSecondMicros;
+ delta.tv_sec = timeout / kOneSecondMicros;
+
+ struct timeval current_time;
+ // Get the current time.
+ if (gettimeofday(&current_time, NULL) == -1) {
+ return false;
+ }
+
+ // Calculate time for end of timeout.
+ struct timeval end_time;
+ timeradd(&current_time, &delta, &end_time);
+
+ struct timespec ts;
+ TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+ // Wait for semaphore signalled or timeout.
+ while (true) {
+ int result = sem_timedwait(&sem_, &ts);
+ if (result == 0) return true; // Successfully got semaphore.
+ if (result > 0) {
+ // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
+ errno = result;
+ result = -1;
+ }
+ if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+ return new CygwinSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Sampler* active_sampler_ = NULL;
+static pthread_t vm_thread_ = 0;
+
+
+#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
+// Android runs a fairly new Linux kernel, so signal info is there,
+// but the C library doesn't have the structs defined.
+
+struct sigcontext {
+ uint32_t trap_no;
+ uint32_t error_code;
+ uint32_t oldmask;
+ uint32_t gregs[16];
+ uint32_t arm_cpsr;
+ uint32_t fault_address;
+};
+typedef uint32_t __sigset_t;
+typedef struct sigcontext mcontext_t;
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ __sigset_t uc_sigmask;
+} ucontext_t;
+enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
+
+#endif
+
+
+// A function that determines if a signal handler is called in the context
+// of a VM thread.
+//
+// The problem is that SIGPROF signal can be delivered to an arbitrary thread
+// (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2)
+// So, if the signal is being handled in the context of a non-VM thread,
+// it means that the VM thread is running, and trying to sample its stack can
+// cause a crash.
+static inline bool IsVmThread() {
+ // In the case of a single VM thread, this check is enough.
+ if (pthread_equal(pthread_self(), vm_thread_)) return true;
+ // If there are multiple threads that use VM, they must have a thread id
+ // stored in TLS. To verify that the thread is really executing VM,
+ // we check Top's data. Having that ThreadManager::RestoreThread first
+ // restores ThreadLocalTop from TLS, and only then erases the TLS value,
+ // reading Top::thread_id() should not be affected by races.
+ if (ThreadManager::HasId() && !ThreadManager::IsArchived() &&
+ ThreadManager::CurrentId() == Top::thread_id()) {
+ return true;
+ }
+ return false;
+}
+
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+#ifndef V8_HOST_ARCH_MIPS
+ USE(info);
+ if (signal != SIGPROF) return;
+ if (active_sampler_ == NULL) return;
+
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent();
+ if (sample == NULL) sample = &sample_obj;
+
+ // We always sample the VM state.
+ sample->state = VMState::current_state();
+
+#if 0
+ // If profiling, we extract the current pc and sp.
+ if (active_sampler_->IsProfiling()) {
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+#elif V8_HOST_ARCH_ARM
+// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
+#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+#else
+ sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
+ sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
+ sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
+#endif
+#elif V8_HOST_ARCH_MIPS
+ // Implement this on MIPS.
+ UNIMPLEMENTED();
+#endif
+ if (IsVmThread()) {
+ active_sampler_->SampleStack(sample);
+ }
+ }
+#endif
+
+ active_sampler_->Tick(sample);
+#endif
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() {
+ signal_handler_installed_ = false;
+ }
+
+ bool signal_handler_installed_;
+ struct sigaction old_signal_handler_;
+ struct itimerval old_timer_value_;
+};
+
+
+Sampler::Sampler(int interval, bool profiling)
+ : interval_(interval), profiling_(profiling), active_(false) {
+ data_ = new PlatformData();
+}
+
+
+Sampler::~Sampler() {
+ delete data_;
+}
+
+
+void Sampler::Start() {
+ // There can only be one active sampler at the time on POSIX
+ // platforms.
+ if (active_sampler_ != NULL) return;
+
+ vm_thread_ = pthread_self();
+
+ // Request profiling signals.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
+ data_->signal_handler_installed_ = true;
+
+ // Set the itimer to generate a tick for each interval.
+ itimerval itimer;
+ itimer.it_interval.tv_sec = interval_ / 1000;
+ itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
+ itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
+ itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
+ setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
+
+ // Set this sampler as the active sampler.
+ active_sampler_ = this;
+ active_ = true;
+}
+
+
+void Sampler::Stop() {
+ // Restore old signal handler
+ if (data_->signal_handler_installed_) {
+ setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
+ sigaction(SIGPROF, &data_->old_signal_handler_, 0);
+ data_->signal_handler_installed_ = false;
+ }
+
+ // This sampler is no longer the active sampler.
+ active_sampler_ = NULL;
+ active_ = false;
+}
+
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index d63ca5e..1091ba6 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -360,7 +360,11 @@ class ThreadHandle {
class Thread: public ThreadHandle {
public:
// Opaque data type for thread-local storage keys.
+#ifndef __CYGWIN__
enum LocalStorageKey {};
+#else
+ typedef void *LocalStorageKey;
+#endif
// Create new thread.
Thread();
diff --git a/deps/v8/tools/utils.py b/deps/v8/tools/utils.py
index 3a55722..505c398 100644
--- a/deps/v8/tools/utils.py
+++ b/deps/v8/tools/utils.py
@@ -59,6 +59,8 @@ def GuessOS():
return 'openbsd'
elif id == 'SunOS':
return 'solaris'
+ elif id.find('CYGWIN') >= 0:
+ return 'cygwin'
else:
return None

5
deps/v8/src/SConscript

@@ -62,7 +62,6 @@ SOURCES = {
     execution.cc
     factory.cc
     flags.cc
-    flow-graph.cc
     frame-element.cc
    frames.cc
     full-codegen.cc

@@ -121,6 +120,7 @@ SOURCES = {
     jump-target-light.cc
     virtual-frame-light.cc
     arm/builtins-arm.cc
+    arm/code-stubs-arm.cc
     arm/codegen-arm.cc
     arm/constants-arm.cc
     arm/cpu-arm.cc

@@ -159,6 +159,7 @@ SOURCES = {
     virtual-frame-heavy.cc
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
+    ia32/code-stubs-ia32.cc
     ia32/codegen-ia32.cc
     ia32/cpu-ia32.cc
     ia32/debug-ia32.cc

@@ -178,6 +179,7 @@ SOURCES = {
     virtual-frame-heavy.cc
     x64/assembler-x64.cc
     x64/builtins-x64.cc
+    x64/code-stubs-x64.cc
     x64/codegen-x64.cc
     x64/cpu-x64.cc
     x64/debug-x64.cc

@@ -200,7 +202,6 @@ SOURCES = {
   'os:android': ['platform-linux.cc', 'platform-posix.cc'],
   'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
   'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
-  'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'],
   'os:nullos': ['platform-nullos.cc'],
   'os:win32': ['platform-win32.cc'],
   'mode:release': [],

324
deps/v8/src/SConscript.orig

@@ -1,324 +0,0 @@
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from os.path import join, dirname, abspath
root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c
Import('context')
SOURCES = {
'all': Split("""
accessors.cc
allocation.cc
api.cc
assembler.cc
ast.cc
bootstrapper.cc
builtins.cc
checks.cc
circular-queue.cc
code-stubs.cc
codegen.cc
compilation-cache.cc
compiler.cc
contexts.cc
conversions.cc
counters.cc
cpu-profiler.cc
data-flow.cc
dateparser.cc
debug-agent.cc
debug.cc
disassembler.cc
diy-fp.cc
dtoa.cc
execution.cc
factory.cc
flags.cc
flow-graph.cc
frame-element.cc
frames.cc
full-codegen.cc
func-name-inferrer.cc
global-handles.cc
fast-dtoa.cc
fixed-dtoa.cc
handles.cc
hashmap.cc
heap-profiler.cc
heap.cc
ic.cc
interpreter-irregexp.cc
jsregexp.cc
jump-target.cc
liveedit.cc
log-utils.cc
log.cc
mark-compact.cc
messages.cc
objects.cc
objects-visiting.cc
oprofile-agent.cc
parser.cc
profile-generator.cc
property.cc
regexp-macro-assembler-irregexp.cc
regexp-macro-assembler.cc
regexp-stack.cc
register-allocator.cc
rewriter.cc
runtime.cc
scanner.cc
scopeinfo.cc
scopes.cc
serialize.cc
snapshot-common.cc
spaces.cc
string-stream.cc
stub-cache.cc
token.cc
top.cc
type-info.cc
unicode.cc
utils.cc
v8-counters.cc
v8.cc
v8threads.cc
variables.cc
version.cc
virtual-frame.cc
vm-state.cc
zone.cc
"""),
'arch:arm': Split("""
jump-target-light.cc
virtual-frame-light.cc
arm/builtins-arm.cc
arm/codegen-arm.cc
arm/constants-arm.cc
arm/cpu-arm.cc
arm/debug-arm.cc
arm/disasm-arm.cc
arm/frames-arm.cc
arm/full-codegen-arm.cc
arm/ic-arm.cc
arm/jump-target-arm.cc
arm/macro-assembler-arm.cc
arm/regexp-macro-assembler-arm.cc
arm/register-allocator-arm.cc
arm/stub-cache-arm.cc
arm/virtual-frame-arm.cc
arm/assembler-arm.cc
"""),
'arch:mips': Split("""
mips/assembler-mips.cc
mips/builtins-mips.cc
mips/codegen-mips.cc
mips/constants-mips.cc
mips/cpu-mips.cc
mips/debug-mips.cc
mips/disasm-mips.cc
mips/full-codegen-mips.cc
mips/frames-mips.cc
mips/ic-mips.cc
mips/jump-target-mips.cc
mips/macro-assembler-mips.cc
mips/register-allocator-mips.cc
mips/stub-cache-mips.cc
mips/virtual-frame-mips.cc
"""),
'arch:ia32': Split("""
jump-target-heavy.cc
virtual-frame-heavy.cc
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
ia32/codegen-ia32.cc
ia32/cpu-ia32.cc
ia32/debug-ia32.cc
ia32/disasm-ia32.cc
ia32/frames-ia32.cc
ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/macro-assembler-ia32.cc
ia32/regexp-macro-assembler-ia32.cc
ia32/register-allocator-ia32.cc
ia32/stub-cache-ia32.cc
ia32/virtual-frame-ia32.cc
"""),
'arch:x64': Split("""
jump-target-heavy.cc
virtual-frame-heavy.cc
x64/assembler-x64.cc
x64/builtins-x64.cc
x64/codegen-x64.cc
x64/cpu-x64.cc
x64/debug-x64.cc
x64/disasm-x64.cc
x64/frames-x64.cc
x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
x64/macro-assembler-x64.cc
x64/regexp-macro-assembler-x64.cc
x64/register-allocator-x64.cc
x64/stub-cache-x64.cc
x64/virtual-frame-x64.cc
"""),
'simulator:arm': ['arm/simulator-arm.cc'],
'simulator:mips': ['mips/simulator-mips.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
'mode:debug': [
'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
]
}
D8_FILES = {
'all': [
'd8.cc', 'd8-debug.cc'
],
'os:linux': [
'd8-posix.cc'
],
'os:macos': [
'd8-posix.cc'
],
'os:android': [
'd8-posix.cc'
],
'os:freebsd': [
'd8-posix.cc'
],
'os:openbsd': [
'd8-posix.cc'
],
'os:solaris': [
'd8-posix.cc'
],
'os:win32': [
'd8-windows.cc'
],
'os:nullos': [
'd8-windows.cc' # Empty implementation at the moment.
],
'console:readline': [
'd8-readline.cc'
]
}
LIBRARY_FILES = '''
runtime.js
v8natives.js
array.js
string.js
uri.js
math.js
messages.js
apinatives.js
date.js
regexp.js
json.js
liveedit-debugger.js
mirror-debugger.js
debug-debugger.js
'''.split()
def Abort(message):
  print message
  sys.exit(1)


def ConfigureObjectFiles():
  env = Environment()
  env.Replace(**context.flags['v8'])
  context.ApplyEnvOverrides(env)
  env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
  env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')

  # Build the standard platform-independent source files.
  source_files = context.GetRelevantSources(SOURCES)

  d8_files = context.GetRelevantSources(D8_FILES)
  d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
  d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
  d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]

  # Combine the JavaScript library files into a single C++ file and
  # compile it.
  library_files = [s for s in LIBRARY_FILES]
  library_files.append('macros.py')
  libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
  libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])

  # Build dtoa.
  dtoa_env = env.Copy()
  dtoa_env.Replace(**context.flags['dtoa'])
  dtoa_files = ['dtoa-config.c']
  dtoa_obj = context.ConfigureObject(dtoa_env, dtoa_files)

  source_objs = context.ConfigureObject(env, source_files)
  non_snapshot_files = [dtoa_obj, source_objs]

  # Create snapshot if necessary. For cross compilation you should either
  # do without snapshots and take the performance hit or you should build a
  # host VM with the simulator=arm and snapshot=on options and then take the
  # resulting snapshot.cc file from obj/release and put it in the src
  # directory. Then rebuild the VM with the cross compiler and specify
  # snapshot=nobuild on the scons command line.
  empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc')
  mksnapshot_env = env.Copy()
  mksnapshot_env.Replace(**context.flags['mksnapshot'])
  mksnapshot_src = 'mksnapshot.cc'
  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
  if context.use_snapshot:
    if context.build_snapshot:
      snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
    else:
      snapshot_cc = 'snapshot.cc'
    snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
  else:
    snapshot_obj = empty_snapshot_obj
  library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
  return (library_objs, d8_objs, [mksnapshot])


(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles()
Return('library_objs d8_objs mksnapshot')

6
deps/v8/src/accessors.h

@@ -75,8 +75,10 @@ class Accessors : public AllStatic {
   };
 
   // Accessor functions called directly from the runtime system.
-  static Object* FunctionGetPrototype(Object* object, void*);
-  static Object* FunctionSetPrototype(JSObject* object, Object* value, void*);
+  MUST_USE_RESULT static Object* FunctionGetPrototype(Object* object, void*);
+  MUST_USE_RESULT static Object* FunctionSetPrototype(JSObject* object,
+                                                      Object* value,
+                                                      void*);
  private:
   // Accessor functions only used through the descriptor.
   static Object* FunctionGetLength(Object* object, void*);
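MUST_USE_RESULT flags accessor entry points whose returned Object* can encode a failure that the caller must check rather than drop. A standalone sketch of what the annotation buys under GCC; this stand-in only mirrors the common warn_unused_result form, not V8's actual macro definition:

#include <cstdio>

#if defined(__GNUC__)
#define MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

MUST_USE_RESULT int Allocate() { return 0; }  // stand-in for a fallible call

int main() {
  Allocate();           // GCC warns here: return value ignored
  int rc = Allocate();  // OK: the result is consumed
  std::printf("%d\n", rc);
  return 0;
}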

55
deps/v8/src/api.cc

@@ -1136,13 +1136,18 @@ ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
 ScriptData* ScriptData::New(const char* data, int length) {
   // Return an empty ScriptData if the length is obviously invalid.
   if (length % sizeof(unsigned) != 0) {
-    return new i::ScriptDataImpl(i::Vector<unsigned>());
+    return new i::ScriptDataImpl();
   }
 
-  // Copy the data to ensure it is properly aligned.
   int deserialized_data_length = length / sizeof(unsigned);
+  // If aligned, don't create a copy of the data.
+  if (reinterpret_cast<intptr_t>(data) % sizeof(unsigned) == 0) {
+    return new i::ScriptDataImpl(data, length);
+  }
+  // Copy the data to align it.
   unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
-  memcpy(deserialized_data, data, length);
+  i::MemCopy(deserialized_data, data, length);
 
   return new i::ScriptDataImpl(
       i::Vector<unsigned>(deserialized_data, deserialized_data_length));

@@ -3905,6 +3910,22 @@
 }
 
+void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                     ObjectSpace space,
+                                     AllocationAction action) {
+  if (IsDeadCheck("v8::V8::AddMemoryAllocationCallback()")) return;
+  i::MemoryAllocator::AddMemoryAllocationCallback(callback,
+                                                  space,
+                                                  action);
+}
+
+
+void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
+  if (IsDeadCheck("v8::V8::RemoveMemoryAllocationCallback()")) return;
+  i::MemoryAllocator::RemoveMemoryAllocationCallback(callback);
+}
+
+
 void V8::PauseProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   PauseProfilerEx(PROFILER_MODULE_CPU);

@@ -4592,10 +4613,18 @@ Handle<String> HeapGraphNode::GetName() const {
 uint64_t HeapGraphNode::GetId() const {
   IsDeadCheck("v8::HeapGraphNode::GetId");
+  ASSERT(ToInternal(this)->snapshot()->type() != i::HeapSnapshot::kAggregated);
   return ToInternal(this)->id();
 }
 
+int HeapGraphNode::GetInstancesCount() const {
+  IsDeadCheck("v8::HeapGraphNode::GetInstancesCount");
+  ASSERT(ToInternal(this)->snapshot()->type() == i::HeapSnapshot::kAggregated);
+  return static_cast<int>(ToInternal(this)->id());
+}
+
 int HeapGraphNode::GetSelfSize() const {
   IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
   return ToInternal(this)->self_size();

@@ -4677,6 +4706,12 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
 }
 
+HeapSnapshot::Type HeapSnapshot::GetType() const {
+  IsDeadCheck("v8::HeapSnapshot::GetType");
+  return static_cast<HeapSnapshot::Type>(ToInternal(this)->type());
+}
+
 unsigned HeapSnapshot::GetUid() const {
   IsDeadCheck("v8::HeapSnapshot::GetUid");
   return ToInternal(this)->uid();

@@ -4724,10 +4759,22 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) {
 }
 
-const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title) {
+const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
+                                               HeapSnapshot::Type type) {
   IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
+  i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
+  switch (type) {
+    case HeapSnapshot::kFull:
+      internal_type = i::HeapSnapshot::kFull;
+      break;
+    case HeapSnapshot::kAggregated:
+      internal_type = i::HeapSnapshot::kAggregated;
+      break;
+    default:
+      UNREACHABLE();
+  }
   return reinterpret_cast<const HeapSnapshot*>(
-      i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title)));
+      i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title), internal_type));
 }
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
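The ScriptData::New() hunk above now wraps an already-aligned caller buffer without copying, and only falls back to an aligned copy (via i::MemCopy) for misaligned input. A sketch of the precompile-cache round trip this serves; persisting the bytes between runs is the embedder's concern, and the function name here is illustrative:

#include <v8.h>

// Rebuild ScriptData from cached bytes and compile with it.
v8::Handle<v8::Script> CompileWithCache(v8::Handle<v8::String> source,
                                        const char* cached_data,
                                        int cached_length) {
  v8::ScriptData* pre_data = v8::ScriptData::New(cached_data, cached_length);
  v8::Handle<v8::Script> script =
      v8::Script::Compile(source, NULL, pre_data);  // no explicit origin
  delete pre_data;  // the embedder owns the ScriptData
  return script;
}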

76
deps/v8/src/arm/assembler-arm.cc

@@ -1809,6 +1809,7 @@ void Assembler::stc2(Coprocessor coproc,
// Support for VFP.
void Assembler::vldr(const DwVfpRegister dst,
const Register base,
int offset,
@@ -1820,6 +1821,7 @@ void Assembler::vldr(const DwVfpRegister dst,
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
+ASSERT(offset >= 0);
emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
@@ -1836,7 +1838,10 @@ void Assembler::vldr(const SwVfpRegister dst,
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
-emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+ASSERT(offset >= 0);
+int sd, d;
+dst.split_code(&sd, &d);
+emit(cond | d*B22 | 0xD9*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
}
@@ -1852,11 +1857,31 @@ void Assembler::vstr(const DwVfpRegister src,
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
+ASSERT(offset >= 0);
emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
}
+void Assembler::vstr(const SwVfpRegister src,
+const Register base,
+int offset,
+const Condition cond) {
+// MEM(Rbase + offset) = SSrc.
+// Instruction details available in ARM DDI 0406A, A8-786.
+// cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
+// Vdst(15-12) | 1010(11-8) | (offset/4)
+ASSERT(CpuFeatures::IsEnabled(VFP3));
+ASSERT(offset % 4 == 0);
+ASSERT((offset / 4) < 256);
+ASSERT(offset >= 0);
+int sd, d;
+src.split_code(&sd, &d);
+emit(cond | d*B22 | 0xD8*B20 | base.code()*B16 | sd*B12 |
+0xA*B8 | ((offset / 4) & 255));
+}
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@@ -1959,8 +1984,10 @@ void Assembler::vmov(const SwVfpRegister dst,
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
ASSERT(CpuFeatures::IsEnabled(VFP3));
-emit(cond | 0xE*B24 | 0xB*B20 |
-dst.code()*B12 | 0x5*B9 | B6 | src.code());
+int sd, d, sm, m;
+dst.split_code(&sd, &d);
+src.split_code(&sm, &m);
+emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}
@@ -2014,8 +2041,9 @@ void Assembler::vmov(const SwVfpRegister dst,
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!src.is(pc));
-emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
-src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
+int sn, n;
+dst.split_code(&sn, &n);
+emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
@@ -2028,8 +2056,9 @@ void Assembler::vmov(const Register dst,
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(!dst.is(pc));
-emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
-dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
+int sn, n;
+src.split_code(&sn, &n);
+emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
@@ -2079,16 +2108,21 @@ static bool IsDoubleVFPType(VFPType type) {
}
-// Depending on split_last_bit split binary representation of reg_code into Vm:M
-// or M:Vm form (where M is single bit).
-static void SplitRegCode(bool split_last_bit,
+// Split five bit reg_code based on size of reg_type.
+// 32-bit register codes are Vm:M
+// 64-bit register codes are M:Vm
+// where Vm is four bits, and M is a single bit.
+static void SplitRegCode(VFPType reg_type,
int reg_code,
int* vm,
int* m) {
-if (split_last_bit) {
+ASSERT((reg_code >= 0) && (reg_code <= 31));
+if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
+// 32 bit type.
*m = reg_code & 0x1;
*vm = reg_code >> 1;
} else {
+// 64 bit type.
*m = (reg_code & 0x10) >> 4;
*vm = reg_code & 0x0F;
}
@@ -2101,6 +2135,11 @@ static Instr EncodeVCVT(const VFPType dst_type,
const VFPType src_type,
const int src_code,
const Condition cond) {
+ASSERT(src_type != dst_type);
+int D, Vd, M, Vm;
+SplitRegCode(src_type, src_code, &Vm, &M);
+SplitRegCode(dst_type, dst_code, &Vd, &D);
if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
// Conversion between IEEE floating point and 32-bit integer.
// Instruction details available in ARM DDI 0406B, A8.6.295.
@@ -2108,22 +2147,17 @@ static Instr EncodeVCVT(const VFPType dst_type,
// Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
-int sz, opc2, D, Vd, M, Vm, op;
+int sz, opc2, op;
if (IsIntegerVFPType(dst_type)) {
opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
op = 1;  // round towards zero
-SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
-SplitRegCode(true, dst_code, &Vd, &D);
} else {
ASSERT(IsIntegerVFPType(src_type));
opc2 = 0x0;
sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
-SplitRegCode(true, src_code, &Vm, &M);
-SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D);
}
return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
@@ -2133,13 +2167,7 @@ static Instr EncodeVCVT(const VFPType dst_type,
// Instruction details available in ARM DDI 0406B, A8.6.298.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
// Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-int sz, D, Vd, M, Vm;
-ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type));
-sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
-SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D);
-SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M);
+int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
}
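The register-code split these hunks keep applying is easy to sanity-check in isolation. A standalone sketch of the same rule (plain C++ mirroring split_code from the patch; nothing here is V8 API): single-precision codes s0..s31 put the top four bits in the four-bit field and the low bit in M, double-precision codes do the reverse.

#include <cassert>

// Mirrors SwVfpRegister::split_code and DwVfpRegister::split_code.
void SplitSingle(int code, int* vm, int* m) { *m = code & 0x1; *vm = code >> 1; }
void SplitDouble(int code, int* vm, int* m) { *m = (code & 0x10) >> 4; *vm = code & 0x0F; }

int main() {
  int vm, m;
  SplitSingle(13, &vm, &m);   // s13 = 0b01101 -> Vm = 6, M = 1
  assert(vm == 6 && m == 1);
  SplitDouble(13, &vm, &m);   // d13 -> Vm = 13, M = 0 (M matters only above d15)
  assert(vm == 13 && m == 0);
  return 0;
}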

15
deps/v8/src/arm/assembler-arm.h

@@ -120,6 +120,11 @@ struct SwVfpRegister {
ASSERT(is_valid());
return 1 << code_;
}
+void split_code(int* vm, int* m) const {
+ASSERT(is_valid());
+*m = code_ & 0x1;
+*vm = code_ >> 1;
+}
int code_;
};
@@ -152,6 +157,11 @@ struct DwVfpRegister {
ASSERT(is_valid());
return 1 << code_;
}
+void split_code(int* vm, int* m) const {
+ASSERT(is_valid());
+*m = (code_ & 0x10) >> 4;
+*vm = code_ & 0x0F;
+}
int code_;
};
@@ -966,6 +976,11 @@ class Assembler : public Malloced {
int offset,  // Offset must be a multiple of 4.
const Condition cond = al);
+void vstr(const SwVfpRegister src,
+const Register base,
+int offset,  // Offset must be a multiple of 4.
+const Condition cond = al);
void vmov(const DwVfpRegister dst,
double imm,
const Condition cond = al);

29
deps/v8/src/arm/builtins-arm.cc

@@ -125,7 +125,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
__ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
// Field JSArray::kElementsOffset is initialized later.
-__ mov(scratch3, Operand(0));
+__ mov(scratch3, Operand(0, RelocInfo::NONE));
__ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
// Calculate the location of the elements array and set elements array member
@@ -311,7 +311,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
-__ cmp(r0, Operand(0));
+__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(ne, &argc_one_or_more);
// Handle construction of an empty array.
@@ -481,6 +481,13 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// TODO(849): implement custom construct stub.
+// Generate a copy of the generic stub for now.
+Generate_JSConstructStubGeneric(masm);
+}
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -505,12 +512,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// r0: number of arguments
// r1: called object
__ bind(&non_function_call);
-// CALL_NON_FUNCTION expects the non-function constructor as receiver
-// (instead of the original receiver from the call site). The receiver is
-// stack element argc.
-__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Set expected number of arguments to zero (not changing r0).
-__ mov(r2, Operand(0));
+__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
@@ -840,7 +843,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r5-r7, cp may be clobbered
// Clear the context before we push it when entering the JS frame.
-__ mov(cp, Operand(0));
+__ mov(cp, Operand(0, RelocInfo::NONE));
// Enter an internal frame.
__ EnterInternalFrame();
@@ -1027,7 +1030,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ str(r1, MemOperand(r2, -kPointerSize));
// Clear r1 to indicate a non-function being called.
-__ mov(r1, Operand(0));
+__ mov(r1, Operand(0, RelocInfo::NONE));
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -1057,7 +1060,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label function;
__ tst(r1, r1);
__ b(ne, &function);
-__ mov(r2, Operand(0));  // expected arguments is 0 for CALL_NON_FUNCTION
+// Expected number of arguments is 0 for CALL_NON_FUNCTION.
+__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
@@ -1073,8 +1077,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
-__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset));
-__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ cmp(r2, r0);  // Check formal and actual parameter counts.
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET, ne);
@@ -1121,7 +1124,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Push current limit and index.
__ bind(&okay);
__ push(r0);  // limit
-__ mov(r1, Operand(0));  // initial index
+__ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
__ push(r1);
// Change context eagerly to get the right global object if necessary.

4688
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

491
deps/v8/src/arm/code-stubs-arm.h

@@ -0,0 +1,491 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_
#include "ic-inl.h"
namespace v8 {
namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
explicit TranscendentalCacheStub(TranscendentalCache::Type type)
: type_(type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
Runtime::FunctionId RuntimeFunction();
};
class ToBooleanStub: public CodeStub {
public:
explicit ToBooleanStub(Register tos) : tos_(tos) { }
void Generate(MacroAssembler* masm);
private:
Register tos_;
Major MajorKey() { return ToBoolean; }
int MinorKey() { return tos_.code(); }
};
class GenericBinaryOpStub : public CodeStub {
public:
static const int kUnknownIntValue = -1;
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
Register lhs,
Register rhs,
int constant_rhs = kUnknownIntValue)
: op_(op),
mode_(mode),
lhs_(lhs),
rhs_(rhs),
constant_rhs_(constant_rhs),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
runtime_operands_type_(BinaryOpIC::DEFAULT),
name_(NULL) { }
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
lhs_(LhsRegister(RegisterBits::decode(key))),
rhs_(RhsRegister(RegisterBits::decode(key))),
constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
runtime_operands_type_(type_info),
name_(NULL) { }
private:
Token::Value op_;
OverwriteMode mode_;
Register lhs_;
Register rhs_;
int constant_rhs_;
bool specialized_on_rhs_;
BinaryOpIC::TypeInfo runtime_operands_type_;
char* name_;
static const int kMaxKnownRhs = 0x40000000;
static const int kKnownRhsKeyBits = 6;
// Minor key encoding in 17 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 6> {};
class TypeInfoBits: public BitField<int, 8, 2> {};
class RegisterBits: public BitField<bool, 10, 1> {};
class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
(lhs_.is(r1) && rhs_.is(r0)));
// Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| KnownIntBits::encode(MinorKeyForKnownInt())
| TypeInfoBits::encode(runtime_operands_type_)
| RegisterBits::encode(lhs_.is(r0));
}
void Generate(MacroAssembler* masm);
void HandleNonSmiBitwiseOp(MacroAssembler* masm,
Register lhs,
Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
Register rhs,
const Builtins::JavaScript& builtin);
void GenerateTypeTransition(MacroAssembler* masm);
static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
if (constant_rhs == kUnknownIntValue) return false;
if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
if (op == Token::MOD) {
if (constant_rhs <= 1) return false;
if (constant_rhs <= 10) return true;
if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
return false;
}
return false;
}
int MinorKeyForKnownInt() {
if (!specialized_on_rhs_) return 0;
if (constant_rhs_ <= 10) return constant_rhs_ + 1;
ASSERT(IsPowerOf2(constant_rhs_));
int key = 12;
int d = constant_rhs_;
while ((d & 1) == 0) {
key++;
d >>= 1;
}
ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
return key;
}
int KnownBitsForMinorKey(int key) {
if (!key) return 0;
if (key <= 11) return key - 1;
int d = 1;
while (key != 12) {
key--;
d <<= 1;
}
return d;
}
Register LhsRegister(bool lhs_is_r0) {
return lhs_is_r0 ? r0 : r1;
}
Register RhsRegister(bool lhs_is_r0) {
return lhs_is_r0 ? r1 : r0;
}
bool ShouldGenerateSmiCode() {
return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS;
}
bool ShouldGenerateFPCode() {
return runtime_operands_type_ != BinaryOpIC::STRINGS;
}
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(runtime_operands_type_);
}
const char* GetName();
#ifdef DEBUG
void Print() {
if (!specialized_on_rhs_) {
PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
} else {
PrintF("GenericBinaryOpStub (%s by %d)\n",
Token::String(op_),
constant_rhs_);
}
}
#endif
};
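The known-RHS encoding above is its own little scheme, so a quick standalone roundtrip check may help: constants 0..10 map to keys 1..11, larger powers of two map to 12 plus their extra trailing-zero count, and key 0 means "no known constant". A plain C++ sketch mirroring MinorKeyForKnownInt and KnownBitsForMinorKey (illustrative only, not V8 code):

#include <cassert>

int KeyForKnownInt(int c) {
  if (c <= 10) return c + 1;
  int key = 12;                          // c is a power of two here
  while ((c & 1) == 0) { key++; c >>= 1; }
  return key;
}

int KnownIntForKey(int key) {
  if (!key) return 0;
  if (key <= 11) return key - 1;
  int d = 1;
  while (key != 12) { key--; d <<= 1; }
  return d;
}

int main() {
  assert(KnownIntForKey(KeyForKnownInt(7)) == 7);        // small constant
  assert(KnownIntForKey(KeyForKnownInt(4096)) == 4096);  // power of two
  return 0;
}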
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
};
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
}
private:
Major MajorKey() { return StringAdd; }
int MinorKey() { return string_check_ ? 0 : 1; }
void Generate(MacroAssembler* masm);
// Should the stub check whether arguments are strings?
bool string_check_;
};
class SubStringStub: public CodeStub {
public:
SubStringStub() {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public CodeStub {
public:
StringCompareStub() { }
// Compare two flat ASCII strings and returns result in r0.
// Does not use the stack.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
private:
Major MajorKey() { return StringCompare; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
// This stub can do a fast mod operation without using fp.
// It is tail called from the GenericBinaryOpStub and it always
// returns an answer. It never causes GC so it doesn't need a real frame.
//
// The inputs are always positive Smis. This is never called
// where the denominator is a power of 2. We handle that separately.
//
// If we consider the denominator as an odd number multiplied by a power of 2,
// then:
// * The exponent (power of 2) is in the shift_distance register.
// * The odd number is in the odd_number register. It is always in the range
// of 3 to 25.
// * The bits from the numerator that are to be copied to the answer (there are
// shift_distance of them) are in the mask_bits register.
// * The other bits of the numerator have been shifted down and are in the lhs
// register.
class IntegerModStub : public CodeStub {
public:
IntegerModStub(Register result,
Register shift_distance,
Register odd_number,
Register mask_bits,
Register lhs,
Register scratch)
: result_(result),
shift_distance_(shift_distance),
odd_number_(odd_number),
mask_bits_(mask_bits),
lhs_(lhs),
scratch_(scratch) {
// We don't code these in the minor key, so they should always be the same.
// We don't really want to fix that since this stub is rather large and we
// don't want many copies of it.
ASSERT(shift_distance_.is(r9));
ASSERT(odd_number_.is(r4));
ASSERT(mask_bits_.is(r3));
ASSERT(scratch_.is(r5));
}
private:
Register result_;
Register shift_distance_;
Register odd_number_;
Register mask_bits_;
Register lhs_;
Register scratch_;
// Minor key encoding in 16 bits.
class ResultRegisterBits: public BitField<int, 0, 4> {};
class LhsRegisterBits: public BitField<int, 4, 4> {};
Major MajorKey() { return IntegerMod; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return ResultRegisterBits::encode(result_.code())
| LhsRegisterBits::encode(lhs_.code());
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "IntegerModStub"; }
// Utility functions.
void DigitSum(MacroAssembler* masm,
Register lhs,
int mask,
int shift,
Label* entry);
void DigitSum(MacroAssembler* masm,
Register lhs,
Register scratch,
int mask,
int shift1,
int shift2,
Label* entry);
void ModGetInRangeBySubtraction(MacroAssembler* masm,
Register lhs,
int shift,
int rhs);
void ModReduce(MacroAssembler* masm,
Register lhs,
int max,
int denominator);
void ModAnswer(MacroAssembler* masm,
Register result,
Register shift_distance,
Register mask_bits,
Register sum_of_digits);
#ifdef DEBUG
void Print() { PrintF("IntegerModStub\n"); }
#endif
};
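The comment block above compresses a fair amount of arithmetic, so a worked instance may help: writing the denominator as odd * 2^shift, the remainder is the shifted-down bits reduced modulo the odd factor, re-shifted, with the masked low bits re-attached. A hedged C++ sketch of the identity the stub relies on (not the stub itself, which is hand-written assembly):

#include <cassert>
#include <cstdint>

// x mod (odd << shift) == (((x >> shift) mod odd) << shift) | (x & mask),
// where mask = (1 << shift) - 1. In the stub's terms: lhs = x >> shift,
// mask_bits = x & mask, shift_distance = shift, odd_number = odd.
uint32_t ModByDecomposition(uint32_t x, uint32_t odd, uint32_t shift) {
  uint32_t mask_bits = x & ((1u << shift) - 1);
  uint32_t lhs = x >> shift;
  return ((lhs % odd) << shift) | mask_bits;
}

int main() {
  // Denominator 12 = 3 << 2: shift_distance = 2, odd_number = 3.
  for (uint32_t x = 0; x < 1000; x++) {
    assert(ModByDecomposition(x, 3, 2) == x % 12);
  }
  return 0;
}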
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
Register scratch)
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch) { }
private:
Register the_int_;
Register the_heap_number_;
Register scratch_;
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
| ScratchRegisterBits::encode(scratch_.code());
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
#ifdef DEBUG
void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache the code jumps to
// the label not_found with only the content of register object unchanged.
static void GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
bool object_is_smi,
Label* not_found);
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "NumberToStringStub"; }
};
class RecordWriteStub : public CodeStub {
public:
RecordWriteStub(Register object, Register offset, Register scratch)
: object_(object), offset_(offset), scratch_(scratch) { }
void Generate(MacroAssembler* masm);
private:
Register object_;
Register offset_;
Register scratch_;
// Minor key encoding in 12 bits. 4 bits for each of the three
// registers (object, offset and scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class OffsetBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
int MinorKey() {
// Encode the registers.
return ObjectBits::encode(object_.code()) |
OffsetBits::encode(offset_.code()) |
ScratchBits::encode(scratch_.code());
}
#ifdef DEBUG
void Print() {
PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
" (scratch reg %d)\n",
object_.code(), offset_.code(), scratch_.code());
}
#endif
};
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
class RegExpCEntryStub: public CodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return RegExpCEntry; }
int MinorKey() { return 0; }
const char* GetName() { return "RegExpCEntryStub"; }
};
} } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_
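Several of the stubs in this new header pack their parameters into MinorKey() with V8's BitField template. A minimal re-implementation of that pattern, as an illustrative sketch rather than the real V8 header:

#include <cassert>
#include <cstdint>

// Each field owns the [shift, shift + size) slice of a 32-bit key.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t key) { return static_cast<T>((key & kMask) >> shift); }
};

// Mimics RecordWriteStub's 12-bit layout: OOOOAAAASSSS.
typedef BitField<uint32_t, 0, 4> ScratchBits;
typedef BitField<uint32_t, 4, 4> OffsetBits;
typedef BitField<uint32_t, 8, 4> ObjectBits;

int main() {
  uint32_t key = ObjectBits::encode(5) | OffsetBits::encode(3) | ScratchBits::encode(9);
  assert(ObjectBits::decode(key) == 5);
  assert(OffsetBits::decode(key) == 3);
  assert(ScratchBits::decode(key) == 9);
  return 0;
}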

5309
deps/v8/src/arm/codegen-arm.cc

File diff suppressed because it is too large

522
deps/v8/src/arm/codegen-arm.h

@@ -28,8 +28,9 @@
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
-#include "ic-inl.h"
#include "ast.h"
+#include "code-stubs-arm.h"
+#include "ic-inl.h"
namespace v8 {
namespace internal {
@@ -270,15 +271,13 @@ class CodeGenerator: public AstVisitor {
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-static const int kUnknownIntValue = -1;
// If the name is an inline runtime function call return the number of
// expected arguments. Otherwise return -1.
static int InlineRuntimeCallArgumentsCount(Handle<String> name);
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
-return FLAG_debug_code ? 27 : 13;
+return FLAG_debug_code ? 32 : 13;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
@@ -420,7 +419,8 @@ class CodeGenerator: public AstVisitor {
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
GenerateInlineSmi inline_smi,
-int known_rhs = kUnknownIntValue);
+int known_rhs =
+GenericBinaryOpStub::kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
@@ -455,9 +455,6 @@ class CodeGenerator: public AstVisitor {
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
-static bool PatchInlineRuntimeEntry(Handle<String> name,
-const InlineRuntimeLUT& new_entry,
-InlineRuntimeLUT* old_entry);
static Handle<Code> ComputeLazyCompile(int argc);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@@ -528,6 +525,8 @@ class CodeGenerator: public AstVisitor {
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+void GenerateRegExpCloneResult(ZoneList<Expression*>* args);
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
@@ -548,6 +547,9 @@ class CodeGenerator: public AstVisitor {
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -610,510 +612,6 @@ class CodeGenerator: public AstVisitor {
};
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
explicit TranscendentalCacheStub(TranscendentalCache::Type type)
: type_(type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
Runtime::FunctionId RuntimeFunction();
};
class ToBooleanStub: public CodeStub {
public:
explicit ToBooleanStub(Register tos) : tos_(tos) { }
void Generate(MacroAssembler* masm);
private:
Register tos_;
Major MajorKey() { return ToBoolean; }
int MinorKey() { return tos_.code(); }
};
class GenericBinaryOpStub : public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
Register lhs,
Register rhs,
int constant_rhs = CodeGenerator::kUnknownIntValue)
: op_(op),
mode_(mode),
lhs_(lhs),
rhs_(rhs),
constant_rhs_(constant_rhs),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
runtime_operands_type_(BinaryOpIC::DEFAULT),
name_(NULL) { }
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
lhs_(LhsRegister(RegisterBits::decode(key))),
rhs_(RhsRegister(RegisterBits::decode(key))),
constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
runtime_operands_type_(type_info),
name_(NULL) { }
private:
Token::Value op_;
OverwriteMode mode_;
Register lhs_;
Register rhs_;
int constant_rhs_;
bool specialized_on_rhs_;
BinaryOpIC::TypeInfo runtime_operands_type_;
char* name_;
static const int kMaxKnownRhs = 0x40000000;
static const int kKnownRhsKeyBits = 6;
// Minor key encoding in 17 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 6> {};
class TypeInfoBits: public BitField<int, 8, 2> {};
class RegisterBits: public BitField<bool, 10, 1> {};
class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
(lhs_.is(r1) && rhs_.is(r0)));
// Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| KnownIntBits::encode(MinorKeyForKnownInt())
| TypeInfoBits::encode(runtime_operands_type_)
| RegisterBits::encode(lhs_.is(r0));
}
void Generate(MacroAssembler* masm);
void HandleNonSmiBitwiseOp(MacroAssembler* masm,
Register lhs,
Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
Register rhs,
const Builtins::JavaScript& builtin);
void GenerateTypeTransition(MacroAssembler* masm);
static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
if (op == Token::MOD) {
if (constant_rhs <= 1) return false;
if (constant_rhs <= 10) return true;
if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
return false;
}
return false;
}
int MinorKeyForKnownInt() {
if (!specialized_on_rhs_) return 0;
if (constant_rhs_ <= 10) return constant_rhs_ + 1;
ASSERT(IsPowerOf2(constant_rhs_));
int key = 12;
int d = constant_rhs_;
while ((d & 1) == 0) {
key++;
d >>= 1;
}
ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
return key;
}
int KnownBitsForMinorKey(int key) {
if (!key) return 0;
if (key <= 11) return key - 1;
int d = 1;
while (key != 12) {
key--;
d <<= 1;
}
return d;
}
Register LhsRegister(bool lhs_is_r0) {
return lhs_is_r0 ? r0 : r1;
}
Register RhsRegister(bool lhs_is_r0) {
return lhs_is_r0 ? r1 : r0;
}
bool ShouldGenerateSmiCode() {
return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS;
}
bool ShouldGenerateFPCode() {
return runtime_operands_type_ != BinaryOpIC::STRINGS;
}
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(runtime_operands_type_);
}
const char* GetName();
#ifdef DEBUG
void Print() {
if (!specialized_on_rhs_) {
PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
} else {
PrintF("GenericBinaryOpStub (%s by %d)\n",
Token::String(op_),
constant_rhs_);
}
}
#endif
};
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersLong adds too much
// overhead. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii);
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharactersLong(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
int flags);
// Probe the symbol table for a two character string. If the string is
// not found by probing a jump to the label not_found is performed. This jump
// does not guarantee that the string is not in the symbol table. If the
// string is found the code falls through with the string in register r0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain halfword with low and high bytes equal to
// initial contents of c1 and c2 respectively.
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Register scratch5,
Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
};
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
}
private:
Major MajorKey() { return StringAdd; }
int MinorKey() { return string_check_ ? 0 : 1; }
void Generate(MacroAssembler* masm);
// Should the stub check whether arguments are strings?
bool string_check_;
};
class SubStringStub: public CodeStub {
public:
SubStringStub() {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public CodeStub {
public:
StringCompareStub() { }
// Compare two flat ASCII strings and returns result in r0.
// Does not use the stack.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
private:
Major MajorKey() { return StringCompare; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
// This stub can do a fast mod operation without using fp.
// It is tail called from the GenericBinaryOpStub and it always
// returns an answer. It never causes GC so it doesn't need a real frame.
//
// The inputs are always positive Smis. This is never called
// where the denominator is a power of 2. We handle that separately.
//
// If we consider the denominator as an odd number multiplied by a power of 2,
// then:
// * The exponent (power of 2) is in the shift_distance register.
// * The odd number is in the odd_number register. It is always in the range
// of 3 to 25.
// * The bits from the numerator that are to be copied to the answer (there are
// shift_distance of them) are in the mask_bits register.
// * The other bits of the numerator have been shifted down and are in the lhs
// register.
class IntegerModStub : public CodeStub {
public:
IntegerModStub(Register result,
Register shift_distance,
Register odd_number,
Register mask_bits,
Register lhs,
Register scratch)
: result_(result),
shift_distance_(shift_distance),
odd_number_(odd_number),
mask_bits_(mask_bits),
lhs_(lhs),
scratch_(scratch) {
// We don't code these in the minor key, so they should always be the same.
// We don't really want to fix that since this stub is rather large and we
// don't want many copies of it.
ASSERT(shift_distance_.is(r9));
ASSERT(odd_number_.is(r4));
ASSERT(mask_bits_.is(r3));
ASSERT(scratch_.is(r5));
}
private:
Register result_;
Register shift_distance_;
Register odd_number_;
Register mask_bits_;
Register lhs_;
Register scratch_;
// Minor key encoding in 16 bits.
class ResultRegisterBits: public BitField<int, 0, 4> {};
class LhsRegisterBits: public BitField<int, 4, 4> {};
Major MajorKey() { return IntegerMod; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return ResultRegisterBits::encode(result_.code())
| LhsRegisterBits::encode(lhs_.code());
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "IntegerModStub"; }
// Utility functions.
void DigitSum(MacroAssembler* masm,
Register lhs,
int mask,
int shift,
Label* entry);
void DigitSum(MacroAssembler* masm,
Register lhs,
Register scratch,
int mask,
int shift1,
int shift2,
Label* entry);
void ModGetInRangeBySubtraction(MacroAssembler* masm,
Register lhs,
int shift,
int rhs);
void ModReduce(MacroAssembler* masm,
Register lhs,
int max,
int denominator);
void ModAnswer(MacroAssembler* masm,
Register result,
Register shift_distance,
Register mask_bits,
Register sum_of_digits);
#ifdef DEBUG
void Print() { PrintF("IntegerModStub\n"); }
#endif
};
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public CodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
Register scratch)
: the_int_(the_int),
the_heap_number_(the_heap_number),
scratch_(scratch) { }
private:
Register the_int_;
Register the_heap_number_;
Register scratch_;
// Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {};
Major MajorKey() { return WriteInt32ToHeapNumber; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
| ScratchRegisterBits::encode(scratch_.code());
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
#ifdef DEBUG
void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
#endif
};
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache the code jumps to
// the label not_found with only the content of register object unchanged.
static void GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
Register scratch3,
bool object_is_smi,
Label* not_found);
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "NumberToStringStub"; }
#ifdef DEBUG
void Print() {
PrintF("NumberToStringStub\n");
}
#endif
};
class RecordWriteStub : public CodeStub {
public:
RecordWriteStub(Register object, Register offset, Register scratch)
: object_(object), offset_(offset), scratch_(scratch) { }
void Generate(MacroAssembler* masm);
private:
Register object_;
Register offset_;
Register scratch_;
#ifdef DEBUG
void Print() {
PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
" (scratch reg %d)\n",
object_.code(), offset_.code(), scratch_.code());
}
#endif
// Minor key encoding in 12 bits. 4 bits for each of the three
// registers (object, offset and scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class OffsetBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
int MinorKey() {
// Encode the registers.
return ObjectBits::encode(object_.code()) |
OffsetBits::encode(offset_.code()) |
ScratchBits::encode(scratch_.code());
}
};
} }  // namespace v8::internal
#endif  // V8_ARM_CODEGEN_ARM_H_

27
deps/v8/src/arm/constants-arm.h

@@ -194,6 +194,13 @@ enum SoftwareInterruptCodes {
};
+// Type of VFP register. Determines register encoding.
+enum VFPRegPrecision {
+kSinglePrecision = 0,
+kDoublePrecision = 1
+};
typedef int32_t instr_t;
@@ -269,6 +276,15 @@ class Instr {
inline int VCField() const { return Bit(8); }
inline int VAField() const { return Bits(23, 21); }
inline int VBField() const { return Bits(6, 5); }
+inline int VFPNRegCode(VFPRegPrecision pre) {
+return VFPGlueRegCode(pre, 16, 7);
+}
+inline int VFPMRegCode(VFPRegPrecision pre) {
+return VFPGlueRegCode(pre, 0, 5);
+}
+inline int VFPDRegCode(VFPRegPrecision pre) {
+return VFPGlueRegCode(pre, 12, 22);
+}
// Fields used in Data processing instructions
inline Opcode OpcodeField() const {
@@ -343,6 +359,17 @@ class Instr {
static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
private:
+// Join split register codes, depending on single or double precision.
+// four_bit is the position of the least-significant bit of the four
+// bit specifier. one_bit is the position of the additional single bit
+// specifier.
+inline int VFPGlueRegCode(VFPRegPrecision pre, int four_bit, int one_bit) {
+if (pre == kSinglePrecision) {
+return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
+}
+return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
+}
// We need to prevent the creation of instances of class Instr.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
};
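VFPGlueRegCode is the inverse of the assembler's split_code. As a sanity check, here is a standalone sketch decoding the Vd field of an instruction word (illustrative only; the free functions Bits and Bit stand in for the Instr member helpers):

#include <cassert>
#include <cstdint>

static int Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((1 << (hi - lo + 1)) - 1);
}
static int Bit(uint32_t instr, int n) { return (instr >> n) & 1; }

// Mirrors VFPDRegCode: Vd lives in bits 15-12, its extra bit D in bit 22.
// Single precision joins them as Vd:D, double precision as D:Vd.
int DecodeVFPDRegCode(uint32_t instr, bool single_precision) {
  if (single_precision) return (Bits(instr, 15, 12) << 1) | Bit(instr, 22);
  return (Bit(instr, 22) << 4) | Bits(instr, 15, 12);
}

int main() {
  // Encode s13 the way the assembler does: Vd = 6, D = 1.
  uint32_t instr = (6u << 12) | (1u << 22);
  assert(DecodeVFPDRegCode(instr, true) == 13);
  return 0;
}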

97
deps/v8/src/arm/debug-arm.cc

@@ -130,41 +130,58 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
-RegList pointer_regs) {
-// Save the content of all general purpose registers in memory. This copy in
-// memory is later pushed onto the JS expression stack for the fake JS frame
-// generated and also to the C frame generated on top of that. In the JS
-// frame ONLY the registers containing pointers will be pushed on the
-// expression stack. This causes the GC to update these pointers so that
-// they will have the correct value when returning from the debugger.
-__ SaveRegistersToMemory(kJSCallerSaved);
+RegList object_regs,
+RegList non_object_regs) {
__ EnterInternalFrame();
-// Store the registers containing object pointers on the expression stack to
-// make sure that these are correctly updated during GC.
-// Use sp as base to push.
-__ CopyRegistersFromMemoryToStack(sp, pointer_regs);
+// Store the registers containing live values on the expression stack to
+// make sure that these are correctly updated during GC. Non object values
+// are stored as a smi causing it to be untouched by GC.
+ASSERT((object_regs & ~kJSCallerSaved) == 0);
+ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+ASSERT((object_regs & non_object_regs) == 0);
+if ((object_regs | non_object_regs) != 0) {
+for (int i = 0; i < kNumJSCallerSaved; i++) {
+int r = JSCallerSavedCode(i);
+Register reg = { r };
+if ((non_object_regs & (1 << r)) != 0) {
+if (FLAG_debug_code) {
+__ tst(reg, Operand(0xc0000000));
+__ Assert(eq, "Unable to encode value as smi");
+}
+__ mov(reg, Operand(reg, LSL, kSmiTagSize));
+}
+}
+__ stm(db_w, sp, object_regs | non_object_regs);
+}
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
-__ mov(r0, Operand(0));  // no arguments
+__ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
__ mov(r1, Operand(ExternalReference::debug_break()));
-CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
+CEntryStub ceb(1);
__ CallStub(&ceb);
-// Restore the register values containing object pointers from the expression
-// stack in the reverse order as they where pushed.
-// Use sp as base to pop.
-__ CopyRegistersFromStackToMemory(sp, r3, pointer_regs);
+// Restore the register values from the expression stack.
+if ((object_regs | non_object_regs) != 0) {
+__ ldm(ia_w, sp, object_regs | non_object_regs);
+for (int i = 0; i < kNumJSCallerSaved; i++) {
+int r = JSCallerSavedCode(i);
+Register reg = { r };
+if ((non_object_regs & (1 << r)) != 0) {
+__ mov(reg, Operand(reg, LSR, kSmiTagSize));
+}
+if (FLAG_debug_code &&
+(((object_regs | non_object_regs) & (1 << r)) == 0)) {
+__ mov(reg, Operand(kDebugZapValue));
+}
+}
+}
__ LeaveInternalFrame();
-// Finally restore all registers.
-__ RestoreRegistersFromMemory(kJSCallerSaved);
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
@@ -184,7 +201,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// -----------------------------------
// Registers r0 and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
-Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0);
}
@@ -198,7 +215,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// -----------------------------------
// Registers r0, r1, and r2 contain objects that need to be pushed on the
// expression stack of the fake JS frame.
-Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
+Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
}
@@ -206,9 +223,8 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
-// -- sp[0] : key
-// -- sp[4] : receiver
-Generate_DebugBreakCallHelper(masm, r0.bit());
+// -- r1 : receiver
+Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0);
}
@@ -218,31 +234,24 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
-Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
+Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC call (from ic-arm.cc)
// ----------- S t a t e -------------
-// -- r0: number of arguments
-// -- r1: receiver
-// -- lr: return address
+// -- r2 : name
// -----------------------------------
-// Register r1 contains an object that needs to be pushed on the expression
-// stack of the fake JS frame. r0 is the actual number of arguments not
-// encoded as a smi, therefore it cannot be on the expression stack of the
-// fake JS frame as it can easily be an invalid pointer (e.g. 1). r0 will be
-// pushed on the stack of the C frame and restored from there.
-Generate_DebugBreakCallHelper(masm, r1.bit());
+Generate_DebugBreakCallHelper(masm, r2.bit(), 0);
}
void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
-// In places other than IC call sites it is expected that r0 is TOS which
-// is an object - this is not generally the case so this should be used with
-// care.
-Generate_DebugBreakCallHelper(masm, r0.bit());
+// Calling convention for construct call (from builtins-arm.cc)
+// -- r0 : number of arguments (not smi)
+// -- r1 : constructor function
+Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
}
@@ -250,7 +259,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
// care.
-Generate_DebugBreakCallHelper(masm, r0.bit());
+Generate_DebugBreakCallHelper(masm, r0.bit(), 0);
}
@@ -258,7 +267,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
// ----------- S t a t e -------------
// No registers used on entry.
// -----------------------------------
-Generate_DebugBreakCallHelper(masm, 0);
+Generate_DebugBreakCallHelper(masm, 0, 0);
}
@@ -280,7 +289,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
// In the places where a debug break slot is inserted no registers can contain
// object pointers.
-Generate_DebugBreakCallHelper(masm, 0);
+Generate_DebugBreakCallHelper(masm, 0, 0);
}
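The helper's smi trick is worth spelling out: a raw integer shifted left by one has a zero low bit, which is exactly the smi tag, so the GC treats the word as a number and leaves it alone; the 0xc0000000 test guarantees the value survives the shift. A standalone sketch of the encoding (illustrative, not V8 internals):

#include <cassert>
#include <cstdint>

// Mirrors the helper's encode/decode of raw (non-pointer) register values.
const int kSmiTagSize = 1;

uint32_t EncodeAsSmi(uint32_t raw) {
  assert((raw & 0xc0000000) == 0);  // same check as the FLAG_debug_code tst
  return raw << kSmiTagSize;
}

uint32_t DecodeSmi(uint32_t smi) { return smi >> kSmiTagSize; }

int main() {
  uint32_t argc = 3;  // e.g. r0 in GenerateConstructCallDebugBreak
  uint32_t s = EncodeAsSmi(argc);
  assert((s & 1) == 0);          // tagged like a smi, not a heap pointer
  assert(DecodeSmi(s) == argc);  // round-trips across the debug break
  return 0;
}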

22
deps/v8/src/arm/disasm-arm.cc

@@ -463,7 +463,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
ASSERT((width + lsb) <= 32);
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-"#%d",
+"%d",
instr->Bits(width + lsb - 1, lsb));
return 8;
}
@@ -931,7 +931,7 @@ void Decoder::DecodeType3(Instr* instr) {
if (instr->HasW()) {
ASSERT(instr->Bits(5, 4) == 0x1);
if (instr->Bit(22) == 0x1) {
-Format(instr, "usat 'rd, 'imm05@16, 'rm'shift_sat");
+Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
} else {
UNREACHABLE();  // SSAT.
}
@@ -1269,17 +1269,19 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
if (instr->CoprocessorField() == 0xA) {
switch (instr->OpcodeField()) {
case 0x8:
+case 0xA:
if (instr->HasL()) {
-Format(instr, "vldr'cond 'Sd, ['rn - 4*'off8]");
+Format(instr, "vldr'cond 'Sd, ['rn - 4*'imm08@00]");
} else {
-Format(instr, "vstr'cond 'Sd, ['rn - 4*'off8]");
+Format(instr, "vstr'cond 'Sd, ['rn - 4*'imm08@00]");
}
break;
case 0xC:
+case 0xE:
if (instr->HasL()) {
-Format(instr, "vldr'cond 'Sd, ['rn + 4*'off8]");
+Format(instr, "vldr'cond 'Sd, ['rn + 4*'imm08@00]");
} else {
-Format(instr, "vstr'cond 'Sd, ['rn + 4*'off8]");
+Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
}
break;
default:
@@ -1300,16 +1302,16 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
break;
case 0x8:
if (instr->HasL()) {
-Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]");
+Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
} else {
-Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]");
+Format(instr, "vstr'cond 'Dd, ['rn - 4*'imm08@00]");
}
break;
case 0xC:
if (instr->HasL()) {
-Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]");
+Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
} else {
-Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]");
+Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
}
break;
default:

73
deps/v8/src/arm/frames-arm.cc

@@ -37,87 +37,20 @@ namespace v8 {
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
-ASSERT(state->fp != NULL);
-if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
-return ARGUMENTS_ADAPTOR;
-}
-// The marker and function offsets overlap. If the marker isn't a
-// smi then the frame is a JavaScript frame -- and the marker is
-// really the function.
-const int offset = StandardFrameConstants::kMarkerOffset;
-Object* marker = Memory::Object_at(state->fp + offset);
-if (!marker->IsSmi()) return JAVA_SCRIPT;
-return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
-Address sp = fp + ExitFrameConstants::kSPDisplacement;
-const int offset = ExitFrameConstants::kCodeOffset;
-Object* code = Memory::Object_at(fp + offset);
-bool is_debug_exit = code->IsSmi();
-if (is_debug_exit) {
-sp -= kNumJSCallerSaved * kPointerSize;
-}
+Address sp = fp + ExitFrameConstants::kSPOffset;
// Fill in the state.
state->sp = sp;
state->fp = fp;
state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-ASSERT(*state->pc_address != NULL);
return EXIT;
}
-void ExitFrame::Iterate(ObjectVisitor* v) const {
-v->VisitPointer(&code_slot());
-// The arguments are traversed as part of the expression stack of
-// the calling frame.
-}
-int JavaScriptFrame::GetProvidedParametersCount() const {
-return ComputeParametersCount();
-}
-Address JavaScriptFrame::GetCallerStackPointer() const {
-int arguments;
-if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
-// The arguments for cooked frames are traversed as if they were
-// expression stack elements of the calling frame. The reason for
-// this rather strange decision is that we cannot access the
-// function during mark-compact GCs when the stack is cooked.
-// In fact accessing heap objects (like function->shared() below)
-// at all during GC is problematic.
-arguments = 0;
-} else {
-// Compute the number of arguments by getting the number of formal
-// parameters of the function. We must remember to take the
-// receiver into account (+1).
-JSFunction* function = JSFunction::cast(this->function());
-arguments = function->shared()->formal_parameter_count() + 1;
-}
-const int offset = StandardFrameConstants::kCallerSPOffset;
-return fp() + offset + (arguments * kPointerSize);
-}
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
-const int arguments = Smi::cast(GetExpression(0))->value();
-const int offset = StandardFrameConstants::kCallerSPOffset;
-return fp() + offset + (arguments + 1) * kPointerSize;
-}
-Address InternalFrame::GetCallerStackPointer() const {
-// Internal frames have no arguments. The stack pointer of the
-// caller is at a fixed offset from the frame pointer.
-return fp() + StandardFrameConstants::kCallerSPOffset;
-}
} }  // namespace v8::internal
#endif  // V8_TARGET_ARCH_ARM

5
deps/v8/src/arm/frames-arm.h

@@ -96,11 +96,8 @@ class EntryFrameConstants : public AllStatic {
 class ExitFrameConstants : public AllStatic {
  public:
-  // Exit frames have a debug marker on the stack.
-  static const int kSPDisplacement = -1 * kPointerSize;
-
-  // The debug marker is just above the frame pointer.
   static const int kCodeOffset = -1 * kPointerSize;
+  static const int kSPOffset = -1 * kPointerSize;

   static const int kSavedRegistersOffset = 0 * kPointerSize;
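With the debug marker gone, the exit frame layout reduces to simple fixed offsets from fp. A rough sketch (assumed 32-bit layout, not V8 code) of the pointer arithmetic the simplified GetStateForFramePointer above performs: the saved sp sits one word below fp, and the return pc one word below that.

    #include <cstdint>

    typedef uint32_t Address;
    const int kPointerSize = 4;

    struct State { Address sp, fp, pc_slot; };

    State StateForExitFrame(Address fp) {
      State s;
      s.fp = fp;
      s.sp = fp - kPointerSize;         // ExitFrameConstants::kSPOffset
      s.pc_slot = s.sp - kPointerSize;  // pc_address = sp - 1 * kPointerSize
      return s;
    }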

784
deps/v8/src/arm/full-codegen-arm.cc

File diff suppressed because it is too large

111
deps/v8/src/arm/ic-arm.cc

@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_ARM)

 #include "assembler-arm.h"
-#include "codegen.h"
+#include "code-stubs.h"
 #include "codegen-inl.h"
 #include "disasm.h"
 #include "ic-inl.h"
@@ -414,17 +414,17 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
 // Falls through for regular JS object.
 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                            Register receiver,
-                                           Register scratch1,
-                                           Register scratch2,
+                                           Register map,
+                                           Register scratch,
                                            int interceptor_bit,
                                            Label* slow) {
   // Check that the object isn't a smi.
   __ BranchOnSmi(receiver, slow);
   // Get the map of the receiver.
-  __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check bit field.
-  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
-  __ tst(scratch2,
+  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ tst(scratch,
          Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
   __ b(nz, slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
@@ -432,13 +432,14 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
   // we enter the runtime system to make sure that indexing into string
   // objects work as intended.
   ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ cmp(scratch1, Operand(JS_OBJECT_TYPE));
+  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
   __ b(lt, slow);
 }

 // Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
 static void GenerateFastArrayLoad(MacroAssembler* masm,
                                   Register receiver,
                                   Register key,
@@ -471,11 +472,15 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
   // scratch2 - used to hold the loaded value.

   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check that the object is in fast mode (not dictionary).
-  __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(scratch1, ip);
-  __ b(ne, not_fast_array);
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode and writable.
+    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+    __ cmp(scratch1, ip);
+    __ b(ne, not_fast_array);
+  } else {
+    __ AssertFastElements(elements);
+  }
   // Check that the key (index) is within bounds.
   __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ cmp(key, Operand(scratch1));
@@ -522,32 +527,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
 }

-// Picks out an array index from the hash field.
-static void GenerateIndexFromHash(MacroAssembler* masm,
-                                  Register key,
-                                  Register hash) {
-  // Register use:
-  //   key - holds the overwritten key on exit.
-  //   hash - holds the key's hash. Clobbered.
-
-  // If the hash field contains an array index pick it out. The assert checks
-  // that the constants for the maximum number of digits for an array index
-  // cached in the hash field and the number of bits reserved for it does not
-  // conflict.
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
-  // the low kHashShift bits.
-  ASSERT(String::kHashShift >= kSmiTagSize);
-  // Here we actually clobber the key which will be used if calling into
-  // runtime later. However as the new key is the numeric value of a string key
-  // there is no difference in using either key.
-  ASSERT(String::kHashShift >= kSmiTagSize);
-  __ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
-  __ mov(key, Operand(hash, LSL, kSmiTagSize));
-}
-
 // Defined in ic.cc.
 Object* CallIC_Miss(Arguments args);
@@ -847,7 +826,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   GenerateMiss(masm, argc);

   __ bind(&index_string);
-  GenerateIndexFromHash(masm, r2, r3);
+  __ IndexFromHash(r3, r2);
   // Now jump to the place where smi keys are handled.
   __ jmp(&index_smi);
 }
@@ -1120,16 +1099,23 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   GenerateKeyedLoadReceiverCheck(
       masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);

+  // Check the "has fast elements" bit in the receiver's map which is
+  // now in r2.
+  __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
+  __ tst(r3, Operand(1 << Map::kHasFastElements));
+  __ b(eq, &check_pixel_array);
+
   GenerateFastArrayLoad(
-      masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
+      masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
   __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
   __ Ret();

   // Check whether the elements is a pixel array.
   // r0: key
-  // r3: elements map
-  // r4: elements
+  // r1: receiver
   __ bind(&check_pixel_array);
+  __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
   __ cmp(r3, ip);
   __ b(ne, &check_number_dictionary);
@@ -1237,7 +1223,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ Ret();

   __ bind(&index_string);
-  GenerateIndexFromHash(masm, key, r3);
+  __ IndexFromHash(r3, key);
   // Now jump to the place where smi keys are handled.
   __ jmp(&index_smi);
 }
@@ -1306,7 +1292,7 @@ static void GenerateUInt2Double(MacroAssembler* masm,
     __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
     __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
   } else {
-    __ mov(loword, Operand(0));
+    __ mov(loword, Operand(0, RelocInfo::NONE));
     __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
   }
@@ -1690,7 +1676,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // Object case: Check key against length in the elements array.
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check that the object is in fast mode (not dictionary).
+  // Check that the object is in fast mode and writable.
   __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   __ cmp(r4, ip);
@@ -1748,8 +1734,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ b(&fast);

   // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode; if it is the
-  // length is always a smi.
+  // array. Check that the array is in fast mode (and writable); if it
+  // is the length is always a smi.
   __ bind(&array);
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
@@ -1779,10 +1765,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
 }

-// Convert int passed in register ival to IEE 754 single precision
-// floating point value and store it into register fval.
+// Convert and store int passed in register ival to IEEE 754 single precision
+// floating point value at memory location (dst + 4 * wordoffset)
 // If VFP3 is available use it for conversion.
-static void ConvertIntToFloat(MacroAssembler* masm,
+static void StoreIntAsFloat(MacroAssembler* masm,
+                            Register dst,
+                            Register wordoffset,
                             Register ival,
                             Register fval,
                             Register scratch1,
@@ -1790,8 +1778,9 @@ static void ConvertIntToFloat(MacroAssembler* masm,
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ vmov(s0, ival);
+    __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
     __ vcvt_f32_s32(s0, s0);
-    __ vmov(fval, s0);
+    __ vstr(s0, scratch1, 0);
   } else {
     Label not_special, done;
     // Move sign bit from source to destination. This works because the sign
@@ -1801,7 +1790,7 @@ static void ConvertIntToFloat(MacroAssembler* masm,
     __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
     // Negate value if it is negative.
-    __ rsb(ival, ival, Operand(0), LeaveCC, ne);
+    __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);

     // We have -1, 0 or 1, which we treat specially. Register ival contains
     // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -1841,6 +1830,7 @@ static void ConvertIntToFloat(MacroAssembler* masm,
            Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));

     __ bind(&done);
+    __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
   }
 }
@@ -1935,9 +1925,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
       __ str(r5, MemOperand(r3, r4, LSL, 2));
       break;
     case kExternalFloatArray:
-      // Need to perform int-to-float conversion.
-      ConvertIntToFloat(masm, r5, r6, r7, r9);
-      __ str(r6, MemOperand(r3, r4, LSL, 2));
+      // Perform int-to-float conversion and store to memory.
+      StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
       break;
     default:
       UNREACHABLE();
@@ -1971,9 +1960,9 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
       // include -kHeapObjectTag into it.
       __ sub(r5, r0, Operand(kHeapObjectTag));
       __ vldr(d0, r5, HeapNumber::kValueOffset);
+      __ add(r5, r3, Operand(r4, LSL, 2));
       __ vcvt_f32_f64(s0, d0);
-      __ vmov(r5, s0);
-      __ str(r5, MemOperand(r3, r4, LSL, 2));
+      __ vstr(s0, r5, 0);
     } else {
       // Need to perform float-to-int conversion.
       // Test for NaN or infinity (both give zero).
@@ -2086,18 +2075,18 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
       // and infinities. All these should be converted to 0.
       __ mov(r7, Operand(HeapNumber::kExponentMask));
       __ and_(r9, r5, Operand(r7), SetCC);
-      __ mov(r5, Operand(0), LeaveCC, eq);
+      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
       __ b(eq, &done);

       __ teq(r9, Operand(r7));
-      __ mov(r5, Operand(0), LeaveCC, eq);
+      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
       __ b(eq, &done);

       // Unbias exponent.
       __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
       __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
       // If exponent is negative than result is 0.
-      __ mov(r5, Operand(0), LeaveCC, mi);
+      __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
       __ b(mi, &done);

       // If exponent is too big than result is minimal value.
@@ -2113,14 +2102,14 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
       __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
       __ b(pl, &sign);

-      __ rsb(r9, r9, Operand(0));
+      __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
       __ mov(r5, Operand(r5, LSL, r9));
       __ rsb(r9, r9, Operand(meaningfull_bits));
       __ orr(r5, r5, Operand(r6, LSR, r9));

       __ bind(&sign);
-      __ teq(r7, Operand(0));
-      __ rsb(r5, r5, Operand(0), LeaveCC, ne);
+      __ teq(r7, Operand(0, RelocInfo::NONE));
+      __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);

       __ bind(&done);
       switch (array_type) {
@@ -2217,6 +2206,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   __ b(ne, &miss);

   // Check that elements are FixedArray.
+  // We rely on StoreIC_ArrayLength below to deal with all types of
+  // fast elements (including COW).
   __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
   __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
   __ b(ne, &miss);
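For reference, a plain C++ sketch (an assumption-level illustration, not V8 code) of what the non-VFP path of the renamed StoreIntAsFloat computes before the final str: the IEEE 754 single-precision bit pattern of a 32-bit signed integer, built from the sign bit, the biased exponent and the top 23 mantissa bits (truncating, as the stub does).

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    uint32_t IntToFloatBits(int32_t ival) {
      if (ival == 0) return 0;
      uint32_t sign = (ival < 0) ? 0x80000000u : 0u;  // kBinary32SignMask
      uint32_t abs = (ival < 0) ? 0u - (uint32_t)ival : (uint32_t)ival;
      int zeros = __builtin_clz(abs);  // the stub's CountLeadingZeros
      int exponent = 31 - zeros;       // position of the highest set bit
      // Shift up so the implicit leading 1 lands on bit 32, then keep the
      // next 23 bits as the fraction field.
      uint64_t shifted = (uint64_t)abs << (zeros + 1);
      uint32_t fraction = (uint32_t)shifted >> 9;
      return sign | (uint32_t)(exponent + 127) << 23 | fraction;
    }

    int main() {
      float f;
      uint32_t bits = IntToFloatBits(-42);
      std::memcpy(&f, &bits, sizeof(f));
      assert(f == -42.0f);
    }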

263
deps/v8/src/arm/macro-assembler-arm.cc

@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+#include <limits.h>  // For LONG_MIN, LONG_MAX.
+
 #include "v8.h"

 #if defined(V8_TARGET_ARCH_ARM)
@@ -224,7 +226,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
   }
   int32_t immediate = src2.immediate();
   if (immediate == 0) {
-    mov(dst, Operand(0), LeaveCC, cond);
+    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
     return;
   }
   if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) {
@@ -303,7 +305,7 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
     }
     tst(dst, Operand(~satval));
     b(eq, &done);
-    mov(dst, Operand(0), LeaveCC, mi);  // 0 if negative.
+    mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi);  // 0 if negative.
     mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
     bind(&done);
   } else {
@@ -513,7 +515,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
 }

-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
+void MacroAssembler::EnterExitFrame() {
   // Compute the argv pointer and keep it in a callee-saved register.
   // r0 is argc.
   add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -556,16 +558,6 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
   // Setup argc and the builtin function in callee-saved registers.
   mov(r4, Operand(r0));
   mov(r5, Operand(r1));
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Save the state of all registers to the stack from the memory
-  // location. This is needed to allow nested break points.
-  if (mode == ExitFrame::MODE_DEBUG) {
-    // Use sp as base to push.
-    CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
-  }
-#endif
 }
@@ -600,21 +592,9 @@ int MacroAssembler::ActivationFrameAlignment() {
 }

-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Restore the memory copy of the registers by digging them out from
-  // the stack. This is needed to allow nested break points.
-  if (mode == ExitFrame::MODE_DEBUG) {
-    // This code intentionally clobbers r2 and r3.
-    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
-    const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
-    add(r3, fp, Operand(kOffset));
-    CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
-  }
-#endif
-
+void MacroAssembler::LeaveExitFrame() {
   // Clear top frame.
-  mov(r3, Operand(0));
+  mov(r3, Operand(0, RelocInfo::NONE));
   mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
   str(r3, MemOperand(ip));
@@ -757,8 +737,7 @@ void MacroAssembler::InvokeFunction(Register fun,
       SharedFunctionInfo::kFormalParameterCountOffset));
   mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
   ldr(code_reg,
-      MemOperand(r1, JSFunction::kCodeOffset - kHeapObjectTag));
-  add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));

   ParameterCount expected(expected_reg);
   InvokeCode(code_reg, expected, actual, flag);
@@ -780,69 +759,11 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
   InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
 }

-#ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::SaveRegistersToMemory(RegList regs) {
-  ASSERT((regs & ~kJSCallerSaved) == 0);
-  // Copy the content of registers to memory location.
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    if ((regs & (1 << r)) != 0) {
-      Register reg = { r };
-      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
-      str(reg, MemOperand(ip));
-    }
-  }
-}
-
-void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
-  ASSERT((regs & ~kJSCallerSaved) == 0);
-  // Copy the content of memory location to registers.
-  for (int i = kNumJSCallerSaved; --i >= 0;) {
-    int r = JSCallerSavedCode(i);
-    if ((regs & (1 << r)) != 0) {
-      Register reg = { r };
-      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
-      ldr(reg, MemOperand(ip));
-    }
-  }
-}
-
-void MacroAssembler::CopyRegistersFromMemoryToStack(Register base,
-                                                    RegList regs) {
-  ASSERT((regs & ~kJSCallerSaved) == 0);
-  // Copy the content of the memory location to the stack and adjust base.
-  for (int i = kNumJSCallerSaved; --i >= 0;) {
-    int r = JSCallerSavedCode(i);
-    if ((regs & (1 << r)) != 0) {
-      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
-      ldr(ip, MemOperand(ip));
-      str(ip, MemOperand(base, 4, NegPreIndex));
-    }
-  }
-}
-
-void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
-                                                    Register scratch,
-                                                    RegList regs) {
-  ASSERT((regs & ~kJSCallerSaved) == 0);
-  // Copy the content of the stack to the memory location and adjust base.
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    if ((regs & (1 << r)) != 0) {
-      mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
-      ldr(scratch, MemOperand(base, 4, PostIndex));
-      str(scratch, MemOperand(ip));
-    }
-  }
-}
-
+#ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
   ASSERT(allow_stub_calls());
-  mov(r0, Operand(0));
+  mov(r0, Operand(0, RelocInfo::NONE));
   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
   CEntryStub ces(1);
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
@@ -878,7 +799,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
     // The frame pointer does not point to a JS frame so we save NULL
     // for fp. We expect the code throwing an exception to check fp
     // before dereferencing it to restore the context.
-    mov(ip, Operand(0));  // To save a NULL frame pointer.
+    mov(ip, Operand(0, RelocInfo::NONE));  // To save a NULL frame pointer.
     mov(r6, Operand(StackHandler::ENTRY));
     ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
            && StackHandlerConstants::kFPOffset == 2 * kPointerSize
@@ -917,7 +838,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
   ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
   // In debug mode, make sure the lexical context is set.
 #ifdef DEBUG
-  cmp(scratch, Operand(0));
+  cmp(scratch, Operand(0, RelocInfo::NONE));
   Check(ne, "we should not have an empty lexical context");
 #endif
@@ -1338,6 +1259,21 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
 }

+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+  // If the hash field contains an array index pick it out. The assert checks
+  // that the constants for the maximum number of digits for an array index
+  // cached in the hash field and the number of bits reserved for it does not
+  // conflict.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << String::kArrayIndexValueBits));
+  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
+  // the low kHashShift bits.
+  STATIC_ASSERT(kSmiTag == 0);
+  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+  mov(index, Operand(hash, LSL, kSmiTagSize));
+}
+
 void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
                                                        Register outHighReg,
                                                        Register outLowReg) {
@@ -1399,6 +1335,104 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
 }

+// Tries to get a signed int32 out of a double precision floating point heap
+// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
+// 32bits signed integer range.
+void MacroAssembler::ConvertToInt32(Register source,
+                                    Register dest,
+                                    Register scratch,
+                                    Register scratch2,
+                                    Label *not_int32) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    sub(scratch, source, Operand(kHeapObjectTag));
+    vldr(d0, scratch, HeapNumber::kValueOffset);
+    vcvt_s32_f64(s0, d0);
+    vmov(dest, s0);
+    // Signed vcvt instruction will saturate to the minimum (0x80000000) or
+    // maximun (0x7fffffff) signed 32bits integer when the double is out of
+    // range. When substracting one, the minimum signed integer becomes the
+    // maximun signed integer.
+    sub(scratch, dest, Operand(1));
+    cmp(scratch, Operand(LONG_MAX - 1));
+    // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
+    b(ge, not_int32);
+  } else {
+    // This code is faster for doubles that are in the ranges -0x7fffffff to
+    // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
+    // the range of signed int32 values that are not Smis. Jumps to the label
+    // 'not_int32' if the double isn't in the range -0x80000000.0 to
+    // 0x80000000.0 (excluding the endpoints).
+    Label right_exponent, done;
+    // Get exponent word.
+    ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+    // Get exponent alone in scratch2.
+    Ubfx(scratch2,
+         scratch,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+    // Load dest with zero. We use this either for the final shift or
+    // for the answer.
+    mov(dest, Operand(0, RelocInfo::NONE));
+    // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+    // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
+    // the exponent that we are fastest at and also the highest exponent we can
+    // handle here.
+    const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
+    // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
+    // split it up to avoid a constant pool entry. You can't do that in general
+    // for cmp because of the overflow flag, but we know the exponent is in the
+    // range 0-2047 so there is no overflow.
+    int fudge_factor = 0x400;
+    sub(scratch2, scratch2, Operand(fudge_factor));
+    cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
+    // If we have a match of the int32-but-not-Smi exponent then skip some
+    // logic.
+    b(eq, &right_exponent);
+    // If the exponent is higher than that then go to slow case. This catches
+    // numbers that don't fit in a signed int32, infinities and NaNs.
+    b(gt, not_int32);
+    // We know the exponent is smaller than 30 (biased). If it is less than
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+    // it rounds to zero.
+    const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
+    sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
+    // Dest already has a Smi zero.
+    b(lt, &done);
+    // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
+    // get how much to shift down.
+    rsb(dest, scratch2, Operand(30));
+    bind(&right_exponent);
+    // Get the top bits of the mantissa.
+    and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+    // Put back the implicit 1.
+    orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+    // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We just orred in the implicit bit so that took care of one and
+    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+    // distance.
+    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+    mov(scratch2, Operand(scratch2, LSL, shift_distance));
+    // Put sign in zero flag.
+    tst(scratch, Operand(HeapNumber::kSignMask));
+    // Get the second half of the double. For some exponents we don't
+    // actually need this because the bits get shifted out again, but
+    // it's probably slower to test than just to do it.
+    ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+    // Shift down 22 bits to get the last 10 bits.
+    orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+    // Move down according to the exponent.
+    mov(dest, Operand(scratch, LSR, dest));
+    // Fix sign if sign bit was set.
+    rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+    bind(&done);
+  }
+}
+
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
@@ -1490,30 +1524,22 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
 }

-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  ASSERT(!target.is(r1));
+void MacroAssembler::GetBuiltinFunction(Register target,
+                                        Builtins::JavaScript id) {
   // Load the builtins object into target register.
   ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
   // Load the JavaScript builtin function from the builtins object.
-  ldr(r1, FieldMemOperand(target,
-                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+  ldr(target, FieldMemOperand(target,
+                              JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  ASSERT(!target.is(r1));
+  GetBuiltinFunction(r1, id);
   // Load the code entry point from the builtins object.
-  ldr(target, FieldMemOperand(target,
-                              JSBuiltinsObject::OffsetOfCodeWithId(id)));
-  if (FLAG_debug_code) {
-    // Make sure the code objects in the builtins object and in the
-    // builtin function are the same.
-    push(r1);
-    ldr(r1, FieldMemOperand(r1, JSFunction::kCodeOffset));
-    cmp(r1, target);
-    Assert(eq, "Builtin code object changed");
-    pop(r1);
-  }
-  add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
 }
@@ -1567,6 +1593,25 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg,
 }

+void MacroAssembler::AssertFastElements(Register elements) {
+  if (FLAG_debug_code) {
+    ASSERT(!elements.is(ip));
+    Label ok;
+    push(elements);
+    ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+    cmp(elements, ip);
+    b(eq, &ok);
+    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
+    cmp(elements, ip);
+    b(eq, &ok);
+    Abort("JSObject with fast elements map has slow elements");
+    bind(&ok);
+    pop(elements);
+  }
+}
+
 void MacroAssembler::Check(Condition cc, const char* msg) {
   Label L;
   b(cc, &L);
@@ -1773,7 +1818,7 @@ void MacroAssembler::CountLeadingZeros(Register zeros,  // Answer.
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
   clz(zeros, source);  // This instruction is only supported after ARM5.
 #else
-  mov(zeros, Operand(0));
+  mov(zeros, Operand(0, RelocInfo::NONE));
   Move(scratch, source);
   // Top 16.
   tst(scratch, Operand(0xffff0000));
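The VFP path of the new ConvertToInt32 leans on vcvt's saturation behaviour: out-of-range doubles come back as exactly INT32_MIN or INT32_MAX, and subtracting 1 maps both saturated values to numbers >= INT32_MAX - 1 (INT32_MIN - 1 wraps around to INT32_MAX), so a single signed compare rejects them. A small standalone check of that trick (a sketch, not V8 code; the source uses LONG_MAX since long is 32 bits on ARM EABI, INT32_MAX here for portability):

    #include <cassert>
    #include <cstdint>

    bool MightBeSaturated(int32_t dest) {
      int32_t probe = (int32_t)((uint32_t)dest - 1);  // sub(scratch, dest, 1)
      return probe >= INT32_MAX - 1;                  // cmp + b(ge, not_int32)
    }

    int main() {
      assert(MightBeSaturated(INT32_MAX));  // saturated positive -> reject
      assert(MightBeSaturated(INT32_MIN));  // saturated negative -> reject
      assert(!MightBeSaturated(0));
      assert(!MightBeSaturated(-12345));    // ordinary values pass
    }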

31
deps/v8/src/arm/macro-assembler-arm.h

@@ -250,14 +250,14 @@ class MacroAssembler: public Assembler {
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

-  // Enter specific kind of exit frame; either normal or debug mode.
+  // Enter exit frame.
   // Expects the number of arguments in register r0 and
   // the builtin function to call in register r1. Exits with argc in
   // r4, argv in r6, and and the builtin function to call in r5.
-  void EnterExitFrame(ExitFrame::Mode mode);
+  void EnterExitFrame();

   // Leave the current exit frame. Expects the return value in r0.
-  void LeaveExitFrame(ExitFrame::Mode mode);
+  void LeaveExitFrame();

   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
@@ -294,12 +294,6 @@ class MacroAssembler: public Assembler {
   // ---------------------------------------------------------------------------
   // Debugger Support

-  void SaveRegistersToMemory(RegList regs);
-  void RestoreRegistersFromMemory(RegList regs);
-  void CopyRegistersFromMemoryToStack(Register base, RegList regs);
-  void CopyRegistersFromStackToMemory(Register base,
-                                      Register scratch,
-                                      RegList regs);
   void DebugBreak();
 #endif
@@ -475,6 +469,12 @@ class MacroAssembler: public Assembler {
   // occurred.
   void IllegalOperation(int num_arguments);

+  // Picks out an array index from the hash field.
+  // Register use:
+  //   hash - holds the index's hash. Clobbered.
+  //   index - holds the overwritten index on exit.
+  void IndexFromHash(Register hash, Register index);
+
   // Get the number of least significant bits from a register
   void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
@@ -504,6 +504,15 @@ class MacroAssembler: public Assembler {
                               Register scratch1,
                               SwVfpRegister scratch2);

+  // Convert the HeapNumber pointed to by source to a 32bits signed integer
+  // dest. If the HeapNumber does not fit into a 32bits signed integer branch
+  // to not_int32 label.
+  void ConvertToInt32(Register source,
+                      Register dest,
+                      Register scratch,
+                      Register scratch2,
+                      Label *not_int32);
+
   // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
   // instruction. On pre-ARM5 hardware this routine gives the wrong answer
   // for 0 (31 instead of 32). Source and scratch can be the same in which case
@@ -576,6 +585,9 @@ class MacroAssembler: public Assembler {
   // setup the function in r1.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);

+  // Store the function for the given builtin in the target register.
+  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
   Handle<Object> CodeObject() { return code_object_; }
@@ -597,6 +609,7 @@ class MacroAssembler: public Assembler {
   // Use --debug_code to enable.
   void Assert(Condition cc, const char* msg);
   void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+  void AssertFastElements(Register elements);

   // Like Assert(), but always enabled.
   void Check(Condition cc, const char* msg);
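The new IndexFromHash helper (which replaces the per-IC GenerateIndexFromHash above) is just a bitfield extract plus smi tagging. A plain C++ sketch with assumed constants (kHashShift = 2, 24 index bits, smi tag size 1 -- the real values live in objects.h and may differ):

    #include <cstdint>
    #include <cstdio>

    const int kHashShift = 2;             // assumption for illustration
    const int kArrayIndexValueBits = 24;  // assumption for illustration
    const int kSmiTagSize = 1;

    uint32_t IndexFromHash(uint32_t hash_field) {
      // Ubfx: extract the cached array index bits from the hash field...
      uint32_t index = (hash_field >> kHashShift) &
                       ((1u << kArrayIndexValueBits) - 1);
      // ...then smi-tag the result (shift left; smi tag bit is 0).
      return index << kSmiTagSize;
    }

    int main() {
      uint32_t hash_field = 7u << kHashShift;  // key whose cached index is 7
      std::printf("%u\n", IndexFromHash(hash_field) >> kSmiTagSize);  // 7
    }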

18
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -31,12 +31,10 @@
 #include "unicode.h"
 #include "log.h"
-#include "ast.h"
 #include "code-stubs.h"
 #include "regexp-stack.h"
 #include "macro-assembler.h"
 #include "regexp-macro-assembler.h"
-#include "arm/macro-assembler-arm.h"
 #include "arm/regexp-macro-assembler-arm.h"

 namespace v8 {
@@ -191,7 +189,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
   __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   BranchOrBacktrack(eq, &not_at_start);

   // If we did, are we still at the start of the input?
@@ -206,7 +204,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
 void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
   __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   BranchOrBacktrack(eq, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -366,7 +364,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
     __ CallCFunction(function, argument_count);

     // Check if function returned non-zero for success or zero for failure.
-    __ cmp(r0, Operand(0));
+    __ cmp(r0, Operand(0, RelocInfo::NONE));
     BranchOrBacktrack(eq, on_no_match);
     // On success, increment position by length of capture.
     __ add(current_input_offset(), current_input_offset(), Operand(r4));
@@ -636,7 +634,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {

   __ bind(&stack_limit_hit);
   CallCheckStackGuardState(r0);
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   // If returned value is non-zero, we exit with the returned value as result.
   __ b(ne, &exit_label_);
@@ -663,7 +661,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   // string, and store that value in a local variable.
   __ tst(r1, Operand(r1));
   __ mov(r1, Operand(1), LeaveCC, eq);
-  __ mov(r1, Operand(0), LeaveCC, ne);
+  __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
   __ str(r1, MemOperand(frame_pointer(), kAtStart));

   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
@@ -686,7 +684,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   // Load previous char as initial value of current character register.
   Label at_start;
   __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(ne, &at_start);
   LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
   __ jmp(&start_label_);
@@ -753,7 +751,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {

   SafeCallTarget(&check_preempt_label_);
   CallCheckStackGuardState(r0);
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   // If returning non-zero, we should end execution with the given
   // result as return value.
   __ b(ne, &exit_label_);
@@ -780,7 +778,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   __ CallCFunction(grow_stack, num_arguments);
   // If return NULL, we have failed to grow the stack, and
   // must exit with a stack-overflow exception.
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(eq, &exit_with_exception);
   // Otherwise use return value as new stack pointer.
   __ mov(backtrack_stackpointer(), r0);

16
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -242,22 +242,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   Label stack_overflow_label_;
 };

-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
-  RegExpCEntryStub() {}
-  virtual ~RegExpCEntryStub() {}
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return RegExpCEntry; }
-  int MinorKey() { return 0; }
-
-  const char* GetName() { return "RegExpCEntryStub"; }
-};
-
 #endif  // V8_INTERPRETED_REGEXP

89
deps/v8/src/arm/simulator-arm.cc

@@ -727,6 +727,10 @@ void Simulator::set_register(int reg, int32_t value) {
 // the special case of accessing the PC register.
 int32_t Simulator::get_register(int reg) const {
   ASSERT((reg >= 0) && (reg < num_registers));
+  // Stupid code added to avoid bug in GCC.
+  // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+  if (reg >= num_registers) return 0;
+  // End stupid code.
   return registers_[reg] + ((reg == pc) ? Instr::kPCReadOffset : 0);
 }
@@ -1378,7 +1382,9 @@ void Simulator::HandleRList(Instr* instr, bool load) {
     }
     case 3: {
       // Print("ib");
-      UNIMPLEMENTED();
+      start_address = rn_val + 4;
+      end_address = rn_val + (num_regs * 4);
+      rn_val = end_address;
       break;
     }
     default: {
@@ -2275,13 +2281,6 @@ void Simulator::DecodeUnconditional(Instr* instr) {
 }

-// Depending on value of last_bit flag glue register code from vm and m values
-// (where m is expected to be a single bit).
-static int GlueRegCode(bool last_bit, int vm, int m) {
-  return last_bit ? ((vm << 1) | m) : ((m << 4) | vm);
-}
-
 // void Simulator::DecodeTypeVFP(Instr* instr)
 // The Following ARMv7 VFPv instructions are currently supported.
 // vmov :Sn = Rt
@@ -2299,9 +2298,10 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
   ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) );
   ASSERT(instr->Bits(11, 9) == 0x5);

-  int vm = instr->VmField();
-  int vd = instr->VdField();
-  int vn = instr->VnField();
+  // Obtain double precision register codes.
+  int vm = instr->VFPMRegCode(kDoublePrecision);
+  int vd = instr->VFPDRegCode(kDoublePrecision);
+  int vn = instr->VFPNRegCode(kDoublePrecision);

   if (instr->Bit(4) == 0) {
     if (instr->Opc1Field() == 0x7) {
@@ -2309,9 +2309,13 @@ void Simulator::DecodeTypeVFP(Instr* instr) {
       if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
         // vmov register to register.
         if (instr->SzField() == 0x1) {
-          set_d_register_from_double(vd, get_double_from_d_register(vm));
+          int m = instr->VFPMRegCode(kDoublePrecision);
+          int d = instr->VFPDRegCode(kDoublePrecision);
+          set_d_register_from_double(d, get_double_from_d_register(m));
         } else {
-          set_s_register_from_float(vd, get_float_from_s_register(vm));
+          int m = instr->VFPMRegCode(kSinglePrecision);
+          int d = instr->VFPDRegCode(kSinglePrecision);
+          set_s_register_from_float(d, get_float_from_s_register(m));
         }
       } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
         DecodeVCVTBetweenDoubleAndSingle(instr);
@@ -2404,7 +2408,7 @@ void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) {
          (instr->VAField() == 0x0));

   int t = instr->RtField();
-  int n = GlueRegCode(true, instr->VnField(), instr->NField());
+  int n = instr->VFPNRegCode(kSinglePrecision);
   bool to_arm_register = (instr->VLField() == 0x1);

   if (to_arm_register) {
@@ -2421,22 +2425,25 @@ void Simulator::DecodeVCMP(Instr* instr) {
   ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
   ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) &&
          (instr->Opc3Field() & 0x1));

   // Comparison.
-  bool dp_operation = (instr->SzField() == 1);
+  VFPRegPrecision precision = kSinglePrecision;
+  if (instr->SzField() == 1) {
+    precision = kDoublePrecision;
+  }

   if (instr->Bit(7) != 0) {
     // Raising exceptions for quiet NaNs are not supported.
     UNIMPLEMENTED();  // Not used by V8.
   }

-  int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField());
+  int d = instr->VFPDRegCode(precision);
   int m = 0;
   if (instr->Opc2Field() == 0x4) {
-    m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField());
+    m = instr->VFPMRegCode(precision);
   }

-  if (dp_operation) {
+  if (precision == kDoublePrecision) {
     double dd_value = get_double_from_d_register(d);
     double dm_value = 0.0;
     if (instr->Opc2Field() == 0x4) {
@@ -2454,11 +2461,17 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) {
   ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7));
   ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3));

-  bool double_to_single = (instr->SzField() == 1);
-  int dst = GlueRegCode(double_to_single, instr->VdField(), instr->DField());
-  int src = GlueRegCode(!double_to_single, instr->VmField(), instr->MField());
+  VFPRegPrecision dst_precision = kDoublePrecision;
+  VFPRegPrecision src_precision = kSinglePrecision;
+  if (instr->SzField() == 1) {
+    dst_precision = kSinglePrecision;
+    src_precision = kDoublePrecision;
+  }
+  int dst = instr->VFPDRegCode(dst_precision);
+  int src = instr->VFPMRegCode(src_precision);

-  if (double_to_single) {
+  if (dst_precision == kSinglePrecision) {
     double val = get_double_from_d_register(src);
     set_s_register_from_float(dst, static_cast<float>(val));
   } else {
@@ -2474,13 +2487,13 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
          (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1)));

   // Conversion between floating-point and integer.
-  int vd = instr->VdField();
-  int d = instr->DField();
-  int vm = instr->VmField();
-  int m = instr->MField();
-
   bool to_integer = (instr->Bit(18) == 1);
-  bool dp_operation = (instr->SzField() == 1);
+  VFPRegPrecision src_precision = kSinglePrecision;
+  if (instr->SzField() == 1) {
+    src_precision = kDoublePrecision;
+  }

   if (to_integer) {
     bool unsigned_integer = (instr->Bit(16) == 0);
     if (instr->Bit(7) != 1) {
@@ -2488,10 +2501,10 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
       UNIMPLEMENTED();  // Not used by V8.
     }

-    int dst = GlueRegCode(true, vd, d);
-    int src = GlueRegCode(!dp_operation, vm, m);
+    int dst = instr->VFPDRegCode(kSinglePrecision);
+    int src = instr->VFPMRegCode(src_precision);

-    if (dp_operation) {
+    if (src_precision == kDoublePrecision) {
       double val = get_double_from_d_register(src);

       int sint = unsigned_integer ? static_cast<uint32_t>(val) :
@@ -2509,12 +2522,12 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) {
   } else {
     bool unsigned_integer = (instr->Bit(7) == 0);

-    int dst = GlueRegCode(!dp_operation, vd, d);
-    int src = GlueRegCode(true, vm, m);
+    int dst = instr->VFPDRegCode(src_precision);
+    int src = instr->VFPMRegCode(kSinglePrecision);

     int val = get_sinteger_from_s_register(src);

-    if (dp_operation) {
+    if (src_precision == kDoublePrecision) {
       if (unsigned_integer) {
         set_d_register_from_double(dst,
                                    static_cast<double>((uint32_t)val));
@@ -2545,9 +2558,11 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
   if (instr->CoprocessorField() == 0xA) {
     switch (instr->OpcodeField()) {
       case 0x8:
-      case 0xC: {  // Load and store float to memory.
+      case 0xA:
+      case 0xC:
+      case 0xE: {  // Load and store single precision float to memory.
         int rn = instr->RnField();
-        int vd = instr->VdField();
+        int vd = instr->VFPDRegCode(kSinglePrecision);
         int offset = instr->Immed8Field();
         if (!instr->HasU()) {
           offset = -offset;
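The removed GlueRegCode helper makes the precision-aware accessors easy to understand: a single-precision register code appends the extra D/M/N encoding bit as the low bit (giving s0..s31), while a double-precision code in VFPv2 is just the 4-bit field with the extra bit on top (d0..d15). A sketch of that glue logic in plain C++ (semantics taken from the deleted helper above; the new VFP*RegCode accessors presumably compute the same thing):

    #include <cassert>

    enum VFPRegPrecision { kSinglePrecision, kDoublePrecision };

    int RegCode(VFPRegPrecision precision, int four_bits, int one_bit) {
      return precision == kSinglePrecision
                 ? (four_bits << 1) | one_bit   // s0..s31: extra bit is LSB
                 : (one_bit << 4) | four_bits;  // d0..d15: extra bit is MSB
    }

    int main() {
      assert(RegCode(kSinglePrecision, 1, 0) == 2);  // Vd=1, D=0 -> s2
      assert(RegCode(kDoublePrecision, 5, 0) == 5);  // Vd=5, D=0 -> d5
    }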

141
deps/v8/src/arm/stub-cache-arm.cc

@ -1297,11 +1297,6 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check that the maps haven't changed. // Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss); CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
if (object->IsGlobalObject()) {
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush), __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
argc + 1, argc + 1,
1); 1);
@ -1349,11 +1344,6 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
// Check that the maps haven't changed. // Check that the maps haven't changed.
CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss); CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss);
if (object->IsGlobalObject()) {
__ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ str(r3, MemOperand(sp, argc * kPointerSize));
}
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop), __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop),
argc + 1, argc + 1,
1); 1);
@ -1373,8 +1363,68 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
JSFunction* function, JSFunction* function,
String* name, String* name,
CheckType check) { CheckType check) {
// TODO(722): implement this. // ----------- S t a t e -------------
return Heap::undefined_value(); // -- r2 : function name
// -- lr : return address
// -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
// -- ...
// -- sp[argc * 4] : receiver
// -----------------------------------
// If object is not a string, bail out to regular call.
if (!object->IsString()) return Heap::undefined_value();
const int argc = arguments().immediate();
Label miss;
Label index_out_of_range;
GenerateNameCheck(name, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(masm(),
Context::STRING_FUNCTION_INDEX,
r0);
ASSERT(object != holder);
CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
r1, r3, r4, name, &miss);
Register receiver = r1;
Register index = r4;
Register scratch = r3;
Register result = r0;
__ ldr(receiver, MemOperand(sp, argc * kPointerSize));
if (argc > 0) {
__ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
} else {
__ LoadRoot(index, Heap::kUndefinedValueRootIndex);
}
StringCharCodeAtGenerator char_code_at_generator(receiver,
index,
scratch,
result,
&miss, // When not a string.
&miss, // When not a number.
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
char_code_at_generator.GenerateFast(masm());
__ Drop(argc + 1);
__ Ret();
ICRuntimeCallHelper call_helper;
char_code_at_generator.GenerateSlow(masm(), call_helper);
__ bind(&index_out_of_range);
__ LoadRoot(r0, Heap::kNanValueRootIndex);
__ Drop(argc + 1);
__ Ret();
__ bind(&miss);
Object* obj = GenerateMissBranch();
if (obj->IsFailure()) return obj;
// Return the generated code.
return GetCode(function);
} }
@@ -1383,8 +1433,71 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
                                                  JSFunction* function,
                                                  String* name,
                                                  CheckType check) {
-  // TODO(722): implement this.
-  return Heap::undefined_value();
+  // ----------- S t a t e -------------
+  //  -- r2                     : function name
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString()) return Heap::undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label index_out_of_range;
+  GenerateNameCheck(name, &miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            r0);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
+                  r1, r3, r4, name, &miss);
+
+  Register receiver = r0;
+  Register index = r4;
+  Register scratch1 = r1;
+  Register scratch2 = r3;
+  Register result = r0;
+  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+  if (argc > 0) {
+    __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &index_out_of_range,
+                                          STRING_INDEX_IS_NUMBER);
+  char_at_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  ICRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm(), call_helper);
+
+  __ bind(&index_out_of_range);
+  __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+  __ Drop(argc + 1);
+  __ Ret();
+
+  __ bind(&miss);
+  Object* obj = GenerateMissBranch();
+  if (obj->IsFailure()) return obj;
+
+  // Return the generated code.
+  return GetCode(function);
}
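Here the out-of-range path loads the empty-string root instead of NaN, which is the standard charAt contract (illustrative values):

  var s = 'abc';
  console.log(s.charAt(1));   // 'b'
  console.log(s.charAt(10));  // '' -- out-of-range index yields the empty string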

15
deps/v8/src/array.js

@@ -566,13 +566,6 @@ function ArraySlice(start, end) {
function ArraySplice(start, delete_count) {
  var num_arguments = %_ArgumentsLength();

-  // SpiderMonkey and JSC return undefined in the case where no
-  // arguments are given instead of using the implicit undefined
-  // arguments.  This does not follow ECMA-262, but we do the same for
-  // compatibility.
-  // TraceMonkey follows ECMA-262 though.
-  if (num_arguments == 0) return;
-
  var len = TO_UINT32(this.length);
  var start_i = TO_INTEGER(start);
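With the early return gone, splice follows ECMA-262 when called with no arguments: start and delete_count both coerce from undefined to 0, so nothing is removed and an empty array is returned (previously V8, like SpiderMonkey and JSC, returned undefined). A sketch of the observable change:

  var a = [1, 2, 3];
  var removed = a.splice();  // no arguments
  console.log(removed);      // [] -- was undefined before this change
  console.log(a);            // [1, 2, 3] -- the array is untouched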
@@ -953,7 +946,8 @@ function ArrayMap(f, receiver) {
function ArrayIndexOf(element, index) {
-  var length = this.length;
+  var length = TO_UINT32(this.length);
+  if (length == 0) return -1;
  if (IS_UNDEFINED(index)) {
    index = 0;
  } else {
@@ -963,13 +957,13 @@ function ArrayIndexOf(element, index) {
    // If index is still negative, search the entire array.
    if (index < 0) index = 0;
  }
-  // Lookup through the array.
  if (!IS_UNDEFINED(element)) {
    for (var i = index; i < length; i++) {
      if (this[i] === element) return i;
    }
    return -1;
  }
+  // Lookup through the array.
  for (var i = index; i < length; i++) {
    if (IS_UNDEFINED(this[i]) && i in this) {
      return i;
@@ -980,7 +974,8 @@ function ArrayIndexOf(element, index) {
function ArrayLastIndexOf(element, index) {
-  var length = this.length;
+  var length = TO_UINT32(this.length);
+  if (length == 0) return -1;
  if (%_ArgumentsLength() < 2) {
    index = length - 1;
  } else {
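A short illustration of the two fixes, assuming standard indexOf/lastIndexOf semantics: the length is now coerced with TO_UINT32 and an empty receiver exits early, while searching for undefined still skips holes because of the 'i in this' test:

  console.log([].indexOf(1));              // -1 -- new early exit for length == 0
  var sparse = [, undefined, 2];           // index 0 is a hole
  console.log(sparse.indexOf(undefined));  // 1 -- the hole at 0 is skipped
  console.log(sparse.lastIndexOf(2));      // 2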

3
deps/v8/src/ast-inl.h

@@ -64,8 +64,7 @@ ForStatement::ForStatement(ZoneStringList* labels)
      cond_(NULL),
      next_(NULL),
      may_have_function_literal_(true),
-      loop_variable_(NULL),
-      peel_this_loop_(false) {
+      loop_variable_(NULL) {
}

627
deps/v8/src/ast.cc

@@ -28,7 +28,6 @@
#include "v8.h"

#include "ast.h"
-#include "data-flow.h"
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
@@ -78,18 +77,17 @@ VariableProxy::VariableProxy(Handle<String> name,
      var_(NULL),
      is_this_(is_this),
      inside_with_(inside_with),
-      is_trivial_(false),
-      reaching_definitions_(NULL),
-      is_primitive_(false) {
+      is_trivial_(false) {
  // names must be canonicalized for fast equality checks
  ASSERT(name->IsSymbol());
}


VariableProxy::VariableProxy(bool is_this)
-    : is_this_(is_this),
-      reaching_definitions_(NULL),
-      is_primitive_(false) {
+    : var_(NULL),
+      is_this_(is_this),
+      inside_with_(false),
+      is_trivial_(false) {
}
@@ -237,6 +235,59 @@ bool Expression::GuaranteedSmiResult() {
  return false;
}

+
+void Expression::CopyAnalysisResultsFrom(Expression* other) {
+  bitfields_ = other->bitfields_;
+  type_ = other->type_;
+}
+
+
+bool UnaryOperation::ResultOverwriteAllowed() {
+  switch (op_) {
+    case Token::BIT_NOT:
+    case Token::SUB:
+      return true;
+    default:
+      return false;
+  }
+}
+
+
+bool BinaryOperation::ResultOverwriteAllowed() {
+  switch (op_) {
+    case Token::COMMA:
+    case Token::OR:
+    case Token::AND:
+      return false;
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      return true;
+    default:
+      UNREACHABLE();
+  }
+  return false;
+}
+
+
+BinaryOperation::BinaryOperation(Assignment* assignment) {
+  ASSERT(assignment->is_compound());
+  op_ = assignment->binary_op();
+  left_ = assignment->target();
+  right_ = assignment->value();
+  pos_ = assignment->position();
+  CopyAnalysisResultsFrom(assignment);
+}
+

// ----------------------------------------------------------------------------
// Implementation of AstVisitor
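ResultOverwriteAllowed separates operators that always produce a fresh value (so a temporary can be overwritten in place) from those that may return one of their operands. A JavaScript illustration of why COMMA, OR, and AND answer false:

  var x = {};                    // illustrative object
  console.log((x || {}) === x);  // true -- '||' can return its left operand
  console.log((0, x) === x);     // true -- ',' returns its right operand
  console.log((1 + 2) === 3);    // arithmetic always yields a fresh value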
@@ -575,218 +626,6 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
  }
}
-// IsPrimitive implementation.  IsPrimitive is true if the value of an
-// expression is known at compile-time to be any JS type other than Object
-// (e.g, it is Undefined, Null, Boolean, String, or Number).
-
-// The following expression types are never primitive because they express
-// Object values.
-bool FunctionLiteral::IsPrimitive() { return false; }
-bool SharedFunctionInfoLiteral::IsPrimitive() { return false; }
-bool RegExpLiteral::IsPrimitive() { return false; }
-bool ObjectLiteral::IsPrimitive() { return false; }
-bool ArrayLiteral::IsPrimitive() { return false; }
-bool CatchExtensionObject::IsPrimitive() { return false; }
-bool CallNew::IsPrimitive() { return false; }
-bool ThisFunction::IsPrimitive() { return false; }
-
-// The following expression types are not always primitive because we do not
-// have enough information to conclude that they are.
-bool Property::IsPrimitive() { return false; }
-bool Call::IsPrimitive() { return false; }
-bool CallRuntime::IsPrimitive() { return false; }
-
-// A variable use is not primitive unless the primitive-type analysis
-// determines otherwise.
-bool VariableProxy::IsPrimitive() {
-  ASSERT(!is_primitive_ || (var() != NULL && var()->IsStackAllocated()));
-  return is_primitive_;
-}
-
-// The value of a conditional is the value of one of the alternatives.  It's
-// always primitive if both alternatives are always primitive.
-bool Conditional::IsPrimitive() {
-  return then_expression()->IsPrimitive() && else_expression()->IsPrimitive();
-}
-
-// A literal is primitive when it is not a JSObject.
-bool Literal::IsPrimitive() { return !handle()->IsJSObject(); }
-
-// The value of an assignment is the value of its right-hand side.
-bool Assignment::IsPrimitive() {
-  switch (op()) {
-    case Token::INIT_VAR:
-    case Token::INIT_CONST:
-    case Token::ASSIGN:
-      return value()->IsPrimitive();
-    default:
-      // {|=, ^=, &=, <<=, >>=, >>>=, +=, -=, *=, /=, %=}
-      // Arithmetic operations are always primitive.  They express Numbers
-      // with the exception of +, which expresses a Number or a String.
-      return true;
-  }
-}
-
-// Throw does not express a value, so it's trivially always primitive.
-bool Throw::IsPrimitive() { return true; }
-
-// Unary operations always express primitive values.  delete and ! express
-// Booleans, void Undefined, typeof String, +, -, and ~ Numbers.
-bool UnaryOperation::IsPrimitive() { return true; }
-
-// Count operations (pre- and post-fix increment and decrement) always
-// express primitive values (Numbers).  See ECMA-262-3, 11.3.1, 11.3.2,
-// 11.4.4, ane 11.4.5.
-bool CountOperation::IsPrimitive() { return true; }
-
-// Binary operations depend on the operator.
-bool BinaryOperation::IsPrimitive() {
-  switch (op()) {
-    case Token::COMMA:
-      // Value is the value of the right subexpression.
-      return right()->IsPrimitive();
-    case Token::OR:
-    case Token::AND:
-      // Value is the value one of the subexpressions.
-      return left()->IsPrimitive() && right()->IsPrimitive();
-    default:
-      // {|, ^, &, <<, >>, >>>, +, -, *, /, %}
-      // Arithmetic operations are always primitive.  They express Numbers
-      // with the exception of +, which expresses a Number or a String.
-      return true;
-  }
-}
-
-// Compare operations always express Boolean values.
-bool CompareOperation::IsPrimitive() { return true; }
-
-// Overridden IsCritical member functions.  IsCritical is true for AST nodes
-// whose evaluation is absolutely required (they are never dead) because
-// they are externally visible.
-
-// References to global variables or lookup slots are critical because they
-// may have getters.  All others, including parameters rewritten to explicit
-// property references, are not critical.
-bool VariableProxy::IsCritical() {
-  Variable* var = AsVariable();
-  return var != NULL &&
-      (var->slot() == NULL || var->slot()->type() == Slot::LOOKUP);
-}
-
-// Literals are never critical.
-bool Literal::IsCritical() { return false; }
-
-// Property assignments and throwing of reference errors are always
-// critical.  Assignments to escaping variables are also critical.  In
-// addition the operation of compound assignments is critical if either of
-// its operands is non-primitive (the arithmetic operations all use one of
-// ToPrimitive, ToNumber, ToInt32, or ToUint32 on each of their operands).
-// In this case, we mark the entire AST node as critical because there is
-// no binary operation node to mark.
-bool Assignment::IsCritical() {
-  Variable* var = AssignedVariable();
-  return var == NULL ||
-      !var->IsStackAllocated() ||
-      (is_compound() && (!target()->IsPrimitive() || !value()->IsPrimitive()));
-}
-
-// Property references are always critical, because they may have getters.
-bool Property::IsCritical() { return true; }
-
-// Calls are always critical.
-bool Call::IsCritical() { return true; }
-
-// +,- use ToNumber on the value of their operand.
-bool UnaryOperation::IsCritical() {
-  ASSERT(op() == Token::ADD || op() == Token::SUB);
-  return !expression()->IsPrimitive();
-}
-
-// Count operations targeting properties and reference errors are always
-// critical.  Count operations on escaping variables are critical.  Count
-// operations targeting non-primitives are also critical because they use
-// ToNumber.
-bool CountOperation::IsCritical() {
-  Variable* var = AssignedVariable();
-  return var == NULL ||
-      !var->IsStackAllocated() ||
-      !expression()->IsPrimitive();
-}
-
-// Arithmetic operations all use one of ToPrimitive, ToNumber, ToInt32, or
-// ToUint32 on each of their operands.
-bool BinaryOperation::IsCritical() {
-  ASSERT(op() != Token::COMMA);
-  ASSERT(op() != Token::OR);
-  ASSERT(op() != Token::AND);
-  return !left()->IsPrimitive() || !right()->IsPrimitive();
-}
-
-// <, >, <=, and >= all use ToPrimitive on both their operands.
-bool CompareOperation::IsCritical() {
-  ASSERT(op() != Token::EQ);
-  ASSERT(op() != Token::NE);
-  ASSERT(op() != Token::EQ_STRICT);
-  ASSERT(op() != Token::NE_STRICT);
-  ASSERT(op() != Token::INSTANCEOF);
-  ASSERT(op() != Token::IN);
-  return !left()->IsPrimitive() || !right()->IsPrimitive();
-}
-
-// Implementation of a copy visitor. The visitor create a deep copy
-// of ast nodes. Nodes that do not require a deep copy are copied
-// with the default copy constructor.
-AstNode::AstNode(AstNode* other) : num_(kNoNumber) {
-  // AST node number should be unique. Assert that we only copy AstNodes
-  // before node numbers are assigned.
-  ASSERT(other->num_ == kNoNumber);
-}
-
-Statement::Statement(Statement* other)
-    : AstNode(other), statement_pos_(other->statement_pos_) {}
-
-Expression::Expression(Expression* other)
-    : AstNode(other),
-      bitfields_(other->bitfields_),
-      type_(other->type_) {}
-
-BreakableStatement::BreakableStatement(BreakableStatement* other)
-    : Statement(other), labels_(other->labels_), type_(other->type_) {}
-
-Block::Block(Block* other, ZoneList<Statement*>* statements)
-    : BreakableStatement(other),
-      statements_(statements->length()),
-      is_initializer_block_(other->is_initializer_block_) {
-  statements_.AddAll(*statements);
-}
WhileStatement::WhileStatement(ZoneStringList* labels)
    : IterationStatement(labels),
@@ -795,358 +634,8 @@ WhileStatement::WhileStatement(ZoneStringList* labels)
}

-ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
-                                         Expression* expression)
-    : Statement(other), expression_(expression) {}
-
-IfStatement::IfStatement(IfStatement* other,
-                         Expression* condition,
-                         Statement* then_statement,
-                         Statement* else_statement)
-    : Statement(other),
-      condition_(condition),
-      then_statement_(then_statement),
-      else_statement_(else_statement) {}
-
-EmptyStatement::EmptyStatement(EmptyStatement* other) : Statement(other) {}
-
-IterationStatement::IterationStatement(IterationStatement* other,
-                                       Statement* body)
-    : BreakableStatement(other), body_(body) {}
-

CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
    : label_(label), statements_(statements) {
}

-ForStatement::ForStatement(ForStatement* other,
-                           Statement* init,
-                           Expression* cond,
-                           Statement* next,
-                           Statement* body)
-    : IterationStatement(other, body),
-      init_(init),
-      cond_(cond),
-      next_(next),
-      may_have_function_literal_(other->may_have_function_literal_),
-      loop_variable_(other->loop_variable_),
-      peel_this_loop_(other->peel_this_loop_) {}
-
-Assignment::Assignment(Assignment* other,
-                       Expression* target,
-                       Expression* value)
-    : Expression(other),
-      op_(other->op_),
-      target_(target),
-      value_(value),
-      pos_(other->pos_),
-      block_start_(other->block_start_),
-      block_end_(other->block_end_) {}
-
-Property::Property(Property* other, Expression* obj, Expression* key)
-    : Expression(other),
-      obj_(obj),
-      key_(key),
-      pos_(other->pos_),
-      type_(other->type_) {}
-
-Call::Call(Call* other,
-           Expression* expression,
-           ZoneList<Expression*>* arguments)
-    : Expression(other),
-      expression_(expression),
-      arguments_(arguments),
-      pos_(other->pos_) {}
-
-UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression)
-    : Expression(other), op_(other->op_), expression_(expression) {}
-
-BinaryOperation::BinaryOperation(Expression* other,
-                                 Token::Value op,
-                                 Expression* left,
-                                 Expression* right)
-    : Expression(other), op_(op), left_(left), right_(right) {}
-
-CountOperation::CountOperation(CountOperation* other, Expression* expression)
-    : Expression(other),
-      is_prefix_(other->is_prefix_),
-      op_(other->op_),
-      expression_(expression) {}
-
-CompareOperation::CompareOperation(CompareOperation* other,
-                                   Expression* left,
-                                   Expression* right)
-    : Expression(other),
-      op_(other->op_),
-      left_(left),
-      right_(right) {}
-
-Expression* CopyAstVisitor::DeepCopyExpr(Expression* expr) {
-  expr_ = NULL;
-  if (expr != NULL) Visit(expr);
-  return expr_;
-}
-
-Statement* CopyAstVisitor::DeepCopyStmt(Statement* stmt) {
-  stmt_ = NULL;
-  if (stmt != NULL) Visit(stmt);
-  return stmt_;
-}
-
-ZoneList<Expression*>* CopyAstVisitor::DeepCopyExprList(
-    ZoneList<Expression*>* expressions) {
-  ZoneList<Expression*>* copy =
-      new ZoneList<Expression*>(expressions->length());
-  for (int i = 0; i < expressions->length(); i++) {
-    copy->Add(DeepCopyExpr(expressions->at(i)));
-  }
-  return copy;
-}
-
-ZoneList<Statement*>* CopyAstVisitor::DeepCopyStmtList(
-    ZoneList<Statement*>* statements) {
-  ZoneList<Statement*>* copy = new ZoneList<Statement*>(statements->length());
-  for (int i = 0; i < statements->length(); i++) {
-    copy->Add(DeepCopyStmt(statements->at(i)));
-  }
-  return copy;
-}
-
-void CopyAstVisitor::VisitBlock(Block* stmt) {
-  stmt_ = new Block(stmt,
-                    DeepCopyStmtList(stmt->statements()));
-}
-
-void CopyAstVisitor::VisitExpressionStatement(
-    ExpressionStatement* stmt) {
-  stmt_ = new ExpressionStatement(stmt, DeepCopyExpr(stmt->expression()));
-}
-
-void CopyAstVisitor::VisitEmptyStatement(EmptyStatement* stmt) {
-  stmt_ = new EmptyStatement(stmt);
-}
-
-void CopyAstVisitor::VisitIfStatement(IfStatement* stmt) {
-  stmt_ = new IfStatement(stmt,
-                          DeepCopyExpr(stmt->condition()),
-                          DeepCopyStmt(stmt->then_statement()),
-                          DeepCopyStmt(stmt->else_statement()));
-}
-
-void CopyAstVisitor::VisitContinueStatement(ContinueStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitBreakStatement(BreakStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitReturnStatement(ReturnStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitWithEnterStatement(
-    WithEnterStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitWithExitStatement(WithExitStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitWhileStatement(WhileStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitForStatement(ForStatement* stmt) {
-  stmt_ = new ForStatement(stmt,
-                           DeepCopyStmt(stmt->init()),
-                           DeepCopyExpr(stmt->cond()),
-                           DeepCopyStmt(stmt->next()),
-                           DeepCopyStmt(stmt->body()));
-}
-
-void CopyAstVisitor::VisitForInStatement(ForInStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitTryFinallyStatement(
-    TryFinallyStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitDebuggerStatement(
-    DebuggerStatement* stmt) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitConditional(Conditional* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitSlot(Slot* expr) {
-  UNREACHABLE();
-}
-
-void CopyAstVisitor::VisitVariableProxy(VariableProxy* expr) {
-  expr_ = new VariableProxy(*expr);
-}
-
-void CopyAstVisitor::VisitLiteral(Literal* expr) {
-  expr_ = new Literal(*expr);
-}
-
-void CopyAstVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitCatchExtensionObject(
-    CatchExtensionObject* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitAssignment(Assignment* expr) {
-  expr_ = new Assignment(expr,
-                         DeepCopyExpr(expr->target()),
-                         DeepCopyExpr(expr->value()));
-}
-
-void CopyAstVisitor::VisitThrow(Throw* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitProperty(Property* expr) {
-  expr_ = new Property(expr,
-                       DeepCopyExpr(expr->obj()),
-                       DeepCopyExpr(expr->key()));
-}
-
-void CopyAstVisitor::VisitCall(Call* expr) {
-  expr_ = new Call(expr,
-                   DeepCopyExpr(expr->expression()),
-                   DeepCopyExprList(expr->arguments()));
-}
-
-void CopyAstVisitor::VisitCallNew(CallNew* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitCallRuntime(CallRuntime* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitUnaryOperation(UnaryOperation* expr) {
-  expr_ = new UnaryOperation(expr, DeepCopyExpr(expr->expression()));
-}
-
-void CopyAstVisitor::VisitCountOperation(CountOperation* expr) {
-  expr_ = new CountOperation(expr,
-                             DeepCopyExpr(expr->expression()));
-}
-
-void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) {
-  expr_ = new BinaryOperation(expr,
-                              expr->op(),
-                              DeepCopyExpr(expr->left()),
-                              DeepCopyExpr(expr->right()));
-}
-
-void CopyAstVisitor::VisitCompareOperation(CompareOperation* expr) {
-  expr_ = new CompareOperation(expr,
-                               DeepCopyExpr(expr->left()),
-                               DeepCopyExpr(expr->right()));
-}
-
-void CopyAstVisitor::VisitThisFunction(ThisFunction* expr) {
-  SetStackOverflow();
-}
-
-void CopyAstVisitor::VisitDeclaration(Declaration* decl) {
-  UNREACHABLE();
-}
-

} }  // namespace v8::internal

371
deps/v8/src/ast.h

@@ -89,9 +89,11 @@ namespace internal {
  V(CallNew)                                    \
  V(CallRuntime)                                \
  V(UnaryOperation)                             \
+  V(IncrementOperation)                         \
  V(CountOperation)                             \
  V(BinaryOperation)                            \
  V(CompareOperation)                           \
+  V(CompareToNull)                              \
  V(ThisFunction)

#define AST_NODE_LIST(V)                        \
@@ -118,12 +120,6 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;

class AstNode: public ZoneObject {
 public:
-  static const int kNoNumber = -1;
-
-  AstNode() : num_(kNoNumber) {}
-
-  explicit AstNode(AstNode* other);
-
  virtual ~AstNode() { }

  virtual void Accept(AstVisitor* v) = 0;
@@ -151,20 +147,6 @@ class AstNode: public ZoneObject {
  virtual ObjectLiteral* AsObjectLiteral() { return NULL; }
  virtual ArrayLiteral* AsArrayLiteral() { return NULL; }
  virtual CompareOperation* AsCompareOperation() { return NULL; }
-
-  // True if the AST node is critical (its execution is needed or externally
-  // visible in some way).
-  virtual bool IsCritical() {
-    UNREACHABLE();
-    return true;
-  }
-
-  int num() { return num_; }
-  void set_num(int n) { num_ = n; }
-
- private:
-  // Support for ast node numbering.
-  int num_;
};
@@ -172,8 +154,6 @@ class Statement: public AstNode {
 public:
  Statement() : statement_pos_(RelocInfo::kNoPosition) {}

-  explicit Statement(Statement* other);
-
  virtual Statement* AsStatement()  { return this; }

  virtual ReturnStatement* AsReturnStatement() { return NULL; }
@@ -201,48 +181,33 @@ class Expression: public AstNode {
    // Evaluated for its value (and side effects).
    kValue,
    // Evaluated for control flow (and side effects).
-    kTest,
-    // Evaluated for control flow and side effects.  Value is also
-    // needed if true.
-    kValueTest,
-    // Evaluated for control flow and side effects.  Value is also
-    // needed if false.
-    kTestValue
+    kTest
  };

  Expression() : bitfields_(0) {}

-  explicit Expression(Expression* other);
-
  virtual Expression* AsExpression()  { return this; }

+  virtual bool IsTrivial() { return false; }
  virtual bool IsValidLeftHandSide() { return false; }

-  virtual Variable* AssignedVariable() { return NULL; }
-
  // Symbols that cannot be parsed as array indices are considered property
  // names.  We do not treat symbols that can be array indexes as property
  // names because [] for string objects is handled only by keyed ICs.
  virtual bool IsPropertyName() { return false; }

-  // True if the expression does not have (evaluated) subexpressions.
-  // Function literals are leaves because their subexpressions are not
-  // evaluated.
-  virtual bool IsLeaf() { return false; }
-
-  // True if the expression has no side effects and is safe to
-  // evaluate out of order.
-  virtual bool IsTrivial() { return false; }
-
-  // True if the expression always has one of the non-Object JS types
-  // (Undefined, Null, Boolean, String, or Number).
-  virtual bool IsPrimitive() = 0;
-
  // Mark the expression as being compiled as an expression
  // statement.  This is used to transform postfix increments to
  // (faster) prefix increments.
  virtual void MarkAsStatement() { /* do nothing */ }

+  // True iff the result can be safely overwritten (to avoid allocation).
+  // False for operations that can return one of their operands.
+  virtual bool ResultOverwriteAllowed() { return false; }
+
+  // True iff the expression is a literal represented as a smi.
+  virtual bool IsSmiLiteral() { return false; }
+
  // Static type information for this expression.
  StaticType* type() { return &type_; }
@@ -259,7 +224,8 @@ class Expression: public AstNode {
  // top operation is a bit operation with a mask, or a shift.
  bool GuaranteedSmiResult();

-  // AST analysis results
+  // AST analysis results.
+  void CopyAnalysisResultsFrom(Expression* other);

  // True if the expression rooted at this node can be compiled by the
  // side-effect free compiler.
@@ -320,11 +286,6 @@ class ValidLeftHandSideSentinel: public Expression {
  virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
  static ValidLeftHandSideSentinel* instance() { return &instance_; }

-  virtual bool IsPrimitive() {
-    UNREACHABLE();
-    return false;
-  }
-
 private:
  static ValidLeftHandSideSentinel instance_;
};
@@ -353,8 +314,6 @@ class BreakableStatement: public Statement {
 protected:
  inline BreakableStatement(ZoneStringList* labels, Type type);

-  explicit BreakableStatement(BreakableStatement* other);
-
 private:
  ZoneStringList* labels_;
  Type type_;
@@ -366,10 +325,6 @@ class Block: public BreakableStatement {
 public:
  inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);

-  // Construct a clone initialized from the original block and
-  // a deep copy of all statements of the original block.
-  Block(Block* other, ZoneList<Statement*>* statements);
-
  virtual void Accept(AstVisitor* v);

  virtual Block* AsBlock() { return this; }
@@ -433,10 +388,6 @@ class IterationStatement: public BreakableStatement {
 protected:
  explicit inline IterationStatement(ZoneStringList* labels);

-  // Construct a clone initialized from original and
-  // a deep copy of the original body.
-  IterationStatement(IterationStatement* other, Statement* body);
-
  void Initialize(Statement* body) {
    body_ = body;
  }
@@ -486,13 +437,14 @@ class WhileStatement: public IterationStatement {
  bool may_have_function_literal() const {
    return may_have_function_literal_;
  }
+  void set_may_have_function_literal(bool value) {
+    may_have_function_literal_ = value;
+  }

 private:
  Expression* cond_;
  // True if there is a function literal subexpression in the condition.
  bool may_have_function_literal_;
-
-  friend class AstOptimizer;
};
@@ -500,14 +452,6 @@ class ForStatement: public IterationStatement {
 public:
  explicit inline ForStatement(ZoneStringList* labels);

-  // Construct a for-statement initialized from another for-statement
-  // and deep copies of all parts of the original statement.
-  ForStatement(ForStatement* other,
-               Statement* init,
-               Expression* cond,
-               Statement* next,
-               Statement* body);
-
  virtual ForStatement* AsForStatement() { return this; }

  void Initialize(Statement* init,
@@ -528,17 +472,18 @@ class ForStatement: public IterationStatement {
  void set_cond(Expression* expr) { cond_ = expr; }
  Statement* next() const { return next_; }
  void set_next(Statement* stmt) { next_ = stmt; }

  bool may_have_function_literal() const {
    return may_have_function_literal_;
  }
+  void set_may_have_function_literal(bool value) {
+    may_have_function_literal_ = value;
+  }

  bool is_fast_smi_loop() { return loop_variable_ != NULL; }
  Variable* loop_variable() { return loop_variable_; }
  void set_loop_variable(Variable* var) { loop_variable_ = var; }

-  bool peel_this_loop() { return peel_this_loop_; }
-  void set_peel_this_loop(bool b) { peel_this_loop_ = b; }
-
 private:
  Statement* init_;
  Expression* cond_;
@@ -546,9 +491,6 @@ class ForStatement: public IterationStatement {
  // True if there is a function literal subexpression in the condition.
  bool may_have_function_literal_;
  Variable* loop_variable_;
-  bool peel_this_loop_;
-
-  friend class AstOptimizer;
};
@@ -578,10 +520,6 @@ class ExpressionStatement: public Statement {
  explicit ExpressionStatement(Expression* expression)
      : expression_(expression) { }

-  // Construct an expression statement initialized from another
-  // expression statement and a deep copy of the original expression.
-  ExpressionStatement(ExpressionStatement* other, Expression* expression);
-
  virtual void Accept(AstVisitor* v);

  // Type testing & conversion.
@@ -721,13 +659,6 @@ class IfStatement: public Statement {
        then_statement_(then_statement),
        else_statement_(else_statement) { }

-  // Construct an if-statement initialized from another if-statement
-  // and deep copies of all parts of the original.
-  IfStatement(IfStatement* other,
-              Expression* condition,
-              Statement* then_statement,
-              Statement* else_statement);
-
  virtual void Accept(AstVisitor* v);

  bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
@@ -834,8 +765,6 @@ class EmptyStatement: public Statement {
 public:
  EmptyStatement() {}

-  explicit EmptyStatement(EmptyStatement* other);
-
  virtual void Accept(AstVisitor* v);

  // Type testing & conversion.
@@ -848,6 +777,8 @@ class Literal: public Expression {
  explicit Literal(Handle<Object> handle) : handle_(handle) { }

  virtual void Accept(AstVisitor* v);
+  virtual bool IsTrivial() { return true; }
+  virtual bool IsSmiLiteral() { return handle_->IsSmi(); }

  // Type testing & conversion.
  virtual Literal* AsLiteral() { return this; }
@@ -865,11 +796,6 @@ class Literal: public Expression {
    return false;
  }

-  virtual bool IsLeaf() { return true; }
-  virtual bool IsTrivial() { return true; }
-  virtual bool IsPrimitive();
-  virtual bool IsCritical();
-
  // Identity testers.
  bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
  bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@@ -916,7 +842,6 @@ class ObjectLiteral: public MaterializedLiteral {
  // to the code generator.
  class Property: public ZoneObject {
   public:
-
    enum Kind {
      CONSTANT,              // Property with constant value (compile time).
      COMPUTED,              // Property with computed value (execution time).
@@ -954,10 +879,6 @@ class ObjectLiteral: public MaterializedLiteral {
  virtual ObjectLiteral* AsObjectLiteral() { return this; }
  virtual void Accept(AstVisitor* v);

-  virtual bool IsLeaf() { return properties()->is_empty(); }
-
-  virtual bool IsPrimitive();
-
  Handle<FixedArray> constant_properties() const {
    return constant_properties_;
  }
@@ -984,10 +905,6 @@ class RegExpLiteral: public MaterializedLiteral {

  virtual void Accept(AstVisitor* v);

-  virtual bool IsLeaf() { return true; }
-
-  virtual bool IsPrimitive();
-
  Handle<String> pattern() const { return pattern_; }
  Handle<String> flags() const { return flags_; }
@@ -1012,10 +929,6 @@ class ArrayLiteral: public MaterializedLiteral {
  virtual void Accept(AstVisitor* v);
  virtual ArrayLiteral* AsArrayLiteral() { return this; }

-  virtual bool IsLeaf() { return values()->is_empty(); }
-
-  virtual bool IsPrimitive();
-
  Handle<FixedArray> constant_elements() const { return constant_elements_; }
  ZoneList<Expression*>* values() const { return values_; }
@@ -1036,8 +949,6 @@ class CatchExtensionObject: public Expression {

  virtual void Accept(AstVisitor* v);

-  virtual bool IsPrimitive();
-
  Literal* key() const { return key_; }
  VariableProxy* value() const { return value_; }
@@ -1055,7 +966,10 @@ class VariableProxy: public Expression {
  virtual Property* AsProperty() {
    return var_ == NULL ? NULL : var_->AsProperty();
  }
-  virtual VariableProxy* AsVariableProxy() { return this; }
+
+  virtual VariableProxy* AsVariableProxy() {
+    return this;
+  }

  Variable* AsVariable() {
    return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
@@ -1065,20 +979,12 @@ class VariableProxy: public Expression {
    return var_ == NULL ? true : var_->IsValidLeftHandSide();
  }

-  virtual bool IsLeaf() {
-    ASSERT(var_ != NULL);  // Variable must be resolved.
-    return var()->is_global() || var()->rewrite()->IsLeaf();
+  virtual bool IsTrivial() {
+    // Reading from a mutable variable is a side effect, but the
+    // variable for 'this' is immutable.
+    return is_this_ || is_trivial_;
  }

-  // Reading from a mutable variable is a side effect, but 'this' is
-  // immutable.
-  virtual bool IsTrivial() { return is_trivial_; }
-
-  virtual bool IsPrimitive();
-  virtual bool IsCritical();
-
-  void SetIsPrimitive(bool value) { is_primitive_ = value; }
-
  bool IsVariable(Handle<String> n) {
    return !is_this() && name().is_identical_to(n);
  }
@@ -1092,11 +998,8 @@ class VariableProxy: public Expression {
  Variable* var() const { return var_; }
  bool is_this() const { return is_this_; }
  bool inside_with() const { return inside_with_; }
-  bool is_trivial() { return is_trivial_; }
-  void set_is_trivial(bool b) { is_trivial_ = b; }

-  BitVector* reaching_definitions() { return reaching_definitions_; }
-  void set_reaching_definitions(BitVector* rd) { reaching_definitions_ = rd; }
+  void MarkAsTrivial() { is_trivial_ = true; }

  // Bind this proxy to the variable var.
  void BindTo(Variable* var);
@@ -1107,8 +1010,6 @@ class VariableProxy: public Expression {
  bool is_this_;
  bool inside_with_;
  bool is_trivial_;
-  BitVector* reaching_definitions_;
-  bool is_primitive_;

  VariableProxy(Handle<String> name, bool is_this, bool inside_with);
  explicit VariableProxy(bool is_this);
@@ -1125,11 +1026,6 @@ class VariableProxySentinel: public VariableProxy {
    return &identifier_proxy_;
  }

-  virtual bool IsPrimitive() {
-    UNREACHABLE();
-    return false;
-  }
-
 private:
  explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { }
  static VariableProxySentinel this_proxy_;
@@ -1171,13 +1067,6 @@ class Slot: public Expression {
  // Type testing & conversion
  virtual Slot* AsSlot() { return this; }

-  virtual bool IsLeaf() { return true; }
-
-  virtual bool IsPrimitive() {
-    UNREACHABLE();
-    return false;
-  }
-
  bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }

  // Accessors
@@ -1203,8 +1092,6 @@ class Property: public Expression {
  Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
      : obj_(obj), key_(key), pos_(pos), type_(type) { }

-  Property(Property* other, Expression* obj, Expression* key);
-
  virtual void Accept(AstVisitor* v);

  // Type testing & conversion
@@ -1212,9 +1099,6 @@ class Property: public Expression {

  virtual bool IsValidLeftHandSide() { return true; }

-  virtual bool IsPrimitive();
-  virtual bool IsCritical();
-
  Expression* obj() const { return obj_; }
  Expression* key() const { return key_; }
  int position() const { return pos_; }
@@ -1240,16 +1124,11 @@ class Call: public Expression {
  Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
      : expression_(expression), arguments_(arguments), pos_(pos) { }

-  Call(Call* other, Expression* expression, ZoneList<Expression*>* arguments);
-
  virtual void Accept(AstVisitor* v);

  // Type testing and conversion.
  virtual Call* AsCall() { return this; }

-  virtual bool IsPrimitive();
-  virtual bool IsCritical();
-
  Expression* expression() const { return expression_; }
  ZoneList<Expression*>* arguments() const { return arguments_; }
  int position() { return pos_; }
@@ -1272,8 +1151,6 @@ class CallNew: public Expression {

  virtual void Accept(AstVisitor* v);

-  virtual bool IsPrimitive();
-
  Expression* expression() const { return expression_; }
  ZoneList<Expression*>* arguments() const { return arguments_; }
  int position() { return pos_; }
@@ -1298,8 +1175,6 @@ class CallRuntime: public Expression {

  virtual void Accept(AstVisitor* v);

-  virtual bool IsPrimitive();
-
  Handle<String> name() const { return name_; }
  Runtime::Function* function() const { return function_; }
  ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1319,16 +1194,12 @@ class UnaryOperation: public Expression {
    ASSERT(Token::IsUnaryOp(op));
  }

-  UnaryOperation(UnaryOperation* other, Expression* expression);
-
  virtual void Accept(AstVisitor* v);
+  virtual bool ResultOverwriteAllowed();

  // Type testing & conversion
  virtual UnaryOperation* AsUnaryOperation() { return this; }

-  virtual bool IsPrimitive();
-  virtual bool IsCritical();
-
  Token::Value op() const { return op_; }
  Expression* expression() const { return expression_; }
@@ -1340,120 +1211,102 @@ class UnaryOperation: public Expression {

class BinaryOperation: public Expression {
 public:
-  BinaryOperation(Token::Value op, Expression* left, Expression* right)
-      : op_(op), left_(left), right_(right) {
+  BinaryOperation(Token::Value op,
+                  Expression* left,
+                  Expression* right,
+                  int pos)
+      : op_(op), left_(left), right_(right), pos_(pos) {
    ASSERT(Token::IsBinaryOp(op));
  }

-  // Construct a binary operation with a given operator and left and right
-  // subexpressions.  The rest of the expression state is copied from
-  // another expression.
-  BinaryOperation(Expression* other,
-                  Token::Value op,
-                  Expression* left,
-                  Expression* right);
+  // Create the binary operation corresponding to a compound assignment.
+  explicit BinaryOperation(Assignment* assignment);

  virtual void Accept(AstVisitor* v);
+  virtual bool ResultOverwriteAllowed();

  // Type testing & conversion
  virtual BinaryOperation* AsBinaryOperation() { return this; }

-  virtual bool IsPrimitive();
-  virtual bool IsCritical();
-
-  // True iff the result can be safely overwritten (to avoid allocation).
-  // False for operations that can return one of their operands.
-  bool ResultOverwriteAllowed() {
-    switch (op_) {
-      case Token::COMMA:
-      case Token::OR:
-      case Token::AND:
-        return false;
-      case Token::BIT_OR:
-      case Token::BIT_XOR:
-      case Token::BIT_AND:
-      case Token::SHL:
-      case Token::SAR:
-      case Token::SHR:
-      case Token::ADD:
-      case Token::SUB:
-      case Token::MUL:
-      case Token::DIV:
-      case Token::MOD:
-        return true;
-      default:
-        UNREACHABLE();
-    }
-    return false;
-  }
-
  Token::Value op() const { return op_; }
  Expression* left() const { return left_; }
  Expression* right() const { return right_; }
+  int position() const { return pos_; }

 private:
  Token::Value op_;
  Expression* left_;
  Expression* right_;
+  int pos_;
};


-class CountOperation: public Expression {
+class IncrementOperation: public Expression {
 public:
-  CountOperation(bool is_prefix, Token::Value op, Expression* expression)
-      : is_prefix_(is_prefix), op_(op), expression_(expression) {
+  IncrementOperation(Token::Value op, Expression* expr)
+      : op_(op), expression_(expr) {
    ASSERT(Token::IsCountOp(op));
  }

-  CountOperation(CountOperation* other, Expression* expression);
+  Token::Value op() const { return op_; }
+  bool is_increment() { return op_ == Token::INC; }
+  Expression* expression() const { return expression_; }

  virtual void Accept(AstVisitor* v);

-  virtual CountOperation* AsCountOperation() { return this; }
+ private:
+  Token::Value op_;
+  Expression* expression_;
+  int pos_;
+};

-  virtual Variable* AssignedVariable() {
-    return expression()->AsVariableProxy()->AsVariable();
-  }
-
-  virtual bool IsPrimitive();
-  virtual bool IsCritical();

+class CountOperation: public Expression {
+ public:
+  CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
+      : is_prefix_(is_prefix), increment_(increment), pos_(pos) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  virtual CountOperation* AsCountOperation() { return this; }
+
  bool is_prefix() const { return is_prefix_; }
  bool is_postfix() const { return !is_prefix_; }
-  Token::Value op() const { return op_; }
+
+  Token::Value op() const { return increment_->op(); }
  Token::Value binary_op() {
-    return op_ == Token::INC ? Token::ADD : Token::SUB;
+    return (op() == Token::INC) ? Token::ADD : Token::SUB;
  }
-  Expression* expression() const { return expression_; }
+
+  Expression* expression() const { return increment_->expression(); }
+  IncrementOperation* increment() const { return increment_; }
+  int position() const { return pos_; }

  virtual void MarkAsStatement() { is_prefix_ = true; }

 private:
  bool is_prefix_;
-  Token::Value op_;
-  Expression* expression_;
+  IncrementOperation* increment_;
+  int pos_;
};
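The split mirrors what ++/-- actually do in JavaScript: IncrementOperation computes the new value (binary_op maps INC to ADD and DEC to SUB) while CountOperation decides whether the old or the new value is the expression's result:

  var x = 5;
  console.log(x++);  // 5 -- postfix yields the old value; x is now 6
  console.log(++x);  // 7 -- prefix yields the new value
  x++;  // value unused: MarkAsStatement lets this compile like the cheaper ++x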
class CompareOperation: public Expression {
 public:
-  CompareOperation(Token::Value op, Expression* left, Expression* right)
-      : op_(op), left_(left), right_(right) {
+  CompareOperation(Token::Value op,
+                   Expression* left,
+                   Expression* right,
+                   int pos)
+      : op_(op), left_(left), right_(right), pos_(pos) {
    ASSERT(Token::IsCompareOp(op));
  }

-  CompareOperation(CompareOperation* other,
-                   Expression* left,
-                   Expression* right);
-
  virtual void Accept(AstVisitor* v);

-  virtual bool IsPrimitive();
-  virtual bool IsCritical();
-
  Token::Value op() const { return op_; }
  Expression* left() const { return left_; }
  Expression* right() const { return right_; }
+  int position() const { return pos_; }

  // Type testing & conversion
  virtual CompareOperation* AsCompareOperation() { return this; }
@@ -1462,6 +1315,24 @@ class CompareOperation: public Expression {
  Token::Value op_;
  Expression* left_;
  Expression* right_;
+  int pos_;
+};
+
+
+class CompareToNull: public Expression {
+ public:
+  CompareToNull(bool is_strict, Expression* expression)
+      : is_strict_(is_strict), expression_(expression) { }
+
+  virtual void Accept(AstVisitor* v);
+
+  bool is_strict() const { return is_strict_; }
+  Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
+  Expression* expression() const { return expression_; }
+
+ private:
+  bool is_strict_;
+  Expression* expression_;
};
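CompareToNull gives comparisons against the null literal a dedicated node; is_strict captures the JavaScript difference between loose and strict null checks:

  function f(x) {
    console.log(x == null);   // loose: also true for undefined
    console.log(x === null);  // strict: true only for null itself
  }
  f(null);       // true, true
  f(undefined);  // true, false
  f(0);          // false, false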
@@ -1480,8 +1351,6 @@ class Conditional: public Expression {

  virtual void Accept(AstVisitor* v);

-  virtual bool IsPrimitive();
-
  Expression* condition() const { return condition_; }
  Expression* then_expression() const { return then_expression_; }
  Expression* else_expression() const { return else_expression_; }
@@ -1506,20 +1375,11 @@ class Assignment: public Expression {
    ASSERT(Token::IsAssignmentOp(op));
  }

-  Assignment(Assignment* other, Expression* target, Expression* value);
-
  virtual void Accept(AstVisitor* v);
  virtual Assignment* AsAssignment() { return this; }

-  virtual bool IsPrimitive();
-  virtual bool IsCritical();
-
  Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }

-  virtual Variable* AssignedVariable() {
-    return target()->AsVariableProxy()->AsVariable();
-  }
-
  Token::Value binary_op() const;

  Token::Value op() const { return op_; }
@@ -1555,8 +1415,6 @@ class Throw: public Expression {

  virtual void Accept(AstVisitor* v);

-  virtual bool IsPrimitive();
-
  Expression* exception() const { return exception_; }
  int position() const { return pos_; }
@@ -1578,7 +1436,8 @@ class FunctionLiteral: public Expression {
                  int num_parameters,
                  int start_position,
                  int end_position,
-                  bool is_expression)
+                  bool is_expression,
+                  bool contains_loops)
      : name_(name),
        scope_(scope),
        body_(body),
@@ -1591,6 +1450,7 @@ class FunctionLiteral: public Expression {
        start_position_(start_position),
        end_position_(end_position),
        is_expression_(is_expression),
+        contains_loops_(contains_loops),
        function_token_position_(RelocInfo::kNoPosition),
        inferred_name_(Heap::empty_string()),
        try_full_codegen_(false) {
@@ -1604,10 +1464,6 @@ class FunctionLiteral: public Expression {
  // Type testing & conversion
  virtual FunctionLiteral* AsFunctionLiteral() { return this; }

-  virtual bool IsLeaf() { return true; }
-
-  virtual bool IsPrimitive();
-
  Handle<String> name() const { return name_; }
  Scope* scope() const { return scope_; }
  ZoneList<Statement*>* body() const { return body_; }
@@ -1616,6 +1472,7 @@ class FunctionLiteral: public Expression {
  int start_position() const { return start_position_; }
  int end_position() const { return end_position_; }
  bool is_expression() const { return is_expression_; }
+  bool contains_loops() const { return contains_loops_; }

  int materialized_literal_count() { return materialized_literal_count_; }
  int expected_property_count() { return expected_property_count_; }
@@ -1656,6 +1513,7 @@ class FunctionLiteral: public Expression {
  int start_position_;
  int end_position_;
  bool is_expression_;
+  bool contains_loops_;
  int function_token_position_;
  Handle<String> inferred_name_;
  bool try_full_codegen_;
@@ -1675,12 +1533,8 @@ class SharedFunctionInfoLiteral: public Expression {
    return shared_function_info_;
  }

-  virtual bool IsLeaf() { return true; }
-
  virtual void Accept(AstVisitor* v);

-  virtual bool IsPrimitive();
-
 private:
  Handle<SharedFunctionInfo> shared_function_info_;
};
@@ -1689,8 +1543,6 @@ class SharedFunctionInfoLiteral: public Expression {
class ThisFunction: public Expression {
 public:
  virtual void Accept(AstVisitor* v);
-  virtual bool IsLeaf() { return true; }
-  virtual bool IsPrimitive();
};
@@ -1895,7 +1747,7 @@ class RegExpText: public RegExpTree {
  void AddElement(TextElement elm) {
    elements_.Add(elm);
    length_ += elm.length();
-  };
+  }
  ZoneList<TextElement>* elements() { return &elements_; }
 private:
  ZoneList<TextElement> elements_;
@@ -2078,29 +1930,6 @@ class AstVisitor BASE_EMBEDDED {
  bool stack_overflow_;
};

-
-class CopyAstVisitor : public AstVisitor {
- public:
-  Expression* DeepCopyExpr(Expression* expr);
-
-  Statement* DeepCopyStmt(Statement* stmt);
-
- private:
-  ZoneList<Expression*>* DeepCopyExprList(ZoneList<Expression*>* expressions);
-
-  ZoneList<Statement*>* DeepCopyStmtList(ZoneList<Statement*>* statements);
-
-  // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
-  // Holds the result of copying an expression.
-  Expression* expr_;
-  // Holds the result of copying a statement.
-  Statement* stmt_;
-};

} }  // namespace v8::internal

#endif  // V8_AST_H_

11
deps/v8/src/bootstrapper.cc

@@ -232,6 +232,7 @@ class Genesis BASE_EMBEDDED {
  bool InstallNatives();
  void InstallCustomCallGenerators();
  void InstallJSFunctionResultCaches();
+  void InitializeNormalizedMapCaches();
  // Used both for deserialized and from-scratch contexts to add the extensions
  // provided.
  static bool InstallExtensions(Handle<Context> global_context,
@@ -719,6 +720,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
        InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
                        Top::initial_object_prototype(), Builtins::Illegal,
                        true);
+    string_fun->shared()->set_construct_stub(
+        Builtins::builtin(Builtins::StringConstructCode));
    global_context()->set_string_function(*string_fun);
    // Add 'length' property to strings.
    Handle<DescriptorArray> string_descriptors =
@@ -1400,6 +1403,13 @@ void Genesis::InstallJSFunctionResultCaches() {
}


+void Genesis::InitializeNormalizedMapCaches() {
+  Handle<FixedArray> array(
+      Factory::NewFixedArray(NormalizedMapCache::kEntries, TENURED));
+  global_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array));
+}
+
+
int BootstrapperActive::nesting_ = 0;
@@ -1768,6 +1778,7 @@ Genesis::Genesis(Handle<Object> global_object,
    HookUpGlobalProxy(inner_global, global_proxy);
    InitializeGlobal(inner_global, empty_function);
    InstallJSFunctionResultCaches();
+    InitializeNormalizedMapCaches();
    if (!InstallNatives()) return;

    MakeFunctionInstancePrototypeWritable();
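The new construct stub fast-paths 'new String(...)'. For reference, the language-level behavior it must preserve, using an illustrative value:

  var prim = String(42);         // calling String yields a primitive
  var wrapper = new String(42);  // constructing yields a wrapper object
  console.log(typeof prim);      // 'string'
  console.log(typeof wrapper);   // 'object'
  console.log(wrapper == '42');  // true -- unwraps through valueOf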

115
deps/v8/src/builtins.cc

@@ -243,7 +243,7 @@ BUILTIN(ArrayCodeGeneric) {
}


-static Object* AllocateJSArray() {
+MUST_USE_RESULT static Object* AllocateJSArray() {
  JSFunction* array_function =
      Top::context()->global_context()->array_function();
  Object* result = Heap::AllocateJSObject(array_function);
@@ -252,7 +252,7 @@ static Object* AllocateJSArray() {
}


-static Object* AllocateEmptyJSArray() {
+MUST_USE_RESULT static Object* AllocateEmptyJSArray() {
  Object* result = AllocateJSArray();
  if (result->IsFailure()) return result;
  JSArray* result_array = JSArray::cast(result);
@@ -269,6 +269,7 @@ static void CopyElements(AssertNoAllocation* no_gc,
                         int src_index,
                         int len) {
  ASSERT(dst != src);  // Use MoveElements instead.
+  ASSERT(dst->map() != Heap::fixed_cow_array_map());
  ASSERT(len > 0);
  CopyWords(dst->data_start() + dst_index,
            src->data_start() + src_index,
@@ -286,6 +287,7 @@ static void MoveElements(AssertNoAllocation* no_gc,
                         FixedArray* src,
                         int src_index,
                         int len) {
+  ASSERT(dst->map() != Heap::fixed_cow_array_map());
  memmove(dst->data_start() + dst_index,
          src->data_start() + src_index,
          len * kPointerSize);
@@ -297,17 +299,17 @@ static void MoveElements(AssertNoAllocation* no_gc,

static void FillWithHoles(FixedArray* dst, int from, int to) {
+  ASSERT(dst->map() != Heap::fixed_cow_array_map());
  MemsetPointer(dst->data_start() + from, Heap::the_hole_value(), to - from);
}


static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
-  // For now this trick is only applied to fixed arrays in new space.
+  ASSERT(elms->map() != Heap::fixed_cow_array_map());
+  // For now this trick is only applied to fixed arrays in new and paged space.
  // In large object space the object's start must coincide with chunk
  // and thus the trick is just not applicable.
-  // In old space we do not use this trick to avoid dealing with
-  // region dirty marks.
-  ASSERT(Heap::new_space()->Contains(elms));
+  ASSERT(!Heap::lo_space()->Contains(elms));

  STATIC_ASSERT(FixedArray::kMapOffset == 0);
  STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
@@ -317,6 +319,17 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
  const int len = elms->length();

+  if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
+      !Heap::new_space()->Contains(elms)) {
+    // If we are doing a big trim in old space then we zap the space that was
+    // formerly part of the array so that the GC (aided by the card-based
+    // remembered set) won't find pointers to new-space there.
+    Object** zap = reinterpret_cast<Object**>(elms->address());
+    zap++;  // Header of filler must be at least one word so skip that.
+    for (int i = 1; i < to_trim; i++) {
+      *zap++ = Smi::FromInt(0);
+    }
+  }
+
  // Technically in new space this write might be omitted (except for
  // debug mode which iterates through the heap), but to play safer
  // we still do it.
@ -325,9 +338,8 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
former_start[to_trim] = Heap::fixed_array_map(); former_start[to_trim] = Heap::fixed_array_map();
former_start[to_trim + 1] = Smi::FromInt(len - to_trim); former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
ASSERT_EQ(elms->address() + to_trim * kPointerSize, return FixedArray::cast(HeapObject::FromAddress(
(elms + to_trim * kPointerSize)->address()); elms->address() + to_trim * kPointerSize));
return elms + to_trim * kPointerSize;
} }
@ -348,33 +360,24 @@ static bool ArrayPrototypeHasNoElements(Context* global_context,
} }
static bool IsJSArrayWithFastElements(Object* receiver, static inline Object* EnsureJSArrayWithWritableFastElements(Object* receiver) {
FixedArray** elements) { if (!receiver->IsJSArray()) return NULL;
if (!receiver->IsJSArray()) {
return false;
}
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
HeapObject* elms = HeapObject::cast(array->elements()); HeapObject* elms = HeapObject::cast(array->elements());
if (elms->map() != Heap::fixed_array_map()) { if (elms->map() == Heap::fixed_array_map()) return elms;
return false; if (elms->map() == Heap::fixed_cow_array_map()) {
return array->EnsureWritableFastElements();
} }
return NULL;
*elements = FixedArray::cast(elms);
return true;
} }
static bool IsFastElementMovingAllowed(Object* receiver, static inline bool IsJSArrayFastElementMovingAllowed(JSArray* receiver) {
FixedArray** elements) {
if (!IsJSArrayWithFastElements(receiver, elements)) return false;
Context* global_context = Top::context()->global_context(); Context* global_context = Top::context()->global_context();
JSObject* array_proto = JSObject* array_proto =
JSObject::cast(global_context->array_function()->prototype()); JSObject::cast(global_context->array_function()->prototype());
if (JSArray::cast(receiver)->GetPrototype() != array_proto) return false; return receiver->GetPrototype() == array_proto &&
return ArrayPrototypeHasNoElements(global_context, array_proto); ArrayPrototypeHasNoElements(global_context, array_proto);
} }
@ -405,10 +408,10 @@ static Object* CallJsBuiltin(const char* name,
BUILTIN(ArrayPush) { BUILTIN(ArrayPush) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
FixedArray* elms = NULL; Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
if (!IsJSArrayWithFastElements(receiver, &elms)) { if (elms_obj == NULL) return CallJsBuiltin("ArrayPush", args);
return CallJsBuiltin("ArrayPush", args); if (elms_obj->IsFailure()) return elms_obj;
} FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value(); int len = Smi::cast(array->length())->value();
@ -454,10 +457,10 @@ BUILTIN(ArrayPush) {
BUILTIN(ArrayPop) { BUILTIN(ArrayPop) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
FixedArray* elms = NULL; Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
if (!IsJSArrayWithFastElements(receiver, &elms)) { if (elms_obj == NULL) return CallJsBuiltin("ArrayPop", args);
return CallJsBuiltin("ArrayPop", args); if (elms_obj->IsFailure()) return elms_obj;
} FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value(); int len = Smi::cast(array->length())->value();
@ -483,10 +486,13 @@ BUILTIN(ArrayPop) {
BUILTIN(ArrayShift) { BUILTIN(ArrayShift) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
FixedArray* elms = NULL; Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
if (!IsFastElementMovingAllowed(receiver, &elms)) { if (elms_obj->IsFailure()) return elms_obj;
if (elms_obj == NULL ||
!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
return CallJsBuiltin("ArrayShift", args); return CallJsBuiltin("ArrayShift", args);
} }
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements()); ASSERT(array->HasFastElements());
@ -499,8 +505,8 @@ BUILTIN(ArrayShift) {
first = Heap::undefined_value(); first = Heap::undefined_value();
} }
if (Heap::new_space()->Contains(elms)) { if (!Heap::lo_space()->Contains(elms)) {
// As elms still in the same space they used to be (new space), // As elms still in the same space they used to be,
// there is no need to update region dirty mark. // there is no need to update region dirty mark.
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER); array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
} else { } else {
@ -519,10 +525,13 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) { BUILTIN(ArrayUnshift) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
FixedArray* elms = NULL; Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
if (!IsFastElementMovingAllowed(receiver, &elms)) { if (elms_obj->IsFailure()) return elms_obj;
if (elms_obj == NULL ||
!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
return CallJsBuiltin("ArrayUnshift", args); return CallJsBuiltin("ArrayUnshift", args);
} }
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements()); ASSERT(array->HasFastElements());
@ -568,10 +577,13 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) { BUILTIN(ArraySlice) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
FixedArray* elms = NULL; Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
if (!IsFastElementMovingAllowed(receiver, &elms)) { if (elms_obj->IsFailure()) return elms_obj;
if (elms_obj == NULL ||
!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
return CallJsBuiltin("ArraySlice", args); return CallJsBuiltin("ArraySlice", args);
} }
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements()); ASSERT(array->HasFastElements());
@ -637,10 +649,13 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) { BUILTIN(ArraySplice) {
Object* receiver = *args.receiver(); Object* receiver = *args.receiver();
FixedArray* elms = NULL; Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver);
if (!IsFastElementMovingAllowed(receiver, &elms)) { if (elms_obj->IsFailure()) return elms_obj;
if (elms_obj == NULL ||
!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
return CallJsBuiltin("ArraySplice", args); return CallJsBuiltin("ArraySplice", args);
} }
FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver); JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements()); ASSERT(array->HasFastElements());
@ -648,13 +663,9 @@ BUILTIN(ArraySplice) {
int n_arguments = args.length() - 1; int n_arguments = args.length() - 1;
// SpiderMonkey and JSC return undefined in the case where no // Return empty array when no arguments are supplied.
// arguments are given instead of using the implicit undefined
// arguments. This does not follow ECMA-262, but we do the same for
// compatibility.
// TraceMonkey follows ECMA-262 though.
if (n_arguments == 0) { if (n_arguments == 0) {
return Heap::undefined_value(); return AllocateEmptyJSArray();
} }
int relative_start = 0; int relative_start = 0;
@ -717,7 +728,7 @@ BUILTIN(ArraySplice) {
if (item_count < actual_delete_count) { if (item_count < actual_delete_count) {
// Shrink the array. // Shrink the array.
const bool trim_array = Heap::new_space()->Contains(elms) && const bool trim_array = !Heap::lo_space()->Contains(elms) &&
((actual_start + item_count) < ((actual_start + item_count) <
(len - actual_delete_count - actual_start)); (len - actual_delete_count - actual_start));
if (trim_array) { if (trim_array) {
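LeftTrimFixedArray drops elements from the front without copying: it writes a relocated header to_trim words into the backing store, zaps the vacated words, and returns a pointer to the new start. A plain-memory sketch of the same idea, with raw ints standing in for tagged heap words (the two-word map/length header mirrors FixedArray's layout):

#include <cassert>
#include <cstdio>

// Layout mimic: word 0 = "map", word 1 = length, then the elements.
int* LeftTrim(int* array, int to_trim) {
  int len = array[1];
  assert(to_trim > 0 && to_trim < len);
  // Zap the vacated words so a scanner won't mistake them for pointers,
  // mirroring the Smi::FromInt(0) zapping added for old-space arrays;
  // word 0 is skipped because the filler header needs at least one word.
  for (int i = 1; i < to_trim; i++) array[i] = 0;
  array[to_trim] = array[0];           // relocated "map" word
  array[to_trim + 1] = len - to_trim;  // new length
  return array + to_trim;              // array now starts to_trim words later
}

int main() {
  int backing[10] = {/*map*/ 7, /*len*/ 8, 1, 2, 3, 4, 5, 6, 7, 8};
  int* trimmed = LeftTrim(backing, 2);
  std::printf("len=%d first=%d\n", trimmed[1], trimmed[2]);  // len=6 first=3
}

The payoff is O(1) shift/splice on the fast path: no element is moved, only two header words are written.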

7
deps/v8/src/builtins.h

@@ -117,7 +117,10 @@ enum BuiltinExtraArguments {
V(FunctionApply, BUILTIN, UNINITIALIZED) \
\
V(ArrayCode, BUILTIN, UNINITIALIZED) \
-V(ArrayConstructCode, BUILTIN, UNINITIALIZED)
+V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \
+\
+V(StringConstructCode, BUILTIN, UNINITIALIZED)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@@ -258,6 +261,8 @@ class Builtins : public AllStatic {
static void Generate_ArrayCode(MacroAssembler* masm);
static void Generate_ArrayConstructCode(MacroAssembler* masm);
+static void Generate_StringConstructCode(MacroAssembler* masm);
};
} }  // namespace v8::internal
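The new V(StringConstructCode, ...) entry feeds V8's builtin list macro, which is expanded elsewhere (for instance to build the Builtins enum); the matching Generate_* declaration above is added by hand. A compressed, hypothetical sketch of driving two expansions from one such X-macro list (the real entries carry extra arguments, as seen above):

// Hypothetical three-builtin list.
#define BUILTIN_LIST(V) V(ArrayCode) V(ArrayConstructCode) V(StringConstructCode)

// Expansion 1: one enumerator per builtin.
enum class BuiltinId {
#define DEF_ENUM(name) k##name,
  BUILTIN_LIST(DEF_ENUM)
#undef DEF_ENUM
  kCount
};

// Expansion 2: one code-generator declaration per builtin.
struct MacroAssembler;
struct Builtins {
#define DEF_GEN(name) static void Generate_##name(MacroAssembler* masm);
  BUILTIN_LIST(DEF_GEN)
#undef DEF_GEN
};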

12
deps/v8/src/char-predicates-inl.h

@@ -34,6 +34,14 @@ namespace v8 {
namespace internal {
+// If c is in 'A'-'Z' or 'a'-'z', return its lower-case.
+// Else, return something outside of 'A'-'Z' and 'a'-'z'.
+// Note: it ignores LOCALE.
+inline int AsciiAlphaToLower(uc32 c) {
+  return c | 0x20;
+}
inline bool IsCarriageReturn(uc32 c) {
return c == 0x000D;
}
@@ -59,12 +67,12 @@ inline bool IsDecimalDigit(uc32 c) {
inline bool IsHexDigit(uc32 c) {
// ECMA-262, 3rd, 7.6 (p 15)
-return IsDecimalDigit(c) || IsInRange(c | 0x20, 'a', 'f');
+return IsDecimalDigit(c) || IsInRange(AsciiAlphaToLower(c), 'a', 'f');
}
inline bool IsRegExpWord(uc16 c) {
-return IsInRange(c | 0x20, 'a', 'z')
+return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
|| IsDecimalDigit(c)
|| (c == '_');
}
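The trick behind AsciiAlphaToLower: the ASCII upper- and lower-case letter ranges differ only in bit 5, so `c | 0x20` maps 'A'-'Z' onto 'a'-'z' while leaving lower-case letters unchanged; non-letters come out somewhere outside both ranges, which the subsequent IsInRange check rejects. A quick self-contained demonstration:

#include <cstdio>

inline int AsciiAlphaToLower(int c) { return c | 0x20; }

int main() {
  std::printf("%c\n", AsciiAlphaToLower('A'));   // 'a' (0x41 | 0x20 == 0x61)
  std::printf("%c\n", AsciiAlphaToLower('a'));   // 'a' (bit 5 already set)
  std::printf("%#x\n", AsciiAlphaToLower('5'));  // 0x35: digit unchanged, outside 'a'..'z'
  std::printf("%#x\n", AsciiAlphaToLower('@'));  // 0x60: outside 'a'..'z', so rejected
}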

5
deps/v8/src/circular-queue.cc

@@ -47,8 +47,9 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
producer_consumer_distance_(2 * chunk_size_),
buffer_(NewArray<Cell>(buffer_size_ + 1)) {
ASSERT(buffer_size_in_chunks > 2);
-// Only need to keep the first cell of a chunk clean.
-for (int i = 0; i < buffer_size_; i += chunk_size_) {
+// Clean up the whole buffer to avoid encountering a random kEnd
+// while enqueuing.
+for (int i = 0; i < buffer_size_; ++i) {
buffer_[i] = kClear;
}
buffer_[buffer_size_] = kEnd;

622
deps/v8/src/code-stubs.h

@@ -29,6 +29,7 @@
#define V8_CODE_STUBS_H_
#include "globals.h"
+#include "macro-assembler.h"
namespace v8 {
namespace internal {
@@ -80,6 +81,14 @@ namespace internal {
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
+// Types of uncatchable exceptions.
+enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
// Stub is base classes of all stubs.
class CodeStub BASE_EMBEDDED {
 public:
@@ -101,10 +110,16 @@ class CodeStub BASE_EMBEDDED {
static Major MajorKeyFromKey(uint32_t key) {
  return static_cast<Major>(MajorKeyBits::decode(key));
-};
+}
static int MinorKeyFromKey(uint32_t key) {
  return MinorKeyBits::decode(key);
-};
+}
+// Gets the major key from a code object that is a code stub or binary op IC.
+static Major GetMajorKey(Code* code_stub) {
+  return static_cast<Major>(code_stub->major_key());
+}
static const char* MajorName(Major major_key, bool allow_unknown_keys);
virtual ~CodeStub() {}
@@ -172,6 +187,609 @@ class CodeStub BASE_EMBEDDED {
friend class BreakPointIterator;
};
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
public:
virtual ~RuntimeCallHelper() {}
virtual void BeforeCall(MacroAssembler* masm) const = 0;
virtual void AfterCall(MacroAssembler* masm) const = 0;
protected:
RuntimeCallHelper() {}
private:
DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
};
} } // namespace v8::internal
#if V8_TARGET_ARCH_IA32
#include "ia32/code-stubs-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/code-stubs-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/code-stubs-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/code-stubs-mips.h"
#else
#error Unsupported target architecture.
#endif
namespace v8 {
namespace internal {
// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
// newly created internal frame before/after the runtime call.
class ICRuntimeCallHelper : public RuntimeCallHelper {
public:
ICRuntimeCallHelper() {}
virtual void BeforeCall(MacroAssembler* masm) const;
virtual void AfterCall(MacroAssembler* masm) const;
};
// Trivial RuntimeCallHelper implementation.
class NopRuntimeCallHelper : public RuntimeCallHelper {
public:
NopRuntimeCallHelper() {}
virtual void BeforeCall(MacroAssembler* masm) const {}
virtual void AfterCall(MacroAssembler* masm) const {}
};
class StackCheckStub : public CodeStub {
public:
StackCheckStub() { }
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "StackCheckStub"; }
Major MajorKey() { return StackCheck; }
int MinorKey() { return 0; }
};
class FastNewClosureStub : public CodeStub {
public:
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "FastNewClosureStub"; }
Major MajorKey() { return FastNewClosure; }
int MinorKey() { return 0; }
};
class FastNewContextStub : public CodeStub {
public:
static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
ASSERT(slots_ > 0 && slots <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
private:
int slots_;
const char* GetName() { return "FastNewContextStub"; }
Major MajorKey() { return FastNewContext; }
int MinorKey() { return slots_; }
};
class FastCloneShallowArrayStub : public CodeStub {
public:
// Maximum length of copied elements array.
static const int kMaximumClonedLength = 8;
enum Mode {
CLONE_ELEMENTS,
COPY_ON_WRITE_ELEMENTS
};
FastCloneShallowArrayStub(Mode mode, int length)
: mode_(mode),
length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
ASSERT(length_ >= 0);
ASSERT(length_ <= kMaximumClonedLength);
}
void Generate(MacroAssembler* masm);
private:
Mode mode_;
int length_;
const char* GetName() { return "FastCloneShallowArrayStub"; }
Major MajorKey() { return FastCloneShallowArray; }
int MinorKey() {
ASSERT(mode_ == 0 || mode_ == 1);
return (length_ << 1) | mode_;
}
};
class InstanceofStub: public CodeStub {
public:
InstanceofStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return Instanceof; }
int MinorKey() { return 0; }
};
enum NegativeZeroHandling {
kStrictNegativeZero,
kIgnoreNegativeZero
};
class GenericUnaryOpStub : public CodeStub {
public:
GenericUnaryOpStub(Token::Value op,
UnaryOverwriteMode overwrite,
NegativeZeroHandling negative_zero = kStrictNegativeZero)
: op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
private:
Token::Value op_;
UnaryOverwriteMode overwrite_;
NegativeZeroHandling negative_zero_;
class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
Major MajorKey() { return GenericUnaryOp; }
int MinorKey() {
return OpField::encode(op_) |
OverwriteField::encode(overwrite_) |
NegativeZeroField::encode(negative_zero_);
}
void Generate(MacroAssembler* masm);
const char* GetName();
};
enum NaNInformation {
kBothCouldBeNaN,
kCantBothBeNaN
};
class CompareStub: public CodeStub {
public:
CompareStub(Condition cc,
bool strict,
NaNInformation nan_info = kBothCouldBeNaN,
bool include_number_compare = true,
Register lhs = no_reg,
Register rhs = no_reg) :
cc_(cc),
strict_(strict),
never_nan_nan_(nan_info == kCantBothBeNaN),
include_number_compare_(include_number_compare),
lhs_(lhs),
rhs_(rhs),
name_(NULL) { }
void Generate(MacroAssembler* masm);
private:
Condition cc_;
bool strict_;
// Only used for 'equal' comparisons. Tells the stub that we already know
// that at least one side of the comparison is not NaN. This allows the
// stub to use object identity in the positive case. We ignore it when
// generating the minor key for other comparisons to avoid creating more
// stubs.
bool never_nan_nan_;
// Do generate the number comparison code in the stub. Stubs without number
// comparison code is used when the number comparison has been inlined, and
// the stub will be called if one of the operands is not a number.
bool include_number_compare_;
// Register holding the left hand side of the comparison if the stub gives
// a choice, no_reg otherwise.
Register lhs_;
// Register holding the right hand side of the comparison if the stub gives
// a choice, no_reg otherwise.
Register rhs_;
// Encoding of the minor key CCCCCCCCCCCCRCNS.
class StrictField: public BitField<bool, 0, 1> {};
class NeverNanNanField: public BitField<bool, 1, 1> {};
class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
class RegisterField: public BitField<bool, 3, 1> {};
class ConditionField: public BitField<int, 4, 12> {};
Major MajorKey() { return Compare; }
int MinorKey();
// Branch to the label if the given object isn't a symbol.
void BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch);
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s), "
"(never_nan_nan %s), (number_compare %s) ",
static_cast<int>(cc_),
strict_ ? "true" : "false",
never_nan_nan_ ? "true" : "false",
include_number_compare_ ? "included" : "not included");
if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
} else {
PrintF("\n");
}
}
#endif
};
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size) : result_size_(result_size) { }
void Generate(MacroAssembler* masm);
private:
void GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope,
int alignment_skew = 0);
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type);
// Number of pointers/values returned.
const int result_size_;
Major MajorKey() { return CEntry; }
// Minor key must differ if different result_size_ values means different
// code is generated.
int MinorKey();
const char* GetName() { return "CEntryStub"; }
};
class ApiGetterEntryStub : public CodeStub {
public:
ApiGetterEntryStub(Handle<AccessorInfo> info,
ApiFunction* fun)
: info_(info),
fun_(fun) { }
void Generate(MacroAssembler* masm);
virtual bool has_custom_cache() { return true; }
virtual bool GetCustomCache(Code** code_out);
virtual void SetCustomCache(Code* value);
static const int kStackSpace = 5;
static const int kArgc = 4;
private:
Handle<AccessorInfo> info() { return info_; }
ApiFunction* fun() { return fun_; }
Major MajorKey() { return NoCache; }
int MinorKey() { return 0; }
const char* GetName() { return "ApiEntryStub"; }
// The accessor info associated with the function.
Handle<AccessorInfo> info_;
// The function to be called.
ApiFunction* fun_;
};
class JSEntryStub : public CodeStub {
public:
JSEntryStub() { }
void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
protected:
void GenerateBody(MacroAssembler* masm, bool is_construct);
private:
Major MajorKey() { return JSEntry; }
int MinorKey() { return 0; }
const char* GetName() { return "JSEntryStub"; }
};
class JSConstructEntryStub : public JSEntryStub {
public:
JSConstructEntryStub() { }
void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
private:
int MinorKey() { return 1; }
const char* GetName() { return "JSConstructEntryStub"; }
};
class ArgumentsAccessStub: public CodeStub {
public:
enum Type {
READ_ELEMENT,
NEW_OBJECT
};
explicit ArgumentsAccessStub(Type type) : type_(type) { }
private:
Type type_;
Major MajorKey() { return ArgumentsAccess; }
int MinorKey() { return type_; }
void Generate(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewObject(MacroAssembler* masm);
const char* GetName() { return "ArgumentsAccessStub"; }
#ifdef DEBUG
void Print() {
PrintF("ArgumentsAccessStub (type %d)\n", type_);
}
#endif
};
class RegExpExecStub: public CodeStub {
public:
RegExpExecStub() { }
private:
Major MajorKey() { return RegExpExec; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "RegExpExecStub"; }
#ifdef DEBUG
void Print() {
PrintF("RegExpExecStub\n");
}
#endif
};
class CallFunctionStub: public CodeStub {
public:
CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
: argc_(argc), in_loop_(in_loop), flags_(flags) { }
void Generate(MacroAssembler* masm);
private:
int argc_;
InLoopFlag in_loop_;
CallFunctionFlags flags_;
#ifdef DEBUG
void Print() {
PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
argc_,
static_cast<int>(in_loop_),
static_cast<int>(flags_));
}
#endif
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
class ArgcBits: public BitField<int, 2, 32 - 2> {};
Major MajorKey() { return CallFunction; }
int MinorKey() {
// Encode the parameters in a unique 32 bit value.
return InLoopBits::encode(in_loop_)
| FlagBits::encode(flags_)
| ArgcBits::encode(argc_);
}
InLoopFlag InLoop() { return in_loop_; }
bool ReceiverMightBeValue() {
return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
}
public:
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
};
enum StringIndexFlags {
// Accepts smis or heap numbers.
STRING_INDEX_IS_NUMBER,
// Accepts smis or heap numbers that are valid array indices
// (ECMA-262 15.4). Invalid indices are reported as being out of
// range.
STRING_INDEX_IS_ARRAY_INDEX
};
// Generates code implementing String.prototype.charCodeAt.
//
// Only supports the case when the receiver is a string and the index
// is a number (smi or heap number) that is a valid index into the
// string. Additional index constraints are specified by the
// flags. Otherwise, bails out to the provided labels.
//
// Register usage: |object| may be changed to another string in a way
// that doesn't affect charCodeAt/charAt semantics, |index| is
// preserved, |scratch| and |result| are clobbered.
class StringCharCodeAtGenerator {
public:
StringCharCodeAtGenerator(Register object,
Register index,
Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_number,
Label* index_out_of_range,
StringIndexFlags index_flags)
: object_(object),
index_(index),
scratch_(scratch),
result_(result),
receiver_not_string_(receiver_not_string),
index_not_number_(index_not_number),
index_out_of_range_(index_out_of_range),
index_flags_(index_flags) {
ASSERT(!scratch_.is(object_));
ASSERT(!scratch_.is(index_));
ASSERT(!scratch_.is(result_));
ASSERT(!result_.is(object_));
ASSERT(!result_.is(index_));
}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
void GenerateFast(MacroAssembler* masm);
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
private:
Register object_;
Register index_;
Register scratch_;
Register result_;
Label* receiver_not_string_;
Label* index_not_number_;
Label* index_out_of_range_;
StringIndexFlags index_flags_;
Label call_runtime_;
Label index_not_smi_;
Label got_smi_index_;
Label exit_;
DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
};
// Generates code for creating a one-char string from a char code.
class StringCharFromCodeGenerator {
public:
StringCharFromCodeGenerator(Register code,
Register result)
: code_(code),
result_(result) {
ASSERT(!code_.is(result_));
}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
void GenerateFast(MacroAssembler* masm);
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
private:
Register code_;
Register result_;
Label slow_case_;
Label exit_;
DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
};
// Generates code implementing String.prototype.charAt.
//
// Only supports the case when the receiver is a string and the index
// is a number (smi or heap number) that is a valid index into the
// string. Additional index constraints are specified by the
// flags. Otherwise, bails out to the provided labels.
//
// Register usage: |object| may be changed to another string in a way
// that doesn't affect charCodeAt/charAt semantics, |index| is
// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
class StringCharAtGenerator {
public:
StringCharAtGenerator(Register object,
Register index,
Register scratch1,
Register scratch2,
Register result,
Label* receiver_not_string,
Label* index_not_number,
Label* index_out_of_range,
StringIndexFlags index_flags)
: char_code_at_generator_(object,
index,
scratch1,
scratch2,
receiver_not_string,
index_not_number,
index_out_of_range,
index_flags),
char_from_code_generator_(scratch2, result) {}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
void GenerateFast(MacroAssembler* masm);
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
private:
StringCharCodeAtGenerator char_code_at_generator_;
StringCharFromCodeGenerator char_from_code_generator_;
DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
};
} }  // namespace v8::internal
#endif  // V8_CODE_STUBS_H_
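Several of the stubs moved into this header pack their parameters into the 32-bit minor key with BitField<type, shift, size> helpers (see GenericUnaryOpStub and CompareStub above). A self-contained sketch of that encode/decode scheme (simplified; V8's BitField also range-checks values):

#include <cassert>
#include <cstdint>

// Minimal BitField: a value of T stored in `size` bits starting at `shift`.
template <class T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// Mirrors GenericUnaryOpStub's layout: 1 overwrite bit, 1 negative-zero bit,
// then the op token in the remaining bits.
using OverwriteField = BitField<bool, 0, 1>;
using NegativeZeroField = BitField<bool, 1, 1>;
using OpField = BitField<int, 2, 30>;

int main() {
  uint32_t key = OpField::encode(42) | OverwriteField::encode(true) |
                 NegativeZeroField::encode(false);
  assert(OpField::decode(key) == 42);
  assert(OverwriteField::decode(key));
}

Because the key is a plain integer, stubs with identical parameters share one compiled code object: the (major, minor) pair is the cache key.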

27
deps/v8/src/codegen.cc

@@ -339,6 +339,11 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
}
+void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
+  UNREACHABLE();
+}
// List of special runtime calls which are generated inline. For some of these
// functions the code will be generated inline, and for others a call to a code
// stub will be inlined.
@@ -380,21 +385,6 @@ bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
}
-bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
-    const CodeGenerator::InlineRuntimeLUT& new_entry,
-    CodeGenerator::InlineRuntimeLUT* old_entry) {
-  InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
-  if (entry == NULL) return false;
-  if (old_entry != NULL) {
-    old_entry->name = entry->name;
-    old_entry->method = entry->method;
-  }
-  entry->name = new_entry.name;
-  entry->method = new_entry.method;
-  return true;
-}
int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle<String> name) {
CodeGenerator::InlineRuntimeLUT* f =
CodeGenerator::FindInlineRuntimeLUT(name);
@@ -496,12 +486,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
int CEntryStub::MinorKey() {
-ASSERT(result_size_ <= 2);
+ASSERT(result_size_ == 1 || result_size_ == 2);
#ifdef _WIN64
-return ExitFrameModeBits::encode(mode_)
-    | IndirectResultBits::encode(result_size_ > 1);
+return result_size_ == 1 ? 0 : 1;
#else
-return ExitFrameModeBits::encode(mode_);
+return 0;
#endif
}

619
deps/v8/src/codegen.h

@@ -64,7 +64,6 @@
// DeclareGlobals
// FindInlineRuntimeLUT
// CheckForInlineRuntimeCall
-// PatchInlineRuntimeEntry
// AnalyzeCondition
// CodeForFunctionPosition
// CodeForReturnPosition
@@ -73,13 +72,6 @@
// CodeForSourcePosition
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };
-// Types of uncatchable exceptions.
-enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
#define INLINE_RUNTIME_FUNCTION_LIST(F) \
F(IsSmi, 1, 1) \
F(IsNonNegativeSmi, 1, 1) \
@@ -108,6 +100,7 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
F(StringCompare, 2, 1) \
F(RegExpExec, 4, 1) \
F(RegExpConstructResult, 3, 1) \
+F(RegExpCloneResult, 1, 1) \
F(GetFromCache, 2, 1) \
F(NumberToString, 1, 1) \
F(SwapElements, 3, 1) \
@@ -115,7 +108,9 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
F(MathSin, 1, 1) \
F(MathCos, 1, 1) \
F(MathSqrt, 1, 1) \
-F(IsRegExpEquivalent, 2, 1)
+F(IsRegExpEquivalent, 2, 1) \
+F(HasCachedArrayIndex, 1, 1) \
+F(GetCachedArrayIndex, 1, 1)
#if V8_TARGET_ARCH_IA32
@@ -135,29 +130,6 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
namespace v8 {
namespace internal {
-// Support for "structured" code comments.
-#ifdef DEBUG
-class Comment BASE_EMBEDDED {
- public:
-  Comment(MacroAssembler* masm, const char* msg);
-  ~Comment();
- private:
-  MacroAssembler* masm_;
-  const char* msg_;
-};
-#else
-class Comment BASE_EMBEDDED {
- public:
-  Comment(MacroAssembler*, const char*) {}
-};
-#endif  // DEBUG
// Code generation can be nested. Code generation scopes form a stack
// of active code generators.
class CodeGeneratorScope BASE_EMBEDDED {
@@ -181,6 +153,7 @@ class CodeGeneratorScope BASE_EMBEDDED {
CodeGenerator* previous_;
};
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.
@@ -229,23 +202,6 @@ class FrameRegisterState {
#endif
-// Helper interface to prepare to/restore after making runtime calls.
-class RuntimeCallHelper {
- public:
-  virtual ~RuntimeCallHelper() {}
-  virtual void BeforeCall(MacroAssembler* masm) const = 0;
-  virtual void AfterCall(MacroAssembler* masm) const = 0;
- protected:
-  RuntimeCallHelper() {}
- private:
-  DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
-};
// RuntimeCallHelper implementation that saves/restores state of a
// virtual frame.
class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
@@ -263,29 +219,6 @@ class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
};
-// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
-// newly created internal frame before/after the runtime call.
-class ICRuntimeCallHelper : public RuntimeCallHelper {
- public:
-  ICRuntimeCallHelper() {}
-  virtual void BeforeCall(MacroAssembler* masm) const;
-  virtual void AfterCall(MacroAssembler* masm) const;
-};
-// Trivial RuntimeCallHelper implementation.
-class NopRuntimeCallHelper : public RuntimeCallHelper {
- public:
-  NopRuntimeCallHelper() {}
-  virtual void BeforeCall(MacroAssembler* masm) const {}
-  virtual void AfterCall(MacroAssembler* masm) const {}
-};
// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
@@ -348,547 +281,7 @@ class DeferredCode: public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(DeferredCode);
};
class StackCheckStub : public CodeStub {
public:
StackCheckStub() { }
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "StackCheckStub"; }
Major MajorKey() { return StackCheck; }
int MinorKey() { return 0; }
};
class FastNewClosureStub : public CodeStub {
public:
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "FastNewClosureStub"; }
Major MajorKey() { return FastNewClosure; }
int MinorKey() { return 0; }
};
class FastNewContextStub : public CodeStub {
public:
static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
ASSERT(slots_ > 0 && slots <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
private:
int slots_;
const char* GetName() { return "FastNewContextStub"; }
Major MajorKey() { return FastNewContext; }
int MinorKey() { return slots_; }
};
class FastCloneShallowArrayStub : public CodeStub {
public:
static const int kMaximumLength = 8;
explicit FastCloneShallowArrayStub(int length) : length_(length) {
ASSERT(length >= 0 && length <= kMaximumLength);
}
void Generate(MacroAssembler* masm);
private:
int length_;
const char* GetName() { return "FastCloneShallowArrayStub"; }
Major MajorKey() { return FastCloneShallowArray; }
int MinorKey() { return length_; }
};
class InstanceofStub: public CodeStub {
public:
InstanceofStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return Instanceof; }
int MinorKey() { return 0; }
};
enum NegativeZeroHandling {
kStrictNegativeZero,
kIgnoreNegativeZero
};
class GenericUnaryOpStub : public CodeStub {
public:
GenericUnaryOpStub(Token::Value op,
UnaryOverwriteMode overwrite,
NegativeZeroHandling negative_zero = kStrictNegativeZero)
: op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
private:
Token::Value op_;
UnaryOverwriteMode overwrite_;
NegativeZeroHandling negative_zero_;
class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
Major MajorKey() { return GenericUnaryOp; }
int MinorKey() {
return OpField::encode(op_) |
OverwriteField::encode(overwrite_) |
NegativeZeroField::encode(negative_zero_);
}
void Generate(MacroAssembler* masm);
const char* GetName();
};
enum NaNInformation {
kBothCouldBeNaN,
kCantBothBeNaN
};
class CompareStub: public CodeStub {
public:
CompareStub(Condition cc,
bool strict,
NaNInformation nan_info = kBothCouldBeNaN,
bool include_number_compare = true,
Register lhs = no_reg,
Register rhs = no_reg) :
cc_(cc),
strict_(strict),
never_nan_nan_(nan_info == kCantBothBeNaN),
include_number_compare_(include_number_compare),
lhs_(lhs),
rhs_(rhs),
name_(NULL) { }
void Generate(MacroAssembler* masm);
private:
Condition cc_;
bool strict_;
// Only used for 'equal' comparisons. Tells the stub that we already know
// that at least one side of the comparison is not NaN. This allows the
// stub to use object identity in the positive case. We ignore it when
// generating the minor key for other comparisons to avoid creating more
// stubs.
bool never_nan_nan_;
// Do generate the number comparison code in the stub. Stubs without number
// comparison code is used when the number comparison has been inlined, and
// the stub will be called if one of the operands is not a number.
bool include_number_compare_;
// Register holding the left hand side of the comparison if the stub gives
// a choice, no_reg otherwise.
Register lhs_;
// Register holding the right hand side of the comparison if the stub gives
// a choice, no_reg otherwise.
Register rhs_;
// Encoding of the minor key CCCCCCCCCCCCRCNS.
class StrictField: public BitField<bool, 0, 1> {};
class NeverNanNanField: public BitField<bool, 1, 1> {};
class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
class RegisterField: public BitField<bool, 3, 1> {};
class ConditionField: public BitField<int, 4, 12> {};
Major MajorKey() { return Compare; }
int MinorKey();
// Branch to the label if the given object isn't a symbol.
void BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch);
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s), "
"(never_nan_nan %s), (number_compare %s) ",
static_cast<int>(cc_),
strict_ ? "true" : "false",
never_nan_nan_ ? "true" : "false",
include_number_compare_ ? "included" : "not included");
if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code());
} else {
PrintF("\n");
}
}
#endif
};
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size,
ExitFrame::Mode mode = ExitFrame::MODE_NORMAL)
: result_size_(result_size), mode_(mode) { }
void Generate(MacroAssembler* masm);
private:
void GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope,
int alignment_skew = 0);
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type);
// Number of pointers/values returned.
const int result_size_;
const ExitFrame::Mode mode_;
// Minor key encoding
class ExitFrameModeBits: public BitField<ExitFrame::Mode, 0, 1> {};
class IndirectResultBits: public BitField<bool, 1, 1> {};
Major MajorKey() { return CEntry; }
// Minor key must differ if different result_size_ values means different
// code is generated.
int MinorKey();
const char* GetName() { return "CEntryStub"; }
};
class ApiGetterEntryStub : public CodeStub {
public:
ApiGetterEntryStub(Handle<AccessorInfo> info,
ApiFunction* fun)
: info_(info),
fun_(fun) { }
void Generate(MacroAssembler* masm);
virtual bool has_custom_cache() { return true; }
virtual bool GetCustomCache(Code** code_out);
virtual void SetCustomCache(Code* value);
static const int kStackSpace = 5;
static const int kArgc = 4;
private:
Handle<AccessorInfo> info() { return info_; }
ApiFunction* fun() { return fun_; }
Major MajorKey() { return NoCache; }
int MinorKey() { return 0; }
const char* GetName() { return "ApiEntryStub"; }
// The accessor info associated with the function.
Handle<AccessorInfo> info_;
// The function to be called.
ApiFunction* fun_;
};
class JSEntryStub : public CodeStub {
public:
JSEntryStub() { }
void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
protected:
void GenerateBody(MacroAssembler* masm, bool is_construct);
private:
Major MajorKey() { return JSEntry; }
int MinorKey() { return 0; }
const char* GetName() { return "JSEntryStub"; }
};
class JSConstructEntryStub : public JSEntryStub {
public:
JSConstructEntryStub() { }
void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
private:
int MinorKey() { return 1; }
const char* GetName() { return "JSConstructEntryStub"; }
};
class ArgumentsAccessStub: public CodeStub {
public:
enum Type {
READ_ELEMENT,
NEW_OBJECT
};
explicit ArgumentsAccessStub(Type type) : type_(type) { }
private:
Type type_;
Major MajorKey() { return ArgumentsAccess; }
int MinorKey() { return type_; }
void Generate(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewObject(MacroAssembler* masm);
const char* GetName() { return "ArgumentsAccessStub"; }
#ifdef DEBUG
void Print() {
PrintF("ArgumentsAccessStub (type %d)\n", type_);
}
#endif
};
class RegExpExecStub: public CodeStub {
public:
RegExpExecStub() { }
private:
Major MajorKey() { return RegExpExec; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "RegExpExecStub"; }
#ifdef DEBUG
void Print() {
PrintF("RegExpExecStub\n");
}
#endif
};
class CallFunctionStub: public CodeStub {
public:
CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
: argc_(argc), in_loop_(in_loop), flags_(flags) { }
void Generate(MacroAssembler* masm);
private:
int argc_;
InLoopFlag in_loop_;
CallFunctionFlags flags_;
#ifdef DEBUG
void Print() {
PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n",
argc_,
static_cast<int>(in_loop_),
static_cast<int>(flags_));
}
#endif
// Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
class InLoopBits: public BitField<InLoopFlag, 0, 1> {};
class FlagBits: public BitField<CallFunctionFlags, 1, 1> {};
class ArgcBits: public BitField<int, 2, 32 - 2> {};
Major MajorKey() { return CallFunction; }
int MinorKey() {
// Encode the parameters in a unique 32 bit value.
return InLoopBits::encode(in_loop_)
| FlagBits::encode(flags_)
| ArgcBits::encode(argc_);
}
InLoopFlag InLoop() { return in_loop_; }
bool ReceiverMightBeValue() {
return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
}
public:
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
};
enum StringIndexFlags {
// Accepts smis or heap numbers.
STRING_INDEX_IS_NUMBER,
// Accepts smis or heap numbers that are valid array indices
// (ECMA-262 15.4). Invalid indices are reported as being out of
// range.
STRING_INDEX_IS_ARRAY_INDEX
};
// Generates code implementing String.prototype.charCodeAt.
//
// Only supports the case when the receiver is a string and the index
// is a number (smi or heap number) that is a valid index into the
// string. Additional index constraints are specified by the
// flags. Otherwise, bails out to the provided labels.
//
// Register usage: |object| may be changed to another string in a way
// that doesn't affect charCodeAt/charAt semantics, |index| is
// preserved, |scratch| and |result| are clobbered.
class StringCharCodeAtGenerator {
public:
StringCharCodeAtGenerator(Register object,
Register index,
Register scratch,
Register result,
Label* receiver_not_string,
Label* index_not_number,
Label* index_out_of_range,
StringIndexFlags index_flags)
: object_(object),
index_(index),
scratch_(scratch),
result_(result),
receiver_not_string_(receiver_not_string),
index_not_number_(index_not_number),
index_out_of_range_(index_out_of_range),
index_flags_(index_flags) {
ASSERT(!scratch_.is(object_));
ASSERT(!scratch_.is(index_));
ASSERT(!scratch_.is(result_));
ASSERT(!result_.is(object_));
ASSERT(!result_.is(index_));
}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
void GenerateFast(MacroAssembler* masm);
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
private:
Register object_;
Register index_;
Register scratch_;
Register result_;
Label* receiver_not_string_;
Label* index_not_number_;
Label* index_out_of_range_;
StringIndexFlags index_flags_;
Label call_runtime_;
Label index_not_smi_;
Label got_smi_index_;
Label exit_;
DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
};
// Generates code for creating a one-char string from a char code.
class StringCharFromCodeGenerator {
public:
StringCharFromCodeGenerator(Register code,
Register result)
: code_(code),
result_(result) {
ASSERT(!code_.is(result_));
}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
void GenerateFast(MacroAssembler* masm);
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
private:
Register code_;
Register result_;
Label slow_case_;
Label exit_;
DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
};
// Generates code implementing String.prototype.charAt.
//
// Only supports the case when the receiver is a string and the index
// is a number (smi or heap number) that is a valid index into the
// string. Additional index constraints are specified by the
// flags. Otherwise, bails out to the provided labels.
//
// Register usage: |object| may be changed to another string in a way
// that doesn't affect charCodeAt/charAt semantics, |index| is
// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
class StringCharAtGenerator {
public:
StringCharAtGenerator(Register object,
Register index,
Register scratch1,
Register scratch2,
Register result,
Label* receiver_not_string,
Label* index_not_number,
Label* index_out_of_range,
StringIndexFlags index_flags)
: char_code_at_generator_(object,
index,
scratch1,
scratch2,
receiver_not_string,
index_not_number,
index_out_of_range,
index_flags),
char_from_code_generator_(scratch2, result) {}
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
void GenerateFast(MacroAssembler* masm);
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
private:
StringCharCodeAtGenerator char_code_at_generator_;
StringCharFromCodeGenerator char_from_code_generator_;
DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
};
-}  // namespace internal
-}  // namespace v8
+} }  // namespace v8::internal
#endif  // V8_CODEGEN_H_

45
deps/v8/src/compilation-cache.cc

@@ -79,10 +79,9 @@ class CompilationSubCache {
// young generation.
void Age();
-bool HasFunction(SharedFunctionInfo* function_info);
// GC support.
void Iterate(ObjectVisitor* v);
+void IterateFunctions(ObjectVisitor* v);
// Clear this sub-cache evicting all its content.
void Clear();
@@ -206,27 +205,6 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
}
-bool CompilationSubCache::HasFunction(SharedFunctionInfo* function_info) {
-  if (function_info->script()->IsUndefined() ||
-      Script::cast(function_info->script())->source()->IsUndefined()) {
-    return false;
-  }
-  String* source =
-      String::cast(Script::cast(function_info->script())->source());
-  // Check all generations.
-  for (int generation = 0; generation < generations(); generation++) {
-    if (tables_[generation]->IsUndefined()) continue;
-    CompilationCacheTable* table =
-        CompilationCacheTable::cast(tables_[generation]);
-    Object* object = table->Lookup(source);
-    if (object->IsSharedFunctionInfo()) return true;
-  }
-  return false;
-}
void CompilationSubCache::Age() {
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {
@@ -238,6 +216,16 @@ void CompilationSubCache::Age() {
}
+void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
+  Object* undefined = Heap::raw_unchecked_undefined_value();
+  for (int i = 0; i < generations_; i++) {
+    if (tables_[i] != undefined) {
+      reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
+    }
+  }
+}
void CompilationSubCache::Iterate(ObjectVisitor* v) {
v->VisitPointers(&tables_[0], &tables_[generations_]);
}
@@ -528,15 +516,16 @@ void CompilationCache::Clear() {
}
}
-bool CompilationCache::HasFunction(SharedFunctionInfo* function_info) {
-  return script.HasFunction(function_info);
-}
void CompilationCache::Iterate(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches[i]->Iterate(v);
}
}
+void CompilationCache::IterateFunctions(ObjectVisitor* v) {
+  for (int i = 0; i < kSubCacheCount; i++) {
+    subcaches[i]->IterateFunctions(v);
+  }
+}
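The sub-caches are generational: Age() shifts every table one generation older (implicitly dropping the oldest), and the new IterateFunctions walks only the generations that are still populated. A small standalone sketch of that rotation, with a vector of optionals standing in for the table slots:

#include <cstdio>
#include <optional>
#include <vector>

struct SubCache {
  // tables_[0] is the youngest generation; empty optionals are "undefined".
  std::vector<std::optional<int>> tables_;

  explicit SubCache(int generations) : tables_(generations) {}

  // Mirrors CompilationSubCache::Age(): shift everything one slot older.
  void Age() {
    for (std::size_t i = tables_.size() - 1; i > 0; i--)
      tables_[i] = tables_[i - 1];
    tables_[0].reset();  // fresh, empty young generation
  }

  // Mirrors IterateFunctions(): visit only generations that exist.
  template <class Visitor>
  void IterateFunctions(Visitor v) {
    for (auto& t : tables_)
      if (t) v(*t);
  }
};

int main() {
  SubCache cache(3);
  cache.tables_[0] = 42;
  cache.Age();  // 42 moves to generation 1
  cache.IterateFunctions([](int t) { std::printf("table %d\n", t); });
}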

4
deps/v8/src/compilation-cache.h

@@ -79,11 +79,9 @@ class CompilationCache {
// Clear the cache - also used to initialize the cache at startup.
static void Clear();
-static bool HasFunction(SharedFunctionInfo* function_info);
// GC support.
static void Iterate(ObjectVisitor* v);
+static void IterateFunctions(ObjectVisitor* v);
// Notify the cache that a mark-sweep garbage collection is about to
// take place. This is used to retire entries from the cache to

78
deps/v8/src/compiler.cc

@ -33,7 +33,6 @@
#include "compiler.h" #include "compiler.h"
#include "data-flow.h" #include "data-flow.h"
#include "debug.h" #include "debug.h"
#include "flow-graph.h"
#include "full-codegen.h" #include "full-codegen.h"
#include "liveedit.h" #include "liveedit.h"
#include "oprofile-agent.h" #include "oprofile-agent.h"
@ -92,27 +91,6 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
return Handle<Code>::null(); return Handle<Code>::null();
} }
if (function->scope()->num_parameters() > 0 ||
function->scope()->num_stack_slots()) {
AssignedVariablesAnalyzer ava(function);
ava.Analyze();
if (ava.HasStackOverflow()) {
return Handle<Code>::null();
}
}
if (FLAG_use_flow_graph) {
FlowGraphBuilder builder;
FlowGraph* graph = builder.Build(function);
USE(graph);
#ifdef DEBUG
if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
graph->PrintAsText(function->name());
}
#endif
}
// Generate code and return it. Code generator selection is governed by // Generate code and return it. Code generator selection is governed by
// which backends are enabled and whether the function is considered // which backends are enabled and whether the function is considered
// run-once code or not: // run-once code or not:
@ -126,17 +104,13 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
bool is_run_once = (shared.is_null()) bool is_run_once = (shared.is_null())
? info->scope()->is_global_scope() ? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen()); : (shared->is_toplevel() || shared->try_full_codegen());
bool use_full = FLAG_full_compiler && !function->contains_loops();
if (AlwaysFullCompiler()) { if (AlwaysFullCompiler() || (use_full && is_run_once)) {
return FullCodeGenerator::MakeCode(info);
} else if (FLAG_full_compiler && is_run_once) {
FullCodeGenSyntaxChecker checker;
checker.Check(function);
if (checker.has_supported_syntax()) {
return FullCodeGenerator::MakeCode(info); return FullCodeGenerator::MakeCode(info);
} }
}
AssignedVariablesAnalyzer ava(function);
if (!ava.Analyze()) return Handle<Code>::null();
return CodeGenerator::MakeCode(info); return CodeGenerator::MakeCode(info);
} }
@ -442,6 +416,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// object last we avoid this. // object last we avoid this.
shared->set_scope_info(*SerializedScopeInfo::Create(info->scope())); shared->set_scope_info(*SerializedScopeInfo::Create(info->scope()));
shared->set_code(*code); shared->set_code(*code);
if (!info->closure().is_null()) {
info->closure()->set_code(*code);
}
// Set the expected number of properties for instances. // Set the expected number of properties for instances.
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count()); SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
@@ -454,6 +431,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
   // Check the function has compiled code.
   ASSERT(shared->is_compiled());
+  shared->set_code_age(0);
   return true;
 }
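The two hunks above make lazy compilation publish the new code in both places that cache it, and reset the code age, presumably so the code-flushing heuristic treats the result as fresh. A toy model with invented types (Code, Shared, Closure are illustrative, not V8's classes):

#include <cassert>

struct Code { int id; };
struct Shared { Code* code; int code_age; };
struct Closure { Shared* shared; Code* code; };

// Lazy compilation must update the shared info and, when present, the
// activating closure, or the closure keeps running the lazy-compile stub.
void FinishLazyCompile(Shared* shared, Closure* closure, Code* code) {
  shared->code = code;
  if (closure != nullptr) closure->code = code;
  shared->code_age = 0;  // freshly used code should not be flushed soon
}

int main() {
  Code lazy_stub{0}, compiled{1};
  Shared s{&lazy_stub, 5};
  Closure c{&s, &lazy_stub};
  FinishLazyCompile(&s, &c, &compiled);
  assert(s.code == &compiled && c.code == &compiled && s.code_age == 0);
}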
@@ -489,49 +467,19 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
     return Handle<SharedFunctionInfo>::null();
   }

-  if (literal->scope()->num_parameters() > 0 ||
-      literal->scope()->num_stack_slots()) {
-    AssignedVariablesAnalyzer ava(literal);
-    ava.Analyze();
-    if (ava.HasStackOverflow()) {
-      return Handle<SharedFunctionInfo>::null();
-    }
-  }
-
-  if (FLAG_use_flow_graph) {
-    FlowGraphBuilder builder;
-    FlowGraph* graph = builder.Build(literal);
-    USE(graph);
-#ifdef DEBUG
-    if (FLAG_print_graph_text && !builder.HasStackOverflow()) {
-      graph->PrintAsText(literal->name());
-    }
-#endif
-  }
-
   // Generate code and return it.  The way that the compilation mode
   // is controlled by the command-line flags is described in
   // the static helper function MakeCode.
   CompilationInfo info(literal, script, false);

   bool is_run_once = literal->try_full_codegen();
-  bool is_compiled = false;
-
-  if (AlwaysFullCompiler()) {
+  bool use_full = FLAG_full_compiler && !literal->contains_loops();
+  if (AlwaysFullCompiler() || (use_full && is_run_once)) {
     code = FullCodeGenerator::MakeCode(&info);
-    is_compiled = true;
-  } else if (FLAG_full_compiler && is_run_once) {
-    FullCodeGenSyntaxChecker checker;
-    checker.Check(literal);
-    if (checker.has_supported_syntax()) {
-      code = FullCodeGenerator::MakeCode(&info);
-      is_compiled = true;
-    }
-  }
-
-  if (!is_compiled) {
+  } else {
     // We fall back to the classic V8 code generator.
+    AssignedVariablesAnalyzer ava(literal);
+    if (!ava.Analyze()) return Handle<SharedFunctionInfo>::null();
     code = CodeGenerator::MakeCode(&info);
   }

8
deps/v8/src/contexts.h

@ -28,6 +28,9 @@
#ifndef V8_CONTEXTS_H_ #ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_ #define V8_CONTEXTS_H_
#include "heap.h"
#include "objects.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -86,6 +89,7 @@ enum ContextLookupFlags {
V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \ V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \ V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \ V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
V(NORMALIZED_MAP_CACHE_INDEX, NormalizedMapCache, normalized_map_cache) \
V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \ V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \ V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \ V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
@@ -211,6 +215,7 @@ class Context: public FixedArray {
     CONFIGURE_GLOBAL_INDEX,
     FUNCTION_CACHE_INDEX,
     JSFUNCTION_RESULT_CACHES_INDEX,
+    NORMALIZED_MAP_CACHE_INDEX,
     RUNTIME_CONTEXT_INDEX,
     CALL_AS_FUNCTION_DELEGATE_INDEX,
     CALL_AS_CONSTRUCTOR_DELEGATE_INDEX,
@@ -243,7 +248,8 @@ class Context: public FixedArray {
   GlobalObject* global() {
     Object* result = get(GLOBAL_INDEX);
-    ASSERT(IsBootstrappingOrGlobalObject(result));
+    ASSERT(Heap::gc_state() != Heap::NOT_IN_GC ||
+           IsBootstrappingOrGlobalObject(result));
     return reinterpret_cast<GlobalObject*>(result);
   }
   void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }

9
deps/v8/src/conversions.cc

@@ -733,11 +733,18 @@ double StringToInt(String* str, int radix) {
 double StringToDouble(const char* str, int flags, double empty_string_val) {
   const char* end = str + StrLength(str);
   return InternalStringToDouble(str, end, flags, empty_string_val);
 }


+double StringToDouble(Vector<const char> str,
+                      int flags,
+                      double empty_string_val) {
+  const char* end = str.start() + str.length();
+  return InternalStringToDouble(str.start(), end, flags, empty_string_val);
+}
+
+
 extern "C" char* dtoa(double d, int mode, int ndigits,
                       int* decpt, int* sign, char** rve);
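The new overload accepts a (pointer, length) slice, so callers holding an unterminated buffer no longer have to copy it into a NUL-terminated C string first. A hypothetical call site, not from the patch; it assumes the existing conversion-flags value NO_FLAGS from conversions.h:

// Parse a number out of an unterminated scanner buffer without copying.
double ParseNumberSlice(const char* start, int length) {
  return StringToDouble(Vector<const char>(start, length), NO_FLAGS);
}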

6
deps/v8/src/conversions.h

@ -96,8 +96,12 @@ static inline uint32_t NumberToUint32(Object* number);
// Converts a string into a double value according to ECMA-262 9.3.1 // Converts a string into a double value according to ECMA-262 9.3.1
double StringToDouble(const char* str, int flags, double empty_string_val = 0);
double StringToDouble(String* str, int flags, double empty_string_val = 0); double StringToDouble(String* str, int flags, double empty_string_val = 0);
double StringToDouble(Vector<const char> str,
int flags,
double empty_string_val = 0);
// This version expects a zero-terminated character array.
double StringToDouble(const char* str, int flags, double empty_string_val = 0);
// Converts a string into an integer. // Converts a string into an integer.
double StringToInt(String* str, int radix); double StringToInt(String* str, int radix);

18
deps/v8/src/cpu-profiler.cc

@ -46,7 +46,7 @@ static const int kTickSamplesBufferChunksCount = 16;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: generator_(generator), : generator_(generator),
running_(false), running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord), ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize, kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount), kTickSamplesBufferChunksCount),
@@ -235,8 +235,19 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
     const TickSampleEventRecord* rec =
         TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
     if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
-    if (rec->order == dequeue_order) {
-      generator_->RecordTickSample(rec->sample);
+    // Make a local copy of tick sample record to ensure that it won't
+    // be modified as we are processing it. This is possible as the
+    // sampler writes w/o any sync to the queue, so if the processor
+    // will get far behind, a record may be modified right under its
+    // feet.
+    TickSampleEventRecord record = *rec;
+    if (record.order == dequeue_order) {
+      // A paranoid check to make sure that we don't get a memory overrun
+      // in case of frames_count having a wild value.
+      if (record.sample.frames_count < 0
+          || record.sample.frames_count >= TickSample::kMaxFramesCount)
+        record.sample.frames_count = 0;
+      generator_->RecordTickSample(record.sample);
       ticks_buffer_.FinishDequeue();
     } else {
       return true;
@@ -247,7 +258,6 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
 void ProfilerEventsProcessor::Run() {
   unsigned dequeue_order = 0;
-  running_ = true;

   while (running_) {
     // Process ticks until we have any.
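Two fixes are bundled here: running_ is now set before the processor thread can observe it (initializing it in the constructor rather than in Run() removes a startup race), and tick records are snapshotted before use because the sampler overwrites ring-buffer slots without synchronization. A standalone model of the copy-then-clamp pattern; the types are invented and kMaxFrames mirrors TickSample::kMaxFramesCount:

#include <cstring>

static const int kMaxFrames = 64;
struct Record { unsigned order; int frames_count; };

static bool ProcessSlot(const Record* slot, unsigned dequeue_order) {
  Record record;
  std::memcpy(&record, slot, sizeof(record));  // snapshot before validating
  if (record.order != dequeue_order) return false;
  if (record.frames_count < 0 || record.frames_count >= kMaxFrames) {
    record.frames_count = 0;  // torn read: drop the stack, keep the tick
  }
  // ... hand the private copy to the profile generator ...
  return true;
}

int main() {
  Record r = {1u, 1000};  // wild frame count, as from a torn write
  return ProcessSlot(&r, 1u) ? 0 : 1;
}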

2
deps/v8/src/d8.cc

@@ -486,7 +486,7 @@ void Shell::Initialize() {
   // Start the debugger agent if requested.
   if (i::FLAG_debugger_agent) {
-    v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
+    v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
   }

   // Start the in-process debugger if requested.

278
deps/v8/src/data-flow.cc

@@ -50,258 +50,13 @@ void BitVector::Print() {
 #endif


-void AstLabeler::Label(CompilationInfo* info) {
-  info_ = info;
-  VisitStatements(info_->function()->body());
-}
+bool AssignedVariablesAnalyzer::Analyze() {
+  Scope* scope = fun_->scope();
+  int variables = scope->num_parameters() + scope->num_stack_slots();
+  if (variables == 0) return true;
+  av_.ExpandTo(variables);
void AstLabeler::VisitStatements(ZoneList<Statement*>* stmts) {
for (int i = 0, len = stmts->length(); i < len; i++) {
Visit(stmts->at(i));
}
}
void AstLabeler::VisitDeclarations(ZoneList<Declaration*>* decls) {
UNREACHABLE();
}
void AstLabeler::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
void AstLabeler::VisitExpressionStatement(
ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void AstLabeler::VisitEmptyStatement(EmptyStatement* stmt) {
// Do nothing.
}
void AstLabeler::VisitIfStatement(IfStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitContinueStatement(ContinueStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitBreakStatement(BreakStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitReturnStatement(ReturnStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitWithEnterStatement(
WithEnterStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitWithExitStatement(WithExitStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitDoWhileStatement(DoWhileStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitWhileStatement(WhileStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitForStatement(ForStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitForInStatement(ForInStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitTryCatchStatement(TryCatchStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitTryFinallyStatement(
TryFinallyStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitDebuggerStatement(
DebuggerStatement* stmt) {
UNREACHABLE();
}
void AstLabeler::VisitFunctionLiteral(FunctionLiteral* expr) {
UNREACHABLE();
}
void AstLabeler::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
UNREACHABLE();
}
void AstLabeler::VisitConditional(Conditional* expr) {
UNREACHABLE();
}
void AstLabeler::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void AstLabeler::VisitVariableProxy(VariableProxy* expr) {
expr->set_num(next_number_++);
Variable* var = expr->var();
if (var->is_global() && !var->is_this()) {
info_->set_has_globals(true);
}
}
void AstLabeler::VisitLiteral(Literal* expr) {
UNREACHABLE();
}
void AstLabeler::VisitRegExpLiteral(RegExpLiteral* expr) {
UNREACHABLE();
}
void AstLabeler::VisitObjectLiteral(ObjectLiteral* expr) {
UNREACHABLE();
}
void AstLabeler::VisitArrayLiteral(ArrayLiteral* expr) {
UNREACHABLE();
}
void AstLabeler::VisitCatchExtensionObject(
CatchExtensionObject* expr) {
UNREACHABLE();
}
void AstLabeler::VisitAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
ASSERT(prop->key()->IsPropertyName());
VariableProxy* proxy = prop->obj()->AsVariableProxy();
USE(proxy);
ASSERT(proxy != NULL && proxy->var()->is_this());
info()->set_has_this_properties(true);
prop->obj()->set_num(AstNode::kNoNumber);
prop->key()->set_num(AstNode::kNoNumber);
Visit(expr->value());
expr->set_num(next_number_++);
}
void AstLabeler::VisitThrow(Throw* expr) {
UNREACHABLE();
}
void AstLabeler::VisitProperty(Property* expr) {
ASSERT(expr->key()->IsPropertyName());
VariableProxy* proxy = expr->obj()->AsVariableProxy();
USE(proxy);
ASSERT(proxy != NULL && proxy->var()->is_this());
info()->set_has_this_properties(true);
expr->obj()->set_num(AstNode::kNoNumber);
expr->key()->set_num(AstNode::kNoNumber);
expr->set_num(next_number_++);
}
void AstLabeler::VisitCall(Call* expr) {
UNREACHABLE();
}
void AstLabeler::VisitCallNew(CallNew* expr) {
UNREACHABLE();
}
void AstLabeler::VisitCallRuntime(CallRuntime* expr) {
UNREACHABLE();
}
void AstLabeler::VisitUnaryOperation(UnaryOperation* expr) {
UNREACHABLE();
}
void AstLabeler::VisitCountOperation(CountOperation* expr) {
UNREACHABLE();
}
void AstLabeler::VisitBinaryOperation(BinaryOperation* expr) {
Visit(expr->left());
Visit(expr->right());
expr->set_num(next_number_++);
}
void AstLabeler::VisitCompareOperation(CompareOperation* expr) {
UNREACHABLE();
}
void AstLabeler::VisitThisFunction(ThisFunction* expr) {
UNREACHABLE();
}
void AstLabeler::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
-AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(FunctionLiteral* fun)
-    : fun_(fun),
-      av_(fun->scope()->num_parameters() + fun->scope()->num_stack_slots()) {}
-
-void AssignedVariablesAnalyzer::Analyze() {
-  ASSERT(av_.length() > 0);
   VisitStatements(fun_->body());
+  return !HasStackOverflow();
 }
@@ -394,7 +149,7 @@ void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
       !var->is_arguments() &&
       var->mode() != Variable::CONST &&
       (var->is_this() || !av_.Contains(BitIndex(var)))) {
-    expr->AsVariableProxy()->set_is_trivial(true);
+    expr->AsVariableProxy()->MarkAsTrivial();
   }
 }
@@ -489,9 +244,7 @@ void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
 void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
   if (stmt->init() != NULL) Visit(stmt->init());
   if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
   if (stmt->next() != NULL) Visit(stmt->next());

   // Process loop body. After visiting the loop body av_ contains
@@ -504,7 +257,6 @@ void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
   if (var != NULL && !av_.Contains(BitIndex(var))) {
     stmt->set_loop_variable(var);
   }
   av_.Union(saved_av);
 }
@@ -712,13 +464,20 @@ void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
 void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
   ASSERT(av_.IsEmpty());
+  MarkIfTrivial(expr->expression());
   Visit(expr->expression());
 }

+void AssignedVariablesAnalyzer::VisitIncrementOperation(
+    IncrementOperation* expr) {
+  UNREACHABLE();
+}
+
 void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
   ASSERT(av_.IsEmpty());
+  if (expr->is_prefix()) MarkIfTrivial(expr->expression());
   Visit(expr->expression());
   Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
@@ -744,6 +503,13 @@ void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
 }

+void AssignedVariablesAnalyzer::VisitCompareToNull(CompareToNull* expr) {
+  ASSERT(av_.IsEmpty());
+  MarkIfTrivial(expr->expression());
+  Visit(expr->expression());
+}
+
 void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
   // Nothing to do.
   ASSERT(av_.IsEmpty());
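The signature change turns the old two-step protocol, a void Analyze() followed by a separate HasStackOverflow() check, into a single boolean result, and the analyzer now sizes its bit vector lazily so functions with no parameters or stack slots skip allocation entirely. The resulting call-site shape (this fragment mirrors the compiler.cc hunks earlier in this commit rather than introducing new API):

AssignedVariablesAnalyzer ava(literal);
if (!ava.Analyze()) return Handle<SharedFunctionInfo>::null();
code = CodeGenerator::MakeCode(&info);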

76
deps/v8/src/data-flow.h

@@ -42,12 +42,10 @@ class Node;
 class BitVector: public ZoneObject {
  public:
-  explicit BitVector(int length)
-      : length_(length),
-        data_length_(SizeFor(length)),
-        data_(Zone::NewArray<uint32_t>(data_length_)) {
-    ASSERT(length > 0);
-    Clear();
+  BitVector() : length_(0), data_length_(0), data_(NULL) { }
+
+  explicit BitVector(int length) {
+    ExpandTo(length);
   }

   BitVector(const BitVector& other)
@@ -57,8 +55,12 @@ class BitVector: public ZoneObject {
     CopyFrom(other);
   }

-  static int SizeFor(int length) {
-    return 1 + ((length - 1) / 32);
+  void ExpandTo(int length) {
+    ASSERT(length > 0);
+    length_ = length;
+    data_length_ = SizeFor(length);
+    data_ = Zone::NewArray<uint32_t>(data_length_);
+    Clear();
   }

   BitVector& operator=(const BitVector& rhs) {
@@ -137,6 +139,10 @@ class BitVector: public ZoneObject {
 #endif

  private:
+  static int SizeFor(int length) {
+    return 1 + ((length - 1) / 32);
+  }
+
   int length_;
   int data_length_;
   uint32_t* data_;
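The default constructor plus ExpandTo lets a BitVector live as a plain member and be sized on first use, which is exactly what allows the new Analyze() to return early without allocating. A self-contained sketch of the same shape using std::vector in place of V8's Zone allocator; names mirror the diff but this is illustrative only:

#include <cassert>
#include <cstdint>
#include <vector>

class BitVector {
 public:
  BitVector() : length_(0) { }  // no allocation until ExpandTo()
  void ExpandTo(int length) {
    assert(length > 0);
    length_ = length;
    data_.assign(1 + ((length - 1) / 32), 0u);  // same SizeFor() rounding
  }
  void Add(int i) { data_[i / 32] |= (1u << (i % 32)); }
  bool Contains(int i) const { return (data_[i / 32] >> (i % 32)) & 1u; }
 private:
  int length_;
  std::vector<uint32_t> data_;
};

int main() {
  BitVector av;      // cheap to construct
  av.ExpandTo(40);   // 40 bits round up to two 32-bit words
  av.Add(33);
  assert(av.Contains(33) && !av.Contains(2));
}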
@@ -187,63 +193,13 @@ class WorkList BASE_EMBEDDED {
 };
struct ReachingDefinitionsData BASE_EMBEDDED {
public:
ReachingDefinitionsData() : rd_in_(NULL), kill_(NULL), gen_(NULL) {}
void Initialize(int definition_count) {
rd_in_ = new BitVector(definition_count);
kill_ = new BitVector(definition_count);
gen_ = new BitVector(definition_count);
}
BitVector* rd_in() { return rd_in_; }
BitVector* kill() { return kill_; }
BitVector* gen() { return gen_; }
private:
BitVector* rd_in_;
BitVector* kill_;
BitVector* gen_;
};
// This class is used to number all expressions in the AST according to
// their evaluation order (post-order left-to-right traversal).
class AstLabeler: public AstVisitor {
public:
AstLabeler() : next_number_(0) {}
void Label(CompilationInfo* info);
private:
CompilationInfo* info() { return info_; }
void VisitDeclarations(ZoneList<Declaration*>* decls);
void VisitStatements(ZoneList<Statement*>* stmts);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
// Traversal number for labelling AST nodes.
int next_number_;
CompilationInfo* info_;
DISALLOW_COPY_AND_ASSIGN(AstLabeler);
};
 // Computes the set of assigned variables and annotates variables proxies
 // that are trivial sub-expressions and for-loops where the loop variable
 // is guaranteed to be a smi.
 class AssignedVariablesAnalyzer : public AstVisitor {
  public:
-  explicit AssignedVariablesAnalyzer(FunctionLiteral* fun);
-  void Analyze();
+  explicit AssignedVariablesAnalyzer(FunctionLiteral* fun) : fun_(fun) { }
+  bool Analyze();

  private:
   Variable* FindSmiLoopVariable(ForStatement* stmt);

16
deps/v8/src/date.js

@@ -137,12 +137,18 @@ var DST_offset_cache = {
   // Time interval where the cached offset is valid.
   start: 0, end: -1,
   // Size of next interval expansion.
-  increment: 0
+  increment: 0,
+  initial_increment: 19 * msPerDay
 };


 // NOTE: The implementation relies on the fact that no time zones have
-// more than one daylight savings offset change per month.
+// more than one daylight savings offset change per 19 days.
+//
+// In Egypt in 2010 they decided to suspend DST during Ramadan. This
+// led to a short interval where DST is in effect from September 10 to
+// September 30.
+//
 // If this function is called with NaN it returns NaN.
 function DaylightSavingsOffset(t) {
   // Load the cache object from the builtins object.
@@ -171,7 +177,7 @@ function DaylightSavingsOffset(t) {
       // the offset in the cache, we grow the cached time interval
       // and return the offset.
       cache.end = new_end;
-      cache.increment = msPerMonth;
+      cache.increment = cache.initial_increment;
       return end_offset;
     } else {
       var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
@@ -182,7 +188,7 @@ function DaylightSavingsOffset(t) {
         // the interval to reflect this and reset the increment.
         cache.start = t;
         cache.end = new_end;
-        cache.increment = msPerMonth;
+        cache.increment = cache.initial_increment;
       } else {
         // The interval contains a DST offset change and the given time is
         // before it. Adjust the increment to avoid a linear search for
@@ -207,7 +213,7 @@ function DaylightSavingsOffset(t) {
   var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
   cache.offset = offset;
   cache.start = cache.end = t;
-  cache.increment = msPerMonth;
+  cache.increment = cache.initial_increment;
   return offset;
 }
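The cache remembers one [start, end] interval over which the DST offset is known constant and grows it by a fixed step when a probe just past the end agrees; shrinking the step from a month to 19 days keeps the invariant valid even for anomalies like Egypt's 2010 Ramadan suspension. A loose, compilable C++ model of the policy (OffsetCache, SlowOffset, and kInitialIncrement are invented names; the shrink-on-mismatch branch and NaN handling from the JS source are omitted):

#include <cassert>

// Stand-in for the expensive %DateDaylightSavingsOffset runtime call;
// here it pretends DST is never in effect.
static long long SlowOffset(long long t) { (void)t; return 0; }

static const long long kMsPerDay = 24LL * 60 * 60 * 1000;
static const long long kInitialIncrement = 19 * kMsPerDay;  // was one month

struct OffsetCache { long long start, end, offset, increment; };

static long long CachedOffset(OffsetCache* c, long long t) {
  if (t >= c->start && t <= c->end) return c->offset;  // inside interval
  long long new_end = c->end + c->increment;
  if (t > c->end && t <= new_end && SlowOffset(new_end) == c->offset) {
    c->end = new_end;                  // grow the validated interval
    c->increment = kInitialIncrement;  // and reset the growth step
    return c->offset;
  }
  c->offset = SlowOffset(t);           // cold miss: restart the interval
  c->start = c->end = t;
  c->increment = kInitialIncrement;
  return c->offset;
}

int main() {
  OffsetCache c = {0, -1, 0, kInitialIncrement};
  long long t1 = 10 * kMsPerDay, t2 = 25 * kMsPerDay;
  assert(CachedOffset(&c, t1) == 0);  // grows the interval past t1
  assert(CachedOffset(&c, t2) == 0);  // a second probe grows it again
  assert(c.end >= t2);
}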

6
deps/v8/src/dateparser.h

@@ -92,7 +92,7 @@ class DateParser : public AllStatic {
     int ReadWord(uint32_t* prefix, int prefix_size) {
       int len;
       for (len = 0; IsAsciiAlphaOrAbove(); Next(), len++) {
-        if (len < prefix_size) prefix[len] = GetAsciiAlphaLower();
+        if (len < prefix_size) prefix[len] = AsciiAlphaToLower(ch_);
       }
       for (int i = len; i < prefix_size; i++) prefix[i] = 0;
       return len;
@@ -130,10 +130,6 @@ class DateParser : public AllStatic {
     bool HasReadNumber() const { return has_read_number_; }

    private:
-    // If current character is in 'A'-'Z' or 'a'-'z', return its lower-case.
-    // Else, return something outside of 'A'-'Z' and 'a'-'z'.
-    uint32_t GetAsciiAlphaLower() const { return ch_ | 32; }
-
     int index_;
     Vector<Char> buffer_;
     bool has_read_number_;
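The private helper is replaced by AsciiAlphaToLower, presumably the shared version in char-predicates-inl.h (also touched by this commit). The trick is unchanged: ORing an ASCII letter with 0x20 sets the lowercase bit ('A' is 0x41, 'a' is 0x61). A minimal demonstration, with the helper re-declared locally for illustration:

#include <cassert>

static int AsciiAlphaToLower(int ch) { return ch | 0x20; }

int main() {
  assert(AsciiAlphaToLower('J') == 'j');
  assert(AsciiAlphaToLower('j') == 'j');
  // Non-letters map to "something outside 'a'..'z'", which is all the
  // date parser's keyword-prefix comparison needs.
  assert(AsciiAlphaToLower('3') < 'a' || AsciiAlphaToLower('3') > 'z');
}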

16
deps/v8/src/debug.cc

@@ -461,6 +461,8 @@ void BreakLocationIterator::SetDebugBreakAtIC() {
       KeyedStoreIC::ClearInlinedVersion(pc());
     } else if (code->is_load_stub()) {
       LoadIC::ClearInlinedVersion(pc());
+    } else if (code->is_store_stub()) {
+      StoreIC::ClearInlinedVersion(pc());
     }
   }
 }
@@ -549,6 +551,7 @@ void Debug::ThreadInit() {
   thread_local_.after_break_target_ = 0;
   thread_local_.debugger_entry_ = NULL;
   thread_local_.pending_interrupts_ = 0;
+  thread_local_.restarter_frame_function_pointer_ = NULL;
 }
@@ -1004,17 +1007,18 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
     for (int i = 0; i < array->length(); i++) {
       Handle<Object> o(array->get(i));
       if (CheckBreakPoint(o)) {
-        break_points_hit->SetElement(break_points_hit_count++, *o);
+        SetElement(break_points_hit, break_points_hit_count++, o);
       }
     }
   } else {
     if (CheckBreakPoint(break_point_objects)) {
-      break_points_hit->SetElement(break_points_hit_count++,
-                                   *break_point_objects);
+      SetElement(break_points_hit,
+                 break_points_hit_count++,
+                 break_point_objects);
    }
   }

-  // Return undefined if no break points where triggered.
+  // Return undefined if no break points were triggered.
   if (break_points_hit_count == 0) {
     return Factory::undefined_value();
   }
@@ -1440,7 +1444,7 @@ bool Debug::IsDebugBreak(Address addr) {
 // Check whether a code stub with the specified major key is a possible break
 // point location when looking for source break locations.
 bool Debug::IsSourceBreakStub(Code* code) {
-  CodeStub::Major major_key = code->major_key();
+  CodeStub::Major major_key = CodeStub::GetMajorKey(code);
   return major_key == CodeStub::CallFunction;
 }
@@ -1448,7 +1452,7 @@ bool Debug::IsSourceBreakStub(Code* code) {
 // Check whether a code stub with the specified major key is a possible break
 // location.
 bool Debug::IsBreakStub(Code* code) {
-  CodeStub::Major major_key = code->major_key();
+  CodeStub::Major major_key = CodeStub::GetMajorKey(code);
  return major_key == CodeStub::CallFunction ||
         major_key == CodeStub::StackCheck;
 }
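The switch from a raw SetElement on the JSObject to the handle-taking helper matters because an element store can allocate and thus trigger a moving GC: raw pointers into the heap go stale, while a handle (one extra indirection, updated by the collector) stays valid. A toy model of that distinction, with all types invented for illustration:

#include <cassert>
#include <vector>

struct Obj { int value; };

struct Handle {
  Obj** location;                      // slot maintained by the "GC"
  Obj* operator->() const { return *location; }
};

int main() {
  std::vector<Obj*> handle_table;
  Obj* raw = new Obj{42};
  handle_table.push_back(raw);
  Handle h{&handle_table[0]};

  // Simulate a moving collection: the object is copied elsewhere and the
  // handle table is updated, but any saved raw pointer is now dangling.
  Obj* moved = new Obj{*raw};
  delete raw;
  handle_table[0] = moved;
  assert(h->value == 42);              // the handle still works
  delete moved;
}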

16
deps/v8/src/debug.h

@@ -29,7 +29,6 @@
 #define V8_DEBUG_H_

 #include "assembler.h"
-#include "code-stubs.h"
 #include "debug-agent.h"
 #include "execution.h"
 #include "factory.h"
@@ -332,8 +331,7 @@ class Debug {
     k_after_break_target_address,
     k_debug_break_return_address,
     k_debug_break_slot_address,
-    k_restarter_frame_function_pointer,
-    k_register_address
+    k_restarter_frame_function_pointer
   };

   // Support for setting the address to jump to when returning from break point.
@@ -953,10 +951,7 @@ class DisableBreak BASE_EMBEDDED {
 // code.
 class Debug_Address {
  public:
-  Debug_Address(Debug::AddressId id, int reg = 0)
-      : id_(id), reg_(reg) {
-    ASSERT(reg == 0 || id == Debug::k_register_address);
-  }
+  explicit Debug_Address(Debug::AddressId id) : id_(id) { }

   static Debug_Address AfterBreakTarget() {
     return Debug_Address(Debug::k_after_break_target_address);
@@ -970,10 +965,6 @@ class Debug_Address {
     return Debug_Address(Debug::k_restarter_frame_function_pointer);
   }

-  static Debug_Address Register(int reg) {
-    return Debug_Address(Debug::k_register_address, reg);
-  }
-
   Address address() const {
     switch (id_) {
       case Debug::k_after_break_target_address:
@@ -985,8 +976,6 @@ class Debug_Address {
       case Debug::k_restarter_frame_function_pointer:
         return reinterpret_cast<Address>(
             Debug::restarter_frame_function_pointer_address());
-      case Debug::k_register_address:
-        return reinterpret_cast<Address>(Debug::register_address(reg_));
       default:
         UNREACHABLE();
         return NULL;
@@ -994,7 +983,6 @@ class Debug_Address {
   }

  private:
   Debug::AddressId id_;
-  int reg_;
 };

 // The optional thread that Debug Agent may use to temporary call V8 to process

7
deps/v8/src/disassembler.cc

@@ -258,11 +258,12 @@ static int DecodeIt(FILE* f,
           // Get the STUB key and extract major and minor key.
           uint32_t key = Smi::cast(obj)->value();
           uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
-          ASSERT(code->major_key() == CodeStub::MajorKeyFromKey(key));
+          CodeStub::Major major_key = CodeStub::GetMajorKey(code);
+          ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
           out.AddFormatted(" %s, %s, ",
                            Code::Kind2String(kind),
-                           CodeStub::MajorName(code->major_key(), false));
-          switch (code->major_key()) {
+                           CodeStub::MajorName(major_key, false));
+          switch (major_key) {
            case CodeStub::CallFunction:
              out.AddFormatted("argc = %d", minor_key);
              break;

2
deps/v8/src/execution.cc

@ -710,7 +710,7 @@ class SimpleStringResource : public Base {
: data_(data), : data_(data),
length_(length) {} length_(length) {}
virtual ~SimpleStringResource() { delete data_; } virtual ~SimpleStringResource() { delete[] data_; }
virtual const Char* data() const { return data_; } virtual const Char* data() const { return data_; }
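The one-character fix above is significant: data_ is an array allocated with new[], and destroying it with scalar delete is undefined behavior. A minimal repro of the corrected pairing, standalone and unrelated to V8's actual class beyond the shape:

#include <cstring>

struct Resource {
  explicit Resource(const char* s) {
    std::size_t n = std::strlen(s) + 1;
    data_ = new char[n];           // array new ...
    std::memcpy(data_, s, n);
  }
  ~Resource() { delete[] data_; }  // ... requires array delete
  char* data_;
};

int main() { Resource r("external string payload"); }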

2
deps/v8/src/flag-definitions.h

@ -283,7 +283,7 @@ DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
"debugger agent in another process") "debugger agent in another process")
DEFINE_bool(debugger_agent, false, "Enable debugger agent") DEFINE_bool(debugger_agent, false, "Enable debugger agent")
DEFINE_int(debugger_port, 5858, "Port to use for remote debugging") DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
DEFINE_string(map_counters, false, "Map counters to a file") DEFINE_string(map_counters, NULL, "Map counters to a file")
DEFINE_args(js_arguments, JSArguments(), DEFINE_args(js_arguments, JSArguments(),
"Pass all remaining arguments to the script. Alias for \"--\".") "Pass all remaining arguments to the script. Alias for \"--\".")

2
deps/v8/src/flags.h

@ -27,8 +27,6 @@
#ifndef V8_FLAGS_H_ #ifndef V8_FLAGS_H_
#define V8_FLAGS_H_ #define V8_FLAGS_H_
#include "checks.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {

763
deps/v8/src/flow-graph.cc

@@ -1,763 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "flow-graph.h"
#include "scopes.h"
namespace v8 {
namespace internal {
void BasicBlock::BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
ZoneList<BasicBlock*>* postorder,
bool mark) {
if (mark_ == mark) return;
mark_ = mark;
preorder->Add(this);
if (right_successor_ != NULL) {
right_successor_->BuildTraversalOrder(preorder, postorder, mark);
}
if (left_successor_ != NULL) {
left_successor_->BuildTraversalOrder(preorder, postorder, mark);
}
postorder->Add(this);
}
FlowGraph* FlowGraphBuilder::Build(FunctionLiteral* lit) {
// Create new entry and exit nodes. These will not change during
// construction.
entry_ = new BasicBlock(NULL);
exit_ = new BasicBlock(NULL);
// Begin accumulating instructions in the entry block.
current_ = entry_;
VisitDeclarations(lit->scope()->declarations());
VisitStatements(lit->body());
// In the event of stack overflow or failure to handle a syntactic
// construct, return an invalid flow graph.
if (HasStackOverflow()) return new FlowGraph(NULL, NULL);
// If current is not the exit, add a link to the exit.
if (current_ != exit_) {
// If current already has a successor (i.e., will be a branch node) and
// if the exit already has a predecessor, insert an empty block to
// maintain edge split form.
if (current_->HasSuccessor() && exit_->HasPredecessor()) {
current_ = new BasicBlock(current_);
}
Literal* undefined = new Literal(Factory::undefined_value());
current_->AddInstruction(new ReturnStatement(undefined));
exit_->AddPredecessor(current_);
}
FlowGraph* graph = new FlowGraph(entry_, exit_);
bool mark = !entry_->GetMark();
entry_->BuildTraversalOrder(graph->preorder(), graph->postorder(), mark);
#ifdef DEBUG
// Number the nodes in reverse postorder.
int n = 0;
for (int i = graph->postorder()->length() - 1; i >= 0; --i) {
graph->postorder()->at(i)->set_number(n++);
}
#endif
return graph;
}
void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
Variable* var = decl->proxy()->AsVariable();
Slot* slot = var->slot();
// We allow only declarations that do not require code generation.
// The following all require code generation: global variables and
// functions, variables with slot type LOOKUP, declarations with
// mode CONST, and functions.
if (var->is_global() ||
(slot != NULL && slot->type() == Slot::LOOKUP) ||
decl->mode() == Variable::CONST ||
decl->fun() != NULL) {
// Here and in the rest of the flow graph builder we indicate an
// unsupported syntactic construct by setting the stack overflow
// flag on the visitor. This causes bailout of the visitor.
SetStackOverflow();
}
}
void FlowGraphBuilder::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
}
void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
Visit(stmt->expression());
}
void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
// Nothing to do.
}
void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
// Build a diamond in the flow graph. First accumulate the instructions
// of the test in the current basic block.
Visit(stmt->condition());
// Remember the branch node and accumulate the true branch as its left
// successor. This relies on the successors being added left to right.
BasicBlock* branch = current_;
current_ = new BasicBlock(branch);
Visit(stmt->then_statement());
// Construct a join node and then accumulate the false branch in a fresh
// successor of the branch node.
BasicBlock* join = new BasicBlock(current_);
current_ = new BasicBlock(branch);
Visit(stmt->else_statement());
join->AddPredecessor(current_);
current_ = join;
}
void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
// Build a loop in the flow graph. First accumulate the instructions of
// the initializer in the current basic block.
if (stmt->init() != NULL) Visit(stmt->init());
// Create a new basic block for the test. This will be the join node.
BasicBlock* join = new BasicBlock(current_);
current_ = join;
if (stmt->cond() != NULL) Visit(stmt->cond());
// The current node is the branch node. Create a new basic block to begin
// the body.
BasicBlock* branch = current_;
current_ = new BasicBlock(branch);
Visit(stmt->body());
if (stmt->next() != NULL) Visit(stmt->next());
// Add the backward edge from the end of the body and continue with the
// false arm of the branch.
join->AddPredecessor(current_);
current_ = new BasicBlock(branch);
}
void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitConditional(Conditional* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitSlot(Slot* expr) {
// Slots do not appear in the AST.
UNREACHABLE();
}
void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
current_->AddInstruction(expr);
}
void FlowGraphBuilder::VisitLiteral(Literal* expr) {
current_->AddInstruction(expr);
}
void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
// There are three basic kinds of assignment: variable assignments,
// property assignments, and invalid left-hand sides (which are translated
// to "throw ReferenceError" by the parser).
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
if (expr->is_compound() && !expr->target()->IsTrivial()) {
Visit(expr->target());
}
if (!expr->value()->IsTrivial()) Visit(expr->value());
current_->AddInstruction(expr);
} else if (prop != NULL) {
if (!prop->obj()->IsTrivial()) Visit(prop->obj());
if (!prop->key()->IsPropertyName() && !prop->key()->IsTrivial()) {
Visit(prop->key());
}
if (!expr->value()->IsTrivial()) Visit(expr->value());
current_->AddInstruction(expr);
} else {
Visit(expr->target());
}
}
void FlowGraphBuilder::VisitThrow(Throw* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitProperty(Property* expr) {
if (!expr->obj()->IsTrivial()) Visit(expr->obj());
if (!expr->key()->IsPropertyName() && !expr->key()->IsTrivial()) {
Visit(expr->key());
}
current_->AddInstruction(expr);
}
void FlowGraphBuilder::VisitCall(Call* expr) {
Visit(expr->expression());
VisitExpressions(expr->arguments());
current_->AddInstruction(expr);
}
void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
SetStackOverflow();
}
void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::NOT:
case Token::BIT_NOT:
case Token::DELETE:
case Token::TYPEOF:
case Token::VOID:
SetStackOverflow();
break;
case Token::ADD:
case Token::SUB:
Visit(expr->expression());
current_->AddInstruction(expr);
break;
default:
UNREACHABLE();
}
}
void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
Visit(expr->expression());
current_->AddInstruction(expr);
}
void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
switch (expr->op()) {
case Token::COMMA:
case Token::OR:
case Token::AND:
SetStackOverflow();
break;
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
case Token::SHL:
case Token::SAR:
case Token::SHR:
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
if (!expr->left()->IsTrivial()) Visit(expr->left());
if (!expr->right()->IsTrivial()) Visit(expr->right());
current_->AddInstruction(expr);
break;
default:
UNREACHABLE();
}
}
void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
switch (expr->op()) {
case Token::EQ:
case Token::NE:
case Token::EQ_STRICT:
case Token::NE_STRICT:
case Token::INSTANCEOF:
case Token::IN:
SetStackOverflow();
break;
case Token::LT:
case Token::GT:
case Token::LTE:
case Token::GTE:
if (!expr->left()->IsTrivial()) Visit(expr->left());
if (!expr->right()->IsTrivial()) Visit(expr->right());
current_->AddInstruction(expr);
break;
default:
UNREACHABLE();
}
}
void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
SetStackOverflow();
}
#ifdef DEBUG
// Print a textual representation of an instruction in a flow graph.
class InstructionPrinter: public AstVisitor {
public:
InstructionPrinter() {}
private:
// Overridden from the base class.
virtual void VisitExpressions(ZoneList<Expression*>* exprs);
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
DISALLOW_COPY_AND_ASSIGN(InstructionPrinter);
};
static void PrintSubexpression(Expression* expr) {
if (!expr->IsTrivial()) {
PrintF("@%d", expr->num());
} else if (expr->AsLiteral() != NULL) {
expr->AsLiteral()->handle()->Print();
} else if (expr->AsVariableProxy() != NULL) {
PrintF("%s", *expr->AsVariableProxy()->name()->ToCString());
} else {
UNREACHABLE();
}
}
void InstructionPrinter::VisitExpressions(ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
if (i != 0) PrintF(", ");
PrintF("@%d", exprs->at(i)->num());
}
}
// We only define printing functions for the node types that can occur as
// instructions in a flow graph. The rest are unreachable.
void InstructionPrinter::VisitDeclaration(Declaration* decl) {
UNREACHABLE();
}
void InstructionPrinter::VisitBlock(Block* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitExpressionStatement(ExpressionStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitIfStatement(IfStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitBreakStatement(BreakStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) {
PrintF("return ");
PrintSubexpression(stmt->expression());
}
void InstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitWhileStatement(WhileStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitForStatement(ForStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitForInStatement(ForInStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) {
UNREACHABLE();
}
void InstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitConditional(Conditional* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitSlot(Slot* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitVariableProxy(VariableProxy* expr) {
Variable* var = expr->AsVariable();
if (var != NULL) {
PrintF("%s", *var->name()->ToCString());
} else {
ASSERT(expr->AsProperty() != NULL);
Visit(expr->AsProperty());
}
}
void InstructionPrinter::VisitLiteral(Literal* expr) {
expr->handle()->Print();
}
void InstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitCatchExtensionObject(
CatchExtensionObject* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitAssignment(Assignment* expr) {
Variable* var = expr->target()->AsVariableProxy()->AsVariable();
Property* prop = expr->target()->AsProperty();
// Print the left-hand side.
Visit(expr->target());
if (var == NULL && prop == NULL) return; // Throw reference error.
PrintF(" = ");
// For compound assignments, print the left-hand side again and the
// corresponding binary operator.
if (expr->is_compound()) {
PrintSubexpression(expr->target());
PrintF(" %s ", Token::String(expr->binary_op()));
}
// Print the right-hand side.
PrintSubexpression(expr->value());
}
void InstructionPrinter::VisitThrow(Throw* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitProperty(Property* expr) {
PrintSubexpression(expr->obj());
if (expr->key()->IsPropertyName()) {
PrintF(".");
ASSERT(expr->key()->AsLiteral() != NULL);
expr->key()->AsLiteral()->handle()->Print();
} else {
PrintF("[");
PrintSubexpression(expr->key());
PrintF("]");
}
}
void InstructionPrinter::VisitCall(Call* expr) {
PrintF("@%d(", expr->expression()->num());
VisitExpressions(expr->arguments());
PrintF(")");
}
void InstructionPrinter::VisitCallNew(CallNew* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitCallRuntime(CallRuntime* expr) {
UNREACHABLE();
}
void InstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) {
PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num());
}
void InstructionPrinter::VisitCountOperation(CountOperation* expr) {
if (expr->is_prefix()) {
PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num());
} else {
PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op()));
}
}
void InstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) {
PrintSubexpression(expr->left());
PrintF(" %s ", Token::String(expr->op()));
PrintSubexpression(expr->right());
}
void InstructionPrinter::VisitCompareOperation(CompareOperation* expr) {
PrintSubexpression(expr->left());
PrintF(" %s ", Token::String(expr->op()));
PrintSubexpression(expr->right());
}
void InstructionPrinter::VisitThisFunction(ThisFunction* expr) {
UNREACHABLE();
}
int BasicBlock::PrintAsText(int instruction_number) {
// Print a label for all blocks except the entry.
if (HasPredecessor()) {
PrintF("L%d:", number());
}
// Number and print the instructions. Since AST child nodes are visited
// before their parents, the parent nodes can refer to them by number.
InstructionPrinter printer;
for (int i = 0; i < instructions_.length(); ++i) {
PrintF("\n%d ", instruction_number);
instructions_[i]->set_num(instruction_number++);
instructions_[i]->Accept(&printer);
}
// If this is the exit, print "exit". If there is a single successor,
// print "goto" successor on a separate line. If there are two
// successors, print "goto" successor on the same line as the last
// instruction in the block. There is a blank line between blocks (and
// after the last one).
if (left_successor_ == NULL) {
PrintF("\nexit\n\n");
} else if (right_successor_ == NULL) {
PrintF("\ngoto L%d\n\n", left_successor_->number());
} else {
PrintF(", goto (L%d, L%d)\n\n",
left_successor_->number(),
right_successor_->number());
}
return instruction_number;
}
void FlowGraph::PrintAsText(Handle<String> name) {
PrintF("\n==== name = \"%s\" ====\n", *name->ToCString());
// Print nodes in reverse postorder. Note that AST node numbers are used
// during printing of instructions and thus their current values are
// destroyed.
int number = 0;
for (int i = postorder_.length() - 1; i >= 0; --i) {
number = postorder_[i]->PrintAsText(number);
}
}
#endif // DEBUG
} } // namespace v8::internal

180
deps/v8/src/flow-graph.h

@@ -1,180 +0,0 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FLOW_GRAPH_H_
#define V8_FLOW_GRAPH_H_
#include "v8.h"
#include "data-flow.h"
#include "zone.h"
namespace v8 {
namespace internal {
// The nodes of a flow graph are basic blocks. Basic blocks consist of
// instructions represented as pointers to AST nodes in the order that they
// would be visited by the code generator. A block can have arbitrarily many
// (even zero) predecessors and up to two successors. Blocks with multiple
// predecessors are "join nodes" and blocks with multiple successors are
// "branch nodes". A block can be both a branch and a join node.
//
// Flow graphs are in edge split form: a branch node is never the
// predecessor of a merge node. Empty basic blocks are inserted to maintain
// edge split form.
class BasicBlock: public ZoneObject {
public:
// Construct a basic block with a given predecessor. NULL indicates no
// predecessor or that the predecessor will be set later.
explicit BasicBlock(BasicBlock* predecessor)
: predecessors_(2),
instructions_(8),
left_successor_(NULL),
right_successor_(NULL),
mark_(false) {
if (predecessor != NULL) AddPredecessor(predecessor);
}
bool HasPredecessor() { return !predecessors_.is_empty(); }
bool HasSuccessor() { return left_successor_ != NULL; }
// Add a given basic block as a predecessor of this block. This function
// also adds this block as a successor of the given block.
void AddPredecessor(BasicBlock* predecessor) {
ASSERT(predecessor != NULL);
predecessors_.Add(predecessor);
predecessor->AddSuccessor(this);
}
// Add an instruction to the end of this block. The block must be "open"
// by not having a successor yet.
void AddInstruction(AstNode* instruction) {
ASSERT(!HasSuccessor() && instruction != NULL);
instructions_.Add(instruction);
}
// Perform a depth-first traversal of graph rooted at this node,
// accumulating pre- and postorder traversal orders. Visited nodes are
// marked with mark.
void BuildTraversalOrder(ZoneList<BasicBlock*>* preorder,
ZoneList<BasicBlock*>* postorder,
bool mark);
bool GetMark() { return mark_; }
#ifdef DEBUG
// In debug mode, blocks are numbered in reverse postorder to help with
// printing.
int number() { return number_; }
void set_number(int n) { number_ = n; }
// Print a basic block, given the number of the first instruction.
// Returns the next number after the number of the last instruction.
int PrintAsText(int instruction_number);
#endif
private:
// Add a given basic block as successor to this block. This function does
// not add this block as a predecessor of the given block so as to avoid
// circularity.
void AddSuccessor(BasicBlock* successor) {
ASSERT(right_successor_ == NULL && successor != NULL);
if (HasSuccessor()) {
right_successor_ = successor;
} else {
left_successor_ = successor;
}
}
ZoneList<BasicBlock*> predecessors_;
ZoneList<AstNode*> instructions_;
BasicBlock* left_successor_;
BasicBlock* right_successor_;
// Support for graph traversal. Before traversal, all nodes in the graph
// have the same mark (true or false). Traversal marks already-visited
// nodes with the opposite mark. After traversal, all nodes again have
// the same mark. Traversal of the same graph is not reentrant.
bool mark_;
#ifdef DEBUG
int number_;
#endif
DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
// A flow graph has distinguished entry and exit blocks. The entry block is
// the only one with no predecessors and the exit block is the only one with
// no successors.
class FlowGraph: public ZoneObject {
public:
FlowGraph(BasicBlock* entry, BasicBlock* exit)
: entry_(entry), exit_(exit), preorder_(8), postorder_(8) {
}
ZoneList<BasicBlock*>* preorder() { return &preorder_; }
ZoneList<BasicBlock*>* postorder() { return &postorder_; }
#ifdef DEBUG
void PrintAsText(Handle<String> name);
#endif
private:
BasicBlock* entry_;
BasicBlock* exit_;
ZoneList<BasicBlock*> preorder_;
ZoneList<BasicBlock*> postorder_;
};
// The flow graph builder walks the AST adding reachable AST nodes to the
// flow graph as instructions. It remembers the entry and exit nodes of the
// graph, and keeps a pointer to the current block being constructed.
class FlowGraphBuilder: public AstVisitor {
public:
FlowGraphBuilder() {}
FlowGraph* Build(FunctionLiteral* lit);
private:
// AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
BasicBlock* entry_;
BasicBlock* exit_;
BasicBlock* current_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
};
} } // namespace v8::internal
#endif // V8_FLOW_GRAPH_H_

15
deps/v8/src/frames-inl.h

@@ -64,9 +64,8 @@ inline bool StackHandler::includes(Address address) const {
 }


-inline void StackHandler::Iterate(ObjectVisitor* v) const {
-  // Stack handlers do not contain any pointers that need to be
-  // traversed.
+inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
+  StackFrame::IteratePc(v, pc_address(), holder);
 }


@@ -81,15 +80,9 @@ inline StackHandler::State StackHandler::state() const {
 }


-inline Address StackHandler::pc() const {
+inline Address* StackHandler::pc_address() const {
   const int offset = StackHandlerConstants::kPCOffset;
-  return Memory::Address_at(address() + offset);
-}
-
-
-inline void StackHandler::set_pc(Address value) {
-  const int offset = StackHandlerConstants::kPCOffset;
-  Memory::Address_at(address() + offset) = value;
+  return reinterpret_cast<Address*>(address() + offset);
 }

215
deps/v8/src/frames.cc

@ -36,6 +36,11 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
PcToCodeCache::PcToCodeCacheEntry
PcToCodeCache::cache_[PcToCodeCache::kPcToCodeCacheSize];
int SafeStackFrameIterator::active_count_ = 0;
// Iterator that supports traversing the stack handlers of a // Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain. // particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator BASE_EMBEDDED { class StackHandlerIterator BASE_EMBEDDED {
@@ -88,7 +93,6 @@ StackFrameIterator::StackFrameIterator(bool use_top, Address fp, Address sp)
   if (use_top || fp != NULL) {
     Reset();
   }
-  JavaScriptFrame_.DisableHeapAccess();
 }

 #undef INITIALIZE_SINGLETON
@@ -201,7 +205,7 @@ bool StackTraceFrameIterator::IsValidFrame() {
 SafeStackFrameIterator::SafeStackFrameIterator(
     Address fp, Address sp, Address low_bound, Address high_bound) :
-    low_bound_(low_bound), high_bound_(high_bound),
+    maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
     is_valid_top_(
         IsWithinBounds(low_bound, high_bound,
                        Top::c_entry_fp(Top::GetCurrentThread())) &&
@@ -302,69 +306,42 @@ void SafeStackTraceFrameIterator::Advance() {
 #endif
 
 
-// -------------------------------------------------------------------------
-
-void StackHandler::Cook(Code* code) {
-  ASSERT(code->contains(pc()));
-  set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
-}
-
-void StackHandler::Uncook(Code* code) {
-  set_pc(code->instruction_start() + OffsetFrom(pc()));
-  ASSERT(code->contains(pc()));
-}
-
-// -------------------------------------------------------------------------
-
 bool StackFrame::HasHandler() const {
   StackHandlerIterator it(this, top_handler());
   return !it.done();
 }
 
-void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
-  ASSERT(!thread->stack_is_cooked());
-  for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
-    it.frame()->Cook();
+void StackFrame::IteratePc(ObjectVisitor* v,
+                           Address* pc_address,
+                           Code* holder) {
+  Address pc = *pc_address;
+  ASSERT(holder->contains(pc));
+  unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
+  Object* code = holder;
+  v->VisitPointer(&code);
+  if (code != holder) {
+    holder = reinterpret_cast<Code*>(code);
+    pc = holder->instruction_start() + pc_offset;
+    *pc_address = pc;
   }
-  thread->set_stack_is_cooked(true);
 }
 
-void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
-  ASSERT(thread->stack_is_cooked());
-  for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
-    it.frame()->Uncook();
-  }
-  thread->set_stack_is_cooked(false);
-}
-
-void StackFrame::Cook() {
-  Code* code = this->code();
-  ASSERT(code->IsCode());
-  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
-    it.handler()->Cook(code);
-  }
-  ASSERT(code->contains(pc()));
-  set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
-}
-
-void StackFrame::Uncook() {
-  Code* code = this->code();
-  ASSERT(code->IsCode());
-  for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
-    it.handler()->Uncook(code);
-  }
-  set_pc(code->instruction_start() + OffsetFrom(pc()));
-  ASSERT(code->contains(pc()));
+StackFrame::Type StackFrame::ComputeType(State* state) {
+  ASSERT(state->fp != NULL);
+  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+    return ARGUMENTS_ADAPTOR;
+  }
+  // The marker and function offsets overlap. If the marker isn't a
+  // smi then the frame is a JavaScript frame -- and the marker is
+  // really the function.
+  const int offset = StandardFrameConstants::kMarkerOffset;
+  Object* marker = Memory::Object_at(state->fp + offset);
+  if (!marker->IsSmi()) return JAVA_SCRIPT;
+  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
 }
 
 
 StackFrame::Type StackFrame::GetCallerState(State* state) const {
   ComputeCallerState(state);
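The pc_offset arithmetic in IteratePc above is what lets return addresses survive a moving collector: the pc is turned into an offset that is independent of the code object's address, the code pointer itself is visited (and possibly updated by the GC), and the absolute pc is rebuilt afterwards. A toy model of that round-trip, with plain heap buffers standing in for Code objects and an explicit copy standing in for relocation:

#include <cassert>
#include <cstddef>

// Toy illustration of the IteratePc idea: keep the pc's *offset* into its
// code object, let "the GC" move the object, then rebuild the absolute pc
// from the object's new start address.
struct CodeObj {
  char instructions[64];
  char* instruction_start() { return instructions; }
};

int main() {
  CodeObj* code = new CodeObj;
  char* pc = code->instruction_start() + 12;           // Return address.
  size_t pc_offset = pc - code->instruction_start();   // Position-independent.

  CodeObj* moved = new CodeObj(*code);  // "GC" relocates the object.
  delete code;

  pc = moved->instruction_start() + pc_offset;         // Re-derive the pc.
  assert(pc - moved->instruction_start() == 12);
  delete moved;
}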
@@ -372,8 +349,8 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const {
 }
 
 
-Code* EntryFrame::code() const {
-  return Heap::js_entry_code();
+Code* EntryFrame::unchecked_code() const {
+  return Heap::raw_unchecked_js_entry_code();
 }
@@ -395,8 +372,8 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
 }
 
 
-Code* EntryConstructFrame::code() const {
-  return Heap::js_construct_entry_code();
+Code* EntryConstructFrame::unchecked_code() const {
+  return Heap::raw_unchecked_js_construct_entry_code();
 }
@@ -406,8 +383,8 @@ Object*& ExitFrame::code_slot() const {
 }
 
 
-Code* ExitFrame::code() const {
-  return Code::cast(code_slot());
+Code* ExitFrame::unchecked_code() const {
+  return reinterpret_cast<Code*>(code_slot());
 }
@@ -425,6 +402,14 @@ void ExitFrame::SetCallerFp(Address caller_fp) {
 }
 
 
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+  // The arguments are traversed as part of the expression stack of
+  // the calling frame.
+  IteratePc(v, pc_address(), code());
+  v->VisitPointer(&code_slot());
+}
+
+
 Address ExitFrame::GetCallerStackPointer() const {
   return fp() + ExitFrameConstants::kCallerSPDisplacement;
 }
@@ -493,22 +478,65 @@ bool JavaScriptFrame::IsConstructor() const {
 }
 
 
-Code* JavaScriptFrame::code() const {
+Code* JavaScriptFrame::unchecked_code() const {
+  JSFunction* function = JSFunction::cast(this->function());
+  return function->unchecked_code();
+}
+
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+  return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+  int arguments;
+  if (Heap::gc_state() != Heap::NOT_IN_GC ||
+      SafeStackFrameIterator::is_active()) {
+    // If the we are currently iterating the safe stack the
+    // arguments for frames are traversed as if they were
+    // expression stack elements of the calling frame. The reason for
+    // this rather strange decision is that we cannot access the
+    // function during mark-compact GCs when objects may have been marked.
+    // In fact accessing heap objects (like function->shared() below)
+    // at all during GC is problematic.
+    arguments = 0;
+  } else {
+    // Compute the number of arguments by getting the number of formal
+    // parameters of the function. We must remember to take the
+    // receiver into account (+1).
     JSFunction* function = JSFunction::cast(this->function());
-  return function->shared()->code();
+    arguments = function->shared()->formal_parameter_count() + 1;
+  }
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments * kPointerSize);
 }
 
 
-Code* ArgumentsAdaptorFrame::code() const {
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+  const int arguments = Smi::cast(GetExpression(0))->value();
+  const int offset = StandardFrameConstants::kCallerSPOffset;
+  return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+  // Internal frames have no arguments. The stack pointer of the
+  // caller is at a fixed offset from the frame pointer.
+  return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+Code* ArgumentsAdaptorFrame::unchecked_code() const {
   return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline);
 }
 
 
-Code* InternalFrame::code() const {
+Code* InternalFrame::unchecked_code() const {
   const int offset = InternalFrameConstants::kCodeOffset;
   Object* code = Memory::Object_at(fp() + offset);
   ASSERT(code != NULL);
-  return Code::cast(code);
+  return reinterpret_cast<Code*>(code);
 }
@@ -694,13 +722,14 @@ void EntryFrame::Iterate(ObjectVisitor* v) const {
   ASSERT(!it.done());
   StackHandler* handler = it.handler();
   ASSERT(handler->is_entry());
-  handler->Iterate(v);
-  // Make sure that there's the entry frame does not contain more than
-  // one stack handler.
+  handler->Iterate(v, code());
 #ifdef DEBUG
+  // Make sure that the entry frame does not contain more than one
+  // stack handler.
   it.Advance();
   ASSERT(it.done());
 #endif
+  IteratePc(v, pc_address(), code());
 }
@@ -717,7 +746,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
     v->VisitPointers(base, reinterpret_cast<Object**>(address));
     base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
 
     // Traverse the pointers in the handler itself.
-    handler->Iterate(v);
+    handler->Iterate(v, code());
   }
   v->VisitPointers(base, limit);
 }
@@ -725,6 +754,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
 void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
   IterateExpressions(v);
+  IteratePc(v, pc_address(), code());
 
   // Traverse callee-saved registers, receiver, and parameters.
   const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
@@ -739,6 +769,7 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
   // Internal frames only have object pointers on the expression stack
   // as they never have any arguments.
   IterateExpressions(v);
+  IteratePc(v, pc_address(), code());
 }
@@ -760,6 +791,56 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
 // -------------------------------------------------------------------------
 
 
+Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
+  Code* code = reinterpret_cast<Code*>(object);
+  ASSERT(code != NULL && code->contains(pc));
+  return code;
+}
+
+
+Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+  // Check if the pc points into a large object chunk.
+  LargeObjectChunk* chunk = Heap::lo_space()->FindChunkContainingPc(pc);
+  if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
+
+  // Iterate through the 8K page until we reach the end or find an
+  // object starting after the pc.
+  Page* page = Page::FromAddress(pc);
+  HeapObjectIterator iterator(page, Heap::GcSafeSizeOfOldObjectFunction());
+  HeapObject* previous = NULL;
+  while (true) {
+    HeapObject* next = iterator.next();
+    if (next == NULL || next->address() >= pc) {
+      return GcSafeCastToCode(previous, pc);
+    }
+    previous = next;
+  }
+}
+
+
+PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
+  Counters::pc_to_code.Increment();
+  ASSERT(IsPowerOf2(kPcToCodeCacheSize));
+  uint32_t hash = ComputeIntegerHash(
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
+  uint32_t index = hash & (kPcToCodeCacheSize - 1);
+  PcToCodeCacheEntry* entry = cache(index);
+  if (entry->pc == pc) {
+    Counters::pc_to_code_cached.Increment();
+    ASSERT(entry->code == GcSafeFindCodeForPc(pc));
+  } else {
+    // Because this code may be interrupted by a profiling signal that
+    // also queries the cache, we cannot update pc before the code has
+    // been set. Otherwise, we risk trying to use a cache entry before
+    // the code has been computed.
+    entry->code = GcSafeFindCodeForPc(pc);
+    entry->pc = pc;
+  }
+  return entry;
+}
+
+
+// -------------------------------------------------------------------------
+
+
 int NumRegs(RegList reglist) {
   int n = 0;
   while (reglist != 0) {
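GetCacheEntry above is a direct-mapped, power-of-two-sized software cache, and its comment pins down the one concurrency rule: fill the payload before the key. A minimal sketch of the same shape, where SlowLookup and the hash constant are hypothetical stand-ins for GcSafeFindCodeForPc and ComputeIntegerHash:

#include <cstdint>
#include <cstdio>

// Direct-mapped cache in the style of PcToCodeCache.
struct Entry {
  uintptr_t pc;
  const char* code;
};

const int kSize = 256;  // Power of two so `hash & (kSize - 1)` picks a slot.
static Entry cache[kSize];

const char* SlowLookup(uintptr_t pc) { return "some-code-object"; }

const char* Lookup(uintptr_t pc) {
  uint32_t hash = static_cast<uint32_t>(pc) * 2654435761u;  // Knuth multiply.
  Entry* entry = &cache[hash & (kSize - 1)];
  if (entry->pc != pc) {
    // Fill order matters if a signal handler can read this entry
    // concurrently: publish the payload (code) before the key (pc), so a
    // reader that sees a matching pc never sees a stale code pointer.
    entry->code = SlowLookup(pc);
    entry->pc = pc;
  }
  return entry->code;
}

int main() {
  std::printf("%s\n", Lookup(0x1234));  // Miss, then fill.
  std::printf("%s\n", Lookup(0x1234));  // Hit.
}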

102
deps/v8/src/frames.h

@@ -46,6 +46,32 @@ class Top;
 class ThreadLocalTop;
 
 
+class PcToCodeCache : AllStatic {
+ public:
+  struct PcToCodeCacheEntry {
+    Address pc;
+    Code* code;
+  };
+
+  static PcToCodeCacheEntry* cache(int index) {
+    return &cache_[index];
+  }
+
+  static Code* GcSafeFindCodeForPc(Address pc);
+  static Code* GcSafeCastToCode(HeapObject* object, Address pc);
+
+  static void FlushPcToCodeCache() {
+    memset(&cache_[0], 0, sizeof(cache_));
+  }
+
+  static PcToCodeCacheEntry* GetCacheEntry(Address pc);
+
+ private:
+  static const int kPcToCodeCacheSize = 256;
+  static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+};
+
+
 class StackHandler BASE_EMBEDDED {
  public:
   enum State {
@@ -64,7 +90,7 @@ class StackHandler BASE_EMBEDDED {
   inline bool includes(Address address) const;
 
   // Garbage collection support.
-  inline void Iterate(ObjectVisitor* v) const;
+  inline void Iterate(ObjectVisitor* v, Code* holder) const;
 
   // Conversion support.
   static inline StackHandler* FromAddress(Address address);
@@ -74,16 +100,11 @@ class StackHandler BASE_EMBEDDED {
   bool is_try_catch() { return state() == TRY_CATCH; }
   bool is_try_finally() { return state() == TRY_FINALLY; }
 
-  // Garbage collection support.
-  void Cook(Code* code);
-  void Uncook(Code* code);
-
  private:
   // Accessors.
   inline State state() const;
-  inline Address pc() const;
-  inline void set_pc(Address value);
+  inline Address* pc_address() const;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
 };
@@ -112,7 +133,13 @@ class StackFrame BASE_EMBEDDED {
   // Opaque data type for identifying stack frames. Used extensively
   // by the debugger.
-  enum Id { NO_ID = 0 };
+  // ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
+  // has correct value range (see Issue 830 for more details).
+  enum Id {
+    ID_MIN_VALUE = kMinInt,
+    ID_MAX_VALUE = kMaxInt,
+    NO_ID = 0
+  };
 
   // Copy constructor; it breaks the connection to host iterator.
   StackFrame(const StackFrame& original) {
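The ID_MIN_VALUE/ID_MAX_VALUE enumerators work because a pre-C++11 compiler chooses the enum's underlying type from the range of its enumerators (there was no `enum Id : int` yet); pinning the range to [kMinInt, kMaxInt] guarantees that any int-valued frame id is representable. A standalone illustration using the standard limits in place of V8's kMinInt/kMaxInt:

#include <climits>

// Without extreme enumerators the compiler may pick any integral type big
// enough for the listed values; adding INT_MIN and INT_MAX forces the
// enum's value range (and in practice its size) to that of a plain int.
enum Id {
  ID_MIN_VALUE = INT_MIN,
  ID_MAX_VALUE = INT_MAX,
  NO_ID = 0
};

int main() {
  Id id = static_cast<Id>(42);  // Any int-valued frame id is representable.
  return id == NO_ID ? 1 : 0;
}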
@@ -152,13 +179,20 @@ class StackFrame BASE_EMBEDDED {
   virtual Type type() const = 0;
 
   // Get the code associated with this frame.
-  virtual Code* code() const = 0;
-
-  // Garbage collection support.
-  static void CookFramesForThread(ThreadLocalTop* thread);
-  static void UncookFramesForThread(ThreadLocalTop* thread);
-
-  virtual void Iterate(ObjectVisitor* v) const = 0;
+  // This method could be called during marking phase of GC.
+  virtual Code* unchecked_code() const = 0;
+
+  // Get the code associated with this frame.
+  Code* code() const { return GetContainingCode(pc()); }
+
+  // Get the code object that contains the given pc.
+  Code* GetContainingCode(Address pc) const {
+    return PcToCodeCache::GetCacheEntry(pc)->code;
+  }
+
+  static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
+
+  virtual void Iterate(ObjectVisitor* v) const { }
 
   // Printing support.
   enum PrintMode { OVERVIEW, DETAILS };
@@ -200,10 +234,6 @@ class StackFrame BASE_EMBEDDED {
   // Get the type and the state of the calling frame.
   virtual Type GetCallerState(State* state) const;
 
-  // Cooking/uncooking support.
-  void Cook();
-  void Uncook();
-
   friend class StackFrameIterator;
   friend class StackHandlerIterator;
   friend class SafeStackFrameIterator;
@@ -218,7 +248,7 @@ class EntryFrame: public StackFrame {
  public:
   virtual Type type() const { return ENTRY; }
 
-  virtual Code* code() const;
+  virtual Code* unchecked_code() const;
 
   // Garbage collection support.
   virtual void Iterate(ObjectVisitor* v) const;
@@ -249,7 +279,7 @@ class EntryConstructFrame: public EntryFrame {
  public:
   virtual Type type() const { return ENTRY_CONSTRUCT; }
 
-  virtual Code* code() const;
+  virtual Code* unchecked_code() const;
 
   static EntryConstructFrame* cast(StackFrame* frame) {
     ASSERT(frame->is_entry_construct());
@@ -268,10 +298,9 @@ class EntryConstructFrame: public EntryFrame {
 // Exit frames are used to exit JavaScript execution and go to C.
 class ExitFrame: public StackFrame {
  public:
-  enum Mode { MODE_NORMAL, MODE_DEBUG };
   virtual Type type() const { return EXIT; }
 
-  virtual Code* code() const;
+  virtual Code* unchecked_code() const;
 
   Object*& code_slot() const;
@@ -397,7 +426,7 @@ class JavaScriptFrame: public StandardFrame {
                              int index) const;
 
   // Determine the code for the frame.
-  virtual Code* code() const;
+  virtual Code* unchecked_code() const;
 
   static JavaScriptFrame* cast(StackFrame* frame) {
     ASSERT(frame->is_java_script());
@@ -406,19 +435,11 @@ class JavaScriptFrame: public StandardFrame {
  protected:
   explicit JavaScriptFrame(StackFrameIterator* iterator)
-      : StandardFrame(iterator), disable_heap_access_(false) { }
+      : StandardFrame(iterator) { }
 
   virtual Address GetCallerStackPointer() const;
 
-  // When this mode is enabled it is not allowed to access heap objects.
-  // This is a special mode used when gathering stack samples in profiler.
-  // A shortcoming is that caller's SP value will be calculated incorrectly
-  // (see GetCallerStackPointer implementation), but it is not used for stack
-  // sampling.
-  void DisableHeapAccess() { disable_heap_access_ = true; }
-
  private:
-  bool disable_heap_access_;
   inline Object* function_slot_object() const;
 
   friend class StackFrameIterator;
@@ -433,7 +454,7 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
   virtual Type type() const { return ARGUMENTS_ADAPTOR; }
 
   // Determine the code for the frame.
-  virtual Code* code() const;
+  virtual Code* unchecked_code() const;
 
   static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
     ASSERT(frame->is_arguments_adaptor());
@@ -463,7 +484,7 @@ class InternalFrame: public StandardFrame {
   virtual void Iterate(ObjectVisitor* v) const;
 
   // Determine the code for the frame.
-  virtual Code* code() const;
+  virtual Code* unchecked_code() const;
 
   static InternalFrame* cast(StackFrame* frame) {
     ASSERT(frame->is_internal());
@@ -625,6 +646,8 @@ class SafeStackFrameIterator BASE_EMBEDDED {
   void Advance();
   void Reset();
 
+  static bool is_active() { return active_count_ > 0; }
+
   static bool IsWithinBounds(
       Address low_bound, Address high_bound, Address addr) {
     return low_bound <= addr && addr <= high_bound;
@@ -638,6 +661,19 @@ class SafeStackFrameIterator BASE_EMBEDDED {
   bool IsValidFrame(StackFrame* frame) const;
   bool IsValidCaller(StackFrame* frame);
 
+  // This is a nasty hack to make sure the active count is incremented
+  // before the constructor for the embedded iterator is invoked. This
+  // is needed because the constructor will start looking at frames
+  // right away and we need to make sure it doesn't start inspecting
+  // heap objects.
+  class ActiveCountMaintainer BASE_EMBEDDED {
+   public:
+    ActiveCountMaintainer() { active_count_++; }
+    ~ActiveCountMaintainer() { active_count_--; }
+  };
+
+  ActiveCountMaintainer maintainer_;
+  static int active_count_;
   Address low_bound_;
   Address high_bound_;
   const bool is_valid_top_;
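The "nasty hack" relies on a C++ guarantee: non-static data members are constructed in declaration order, so a member declared first can publish state before later members' constructors run. A compilable model of the pattern (simplified types, not the real iterator):

#include <cassert>

static int active_count = 0;

struct ActiveCountMaintainer {
  ActiveCountMaintainer() { ++active_count; }
  ~ActiveCountMaintainer() { --active_count; }
};

struct EmbeddedIterator {
  EmbeddedIterator() {
    // Runs after maintainer_ below, so the flag is already raised.
    assert(active_count > 0);
  }
};

struct SafeIterator {
  ActiveCountMaintainer maintainer_;  // Declared first: constructed first.
  EmbeddedIterator iterator_;         // Sees active_count already incremented.
};

int main() {
  SafeIterator it;
  assert(active_count == 1);
  return 0;
}  // Destruction runs in reverse order; active_count returns to 0.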

750
deps/v8/src/full-codegen.cc

@@ -30,6 +30,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "full-codegen.h"
+#include "macro-assembler.h"
 #include "scopes.h"
 #include "stub-cache.h"
 #include "debug.h"
@@ -38,407 +39,6 @@
 namespace v8 {
 namespace internal {
 
-#define BAILOUT(reason)                         \
-  do {                                          \
-    if (FLAG_trace_bailout) {                   \
-      PrintF("%s\n", reason);                   \
-    }                                           \
-    has_supported_syntax_ = false;              \
-    return;                                     \
-  } while (false)
-
-#define CHECK_BAILOUT                           \
-  do {                                          \
-    if (!has_supported_syntax_) return;         \
-  } while (false)
-
-void FullCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
-  Scope* scope = fun->scope();
-  VisitDeclarations(scope->declarations());
-  CHECK_BAILOUT;
-  VisitStatements(fun->body());
-}
-
-void FullCodeGenSyntaxChecker::VisitDeclarations(
-    ZoneList<Declaration*>* decls) {
-  for (int i = 0; i < decls->length(); i++) {
-    Visit(decls->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
-  for (int i = 0, len = stmts->length(); i < len; i++) {
-    Visit(stmts->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
-  Property* prop = decl->proxy()->AsProperty();
-  if (prop != NULL) {
-    Visit(prop->obj());
-    Visit(prop->key());
-  }
-  if (decl->fun() != NULL) {
-    Visit(decl->fun());
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
-  VisitStatements(stmt->statements());
-}
-
-void FullCodeGenSyntaxChecker::VisitExpressionStatement(
-    ExpressionStatement* stmt) {
-  Visit(stmt->expression());
-}
-
-void FullCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
-  // Supported.
-}
-
-void FullCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
-  Visit(stmt->condition());
-  CHECK_BAILOUT;
-  Visit(stmt->then_statement());
-  CHECK_BAILOUT;
-  Visit(stmt->else_statement());
-}
-
-void FullCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
-  // Supported.
-}
-
-void FullCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
-  // Supported.
-}
-
-void FullCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
-  Visit(stmt->expression());
-}
-
-void FullCodeGenSyntaxChecker::VisitWithEnterStatement(
-    WithEnterStatement* stmt) {
-  Visit(stmt->expression());
-}
-
-void FullCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
-  // Supported.
-}
-
-void FullCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
-  BAILOUT("SwitchStatement");
-}
-
-void FullCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  Visit(stmt->cond());
-  CHECK_BAILOUT;
-  Visit(stmt->body());
-}
-
-void FullCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
-  Visit(stmt->cond());
-  CHECK_BAILOUT;
-  Visit(stmt->body());
-}
-
-void FullCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
-  if (!FLAG_always_full_compiler) BAILOUT("ForStatement");
-  if (stmt->init() != NULL) {
-    Visit(stmt->init());
-    CHECK_BAILOUT;
-  }
-  if (stmt->cond() != NULL) {
-    Visit(stmt->cond());
-    CHECK_BAILOUT;
-  }
-  Visit(stmt->body());
-  if (stmt->next() != NULL) {
-    CHECK_BAILOUT;
-    Visit(stmt->next());
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
-  BAILOUT("ForInStatement");
-}
-
-void FullCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  Visit(stmt->try_block());
-  CHECK_BAILOUT;
-  Visit(stmt->catch_block());
-}
-
-void FullCodeGenSyntaxChecker::VisitTryFinallyStatement(
-    TryFinallyStatement* stmt) {
-  Visit(stmt->try_block());
-  CHECK_BAILOUT;
-  Visit(stmt->finally_block());
-}
-
-void FullCodeGenSyntaxChecker::VisitDebuggerStatement(
-    DebuggerStatement* stmt) {
-  // Supported.
-}
-
-void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
-  // Supported.
-}
-
-void FullCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* expr) {
-  BAILOUT("SharedFunctionInfoLiteral");
-}
-
-void FullCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
-  Visit(expr->condition());
-  CHECK_BAILOUT;
-  Visit(expr->then_expression());
-  CHECK_BAILOUT;
-  Visit(expr->else_expression());
-}
-
-void FullCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
-  UNREACHABLE();
-}
-
-void FullCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
-  // Supported.
-}
-
-void FullCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
-  // Supported.
-}
-
-void FullCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
-  // Supported.
-}
-
-void FullCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
-  ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
-  for (int i = 0, len = properties->length(); i < len; i++) {
-    ObjectLiteral::Property* property = properties->at(i);
-    if (property->IsCompileTimeValue()) continue;
-    Visit(property->key());
-    CHECK_BAILOUT;
-    Visit(property->value());
-    CHECK_BAILOUT;
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
-  ZoneList<Expression*>* subexprs = expr->values();
-  for (int i = 0, len = subexprs->length(); i < len; i++) {
-    Expression* subexpr = subexprs->at(i);
-    if (subexpr->AsLiteral() != NULL) continue;
-    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-    Visit(subexpr);
-    CHECK_BAILOUT;
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitCatchExtensionObject(
-    CatchExtensionObject* expr) {
-  Visit(expr->key());
-  CHECK_BAILOUT;
-  Visit(expr->value());
-}
-
-void FullCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
-  Token::Value op = expr->op();
-  if (op == Token::INIT_CONST) BAILOUT("initialize constant");
-
-  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  Property* prop = expr->target()->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
-  if (var != NULL) {
-    if (var->mode() == Variable::CONST) BAILOUT("Assignment to const");
-    // All other variables are supported.
-  } else if (prop != NULL) {
-    Visit(prop->obj());
-    CHECK_BAILOUT;
-    Visit(prop->key());
-    CHECK_BAILOUT;
-  } else {
-    // This is a throw reference error.
-    BAILOUT("non-variable/non-property assignment");
-  }
-
-  Visit(expr->value());
-}
-
-void FullCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
-  Visit(expr->exception());
-}
-
-void FullCodeGenSyntaxChecker::VisitProperty(Property* expr) {
-  Visit(expr->obj());
-  CHECK_BAILOUT;
-  Visit(expr->key());
-}
-
-void FullCodeGenSyntaxChecker::VisitCall(Call* expr) {
-  Expression* fun = expr->expression();
-  ZoneList<Expression*>* args = expr->arguments();
-  Variable* var = fun->AsVariableProxy()->AsVariable();
-
-  // Check for supported calls
-  if (var != NULL && var->is_possibly_eval()) {
-    BAILOUT("call to the identifier 'eval'");
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // Calls to global variables are supported.
-  } else if (var != NULL && var->slot() != NULL &&
-             var->slot()->type() == Slot::LOOKUP) {
-    BAILOUT("call to a lookup slot");
-  } else if (fun->AsProperty() != NULL) {
-    Property* prop = fun->AsProperty();
-    Visit(prop->obj());
-    CHECK_BAILOUT;
-    Visit(prop->key());
-    CHECK_BAILOUT;
-  } else {
-    // Otherwise the call is supported if the function expression is.
-    Visit(fun);
-  }
-
-  // Check all arguments to the call.
-  for (int i = 0; i < args->length(); i++) {
-    Visit(args->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
-  Visit(expr->expression());
-  CHECK_BAILOUT;
-  ZoneList<Expression*>* args = expr->arguments();
-  // Check all arguments to the call
-  for (int i = 0; i < args->length(); i++) {
-    Visit(args->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
-  // Check for inline runtime call
-  if (expr->name()->Get(0) == '_' &&
-      CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
-    BAILOUT("inlined runtime call");
-  }
-  // Check all arguments to the call. (Relies on TEMP meaning STACK.)
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    Visit(expr->arguments()->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
-  switch (expr->op()) {
-    case Token::ADD:
-    case Token::BIT_NOT:
-    case Token::NOT:
-    case Token::SUB:
-    case Token::TYPEOF:
-    case Token::VOID:
-      Visit(expr->expression());
-      break;
-    case Token::DELETE:
-      BAILOUT("UnaryOperation: DELETE");
-    default:
-      UNREACHABLE();
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
-  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-  Property* prop = expr->expression()->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
-  if (var != NULL) {
-    // All global variables are supported.
-    if (!var->is_global()) {
-      ASSERT(var->slot() != NULL);
-      Slot::Type type = var->slot()->type();
-      if (type == Slot::LOOKUP) {
-        BAILOUT("CountOperation with lookup slot");
-      }
-    }
-  } else if (prop != NULL) {
-    Visit(prop->obj());
-    CHECK_BAILOUT;
-    Visit(prop->key());
-    CHECK_BAILOUT;
-  } else {
-    // This is a throw reference error.
-    BAILOUT("CountOperation non-variable/non-property expression");
-  }
-}
-
-void FullCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
-  Visit(expr->left());
-  CHECK_BAILOUT;
-  Visit(expr->right());
-}
-
-void FullCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
-  Visit(expr->left());
-  CHECK_BAILOUT;
-  Visit(expr->right());
-}
-
-void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
-  // Supported.
-}
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-
 
 void BreakableStatementChecker::Check(Statement* stmt) {
   Visit(stmt);
 }
@@ -616,6 +216,12 @@ void BreakableStatementChecker::VisitThrow(Throw* expr) {
 }
 
 
+void BreakableStatementChecker::VisitIncrementOperation(
+    IncrementOperation* expr) {
+  UNREACHABLE();
+}
+
+
 void BreakableStatementChecker::VisitProperty(Property* expr) {
   // Property load is breakable.
   is_breakable_ = true;
@@ -654,6 +260,11 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) {
 }
 
 
+void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
+  Visit(expr->expression());
+}
+
+
 void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
   Visit(expr->left());
   Visit(expr->right());
@@ -707,6 +318,46 @@ int FullCodeGenerator::SlotOffset(Slot* slot) {
 }
 
 
+bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
+  // TODO(kasperl): Once the compare stub allows leaving out the
+  // inlined smi case, we should get rid of this check.
+  if (Token::IsCompareOp(op)) return true;
+  // TODO(kasperl): Once the unary bit not stub allows leaving out
+  // the inlined smi case, we should get rid of this check.
+  if (op == Token::BIT_NOT) return true;
+  // Inline smi case inside loops, but not division and modulo which
+  // are too complicated and take up too much space.
+  return (op != Token::DIV) && (op != Token::MOD) && (loop_depth_ > 0);
+}
+
+
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+                                    Label* materialize_false,
+                                    Label** if_true,
+                                    Label** if_false,
+                                    Label** fall_through) {
+  switch (context_) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      // In an effect context, the true and the false case branch to the
+      // same label.
+      *if_true = *if_false = *fall_through = materialize_true;
+      break;
+    case Expression::kValue:
+      *if_true = *fall_through = materialize_true;
+      *if_false = materialize_false;
+      break;
+    case Expression::kTest:
+      *if_true = true_label_;
+      *if_false = false_label_;
+      *fall_through = fall_through_;
+      break;
+  }
+}
+
+
 void FullCodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
   int length = declarations->length();
@@ -851,78 +502,80 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
 void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
   Handle<String> name = expr->name();
-  if (strcmp("_IsSmi", *name->ToCString()) == 0) {
-    EmitIsSmi(expr->arguments());
-  } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
-    EmitIsNonNegativeSmi(expr->arguments());
-  } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
-    EmitIsObject(expr->arguments());
-  } else if (strcmp("_IsSpecObject", *name->ToCString()) == 0) {
-    EmitIsSpecObject(expr->arguments());
-  } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
-    EmitIsUndetectableObject(expr->arguments());
-  } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
-    EmitIsFunction(expr->arguments());
-  } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
-    EmitIsArray(expr->arguments());
-  } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
-    EmitIsRegExp(expr->arguments());
-  } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
-    EmitIsConstructCall(expr->arguments());
-  } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
-    EmitObjectEquals(expr->arguments());
-  } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
-    EmitArguments(expr->arguments());
-  } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
-    EmitArgumentsLength(expr->arguments());
-  } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
-    EmitClassOf(expr->arguments());
-  } else if (strcmp("_Log", *name->ToCString()) == 0) {
-    EmitLog(expr->arguments());
-  } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
-    EmitRandomHeapNumber(expr->arguments());
-  } else if (strcmp("_SubString", *name->ToCString()) == 0) {
-    EmitSubString(expr->arguments());
-  } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
-    EmitRegExpExec(expr->arguments());
-  } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
-    EmitValueOf(expr->arguments());
-  } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
-    EmitSetValueOf(expr->arguments());
-  } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
-    EmitNumberToString(expr->arguments());
-  } else if (strcmp("_StringCharFromCode", *name->ToCString()) == 0) {
-    EmitStringCharFromCode(expr->arguments());
-  } else if (strcmp("_StringCharCodeAt", *name->ToCString()) == 0) {
-    EmitStringCharCodeAt(expr->arguments());
-  } else if (strcmp("_StringCharAt", *name->ToCString()) == 0) {
-    EmitStringCharAt(expr->arguments());
-  } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
-    EmitStringAdd(expr->arguments());
-  } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
-    EmitStringCompare(expr->arguments());
-  } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
-    EmitMathPow(expr->arguments());
-  } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
-    EmitMathSin(expr->arguments());
-  } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
-    EmitMathCos(expr->arguments());
-  } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
-    EmitMathSqrt(expr->arguments());
-  } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
-    EmitCallFunction(expr->arguments());
-  } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
-    EmitRegExpConstructResult(expr->arguments());
-  } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
-    EmitSwapElements(expr->arguments());
-  } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
-    EmitGetFromCache(expr->arguments());
-  } else if (strcmp("_IsRegExpEquivalent", *name->ToCString()) == 0) {
-    EmitIsRegExpEquivalent(expr->arguments());
-  } else if (strcmp("_IsStringWrapperSafeForDefaultValueOf",
-                    *name->ToCString()) == 0) {
-    EmitIsStringWrapperSafeForDefaultValueOf(expr->arguments());
-  } else {
-    UNREACHABLE();
+  SmartPointer<char> cstring = name->ToCString();
+
+#define CHECK_EMIT_INLINE_CALL(name, x, y) \
+  if (strcmp("_"#name, *cstring) == 0) {   \
+    Emit##name(expr->arguments());         \
+    return;                                \
+  }
+  INLINE_RUNTIME_FUNCTION_LIST(CHECK_EMIT_INLINE_CALL)
+#undef CHECK_EMIT_INLINE_CALL
+  UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  Comment cmnt(masm_, "[ BinaryOperation");
+  Token::Value op = expr->op();
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+
+  OverwriteMode mode = NO_OVERWRITE;
+  if (left->ResultOverwriteAllowed()) {
+    mode = OVERWRITE_LEFT;
+  } else if (right->ResultOverwriteAllowed()) {
+    mode = OVERWRITE_RIGHT;
+  }
+
+  switch (op) {
+    case Token::COMMA:
+      VisitForEffect(left);
+      Visit(right);
+      break;
+
+    case Token::OR:
+    case Token::AND:
+      EmitLogicalOperation(expr);
+      break;
+
+    case Token::ADD:
+    case Token::SUB:
+    case Token::DIV:
+    case Token::MOD:
+    case Token::MUL:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SHL:
+    case Token::SHR:
+    case Token::SAR: {
+      // Figure out if either of the operands is a constant.
+      ConstantOperand constant = ShouldInlineSmiCase(op)
+          ? GetConstantOperand(op, left, right)
+          : kNoConstants;
+
+      // Load only the operands that we need to materialize.
+      if (constant == kNoConstants) {
+        VisitForValue(left, kStack);
+        VisitForValue(right, kAccumulator);
+      } else if (constant == kRightConstant) {
+        VisitForValue(left, kAccumulator);
+      } else {
+        ASSERT(constant == kLeftConstant);
+        VisitForValue(right, kAccumulator);
+      }
+
+      SetSourcePosition(expr->position());
+      if (ShouldInlineSmiCase(op)) {
+        EmitInlineSmiBinaryOp(expr, op, context_, mode, left, right, constant);
+      } else {
+        EmitBinaryOp(op, context_, mode);
+      }
+      break;
+    }
+
+    default:
+      UNREACHABLE();
   }
 }
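The CHECK_EMIT_INLINE_CALL rewrite above replaces a hand-written strcmp chain with an X-macro: one list macro (INLINE_RUNTIME_FUNCTION_LIST) expands into both the handler declarations and the dispatch chain, so adding a function touches a single place. A self-contained sketch of the pattern, with a hypothetical three-entry list standing in for V8's:

#include <cstdio>
#include <cstring>

// Hypothetical list macro in the role of INLINE_RUNTIME_FUNCTION_LIST.
#define FUNCTION_LIST(F) \
  F(IsSmi)               \
  F(ClassOf)             \
  F(MathPow)

// First expansion: one handler per entry.
#define DEFINE_EMITTER(name) \
  void Emit##name() { std::printf("Emit%s\n", #name); }
FUNCTION_LIST(DEFINE_EMITTER)
#undef DEFINE_EMITTER

// Second expansion: a strcmp dispatch chain mirroring
// EmitInlineRuntimeCall -- match "_<name>", call Emit<name>, return.
void Dispatch(const char* callname) {
#define CHECK_EMIT(name)                  \
  if (strcmp("_" #name, callname) == 0) { \
    Emit##name();                         \
    return;                               \
  }
  FUNCTION_LIST(CHECK_EMIT)
#undef CHECK_EMIT
  std::printf("unknown: %s\n", callname);
}

int main() {
  Dispatch("_ClassOf");  // Prints "EmitClassOf".
  Dispatch("_Missing");  // Falls through to the unknown branch.
}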
@@ -939,25 +592,13 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
     case Expression::kUninitialized:
       UNREACHABLE();
     case Expression::kEffect:
-      VisitForControl(expr->left(), &done, &eval_right);
+      VisitForControl(expr->left(), &done, &eval_right, &eval_right);
       break;
     case Expression::kValue:
-      VisitForValueControl(expr->left(),
-                           location_,
-                           &done,
-                           &eval_right);
+      VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
       break;
     case Expression::kTest:
-      VisitForControl(expr->left(), true_label_, &eval_right);
-      break;
-    case Expression::kValueTest:
-      VisitForValueControl(expr->left(),
-                           location_,
-                           true_label_,
-                           &eval_right);
-      break;
-    case Expression::kTestValue:
-      VisitForControl(expr->left(), true_label_, &eval_right);
+      VisitForControl(expr->left(), true_label_, &eval_right, &eval_right);
       break;
   }
 } else {
@@ -966,25 +607,13 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
     case Expression::kUninitialized:
       UNREACHABLE();
     case Expression::kEffect:
-      VisitForControl(expr->left(), &eval_right, &done);
+      VisitForControl(expr->left(), &eval_right, &done, &eval_right);
       break;
     case Expression::kValue:
-      VisitForControlValue(expr->left(),
-                           location_,
-                           &eval_right,
-                           &done);
+      VisitLogicalForValue(expr->left(), expr->op(), location_, &done);
      break;
     case Expression::kTest:
-      VisitForControl(expr->left(), &eval_right, false_label_);
-      break;
-    case Expression::kValueTest:
-      VisitForControl(expr->left(), &eval_right, false_label_);
-      break;
-    case Expression::kTestValue:
-      VisitForControlValue(expr->left(),
-                           location_,
-                           &eval_right,
-                           false_label_);
+      VisitForControl(expr->left(), &eval_right, false_label_, &eval_right);
       break;
   }
 }
@@ -996,6 +625,43 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
 }
 
 
+void FullCodeGenerator::VisitLogicalForValue(Expression* expr,
+                                             Token::Value op,
+                                             Location where,
+                                             Label* done) {
+  ASSERT(op == Token::AND || op == Token::OR);
+  VisitForValue(expr, kAccumulator);
+  __ push(result_register());
+
+  Label discard;
+  switch (where) {
+    case kAccumulator: {
+      Label restore;
+      if (op == Token::OR) {
+        DoTest(&restore, &discard, &restore);
+      } else {
+        DoTest(&discard, &restore, &restore);
+      }
+      __ bind(&restore);
+      __ pop(result_register());
+      __ jmp(done);
+      break;
+    }
+    case kStack: {
+      if (op == Token::OR) {
+        DoTest(done, &discard, &discard);
+      } else {
+        DoTest(&discard, done, &discard);
+      }
+      break;
+    }
+  }
+  __ bind(&discard);
+  __ Drop(1);
+}
+
+
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
   Breakable nested_statement(this, stmt);
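VisitLogicalForValue implements short-circuiting in value contexts by speculatively pushing the left value: if the test decides the result, the value is restored (popped back) as the expression's value; otherwise it is discarded and the right operand produces the result. The same discard-or-restore shape modeled on an explicit value stack (a toy interpreter, not emitted code):

#include <cstdio>
#include <vector>

static std::vector<double> stack;

// Toy model of `a || b` in a value context: push a, test it; if it
// decides the result, keep ("restore") it, otherwise drop ("discard",
// the Drop(1) above) the dead value and evaluate b.
double EvalOr(double (*left)(), double (*right)()) {
  stack.push_back(left());          // push(result_register())
  if (stack.back() != 0.0) {        // DoTest: a truthy left short-circuits.
    double kept = stack.back();     // "restore": pop and keep as the value.
    stack.pop_back();
    return kept;
  }
  stack.pop_back();                 // "discard": Drop(1) the dead value.
  return right();                   // The result of `a || b` is b's value.
}

static double Left() { std::puts("eval left"); return 0.0; }
static double Right() { std::puts("eval right"); return 7.0; }

int main() {
  std::printf("%g\n", EvalOr(Left, Right));  // eval left, eval right, 7
}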
@@ -1023,16 +689,19 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
   SetStatementPosition(stmt);
   Label then_part, else_part, done;
 
-  // Do not worry about optimizing for empty then or else bodies.
-  VisitForControl(stmt->condition(), &then_part, &else_part);
-
-  __ bind(&then_part);
-  Visit(stmt->then_statement());
-  __ jmp(&done);
-
-  __ bind(&else_part);
-  Visit(stmt->else_statement());
+  if (stmt->HasElseStatement()) {
+    VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
+    __ bind(&then_part);
+    Visit(stmt->then_statement());
+    __ jmp(&done);
+
+    __ bind(&else_part);
+    Visit(stmt->else_statement());
+  } else {
+    VisitForControl(stmt->condition(), &then_part, &done, &then_part);
+    __ bind(&then_part);
+    Visit(stmt->then_statement());
+  }
 
   __ bind(&done);
 }
@@ -1120,7 +789,7 @@ void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
 void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   Comment cmnt(masm_, "[ DoWhileStatement");
   SetStatementPosition(stmt);
-  Label body, stack_limit_hit, stack_check_success;
+  Label body, stack_limit_hit, stack_check_success, done;
 
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
@@ -1132,28 +801,31 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   __ StackLimitCheck(&stack_limit_hit);
   __ bind(&stack_check_success);
 
-  // Record the position of the do while condition and make sure it is
-  // possible to break on the condition.
   __ bind(loop_statement.continue_target());
+
+  // Record the position of the do while condition and make sure it is possible
+  // to break on the condition.
   SetExpressionPosition(stmt->cond(), stmt->condition_position());
+  VisitForControl(stmt->cond(),
+                  &body,
+                  loop_statement.break_target(),
+                  loop_statement.break_target());
 
-  VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+  __ bind(loop_statement.break_target());
+  __ jmp(&done);
 
   __ bind(&stack_limit_hit);
   StackCheckStub stack_stub;
   __ CallStub(&stack_stub);
   __ jmp(&stack_check_success);
 
-  __ bind(loop_statement.break_target());
+  __ bind(&done);
+
   decrement_loop_depth();
 }
 
 
 void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   Comment cmnt(masm_, "[ WhileStatement");
-  Label body, stack_limit_hit, stack_check_success;
+  Label body, stack_limit_hit, stack_check_success, done;
 
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
@@ -1163,24 +835,30 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   __ bind(&body);
   Visit(stmt->body());
   __ bind(loop_statement.continue_target());
-  // Emit the statement position here as this is where the while statement code
-  // starts.
+
+  // Emit the statement position here as this is where the while
+  // statement code starts.
   SetStatementPosition(stmt);
 
   // Check stack before looping.
   __ StackLimitCheck(&stack_limit_hit);
   __ bind(&stack_check_success);
 
-  VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+  VisitForControl(stmt->cond(),
+                  &body,
+                  loop_statement.break_target(),
+                  loop_statement.break_target());
+
+  __ bind(loop_statement.break_target());
+  __ jmp(&done);
 
   __ bind(&stack_limit_hit);
   StackCheckStub stack_stub;
   __ CallStub(&stack_stub);
   __ jmp(&stack_check_success);
 
-  __ bind(loop_statement.break_target());
+  __ bind(&done);
   decrement_loop_depth();
 }
@@ -1198,6 +876,11 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   // Emit the test at the bottom of the loop (even if empty).
   __ jmp(&test);
 
+  __ bind(&stack_limit_hit);
+  StackCheckStub stack_stub;
+  __ CallStub(&stack_stub);
+  __ jmp(&stack_check_success);
+
   __ bind(&body);
   Visit(stmt->body());
@@ -1209,8 +892,8 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   }
 
   __ bind(&test);
-  // Emit the statement position here as this is where the for statement code
-  // starts.
+  // Emit the statement position here as this is where the for
+  // statement code starts.
   SetStatementPosition(stmt);
 
   // Check stack before looping.
@@ -1218,16 +901,14 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   __ bind(&stack_check_success);
 
   if (stmt->cond() != NULL) {
-    VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+    VisitForControl(stmt->cond(),
+                    &body,
+                    loop_statement.break_target(),
+                    loop_statement.break_target());
   } else {
     __ jmp(&body);
   }
 
-  __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
   __ bind(loop_statement.break_target());
   decrement_loop_depth();
 }
@@ -1354,7 +1035,7 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
 void FullCodeGenerator::VisitConditional(Conditional* expr) {
   Comment cmnt(masm_, "[ Conditional");
   Label true_case, false_case, done;
-  VisitForControl(expr->condition(), &true_case, &false_case);
+  VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
 
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression(),
@@ -1426,6 +1107,11 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
 }
 
 
+void FullCodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
+  UNREACHABLE();
+}
+
+
 int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
   // The macros used here must preserve the result register.
   __ Drop(stack_depth);
@@ -1442,6 +1128,14 @@ int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
   return 0;
 }
 
+
+void FullCodeGenerator::EmitRegExpCloneResult(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kRegExpCloneResult, 1);
+  Apply(context_, result_register());
+}
+
 #undef __

215
deps/v8/src/full-codegen.h

@@ -36,29 +36,6 @@
 namespace v8 {
 namespace internal {
 
-class FullCodeGenSyntaxChecker: public AstVisitor {
- public:
-  FullCodeGenSyntaxChecker() : has_supported_syntax_(true) {}
-
-  void Check(FunctionLiteral* fun);
-
-  bool has_supported_syntax() { return has_supported_syntax_; }
-
- private:
-  void VisitDeclarations(ZoneList<Declaration*>* decls);
-  void VisitStatements(ZoneList<Statement*>* stmts);
-
-  // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
-  bool has_supported_syntax_;
-
-  DISALLOW_COPY_AND_ASSIGN(FullCodeGenSyntaxChecker);
-};
-
-
 // AST node visitor which can tell whether a given statement will be breakable
 // when the code is compiled by the full compiler in the debugger. This means
 // that there will be an IC (load/store/call) in the code generated for the
@@ -96,7 +73,8 @@ class FullCodeGenerator: public AstVisitor {
         loop_depth_(0),
         location_(kStack),
         true_label_(NULL),
-        false_label_(NULL) {
+        false_label_(NULL),
+        fall_through_(NULL) {
   }
 
   static Handle<Code> MakeCode(CompilationInfo* info);
@@ -259,8 +237,25 @@ class FullCodeGenerator: public AstVisitor {
     kStack
   };
 
+  enum ConstantOperand {
+    kNoConstants,
+    kLeftConstant,
+    kRightConstant
+  };
+
+  // Compute the frame pointer relative offset for a given local or
+  // parameter slot.
   int SlotOffset(Slot* slot);
+
+  // Determine whether or not to inline the smi case for the given
+  // operation.
+  bool ShouldInlineSmiCase(Token::Value op);
+
+  // Compute which (if any) of the operands is a compile-time constant.
+  ConstantOperand GetConstantOperand(Token::Value op,
+                                     Expression* left,
+                                     Expression* right);
+
   // Emit code to convert a pure value (in a register, slot, as a literal,
   // or on top of the stack) into the result expected according to an
   // expression context.
@@ -281,7 +276,8 @@ class FullCodeGenerator: public AstVisitor {
   void PrepareTest(Label* materialize_true,
                    Label* materialize_false,
                    Label** if_true,
-                   Label** if_false);
+                   Label** if_false,
+                   Label** fall_through);
 
   // Emit code to convert pure control flow to a pair of labels into the
   // result expected according to an expression context.
@@ -296,7 +292,14 @@ class FullCodeGenerator: public AstVisitor {
   // Helper function to convert a pure value into a test context. The value
   // is expected on the stack or the accumulator, depending on the platform.
   // See the platform-specific implementation for details.
-  void DoTest(Expression::Context context);
+  void DoTest(Label* if_true, Label* if_false, Label* fall_through);
+
+  // Helper function to split control flow and avoid a branch to the
+  // fall-through label if it is set up.
+  void Split(Condition cc,
+             Label* if_true,
+             Label* if_false,
+             Label* fall_through);
 
   void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
   void Move(Register dst, Slot* source);
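Split exists so a test emits at most one branch: whichever target coincides with the fall-through label (the code emitted immediately next) needs no jump at all. A toy emitter showing the three cases, with stand-in labels and condition codes rather than V8's MacroAssembler:

#include <cstdio>
#include <string>

struct Label { std::string name; };

// Toy condition inversion; real code would use NegateCondition(cc).
std::string Negate(const std::string& cc) { return cc == "e" ? "ne" : "e"; }

void Split(const std::string& cc, Label* if_true, Label* if_false,
           Label* fall_through) {
  if (if_false == fall_through) {
    // False case falls through: one branch on the condition.
    std::printf("  j%s %s\n", cc.c_str(), if_true->name.c_str());
  } else if (if_true == fall_through) {
    // True case falls through: one branch on the inverted condition.
    std::printf("  j%s %s\n", Negate(cc).c_str(), if_false->name.c_str());
  } else {
    // Neither falls through: conditional branch plus unconditional jump.
    std::printf("  j%s %s\n", cc.c_str(), if_true->name.c_str());
    std::printf("  jmp %s\n", if_false->name.c_str());
  }
}

int main() {
  Label then_part{"then"}, else_part{"else"};
  // then_part is bound right after the compare, so only "jne else" is needed.
  Split("e", &then_part, &else_part, &then_part);
}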
@@ -323,60 +326,38 @@ class FullCodeGenerator: public AstVisitor {
     location_ = saved_location;
   }
 
-  void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
-    Expression::Context saved_context = context_;
-    Label* saved_true = true_label_;
-    Label* saved_false = false_label_;
-    context_ = Expression::kTest;
-    true_label_ = if_true;
-    false_label_ = if_false;
-    Visit(expr);
-    context_ = saved_context;
-    true_label_ = saved_true;
-    false_label_ = saved_false;
-  }
-
-  void VisitForValueControl(Expression* expr,
-                            Location where,
-                            Label* if_true,
-                            Label* if_false) {
-    Expression::Context saved_context = context_;
-    Location saved_location = location_;
-    Label* saved_true = true_label_;
-    Label* saved_false = false_label_;
-    context_ = Expression::kValueTest;
-    location_ = where;
-    true_label_ = if_true;
-    false_label_ = if_false;
-    Visit(expr);
-    context_ = saved_context;
-    location_ = saved_location;
-    true_label_ = saved_true;
-    false_label_ = saved_false;
-  }
-
-  void VisitForControlValue(Expression* expr,
-                            Location where,
-                            Label* if_true,
-                            Label* if_false) {
+  void VisitForControl(Expression* expr,
+                       Label* if_true,
+                       Label* if_false,
+                       Label* fall_through) {
     Expression::Context saved_context = context_;
-    Location saved_location = location_;
     Label* saved_true = true_label_;
     Label* saved_false = false_label_;
-    context_ = Expression::kTestValue;
-    location_ = where;
+    Label* saved_fall_through = fall_through_;
+    context_ = Expression::kTest;
     true_label_ = if_true;
     false_label_ = if_false;
+    fall_through_ = fall_through;
     Visit(expr);
    context_ = saved_context;
-    location_ = saved_location;
     true_label_ = saved_true;
     false_label_ = saved_false;
+    fall_through_ = saved_fall_through;
   }
 
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
 
+  // Try to perform a comparison as a fast inlined literal compare if
+  // the operands allow it. Returns true if the compare operations
+  // has been matched and all code generated; false otherwise.
+  bool TryLiteralCompare(Token::Value op,
+                         Expression* left,
+                         Expression* right,
+                         Label* if_true,
+                         Label* if_false,
+                         Label* fall_through);
+
   // Platform-specific code for a variable, constant, or function
   // declaration. Functions have an initial value.
   void EmitDeclaration(Variable* variable,
@ -391,45 +372,13 @@ class FullCodeGenerator: public AstVisitor {
void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode); void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode); void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);
// Platform-specific code for inline runtime calls. // Platform-specific code for inline runtime calls.
void EmitInlineRuntimeCall(CallRuntime* expr); void EmitInlineRuntimeCall(CallRuntime* expr);
void EmitIsSmi(ZoneList<Expression*>* arguments);
void EmitIsNonNegativeSmi(ZoneList<Expression*>* arguments); #define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
void EmitIsObject(ZoneList<Expression*>* arguments); void Emit##name(ZoneList<Expression*>* arguments);
void EmitIsSpecObject(ZoneList<Expression*>* arguments); INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
void EmitIsUndetectableObject(ZoneList<Expression*>* arguments); #undef EMIT_INLINE_RUNTIME_CALL
void EmitIsFunction(ZoneList<Expression*>* arguments);
void EmitIsArray(ZoneList<Expression*>* arguments);
void EmitIsRegExp(ZoneList<Expression*>* arguments);
void EmitIsConstructCall(ZoneList<Expression*>* arguments);
void EmitIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* arguments);
void EmitObjectEquals(ZoneList<Expression*>* arguments);
void EmitArguments(ZoneList<Expression*>* arguments);
void EmitArgumentsLength(ZoneList<Expression*>* arguments);
void EmitClassOf(ZoneList<Expression*>* arguments);
void EmitValueOf(ZoneList<Expression*>* arguments);
void EmitSetValueOf(ZoneList<Expression*>* arguments);
void EmitNumberToString(ZoneList<Expression*>* arguments);
void EmitStringCharFromCode(ZoneList<Expression*>* arguments);
void EmitStringCharCodeAt(ZoneList<Expression*>* arguments);
void EmitStringCharAt(ZoneList<Expression*>* arguments);
void EmitStringCompare(ZoneList<Expression*>* arguments);
void EmitStringAdd(ZoneList<Expression*>* arguments);
void EmitLog(ZoneList<Expression*>* arguments);
void EmitRandomHeapNumber(ZoneList<Expression*>* arguments);
void EmitSubString(ZoneList<Expression*>* arguments);
void EmitRegExpExec(ZoneList<Expression*>* arguments);
void EmitMathPow(ZoneList<Expression*>* arguments);
void EmitMathSin(ZoneList<Expression*>* arguments);
void EmitMathCos(ZoneList<Expression*>* arguments);
void EmitMathSqrt(ZoneList<Expression*>* arguments);
void EmitCallFunction(ZoneList<Expression*>* arguments);
void EmitRegExpConstructResult(ZoneList<Expression*>* arguments);
void EmitSwapElements(ZoneList<Expression*>* arguments);
void EmitGetFromCache(ZoneList<Expression*>* arguments);
void EmitIsRegExpEquivalent(ZoneList<Expression*>* arguments);
// Platform-specific code for loading variables.
void EmitVariableLoad(Variable* expr, Expression::Context context);
@@ -450,7 +399,50 @@ class FullCodeGenerator: public AstVisitor {
// Apply the compound assignment operator. Expects the left operand on top
// of the stack and the right one in the accumulator.
void EmitBinaryOp(Token::Value op, Expression::Context context);
void EmitBinaryOp(Token::Value op,
Expression::Context context,
OverwriteMode mode);
// Helper functions for generating inlined smi code for certain
// binary operations.
void EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
Expression::Context context,
OverwriteMode mode,
Expression* left,
Expression* right,
ConstantOperand constant);
void EmitConstantSmiBinaryOp(Expression* expr,
Token::Value op,
Expression::Context context,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value);
void EmitConstantSmiBitOp(Expression* expr,
Token::Value op,
Expression::Context context,
OverwriteMode mode,
Smi* value);
void EmitConstantSmiShiftOp(Expression* expr,
Token::Value op,
Expression::Context context,
OverwriteMode mode,
Smi* value);
void EmitConstantSmiAdd(Expression* expr,
Expression::Context context,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value);
void EmitConstantSmiSub(Expression* expr,
Expression::Context context,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value);
// Assign to the given expression as if via '='. The right-hand-side value
// is expected in the accumulator.
@@ -471,14 +463,6 @@ class FullCodeGenerator: public AstVisitor {
// accumulator.
void EmitKeyedPropertyAssignment(Assignment* expr);
// Helper for compare operations. Expects the null-value in a register.
void EmitNullCompare(bool strict,
Register obj,
Register null_const,
Label* if_true,
Label* if_false,
Register scratch);
void SetFunctionPosition(FunctionLiteral* fun);
void SetReturnPosition(FunctionLiteral* fun);
void SetStatementPosition(Statement* stmt);
@@ -523,6 +507,14 @@ class FullCodeGenerator: public AstVisitor {
// Handles the shortcutted logical binary operations in VisitBinaryOperation.
void EmitLogicalOperation(BinaryOperation* expr);
void VisitForTypeofValue(Expression* expr, Location where);
void VisitLogicalForValue(Expression* expr,
Token::Value op,
Location where,
Label* done);
MacroAssembler* masm_;
CompilationInfo* info_;
@@ -534,6 +526,7 @@ class FullCodeGenerator: public AstVisitor {
Location location_;
Label* true_label_;
Label* false_label_;
Label* fall_through_;

friend class NestedStatement;
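A note on the EMIT_INLINE_RUNTIME_CALL change above: replacing thirty-odd hand-written Emit* declarations with a macro over the inline-runtime list keeps that list as the single source of truth. A minimal, self-contained sketch of the X-macro pattern (illustrative list and names, not V8's actual INLINE_RUNTIME_FUNCTION_LIST):

// Each entry F(name, argc, ressize) expands to one member declaration,
// so adding a runtime function means touching only the list.
#define DEMO_FUNCTION_LIST(F) \
  F(IsSmi, 1, 1)              \
  F(ClassOf, 1, 1)            \
  F(StringAdd, 2, 1)

class DemoCodeGenerator {
 public:
#define DECLARE_EMIT(name, argc, ressize) void Emit##name();
  DEMO_FUNCTION_LIST(DECLARE_EMIT)  // declares EmitIsSmi, EmitClassOf, EmitStringAdd
#undef DECLARE_EMIT
};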

14
deps/v8/src/func-name-inferrer.cc

@@ -44,6 +44,20 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
}
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
if (IsOpen() && !Heap::prototype_symbol()->Equals(*name)) {
names_stack_.Add(name);
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
if (IsOpen() && !Heap::result_symbol()->Equals(*name)) {
names_stack_.Add(name);
}
}
Handle<String> FuncNameInferrer::MakeNameFromStack() {
if (names_stack_.is_empty()) {
return Factory::empty_string();
54
deps/v8/src/func-name-inferrer.h

@@ -36,11 +36,12 @@ namespace internal {
// Inference is performed in cases when an anonymous function is assigned
// to a variable or a property (see test-func-name-inference.cc for examples.)
//
// The basic idea is that during AST traversal LHSs of expressions are
// always visited before RHSs. Thus, during visiting the LHS, a name can be
// collected, and during visiting the RHS, a function literal can be collected.
// Inference is performed while leaving the assignment node.
class FuncNameInferrer BASE_EMBEDDED {
// The basic idea is that during parsing of LHSs of certain expressions
// (assignments, declarations, object literals) we collect name strings,
// and during parsing of the RHS, a function literal can be collected. After
// parsing the RHS we can infer a name for function literals that do not have
// a name.
class FuncNameInferrer : public ZoneObject {
public:
FuncNameInferrer()
: entries_stack_(10),
@@ -61,11 +62,9 @@ class FuncNameInferrer BASE_EMBEDDED {
}

// Pushes an encountered name onto names stack when in collection state.
void PushName(Handle<String> name) {
if (IsOpen()) {
names_stack_.Add(name);
}
}
void PushLiteralName(Handle<String> name);

void PushVariableName(Handle<String> name);
// Adds a function to infer name for.
void AddFunction(FunctionLiteral* func_to_infer) {
@@ -75,11 +74,16 @@ class FuncNameInferrer BASE_EMBEDDED {
}

// Infers a function name and leaves names collection state.
void InferAndLeave() {
void Infer() {
ASSERT(IsOpen());
if (!funcs_to_infer_.is_empty()) {
InferFunctionsNames();
}
}

// Infers a function name and leaves names collection state.
void Leave() {
ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
}
@@ -102,34 +106,6 @@ class FuncNameInferrer BASE_EMBEDDED {
};
// A wrapper class that automatically calls InferAndLeave when
// leaving scope.
class ScopedFuncNameInferrer BASE_EMBEDDED {
public:
explicit ScopedFuncNameInferrer(FuncNameInferrer* inferrer)
: inferrer_(inferrer),
is_entered_(false) {}
~ScopedFuncNameInferrer() {
if (is_entered_) {
inferrer_->InferAndLeave();
}
}
// Triggers the wrapped inferrer into name collection state.
void Enter() {
inferrer_->Enter();
is_entered_ = true;
}
private:
FuncNameInferrer* inferrer_;
bool is_entered_;
DISALLOW_COPY_AND_ASSIGN(ScopedFuncNameInferrer);
};
} } // namespace v8::internal

#endif // V8_FUNC_NAME_INFERRER_H_
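Splitting the old InferAndLeave() into Infer() and Leave() decouples when names are inferred from when the collection scope is popped, which the removed ScopedFuncNameInferrer wrapper could not express. A hypothetical caller sketch (assumes this header; Enter() is the existing method that opens a collection scope):

void ParseAssignmentExpression(FuncNameInferrer* fni) {
  fni->Enter();  // open a name-collection scope
  // ... parse LHS, collecting candidate names ...
  // ... parse RHS, collecting anonymous function literals ...
  fni->Infer();  // name the collected literals
  fni->Leave();  // pop the scope; now a separate decision
}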

12
deps/v8/src/globals.h

@@ -244,10 +244,12 @@ const Address kHandleZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
const uint32_t kDebugZapValue = 0xbadbaddb;
#endif
@@ -662,7 +664,7 @@ F FUNCTION_CAST(Address addr) {
#define TRACK_MEMORY(name)
#endif

// define used for helping GCC to make better inlining. Don't bother for debug
// Define used for helping GCC to make better inlining. Don't bother for debug
// builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation
// errors in debug build.
#if defined(__GNUC__) && !defined(DEBUG)
@@ -678,6 +680,14 @@ F FUNCTION_CAST(Address addr) {
#define NO_INLINE(header) header
#endif

#if defined(__GNUC__) && __GNUC__ >= 4
#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
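The new MUST_USE_RESULT macro is aimed at the allocation functions in heap.h (annotated later in this diff), whose Failure results are easy to drop. A minimal stand-alone demonstration of what the attribute does (hypothetical function name, not a V8 API):

#if defined(__GNUC__) && __GNUC__ >= 4
#define MUST_USE_RESULT __attribute__ ((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

MUST_USE_RESULT int AllocateOrFail() { return -1; }

int main() {
  AllocateOrFail();               // GCC 4+: warning, return value ignored
  int result = AllocateOrFail();  // OK: the caller inspects the result
  return result < 0 ? 1 : 0;
}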

2
deps/v8/src/handles.cc

@@ -773,6 +773,7 @@ bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
function->shared()->set_code_age(0);
return true;
} else {
CompilationInfo info(function, 0, receiver);
@@ -788,6 +789,7 @@ bool CompileLazyInLoop(Handle<JSFunction> function,
ClearExceptionFlag flag) {
if (function->shared()->is_compiled()) {
function->set_code(function->shared()->code());
function->shared()->set_code_age(0);
return true;
} else {
CompilationInfo info(function, 1, receiver);
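The set_code_age(0) calls above appear to support age-based code flushing (note that heap.cc later in this commit removes the old stack-scanning FlushCode path): reusing already-compiled shared code resets its age, so recently used functions are not flush candidates. A plain-C++ sketch of that aging idea (stand-in type; the threshold name is made up):

struct CodeAgeEntry {
  int age = 0;
  void MarkUsed() { age = 0; }  // what set_code_age(0) expresses
  void TickGC() { ++age; }      // aged once per collection cycle
  bool IsFlushCandidate(int kMaxAge) const { return age >= kMaxAge; }
};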

3
deps/v8/src/heap-inl.h

@@ -28,7 +28,8 @@
#ifndef V8_HEAP_INL_H_
#define V8_HEAP_INL_H_

#include "log.h"
#include "heap.h"
#include "objects.h"
#include "v8-counters.h"

namespace v8 {

440
deps/v8/src/heap-profiler.cc

@@ -280,10 +280,12 @@ void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster,
printer_->PrintRetainers(cluster, stream);
}

}  // namespace

// A helper class for building a retainers tree, that aggregates
// all equivalent clusters.
class RetainerTreeAggregator BASE_EMBEDDED {
class RetainerTreeAggregator {
public:
explicit RetainerTreeAggregator(ClustersCoarser* coarser)
: coarser_(coarser) {}
@@ -311,8 +313,6 @@ void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster,
tree->ForEach(&retainers_aggregator);
}

}  // namespace
HeapProfiler* HeapProfiler::singleton_ = NULL;

@@ -347,30 +347,46 @@ void HeapProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING

HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name) {
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name, int type) {
ASSERT(singleton_ != NULL);
return singleton_->TakeSnapshotImpl(name);
return singleton_->TakeSnapshotImpl(name, type);
}

HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) {
HeapSnapshot* HeapProfiler::TakeSnapshot(String* name, int type) {
ASSERT(singleton_ != NULL);
return singleton_->TakeSnapshotImpl(name);
return singleton_->TakeSnapshotImpl(name, type);
}

HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, int type) {
Heap::CollectAllGarbage(true);
HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
HeapSnapshot* result =
snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
switch (s_type) {
case HeapSnapshot::kFull: {
HeapSnapshotGenerator generator(result);
generator.GenerateSnapshot();
break;
}
case HeapSnapshot::kAggregated: {
AggregatedHeapSnapshot agg_snapshot;
AggregatedHeapSnapshotGenerator generator(&agg_snapshot);
generator.GenerateSnapshot();
generator.FillHeapSnapshot(result);
break;
}
default:
UNREACHABLE();
}
snapshots_->SnapshotGenerationFinished();
return result;
}

HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name) {
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name, int type) {
return TakeSnapshotImpl(snapshots_->GetName(name));
return TakeSnapshotImpl(snapshots_->GetName(name), type);
}
@@ -433,16 +449,25 @@ static const char* GetConstructorName(const char* name) {
}

void JSObjectsCluster::Print(StringStream* accumulator) const {
const char* JSObjectsCluster::GetSpecialCaseName() const {
ASSERT(!is_null());
if (constructor_ == FromSpecialCase(ROOTS)) {
accumulator->Add("(roots)");
return "(roots)";
} else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
accumulator->Add("(global property)");
return "(global property)";
} else if (constructor_ == FromSpecialCase(CODE)) {
accumulator->Add("(code)");
return "(code)";
} else if (constructor_ == FromSpecialCase(SELF)) {
accumulator->Add("(self)");
return "(self)";
}
return NULL;
}

void JSObjectsCluster::Print(StringStream* accumulator) const {
ASSERT(!is_null());
const char* special_case_name = GetSpecialCaseName();
if (special_case_name != NULL) {
accumulator->Add(special_case_name);
} else {
SmartPointer<char> s_name(
constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
@@ -618,13 +643,19 @@ const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue =
RetainerHeapProfile::RetainerHeapProfile()
: zscope_(DELETE_ON_EXIT) {
: zscope_(DELETE_ON_EXIT),
aggregator_(NULL) {
JSObjectsCluster roots(JSObjectsCluster::ROOTS);
ReferencesExtractor extractor(roots, this);
Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
}

RetainerHeapProfile::~RetainerHeapProfile() {
delete aggregator_;
}

void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
HeapObject* ref) {
JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
@@ -646,18 +677,22 @@ void RetainerHeapProfile::CollectStats(HeapObject* obj) {
}

void RetainerHeapProfile::CoarseAndAggregate() {
coarser_.Process(&retainers_tree_);
ASSERT(aggregator_ == NULL);
aggregator_ = new RetainerTreeAggregator(&coarser_);
aggregator_->Process(&retainers_tree_);
}

void RetainerHeapProfile::DebugPrintStats(
RetainerHeapProfile::Printer* printer) {
coarser_.Process(&retainers_tree_);
// Print clusters that have no equivalents, aggregating their retainers.
AggregatingRetainerTreePrinter agg_printer(&coarser_, printer);
retainers_tree_.ForEach(&agg_printer);
// Now aggregate clusters that have equivalents...
// Print clusters that have equivalents.
RetainerTreeAggregator aggregator(&coarser_);
aggregator.Process(&retainers_tree_);
// ...and print them.
SimpleRetainerTreePrinter s_printer(printer);
aggregator.output_tree().ForEach(&s_printer);
aggregator_->output_tree().ForEach(&s_printer);
}
@@ -670,16 +705,6 @@ void RetainerHeapProfile::PrintStats() {
//
// HeapProfiler class implementation.
//
void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
InstanceType type = obj->map()->instance_type();
ASSERT(0 <= type && type <= LAST_TYPE);
if (!FreeListNode::IsFreeListNode(obj)) {
info[type].increment_number(1);
info[type].increment_bytes(obj->Size());
}
}
static void StackWeakReferenceCallback(Persistent<Value> object,
void* trace) {
DeleteArray(static_cast<Address*>(trace));
@@ -702,46 +727,339 @@ void HeapProfiler::WriteSample() {
LOG(HeapSampleStats(
"Heap", "allocated", Heap::CommittedMemory(), Heap::SizeOfObjects()));

HistogramInfo info[LAST_TYPE+1];
#define DEF_TYPE_NAME(name) info[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

ConstructorHeapProfile js_cons_profile;
RetainerHeapProfile js_retainer_profile;
HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CollectStats(obj, info);
js_cons_profile.CollectStats(obj);
js_retainer_profile.CollectStats(obj);
}

// Lump all the string types together.
int string_number = 0;
int string_bytes = 0;
#define INCREMENT_SIZE(type, size, name, camel_name) \
string_number += info[type].number(); \
string_bytes += info[type].bytes();
STRING_TYPE_LIST(INCREMENT_SIZE)
#undef INCREMENT_SIZE
if (string_bytes > 0) {
LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
}

for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
if (info[i].bytes() > 0) {
LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
info[i].bytes()));
}
}

js_cons_profile.PrintStats();
js_retainer_profile.PrintStats();

GlobalHandles::IterateWeakRoots(PrintProducerStackTrace,
StackWeakReferenceCallback);

LOG(HeapSampleEndEvent("Heap", "allocated"));
}

AggregatedHeapSnapshot snapshot;
AggregatedHeapSnapshotGenerator generator(&snapshot);
generator.GenerateSnapshot();

HistogramInfo* info = snapshot.info();
for (int i = FIRST_NONSTRING_TYPE;
i <= AggregatedHeapSnapshotGenerator::kAllStringsType;
++i) {
if (info[i].bytes() > 0) {
LOG(HeapSampleItemEvent(info[i].name(), info[i].number(),
info[i].bytes()));
}
}

snapshot.js_cons_profile()->PrintStats();
snapshot.js_retainer_profile()->PrintStats();

GlobalHandles::IterateWeakRoots(PrintProducerStackTrace,
StackWeakReferenceCallback);

LOG(HeapSampleEndEvent("Heap", "allocated"));
}

AggregatedHeapSnapshot::AggregatedHeapSnapshot()
: info_(NewArray<HistogramInfo>(
AggregatedHeapSnapshotGenerator::kAllStringsType + 1)) {
#define DEF_TYPE_NAME(name) info_[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME);
#undef DEF_TYPE_NAME
info_[AggregatedHeapSnapshotGenerator::kAllStringsType].set_name(
"STRING_TYPE");
}

AggregatedHeapSnapshot::~AggregatedHeapSnapshot() {
DeleteArray(info_);
}

AggregatedHeapSnapshotGenerator::AggregatedHeapSnapshotGenerator(
AggregatedHeapSnapshot* agg_snapshot)
: agg_snapshot_(agg_snapshot) {
}

void AggregatedHeapSnapshotGenerator::CalculateStringsStats() {
HistogramInfo* info = agg_snapshot_->info();
HistogramInfo& strings = info[kAllStringsType];
// Lump all the string types together.
#define INCREMENT_SIZE(type, size, name, camel_name) \
strings.increment_number(info[type].number()); \
strings.increment_bytes(info[type].bytes());
STRING_TYPE_LIST(INCREMENT_SIZE);
#undef INCREMENT_SIZE
}

void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) {
InstanceType type = obj->map()->instance_type();
ASSERT(0 <= type && type <= LAST_TYPE);
if (!FreeListNode::IsFreeListNode(obj)) {
agg_snapshot_->info()[type].increment_number(1);
agg_snapshot_->info()[type].increment_bytes(obj->Size());
}
}

void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CollectStats(obj);
agg_snapshot_->js_cons_profile()->CollectStats(obj);
agg_snapshot_->js_retainer_profile()->CollectStats(obj);
}
CalculateStringsStats();
agg_snapshot_->js_retainer_profile()->CoarseAndAggregate();
}

class CountingConstructorHeapProfileIterator {
public:
CountingConstructorHeapProfileIterator()
: entities_count_(0), children_count_(0) {
}

void Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size) {
++entities_count_;
children_count_ += number_and_size.number();
}

int entities_count() { return entities_count_; }
int children_count() { return children_count_; }

private:
int entities_count_;
int children_count_;
};
static HeapEntry* AddEntryFromAggregatedSnapshot(HeapSnapshot* snapshot,
int* root_child_index,
HeapEntry::Type type,
const char* name,
int count,
int size,
int children_count,
int retainers_count) {
HeapEntry* entry = snapshot->AddEntry(
type, name, count, size, children_count, retainers_count);
ASSERT(entry != NULL);
snapshot->root()->SetUnidirElementReference(*root_child_index,
*root_child_index + 1,
entry);
*root_child_index = *root_child_index + 1;
return entry;
}
class AllocatingConstructorHeapProfileIterator {
public:
AllocatingConstructorHeapProfileIterator(HeapSnapshot* snapshot,
int* root_child_index)
: snapshot_(snapshot),
root_child_index_(root_child_index) {
}
void Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size) {
const char* name = cluster.GetSpecialCaseName();
if (name == NULL) {
name = snapshot_->collection()->GetFunctionName(cluster.constructor());
}
AddEntryFromAggregatedSnapshot(snapshot_,
root_child_index_,
HeapEntry::kObject,
name,
number_and_size.number(),
number_and_size.bytes(),
0,
0);
}
private:
HeapSnapshot* snapshot_;
int* root_child_index_;
};
static HeapObject* ClusterAsHeapObject(const JSObjectsCluster& cluster) {
return cluster.can_be_coarsed() ?
reinterpret_cast<HeapObject*>(cluster.instance()) : cluster.constructor();
}
static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) {
if (object->IsString()) {
return JSObjectsCluster(String::cast(object));
} else {
JSObject* js_obj = JSObject::cast(object);
String* constructor = JSObject::cast(js_obj)->constructor_name();
return JSObjectsCluster(constructor, object);
}
}
class CountingRetainersIterator {
public:
CountingRetainersIterator(const JSObjectsCluster& child_cluster,
HeapEntriesMap* map)
: child_(ClusterAsHeapObject(child_cluster)), map_(map) {
if (map_->Map(child_) == NULL)
map_->Pair(child_, HeapEntriesMap::kHeapEntryPlaceholder);
}
void Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size) {
if (map_->Map(ClusterAsHeapObject(cluster)) == NULL)
map_->Pair(ClusterAsHeapObject(cluster),
HeapEntriesMap::kHeapEntryPlaceholder);
map_->CountReference(ClusterAsHeapObject(cluster), child_);
}
private:
HeapObject* child_;
HeapEntriesMap* map_;
};
class AllocatingRetainersIterator {
public:
AllocatingRetainersIterator(const JSObjectsCluster& child_cluster,
HeapEntriesMap* map)
: child_(ClusterAsHeapObject(child_cluster)), map_(map) {
child_entry_ = map_->Map(child_);
ASSERT(child_entry_ != NULL);
}
void Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size) {
int child_index, retainer_index;
map_->CountReference(ClusterAsHeapObject(cluster), child_,
&child_index, &retainer_index);
map_->Map(ClusterAsHeapObject(cluster))->SetElementReference(
child_index, number_and_size.number(), child_entry_, retainer_index);
}
private:
HeapObject* child_;
HeapEntriesMap* map_;
HeapEntry* child_entry_;
};
template<class RetainersIterator>
class AggregatingRetainerTreeIterator {
public:
explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser,
HeapEntriesMap* map)
: coarser_(coarser), map_(map) {
}
void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) {
if (coarser_ != NULL &&
!coarser_->GetCoarseEquivalent(cluster).is_null()) return;
JSObjectsClusterTree* tree_to_iterate = tree;
ZoneScope zs(DELETE_ON_EXIT);
JSObjectsClusterTree dest_tree_;
if (coarser_ != NULL) {
RetainersAggregator retainers_aggregator(coarser_, &dest_tree_);
tree->ForEach(&retainers_aggregator);
tree_to_iterate = &dest_tree_;
}
RetainersIterator iterator(cluster, map_);
tree_to_iterate->ForEach(&iterator);
}
private:
ClustersCoarser* coarser_;
HeapEntriesMap* map_;
};
class AggregatedRetainerTreeAllocator {
public:
AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot,
int* root_child_index)
: snapshot_(snapshot), root_child_index_(root_child_index) {
}
HeapEntry* GetEntry(
HeapObject* obj, int children_count, int retainers_count) {
JSObjectsCluster cluster = HeapObjectAsCluster(obj);
const char* name = cluster.GetSpecialCaseName();
if (name == NULL) {
name = snapshot_->collection()->GetFunctionName(cluster.constructor());
}
return AddEntryFromAggregatedSnapshot(
snapshot_, root_child_index_, HeapEntry::kObject, name,
0, 0, children_count, retainers_count);
}
private:
HeapSnapshot* snapshot_;
int* root_child_index_;
};
template<class Iterator>
void AggregatedHeapSnapshotGenerator::IterateRetainers(
HeapEntriesMap* entries_map) {
RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile();
AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1(
p->coarser(), entries_map);
p->retainers_tree()->ForEach(&agg_ret_iter_1);
AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(NULL, entries_map);
p->aggregator()->output_tree().ForEach(&agg_ret_iter_2);
}
void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
// Count the number of entities.
int histogram_entities_count = 0;
int histogram_children_count = 0;
int histogram_retainers_count = 0;
for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
if (agg_snapshot_->info()[i].bytes() > 0) {
++histogram_entities_count;
}
}
CountingConstructorHeapProfileIterator counting_cons_iter;
agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter);
histogram_entities_count += counting_cons_iter.entities_count();
HeapEntriesMap entries_map;
IterateRetainers<CountingRetainersIterator>(&entries_map);
histogram_entities_count += entries_map.entries_count();
histogram_children_count += entries_map.total_children_count();
histogram_retainers_count += entries_map.total_retainers_count();
// Root entry references all other entries.
histogram_children_count += histogram_entities_count;
int root_children_count = histogram_entities_count;
++histogram_entities_count;
// Allocate and fill entries in the snapshot, allocate references.
snapshot->AllocateEntries(histogram_entities_count,
histogram_children_count,
histogram_retainers_count);
snapshot->AddEntry(HeapSnapshot::kInternalRootObject,
root_children_count,
0);
int root_child_index = 0;
for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
if (agg_snapshot_->info()[i].bytes() > 0) {
AddEntryFromAggregatedSnapshot(snapshot,
&root_child_index,
HeapEntry::kInternal,
agg_snapshot_->info()[i].name(),
agg_snapshot_->info()[i].number(),
agg_snapshot_->info()[i].bytes(),
0,
0);
}
}
AllocatingConstructorHeapProfileIterator alloc_cons_iter(
snapshot, &root_child_index);
agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter);
AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
entries_map.UpdateEntries(&allocator);
// Fill up references.
IterateRetainers<AllocatingRetainersIterator>(&entries_map);
}
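For context, the aggregated snapshot type is reachable from the embedder API. A usage sketch, assuming this revision's include/v8-profiler.h exposes HeapSnapshot::Type with kFull and kAggregated (the exact public signature is not shown in this hunk, so verify against that header):

#include <v8.h>
#include <v8-profiler.h>

void TakeBothKinds() {
  v8::HandleScope scope;
  // Full snapshot: the complete object graph.
  const v8::HeapSnapshot* full = v8::HeapProfiler::TakeSnapshot(
      v8::String::New("full"), v8::HeapSnapshot::kFull);
  // Aggregated: per-type histogram plus constructor/retainer profiles,
  // generated by AggregatedHeapSnapshotGenerator above.
  const v8::HeapSnapshot* agg = v8::HeapProfiler::TakeSnapshot(
      v8::String::New("aggregated"), v8::HeapSnapshot::kAggregated);
  (void)full; (void)agg;
}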

64
deps/v8/src/heap-profiler.h

@@ -56,8 +56,8 @@ class HeapProfiler {
static void TearDown();

#ifdef ENABLE_LOGGING_AND_PROFILING
static HeapSnapshot* TakeSnapshot(const char* name);
static HeapSnapshot* TakeSnapshot(const char* name, int type);
static HeapSnapshot* TakeSnapshot(String* name);
static HeapSnapshot* TakeSnapshot(String* name, int type);
static int GetSnapshotsCount();
static HeapSnapshot* GetSnapshot(int index);
static HeapSnapshot* FindSnapshot(unsigned uid);
@@ -75,12 +75,8 @@ class HeapProfiler {
private:
HeapProfiler();
~HeapProfiler();
HeapSnapshot* TakeSnapshotImpl(const char* name);
HeapSnapshot* TakeSnapshotImpl(const char* name, int type);
HeapSnapshot* TakeSnapshotImpl(String* name);
HeapSnapshot* TakeSnapshotImpl(String* name, int type);

// Obsolete interface.
// Update the array info with stats from obj.
static void CollectStats(HeapObject* obj, HistogramInfo* info);

HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
@@ -132,7 +128,9 @@ class JSObjectsCluster BASE_EMBEDDED {
bool is_null() const { return constructor_ == NULL; }
bool can_be_coarsed() const { return instance_ != NULL; }
String* constructor() const { return constructor_; }
Object* instance() const { return instance_; }

const char* GetSpecialCaseName() const;
void Print(StringStream* accumulator) const;
// Allows null clusters to be printed.
void DebugPrint(StringStream* accumulator) const;
@@ -179,6 +177,9 @@ class ConstructorHeapProfile BASE_EMBEDDED {
virtual ~ConstructorHeapProfile() {}
void CollectStats(HeapObject* obj);
void PrintStats();

template<class Callback>
void ForEach(Callback* callback) { js_objects_info_tree_.ForEach(callback); }

// Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests.
virtual void Call(const JSObjectsCluster& cluster,
const NumberAndSizeInfo& number_and_size);
@@ -282,6 +283,8 @@ class ClustersCoarser BASE_EMBEDDED {
// "retainer profile" of JS objects allocated on heap.
// It is run during garbage collection cycle, thus it doesn't need
// to use handles.
class RetainerTreeAggregator;

class RetainerHeapProfile BASE_EMBEDDED {
public:
class Printer {
@@ -292,7 +295,14 @@ class RetainerHeapProfile BASE_EMBEDDED {
};

RetainerHeapProfile();
~RetainerHeapProfile();

RetainerTreeAggregator* aggregator() { return aggregator_; }
ClustersCoarser* coarser() { return &coarser_; }
JSObjectsRetainerTree* retainers_tree() { return &retainers_tree_; }

void CollectStats(HeapObject* obj);
void CoarseAndAggregate();
void PrintStats();
void DebugPrintStats(Printer* printer);
void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref);
@@ -301,6 +311,44 @@ class RetainerHeapProfile BASE_EMBEDDED {
ZoneScope zscope_;
JSObjectsRetainerTree retainers_tree_;
ClustersCoarser coarser_;
RetainerTreeAggregator* aggregator_;
};
class AggregatedHeapSnapshot {
public:
AggregatedHeapSnapshot();
~AggregatedHeapSnapshot();
HistogramInfo* info() { return info_; }
ConstructorHeapProfile* js_cons_profile() { return &js_cons_profile_; }
RetainerHeapProfile* js_retainer_profile() { return &js_retainer_profile_; }
private:
HistogramInfo* info_;
ConstructorHeapProfile js_cons_profile_;
RetainerHeapProfile js_retainer_profile_;
};
class HeapEntriesMap;
class HeapSnapshot;
class AggregatedHeapSnapshotGenerator {
public:
explicit AggregatedHeapSnapshotGenerator(AggregatedHeapSnapshot* snapshot);
void GenerateSnapshot();
void FillHeapSnapshot(HeapSnapshot* snapshot);
static const int kAllStringsType = LAST_TYPE + 1;
private:
void CalculateStringsStats();
void CollectStats(HeapObject* obj);
template<class Iterator>
void IterateRetainers(HeapEntriesMap* entries_map);
AggregatedHeapSnapshot* agg_snapshot_;
};
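The ForEach additions above follow the duck-typed callback convention used throughout this profiler code: any object with a matching Call method works as an iterator, as the counting and allocating iterators in heap-profiler.cc demonstrate. A stand-alone sketch of the shape (stand-in types, not V8's):

struct NumberAndSize { int number; int bytes; };
struct Cluster { const char* name; };

class TotalBytesCounter {
 public:
  // Matches the Call(cluster, number_and_size) shape ForEach expects.
  void Call(const Cluster& cluster, const NumberAndSize& info) {
    total_bytes_ += info.bytes;
  }
  int total_bytes() const { return total_bytes_; }
 private:
  int total_bytes_ = 0;
};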

209
deps/v8/src/heap.cc

@@ -104,6 +104,7 @@ List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
@@ -193,6 +194,33 @@ bool Heap::HasBeenSetup() {
}
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
MapWord map_word = object->map_word();
map_word.ClearMark();
map_word.ClearOverflow();
return object->SizeFromMap(map_word.ToMap());
}
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
ASSERT(MarkCompactCollector::are_map_pointers_encoded());
uint32_t marker = Memory::uint32_at(object->address());
if (marker == MarkCompactCollector::kSingleFreeEncoding) {
return kIntSize;
} else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
return Memory::int_at(object->address() + kIntSize);
} else {
MapWord map_word = object->map_word();
Address map_address = map_word.DecodeMapAddress(Heap::map_space());
Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
return object->SizeFromMap(map);
}
}
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
// Is global GC requested?
if (space != NEW_SPACE || FLAG_gc_global) {
@@ -540,6 +568,13 @@ void Heap::EnsureFromSpaceIsCommitted() {
// Committing memory to from space failed.
// Try shrinking and try again.
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
space->RelinkPageListInChunkOrder(true);
}
Shrink();
if (new_space_.CommitFromSpaceIfNeeded()) return;
@@ -571,6 +606,22 @@ void Heap::ClearJSFunctionResultCaches() {
}
class ClearThreadNormalizedMapCachesVisitor: public ThreadVisitor {
virtual void VisitThread(ThreadLocalTop* top) {
Context* context = top->context_;
if (context == NULL) return;
context->global()->global_context()->normalized_map_cache()->Clear();
}
};
void Heap::ClearNormalizedMapCaches() {
if (Bootstrapper::IsActive()) return;
ClearThreadNormalizedMapCachesVisitor visitor;
ThreadManager::IterateArchivedThreads(&visitor);
}
#ifdef DEBUG
enum PageWatermarkValidity {
@@ -637,12 +688,6 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
int start_new_space_size = Heap::new_space()->Size();

if (collector == MARK_COMPACTOR) {
if (FLAG_flush_code) {
// Flush all potentially unused code.
GCTracer::Scope gc_scope(tracer, GCTracer::Scope::MC_FLUSH_CODE);
FlushCode();
}
// Perform mark-sweep with optional compaction.
MarkCompact(tracer);
@@ -732,8 +777,6 @@ void Heap::MarkCompact(GCTracer* tracer) {
MarkCompactCollector::CollectGarbage();

MarkCompactEpilogue(is_compacting);

LOG(ResourceEvent("markcompact", "end"));

gc_state_ = NOT_IN_GC;
@@ -755,18 +798,11 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
CompilationCache::MarkCompactPrologue();

Top::MarkCompactPrologue(is_compacting);
ThreadManager::MarkCompactPrologue(is_compacting);

CompletelyClearInstanceofCache();

if (is_compacting) FlushNumberStringCache();
}
ClearNormalizedMapCaches();
void Heap::MarkCompactEpilogue(bool is_compacting) {
Top::MarkCompactEpilogue(is_compacting);
ThreadManager::MarkCompactEpilogue(is_compacting);
}
@@ -1100,6 +1136,10 @@ class ScavengingVisitor : public StaticVisitorBase {
&ObjectEvacuationStrategy<POINTER_OBJECT>::
VisitSpecialized<SharedFunctionInfo::kSize>);
table_.Register(kVisitJSFunction,
&ObjectEvacuationStrategy<POINTER_OBJECT>::
VisitSpecialized<JSFunction::kSize>);
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
kVisitDataObject,
kVisitDataObjectGeneric>();
@@ -1415,7 +1455,7 @@ bool Heap::CreateInitialMaps() {
set_meta_map(new_meta_map);
new_meta_map->set_map(new_meta_map);

obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
obj = AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_fixed_array_map(Map::cast(obj));
@@ -1457,6 +1497,11 @@ bool Heap::CreateInitialMaps() {
oddball_map()->set_prototype(null_value());
oddball_map()->set_constructor(null_value());
obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_fixed_cow_array_map(Map::cast(obj));
ASSERT(fixed_array_map() != fixed_cow_array_map());
obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
if (obj->IsFailure()) return false;
set_heap_number_map(Map::cast(obj));
@@ -1472,17 +1517,17 @@ bool Heap::CreateInitialMaps() {
roots_[entry.index] = Map::cast(obj);
}

obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize);
obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_undetectable_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();

obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize);
obj = AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_undetectable_ascii_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();

obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
obj = AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_byte_array_map(Map::cast(obj));
@@ -1525,7 +1570,7 @@ bool Heap::CreateInitialMaps() {
if (obj->IsFailure()) return false;
set_external_float_array_map(Map::cast(obj));

obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_code_map(Map::cast(obj));
@@ -1549,19 +1594,19 @@ bool Heap::CreateInitialMaps() {
roots_[entry.index] = Map::cast(obj);
}

obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_hash_table_map(Map::cast(obj));

obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_context_map(Map::cast(obj));

obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_catch_context_map(Map::cast(obj));

obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize);
obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
if (obj->IsFailure()) return false;
set_global_context_map(Map::cast(obj));
@@ -2354,109 +2399,6 @@ Object* Heap::AllocateExternalArray(int length,
}
// The StackVisitor is used to traverse all the archived threads to see if
// there are activations on any of the stacks corresponding to the code.
class FlushingStackVisitor : public ThreadVisitor {
public:
explicit FlushingStackVisitor(Code* code) : found_(false), code_(code) {}
void VisitThread(ThreadLocalTop* top) {
// If we already found the code in a previous traversed thread we return.
if (found_) return;
for (StackFrameIterator it(top); !it.done(); it.Advance()) {
if (code_->contains(it.frame()->pc())) {
found_ = true;
return;
}
}
}
bool FoundCode() {return found_;}
private:
bool found_;
Code* code_;
};
static bool CodeIsActive(Code* code) {
// Make sure we are not referencing the code from the stack.
for (StackFrameIterator it; !it.done(); it.Advance()) {
if (code->contains(it.frame()->pc())) return true;
}
// Iterate the archived stacks in all threads to check if
// the code is referenced.
FlushingStackVisitor threadvisitor(code);
ThreadManager::IterateArchivedThreads(&threadvisitor);
if (threadvisitor.FoundCode()) return true;
return false;
}
static void FlushCodeForFunction(JSFunction* function) {
SharedFunctionInfo* shared_info = function->shared();
// Special handling if the function and shared info objects
// have different code objects.
if (function->code() != shared_info->code()) {
// If the shared function has been flushed but the function has not,
// we flush the function if possible.
if (!shared_info->is_compiled() && function->is_compiled() &&
!CodeIsActive(function->code())) {
function->set_code(shared_info->code());
}
return;
}
// The function must be compiled and have the source code available,
// to be able to recompile it in case we need the function again.
if (!(shared_info->is_compiled() && shared_info->HasSourceCode())) return;
// We never flush code for Api functions.
if (shared_info->IsApiFunction()) return;
// Only flush code for functions.
if (!shared_info->code()->kind() == Code::FUNCTION) return;
// Function must be lazy compilable.
if (!shared_info->allows_lazy_compilation()) return;
// If this is a full script wrapped in a function we do no flush the code.
if (shared_info->is_toplevel()) return;
// If this function is in the compilation cache we do not flush the code.
if (CompilationCache::HasFunction(shared_info)) return;
// Check stack and archived threads for the code.
if (CodeIsActive(shared_info->code())) return;
// Compute the lazy compilable version of the code.
Code* code = Builtins::builtin(Builtins::LazyCompile);
shared_info->set_code(code);
function->set_code(code);
}
void Heap::FlushCode() {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Do not flush code if the debugger is loaded or there are breakpoints.
if (Debug::IsLoaded() || Debug::has_break_points()) return;
#endif
HeapObjectIterator it(old_pointer_space());
for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
// The function must have a valid context and not be a builtin.
if (function->unchecked_context()->IsContext() &&
!function->IsBuiltin()) {
FlushCodeForFunction(function);
}
}
}
}
Object* Heap::CreateCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference) {
@@ -2910,7 +2852,9 @@ Object* Heap::CopyJSObject(JSObject* source) {
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
if (elements->length() > 0) {
Object* elem = CopyFixedArray(elements);
Object* elem =
(elements->map() == fixed_cow_array_map()) ?
elements : CopyFixedArray(elements);
if (elem->IsFailure()) return elem;
JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
}
@@ -4057,8 +4001,8 @@ bool Heap::ConfigureHeapDefault() {

void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = 0xDECADE00;
*stats->start_marker = HeapStats::kStartMarker;
*stats->end_marker = 0xDECADE01;
*stats->end_marker = HeapStats::kEndMarker;
*stats->new_space_size = new_space_.Size();
*stats->new_space_capacity = new_space_.Capacity();
*stats->old_pointer_space_size = old_pointer_space_->Size();
@@ -4129,6 +4073,8 @@ bool Heap::Setup(bool create_heap_objects) {
NewSpaceScavenger::Initialize();
MarkCompactCollector::Initialize();

MarkMapPointersAsEncoded(false);

// Setup memory allocator and reserve a chunk of memory for new
// space. The chunk is double the size of the requested reserved
// new space size to ensure that we can find a pair of semispaces that
@@ -4815,7 +4761,6 @@ GCTracer::~GCTracer() {
PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
PrintF("flushcode=%d ", static_cast<int>(scopes_[Scope::MC_FLUSH_CODE]));

PrintF("total_size_before=%d ", start_size_);
PrintF("total_size_after=%d ", Heap::SizeOfObjects());

195
deps/v8/src/heap.h

@@ -30,6 +30,7 @@
#include <math.h>

#include "spaces.h"
#include "splay-tree-inl.h"
#include "v8-counters.h"
@@ -55,6 +56,7 @@ namespace internal {
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, fixed_cow_array_map, FixedCOWArrayMap) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Map, meta_map, MetaMap) \
V(Object, termination_exception, TerminationException) \
@ -312,61 +314,64 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
// Please note this does not perform a garbage collection. // Please note this does not perform a garbage collection.
static Object* AllocateJSObject(JSFunction* constructor, MUST_USE_RESULT static Object* AllocateJSObject(
PretenureFlag pretenure = NOT_TENURED); JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor. // Allocates and initializes a new global object based on a constructor.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
// Please note this does not perform a garbage collection. // Please note this does not perform a garbage collection.
static Object* AllocateGlobalObject(JSFunction* constructor); MUST_USE_RESULT static Object* AllocateGlobalObject(JSFunction* constructor);
// Returns a deep copy of the JavaScript object. // Returns a deep copy of the JavaScript object.
// Properties and elements are copied too. // Properties and elements are copied too.
// Returns failure if allocation failed. // Returns failure if allocation failed.
static Object* CopyJSObject(JSObject* source); MUST_USE_RESULT static Object* CopyJSObject(JSObject* source);
// Allocates the function prototype. // Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
// Please note this does not perform a garbage collection. // Please note this does not perform a garbage collection.
static Object* AllocateFunctionPrototype(JSFunction* function); MUST_USE_RESULT static Object* AllocateFunctionPrototype(
JSFunction* function);
// Reinitialize an JSGlobalProxy based on a constructor. The object // Reinitialize an JSGlobalProxy based on a constructor. The object
// must have the same size as objects allocated using the // must have the same size as objects allocated using the
// constructor. The object is reinitialized and behaves as an // constructor. The object is reinitialized and behaves as an
// object that has been freshly allocated using the constructor. // object that has been freshly allocated using the constructor.
static Object* ReinitializeJSGlobalProxy(JSFunction* constructor, MUST_USE_RESULT static Object* ReinitializeJSGlobalProxy(
JSFunction* constructor,
JSGlobalProxy* global); JSGlobalProxy* global);
// Allocates and initializes a new JavaScript object based on a map. // Allocates and initializes a new JavaScript object based on a map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
// Please note this does not perform a garbage collection. // Please note this does not perform a garbage collection.
static Object* AllocateJSObjectFromMap(Map* map, MUST_USE_RESULT static Object* AllocateJSObjectFromMap(
PretenureFlag pretenure = NOT_TENURED); Map* map, PretenureFlag pretenure = NOT_TENURED);
// Allocates a heap object based on the map. // Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
// Please note this function does not perform a garbage collection. // Please note this function does not perform a garbage collection.
static Object* Allocate(Map* map, AllocationSpace space); MUST_USE_RESULT static Object* Allocate(Map* map, AllocationSpace space);
// Allocates a JS Map in the heap. // Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
// Please note this function does not perform a garbage collection. // Please note this function does not perform a garbage collection.
static Object* AllocateMap(InstanceType instance_type, int instance_size); MUST_USE_RESULT static Object* AllocateMap(InstanceType instance_type,
int instance_size);
// Allocates a partial map for bootstrapping. // Allocates a partial map for bootstrapping.
static Object* AllocatePartialMap(InstanceType instance_type, MUST_USE_RESULT static Object* AllocatePartialMap(InstanceType instance_type,
int instance_size); int instance_size);
// Allocate a map for the specified function // Allocate a map for the specified function
static Object* AllocateInitialMap(JSFunction* fun); MUST_USE_RESULT static Object* AllocateInitialMap(JSFunction* fun);
// Allocates an empty code cache. // Allocates an empty code cache.
static Object* AllocateCodeCache(); MUST_USE_RESULT static Object* AllocateCodeCache();
// Clear the Instanceof cache (used when a prototype changes). // Clear the Instanceof cache (used when a prototype changes).
static void ClearInstanceofCache() { static void ClearInstanceofCache() {
@ -391,13 +396,13 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
// Please note this does not perform a garbage collection. // Please note this does not perform a garbage collection.
static Object* AllocateStringFromAscii( MUST_USE_RESULT static Object* AllocateStringFromAscii(
Vector<const char> str, Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED); PretenureFlag pretenure = NOT_TENURED);
static Object* AllocateStringFromUtf8( MUST_USE_RESULT static Object* AllocateStringFromUtf8(
Vector<const char> str, Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED); PretenureFlag pretenure = NOT_TENURED);
static Object* AllocateStringFromTwoByte( MUST_USE_RESULT static Object* AllocateStringFromTwoByte(
Vector<const uc16> str, Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED); PretenureFlag pretenure = NOT_TENURED);
@@ -405,15 +410,14 @@ class Heap : public AllStatic {
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
-  static inline Object* AllocateSymbol(Vector<const char> str,
+  MUST_USE_RESULT static inline Object* AllocateSymbol(Vector<const char> str,
                                        int chars,
                                        uint32_t hash_field);
-  static Object* AllocateInternalSymbol(unibrow::CharacterStream* buffer,
-                                        int chars,
-                                        uint32_t hash_field);
+  MUST_USE_RESULT static Object* AllocateInternalSymbol(
+      unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
-  static Object* AllocateExternalSymbol(Vector<const char> str,
+  MUST_USE_RESULT static Object* AllocateExternalSymbol(Vector<const char> str,
                                         int chars);

@@ -424,10 +428,10 @@ class Heap : public AllStatic {
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateRawAsciiString(
+  MUST_USE_RESULT static Object* AllocateRawAsciiString(
      int length,
      PretenureFlag pretenure = NOT_TENURED);
-  static Object* AllocateRawTwoByteString(
+  MUST_USE_RESULT static Object* AllocateRawTwoByteString(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

@@ -435,25 +439,27 @@ class Heap : public AllStatic {
  // A cache is used for ascii codes.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed. Please note this does not perform a garbage collection.
-  static Object* LookupSingleCharacterStringFromCode(uint16_t code);
+  MUST_USE_RESULT static Object* LookupSingleCharacterStringFromCode(
+      uint16_t code);

  // Allocate a byte array of the specified length
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateByteArray(int length, PretenureFlag pretenure);
+  MUST_USE_RESULT static Object* AllocateByteArray(int length,
+                                                   PretenureFlag pretenure);

  // Allocate a non-tenured byte array of the specified length
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateByteArray(int length);
+  MUST_USE_RESULT static Object* AllocateByteArray(int length);

  // Allocate a pixel array of the specified length
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocatePixelArray(int length,
+  MUST_USE_RESULT static Object* AllocatePixelArray(int length,
                                     uint8_t* external_pointer,
                                     PretenureFlag pretenure);

@@ -461,7 +467,8 @@ class Heap : public AllStatic {
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateExternalArray(int length,
+  MUST_USE_RESULT static Object* AllocateExternalArray(
+      int length,
      ExternalArrayType array_type,
      void* external_pointer,
      PretenureFlag pretenure);

@@ -470,59 +477,62 @@ class Heap : public AllStatic {
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateJSGlobalPropertyCell(Object* value);
+  MUST_USE_RESULT static Object* AllocateJSGlobalPropertyCell(Object* value);

  // Allocates a fixed array initialized with undefined values
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateFixedArray(int length, PretenureFlag pretenure);
+  MUST_USE_RESULT static Object* AllocateFixedArray(int length,
+                                                    PretenureFlag pretenure);

  // Allocates a fixed array initialized with undefined values
-  static Object* AllocateFixedArray(int length);
+  MUST_USE_RESULT static Object* AllocateFixedArray(int length);

  // Allocates an uninitialized fixed array. It must be filled by the caller.
  //
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateUninitializedFixedArray(int length);
+  MUST_USE_RESULT static Object* AllocateUninitializedFixedArray(int length);

  // Make a copy of src and return it. Returns
  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  static Object* CopyFixedArray(FixedArray* src);
+  MUST_USE_RESULT static Object* CopyFixedArray(FixedArray* src);

  // Allocates a fixed array initialized with the hole values.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateFixedArrayWithHoles(
+  MUST_USE_RESULT static Object* AllocateFixedArrayWithHoles(
      int length,
      PretenureFlag pretenure = NOT_TENURED);

  // AllocateHashTable is identical to AllocateFixedArray except
  // that the resulting object has hash_table_map as map.
-  static Object* AllocateHashTable(int length,
-                                   PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT static Object* AllocateHashTable(
+      int length, PretenureFlag pretenure = NOT_TENURED);

  // Allocate a global (but otherwise uninitialized) context.
-  static Object* AllocateGlobalContext();
+  MUST_USE_RESULT static Object* AllocateGlobalContext();

  // Allocate a function context.
-  static Object* AllocateFunctionContext(int length, JSFunction* closure);
+  MUST_USE_RESULT static Object* AllocateFunctionContext(int length,
+                                                         JSFunction* closure);

  // Allocate a 'with' context.
-  static Object* AllocateWithContext(Context* previous,
+  MUST_USE_RESULT static Object* AllocateWithContext(Context* previous,
                                      JSObject* extension,
                                      bool is_catch_context);

  // Allocates a new utility object in the old generation.
-  static Object* AllocateStruct(InstanceType type);
+  MUST_USE_RESULT static Object* AllocateStruct(InstanceType type);

  // Allocates a function initialized with a shared part.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateFunction(Map* function_map,
+  MUST_USE_RESULT static Object* AllocateFunction(
+      Map* function_map,
      SharedFunctionInfo* shared,
      Object* prototype,
      PretenureFlag pretenure = TENURED);

@@ -537,47 +547,52 @@ class Heap : public AllStatic {
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateArgumentsObject(Object* callee, int length);
+  MUST_USE_RESULT static Object* AllocateArgumentsObject(Object* callee,
+                                                         int length);

  // Same as NewNumberFromDouble, but may return a preallocated/immutable
  // number object (e.g., minus_zero_value_, nan_value_)
-  static Object* NumberFromDouble(double value,
-                                  PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT static Object* NumberFromDouble(
+      double value, PretenureFlag pretenure = NOT_TENURED);

  // Allocates a HeapNumber from value.
-  static Object* AllocateHeapNumber(double value, PretenureFlag pretenure);
-  static Object* AllocateHeapNumber(double value);  // pretenure = NOT_TENURED
+  MUST_USE_RESULT static Object* AllocateHeapNumber(double value,
+                                                    PretenureFlag pretenure);
+  // pretenure = NOT_TENURED.
+  MUST_USE_RESULT static Object* AllocateHeapNumber(double value);

  // Converts an int into either a Smi or a HeapNumber object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static inline Object* NumberFromInt32(int32_t value);
+  MUST_USE_RESULT static inline Object* NumberFromInt32(int32_t value);

  // Converts an int into either a Smi or a HeapNumber object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static inline Object* NumberFromUint32(uint32_t value);
+  MUST_USE_RESULT static inline Object* NumberFromUint32(uint32_t value);

  // Allocates a new proxy object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateProxy(Address proxy,
+  MUST_USE_RESULT static Object* AllocateProxy(
+      Address proxy,
      PretenureFlag pretenure = NOT_TENURED);

  // Allocates a new SharedFunctionInfo object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateSharedFunctionInfo(Object* name);
+  MUST_USE_RESULT static Object* AllocateSharedFunctionInfo(Object* name);

  // Allocates a new cons string object.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateConsString(String* first, String* second);
+  MUST_USE_RESULT static Object* AllocateConsString(String* first,
+                                                    String* second);

  // Allocates a new sub string object which is a substring of an underlying
  // string buffer stretching from the index start (inclusive) to the index

@@ -585,7 +600,8 @@ class Heap : public AllStatic {
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateSubString(String* buffer,
+  MUST_USE_RESULT static Object* AllocateSubString(
+      String* buffer,
      int start,
      int end,
      PretenureFlag pretenure = NOT_TENURED);

@@ -595,9 +611,9 @@ class Heap : public AllStatic {
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this does not perform a garbage collection.
-  static Object* AllocateExternalStringFromAscii(
+  MUST_USE_RESULT static Object* AllocateExternalStringFromAscii(
      ExternalAsciiString::Resource* resource);
-  static Object* AllocateExternalStringFromTwoByte(
+  MUST_USE_RESULT static Object* AllocateExternalStringFromTwoByte(
      ExternalTwoByteString::Resource* resource);

  // Finalizes an external string by deleting the associated external

@@ -609,7 +625,8 @@ class Heap : public AllStatic {
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
-  static inline Object* AllocateRaw(int size_in_bytes,
+  MUST_USE_RESULT static inline Object* AllocateRaw(
+      int size_in_bytes,
      AllocationSpace space,
      AllocationSpace retry_space);

@@ -623,26 +640,26 @@ class Heap : public AllStatic {
  // self_reference. This allows generated code to reference its own Code
  // object by containing this pointer.
  // Please note this function does not perform a garbage collection.
-  static Object* CreateCode(const CodeDesc& desc,
+  MUST_USE_RESULT static Object* CreateCode(const CodeDesc& desc,
                             Code::Flags flags,
                             Handle<Object> self_reference);
-  static Object* CopyCode(Code* code);
+  MUST_USE_RESULT static Object* CopyCode(Code* code);

  // Copy the code and scope info part of the code object, but insert
  // the provided data as the relocation information.
-  static Object* CopyCode(Code* code, Vector<byte> reloc_info);
+  MUST_USE_RESULT static Object* CopyCode(Code* code, Vector<byte> reloc_info);

  // Finds the symbol for string in the symbol table.
  // If not found, a new symbol is added to the table and returned.
  // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
  // failed.
  // Please note this function does not perform a garbage collection.
-  static Object* LookupSymbol(Vector<const char> str);
+  MUST_USE_RESULT static Object* LookupSymbol(Vector<const char> str);
-  static Object* LookupAsciiSymbol(const char* str) {
+  MUST_USE_RESULT static Object* LookupAsciiSymbol(const char* str) {
    return LookupSymbol(CStrVector(str));
  }
-  static Object* LookupSymbol(String* str);
+  MUST_USE_RESULT static Object* LookupSymbol(String* str);
  static bool LookupSymbolIfExists(String* str, String** symbol);
  static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);

@@ -657,7 +674,7 @@ class Heap : public AllStatic {
  // string might stay non-flat even when a failure is not returned.
  //
  // Please note this function does not perform a garbage collection.
-  static inline Object* PrepareForCompare(String* str);
+  MUST_USE_RESULT static inline Object* PrepareForCompare(String* str);

  // Converts the given boolean condition to JavaScript boolean value.
  static Object* ToBoolean(bool condition) {

@@ -817,6 +834,13 @@ class Heap : public AllStatic {
    roots_[kCodeStubsRootIndex] = value;
  }

+  // Support for computing object sizes for old objects during GCs. Returns
+  // a function that is guaranteed to be safe for computing object sizes in
+  // the current GC phase.
+  static HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
+    return gc_safe_size_of_old_object_;
+  }
+
  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
  static void public_set_non_monomorphic_cache(NumberDictionary* value) {
    roots_[kNonMonomorphicCacheRootIndex] = value;

@@ -856,8 +880,10 @@ class Heap : public AllStatic {
  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
  // failed.
  // Please note this function does not perform a garbage collection.
-  static Object* CreateSymbol(const char* str, int length, int hash);
-  static Object* CreateSymbol(String* str);
+  MUST_USE_RESULT static Object* CreateSymbol(const char* str,
+                                              int length,
+                                              int hash);
+  MUST_USE_RESULT static Object* CreateSymbol(String* str);

  // Write barrier support for address[offset] = o.
  static inline void RecordWrite(Address address, int offset);

@@ -929,8 +955,8 @@ class Heap : public AllStatic {
  static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);

  // Allocate uninitialized fixed array.
-  static Object* AllocateRawFixedArray(int length);
-  static Object* AllocateRawFixedArray(int length,
-                                       PretenureFlag pretenure);
+  MUST_USE_RESULT static Object* AllocateRawFixedArray(int length);
+  MUST_USE_RESULT static Object* AllocateRawFixedArray(int length,
+                                                       PretenureFlag pretenure);

  // True if we have reached the allocation limit in the old generation that

@@ -974,7 +1000,8 @@ class Heap : public AllStatic {
    kRootListLength
  };

-  static Object* NumberToString(Object* number,
+  MUST_USE_RESULT static Object* NumberToString(
+      Object* number,
      bool check_number_string_cache = true);

  static Map* MapForExternalArrayType(ExternalArrayType array_type);

@@ -1020,6 +1047,8 @@ class Heap : public AllStatic {
  static void ClearJSFunctionResultCaches();

+  static void ClearNormalizedMapCaches();
+
  static GCTracer* tracer() { return tracer_; }

 private:

@@ -1168,6 +1197,18 @@ class Heap : public AllStatic {
  static GCCallback global_gc_prologue_callback_;
  static GCCallback global_gc_epilogue_callback_;

+  // Support for computing object sizes during GC.
+  static HeapObjectCallback gc_safe_size_of_old_object_;
+  static int GcSafeSizeOfOldObject(HeapObject* object);
+  static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
+
+  // Update the GC state. Called from the mark-compact collector.
+  static void MarkMapPointersAsEncoded(bool encoded) {
+    gc_safe_size_of_old_object_ = encoded
+        ? &GcSafeSizeOfOldObjectWithEncodedMap
+        : &GcSafeSizeOfOldObject;
+  }
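The block above swaps a function pointer instead of branching on a flag: while mark-compact has map pointers encoded, every size computation automatically goes through the encoded-map variant. A self-contained sketch of the pattern, with stand-in names and trivial bodies:

#include <cstdio>

typedef int (*SizeOfCallback)(const void* object);

// Stand-ins: one reads the size through the map pointer as usual, the
// other knows the map word is temporarily an encoded forwarding value.
static int NormalSizeOf(const void*) { return 8; }
static int EncodedMapSizeOf(const void*) { return 8; }

// Mirrors gc_safe_size_of_old_object_: callers always call through the
// pointer and never test which GC phase is active.
static SizeOfCallback gc_safe_size_of = &NormalSizeOf;

static void MarkMapPointersAsEncoded(bool encoded) {
  gc_safe_size_of = encoded ? &EncodedMapSizeOf : &NormalSizeOf;
}

int main() {
  int probe = 0;
  MarkMapPointersAsEncoded(true);   // compaction starts encoding maps
  int a = gc_safe_size_of(&probe);  // safe while maps are encoded
  MarkMapPointersAsEncoded(false);  // back to the normal reader
  int b = gc_safe_size_of(&probe);
  std::printf("%d %d\n", a, b);
  return 0;
}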
  // Checks whether a global GC is necessary
  static GarbageCollector SelectGarbageCollector(AllocationSpace space);

@@ -1180,10 +1221,10 @@ class Heap : public AllStatic {
  // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
  // have to test the allocation space argument and (b) can reduce code size
  // (since both AllocateRaw and AllocateRawMap are inlined).
-  static inline Object* AllocateRawMap();
+  MUST_USE_RESULT static inline Object* AllocateRawMap();

  // Allocate an uninitialized object in the global property cell space.
-  static inline Object* AllocateRawCell();
+  MUST_USE_RESULT static inline Object* AllocateRawCell();

  // Initializes a JSObject based on its map.
  static void InitializeJSObjectFromMap(JSObject* obj,

@@ -1221,7 +1262,6 @@ class Heap : public AllStatic {
  // Code to be run before and after mark-compact.
  static void MarkCompactPrologue(bool is_compacting);
-  static void MarkCompactEpilogue(bool is_compacting);

  // Completely clear the Instanceof cache (to stop it keeping objects alive
  // around a GC).

@@ -1245,7 +1285,8 @@ class Heap : public AllStatic {
  // other parts of the VM could use it. Specifically, a function that creates
  // instances of type JS_FUNCTION_TYPE benefits from the use of this function.
  // Please note this does not perform a garbage collection.
-  static inline Object* InitializeFunction(JSFunction* function,
+  MUST_USE_RESULT static inline Object* InitializeFunction(
+      JSFunction* function,
      SharedFunctionInfo* shared,
      Object* prototype);

@@ -1257,10 +1298,6 @@ class Heap : public AllStatic {
  // Flush the number to string cache.
  static void FlushNumberStringCache();

-  // Flush code from functions we do not expect to use again. The code will
-  // be replaced with a lazy compilable version.
-  static void FlushCode();
-
  static void UpdateSurvivalRateTrend(int start_new_space_size);

  enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };

@@ -1317,11 +1354,15 @@ class Heap : public AllStatic {
  friend class DisallowAllocationFailure;
  friend class AlwaysAllocateScope;
  friend class LinearAllocationScope;
+  friend class MarkCompactCollector;
};


class HeapStats {
 public:
+  static const int kStartMarker = 0xDECADE00;
+  static const int kEndMarker = 0xDECADE01;
+
  int* start_marker;        // 0
  int* new_space_size;      // 1
  int* new_space_capacity;  // 2

@@ -1861,7 +1902,7 @@ class TranscendentalCache {
  // Returns a heap number with f(input), where f is a math function specified
  // by the 'type' argument.
-  static inline Object* Get(Type type, double input) {
+  MUST_USE_RESULT static inline Object* Get(Type type, double input) {
    TranscendentalCache* cache = caches_[type];
    if (cache == NULL) {
      caches_[type] = cache = new TranscendentalCache(type);

@@ -1874,7 +1915,7 @@ class TranscendentalCache {
  static void Clear();

 private:
-  inline Object* Get(double input) {
+  MUST_USE_RESULT inline Object* Get(double input) {
    Converter c;
    c.dbl = input;
    int hash = Hash(c);
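Get shows the cache's structure: one lazily created sub-cache per math function, addressed by a hash of the input's raw bits. A self-contained sketch of such a direct-mapped cache (the size, the hash, and the use of sin are assumptions for illustration; the real class stores heap objects, not doubles):

#include <cmath>
#include <cstdint>
#include <cstring>

// Direct-mapped cache: one slot per hash bucket, overwritten on collision.
class TranscendentalCacheSketch {
 public:
  double Get(double input) {
    uint64_t bits;
    std::memcpy(&bits, &input, sizeof(bits));  // hash the raw double bits
    int hash = static_cast<int>((bits ^ (bits >> 32)) & (kSize - 1));
    if (elements_[hash].valid && elements_[hash].in == input) {
      return elements_[hash].out;              // hit
    }
    double result = std::sin(input);           // miss: compute and fill
    elements_[hash] = {input, result, true};
    return result;
  }

 private:
  static const int kSize = 512;
  struct Element { double in; double out; bool valid; };
  Element elements_[kSize] = {};
};

// Lazy creation per type, as in the static Get above:
static TranscendentalCacheSketch* caches[4] = {};
double CachedSin(double x) {
  if (caches[0] == nullptr) caches[0] = new TranscendentalCacheSketch();
  return caches[0]->Get(x);
}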

148
deps/v8/src/ia32/builtins-ia32.cc

@@ -29,6 +29,7 @@

#if defined(V8_TARGET_ARCH_IA32)

+#include "code-stubs.h"
#include "codegen-inl.h"

namespace v8 {

@@ -95,10 +96,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
  // edi: called object
  // eax: number of arguments
  __ bind(&non_function_call);
-  // CALL_NON_FUNCTION expects the non-function constructor as receiver
-  // (instead of the original receiver from the call site). The receiver is
-  // stack element argc+1.
-  __ mov(Operand(esp, eax, times_4, kPointerSize), edi);
  // Set expected number of arguments to zero (not changing eax).
  __ Set(ebx, Immediate(0));
  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);

@@ -567,9 +564,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
  __ mov(ebx,
         FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
  __ SmiUntag(ebx);
-  __ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
-  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ cmp(eax, Operand(ebx));
  __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));

@@ -700,17 +696,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}

-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
-  // Load the global context.
-  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ mov(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
-  // Load the Array function from the global context.
-  __ mov(result,
-         Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}

// Number of empty elements to allocate for an empty array.
static const int kPreallocatedArrayElements = 4;

@@ -1100,7 +1085,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
  Label generic_array_code;

  // Get the Array function.
-  GenerateLoadArrayFunction(masm, edi);
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);

  if (FLAG_debug_code) {
    // Initial map for the builtin Array function should be a map.

@@ -1136,7 +1121,7 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
  if (FLAG_debug_code) {
    // The array construct code is only set for the builtin Array function which
    // does always have a map.
-    GenerateLoadArrayFunction(masm, ebx);
+    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ebx);
    __ cmp(edi, Operand(ebx));
    __ Assert(equal, "Unexpected Array function");
    // Initial map for the builtin Array function should be a map.
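Both Array builtins now call a shared macro-assembler helper in place of the file-local GenerateLoadArrayFunction deleted above. A hedged reconstruction of what that helper plausibly does, mirroring the three loads of the removed code (the real body lives in the ia32 macro assembler, not here):

// Same loads as the deleted helper, with the context slot index passed
// in instead of hard-coded to the Array function.
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global context from the current context (esi).
  mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  mov(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
  // Load the requested function from its global-context slot.
  mov(function, Operand(function, Context::SlotOffset(index)));
}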
@@ -1160,6 +1145,131 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
}
void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
// -- esp[0] : return address
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
__ IncrementCounter(&Counters::string_ctor_calls, 1);
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
__ cmp(edi, Operand(ecx));
__ Assert(equal, "Unexpected String function");
}
// Load the first argument into eax and get rid of the rest
// (including the receiver).
Label no_arguments;
__ test(eax, Operand(eax));
__ j(zero, &no_arguments);
__ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
__ pop(ecx);
__ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
__ push(ecx);
__ mov(eax, ebx);
// Lookup the argument in the number to string cache.
Label not_cached, argument_is_string;
NumberToStringStub::GenerateLookupNumberStringCache(
masm,
eax, // Input.
ebx, // Result.
ecx, // Scratch 1.
edx, // Scratch 2.
false, // Input is known to be smi?
&not_cached);
__ IncrementCounter(&Counters::string_ctor_cached_number, 1);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
// -- ebx : argument converted to string
// -- edi : constructor function
// -- esp[0] : return address
// -----------------------------------
// Allocate a JSValue and put the tagged pointer into eax.
Label gc_required;
__ AllocateInNewSpace(JSValue::kSize,
eax, // Result.
ecx, // New allocation top (we ignore it).
no_reg,
&gc_required,
TAG_OBJECT);
// Set the map.
__ LoadGlobalFunctionInitialMap(edi, ecx);
if (FLAG_debug_code) {
__ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
JSValue::kSize >> kPointerSizeLog2);
__ Assert(equal, "Unexpected string wrapper instance size");
__ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
__ Assert(equal, "Unexpected unused properties of string wrapper");
}
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
// Set properties and elements.
__ Set(ecx, Immediate(Factory::empty_fixed_array()));
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
__ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
// Set the value.
__ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
// Ensure the object is fully initialized.
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
// We're done. Return.
__ ret(0);
// The argument was not found in the number to string cache. Check
// if it's a string already before calling the conversion builtin.
Label convert_argument;
__ bind(&not_cached);
STATIC_ASSERT(kSmiTag == 0);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &convert_argument);
Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
__ j(NegateCondition(is_string), &convert_argument);
__ mov(ebx, eax);
__ IncrementCounter(&Counters::string_ctor_string_value, 1);
__ jmp(&argument_is_string);
// Invoke the conversion builtin and put the result into ebx.
__ bind(&convert_argument);
__ IncrementCounter(&Counters::string_ctor_conversions, 1);
__ EnterInternalFrame();
__ push(edi); // Preserve the function.
__ push(eax);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
__ pop(edi);
__ LeaveInternalFrame();
__ mov(ebx, eax);
__ jmp(&argument_is_string);
// Load the empty string into ebx, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
__ Set(ebx, Immediate(Factory::empty_string()));
__ pop(ecx);
__ lea(esp, Operand(esp, kPointerSize));
__ push(ecx);
__ jmp(&argument_is_string);
// At this point the argument is already a string. Call runtime to
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(&Counters::string_ctor_gc_required, 1);
__ EnterInternalFrame();
__ push(ebx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
__ LeaveInternalFrame();
__ ret(0);
}
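Stripped of register bookkeeping, the new builtin is a four-way dispatch for new String(arg). A plain-C++ model of that control flow; every type and helper below is a stand-in for illustration, not V8 API:

#include <string>

struct StringWrapper { std::string value; };  // models the JSValue

// Stand-ins for the number-to-string cache and the TO_STRING builtin.
static const char* LookupNumberStringCache(double) { return nullptr; }
static std::string ToStringBuiltin(double n) { return std::to_string(n); }

StringWrapper NewStringWrapper(const double* number, const std::string* str) {
  // no_arguments: wrap the empty string.
  if (number == nullptr && str == nullptr) return StringWrapper{std::string()};
  // argument_is_string: an actual string is wrapped directly.
  if (str != nullptr) return StringWrapper{*str};
  // Numbers try the number-to-string cache first...
  if (const char* cached = LookupNumberStringCache(*number)) {
    return StringWrapper{cached};
  }
  // ...and fall back to the generic conversion builtin on a miss.
  return StringWrapper{ToStringBuiltin(*number)};
}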
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
  __ push(ebp);
  __ mov(ebp, Operand(esp));

4615
deps/v8/src/ia32/code-stubs-ia32.cc

File diff suppressed because it is too large

376
deps/v8/src/ia32/code-stubs-ia32.h

@@ -0,0 +1,376 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_CODE_STUBS_IA32_H_
#define V8_IA32_CODE_STUBS_IA32_H_
#include "macro-assembler.h"
#include "code-stubs.h"
#include "ic-inl.h"
namespace v8 {
namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
explicit TranscendentalCacheStub(TranscendentalCache::Type type)
: type_(type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
Runtime::FunctionId RuntimeFunction();
void GenerateOperation(MacroAssembler* masm);
};
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return 0; }
};
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
};
class GenericBinaryOpStub: public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags,
TypeInfo operands_type)
: op_(op),
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false),
static_operands_type_(operands_type),
runtime_operands_type_(BinaryOpIC::DEFAULT),
name_(NULL) {
if (static_operands_type_.IsSmi()) {
mode_ = NO_OVERWRITE;
}
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
flags_(FlagBits::decode(key)),
args_in_registers_(ArgsInRegistersBits::decode(key)),
args_reversed_(ArgsReversedBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
static_operands_type_(TypeInfo::ExpandedRepresentation(
StaticTypeInfoBits::decode(key))),
runtime_operands_type_(runtime_operands_type),
name_(NULL) {
}
// Generate code to call the stub with the supplied arguments. This will add
// code at the call site to prepare arguments either in registers or on the
// stack together with the actual call.
void GenerateCall(MacroAssembler* masm, Register left, Register right);
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
bool ArgsInRegistersSupported() {
return op_ == Token::ADD || op_ == Token::SUB
|| op_ == Token::MUL || op_ == Token::DIV;
}
private:
Token::Value op_;
OverwriteMode mode_;
GenericBinaryFlags flags_;
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
// Number type information of operands, determined by code generator.
TypeInfo static_operands_type_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo runtime_operands_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("GenericBinaryOpStub %d (op %s), "
"(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
static_cast<int>(flags_),
static_cast<int>(args_in_registers_),
static_cast<int>(args_reversed_),
static_operands_type_.ToString());
}
#endif
// Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class SSE3Bits: public BitField<bool, 9, 1> {};
class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
class ArgsReversedBits: public BitField<bool, 11, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
class StaticTypeInfoBits: public BitField<int, 13, 3> {};
class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FlagBits::encode(flags_)
| SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
| ArgsReversedBits::encode(args_reversed_)
| StaticTypeInfoBits::encode(
static_operands_type_.ThreeBitRepresentation())
| RuntimeTypeInfoBits::encode(runtime_operands_type_);
}
void Generate(MacroAssembler* masm);
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
}
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
bool HasArgsInRegisters() { return args_in_registers_; }
bool HasArgsReversed() { return args_reversed_; }
bool ShouldGenerateSmiCode() {
return HasSmiCodeInStub() &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS;
}
bool ShouldGenerateFPCode() {
return runtime_operands_type_ != BinaryOpIC::STRINGS;
}
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(runtime_operands_type_);
}
friend class CodeGenerator;
};
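MinorKey above packs seven fields into 18 bits through BitField instantiations whose shift/size ranges never overlap, so the encoded values can simply be ORed together and decoded independently. A self-contained sketch of the idea behind that template (not a verbatim copy of V8's version):

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
class BitFieldSketch {
 public:
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    // The value must fit in the field's bit width.
    assert((static_cast<uint32_t>(value) & ~((1u << size) - 1)) == 0);
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// Two fields packed the way MinorKey does it: the encodings OR together
// because their bit ranges are disjoint.
typedef BitFieldSketch<int, 0, 2> ModeBits;  // bits 0-1
typedef BitFieldSketch<int, 2, 7> OpBits;    // bits 2-8

int main() {
  uint32_t key = ModeBits::encode(3) | OpBits::encode(42);
  assert(ModeBits::decode(key) == 3);
  assert(OpBits::decode(key) == 42);
  return 0;
}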
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
// overhead. Copying of overlapping regions is not supported.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii);
// Generate code for copying characters using the rep movs instruction.
// Copies ecx characters from esi to edi. Copying of overlapping regions is
// not supported.
static void GenerateCopyCharactersREP(MacroAssembler* masm,
Register dest, // Must be edi.
Register src, // Must be esi.
Register count, // Must be ecx.
Register scratch, // Neither of above.
bool ascii);
// Probe the symbol table for a two character string. If the string
// requires non-standard hashing a jump to the label not_probed is
// performed and registers c1 and c2 are preserved. In all other
// cases they are clobbered. If the string is not found by probing a
// jump to the label not_found is performed. This jump does not
// guarantee that the string is not in the symbol table. If the
// string is found the code falls through with the string in
// register eax.
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_probed,
Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character,
Register scratch);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character,
Register scratch);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
// Omit left string check in stub (left is definitely a string).
NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
// Omit right string check in stub (right is definitely a string).
NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
// Omit both string checks in stub.
NO_STRING_CHECK_IN_STUB =
NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
void GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* slow);
const StringAddFlags flags_;
};
class SubStringStub: public CodeStub {
public:
SubStringStub() {}
private:
Major MajorKey() { return SubString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public CodeStub {
public:
explicit StringCompareStub() {
}
// Compare two flat ascii strings and returns result in eax after popping two
// arguments from the stack.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
Major MajorKey() { return StringCompare; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class NumberToStringStub: public CodeStub {
public:
NumberToStringStub() { }
// Generate code to do a lookup in the number string cache. If the number in
// the register object is found in the cache the generated code falls through
// with the result in the result register. The object and the result register
// can be the same. If the number is not found in the cache the code jumps to
// the label not_found with only the content of register object unchanged.
static void GenerateLookupNumberStringCache(MacroAssembler* masm,
Register object,
Register result,
Register scratch1,
Register scratch2,
bool object_is_smi,
Label* not_found);
private:
Major MajorKey() { return NumberToString; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "NumberToStringStub"; }
#ifdef DEBUG
void Print() {
PrintF("NumberToStringStub\n");
}
#endif
};
} } // namespace v8::internal
#endif // V8_IA32_CODE_STUBS_IA32_H_
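The comment on GenerateLookupNumberStringCache describes a fall-through-on-hit, jump-on-miss contract. A plain-C++ model of that lookup, with the jump expressed as a boolean return (the types and the hash are stand-ins; the real stub hashes the smi value or the double's bits):

#include <cstdint>
#include <string>

// On a hit the stub "falls through" with the result; on a miss it
// "jumps" to not_found with the input untouched.
struct Entry { double key; std::string value; bool valid; };

bool LookupNumberStringCache(const Entry* cache, int mask, double number,
                             std::string* result) {
  // Stand-in hash; assumes a non-negative input for simplicity.
  int index = static_cast<int>(static_cast<int64_t>(number)) & mask;
  const Entry& entry = cache[index];
  if (entry.valid && entry.key == number) {
    *result = entry.value;  // hit: fall through with the result
    return true;
  }
  return false;  // miss: jump to not_found, input unchanged
}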

4805
deps/v8/src/ia32/codegen-ia32.cc

File diff suppressed because it is too large

338
deps/v8/src/ia32/codegen-ia32.h

@@ -574,6 +574,11 @@ class CodeGenerator: public AstVisitor {
  void Int32BinaryOperation(BinaryOperation* node);

+  // Generate a stub call from the virtual frame.
+  Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
+                                         Result* left,
+                                         Result* right);
+
  void Comparison(AstNode* node,
                  Condition cc,
                  bool strict,

@@ -627,9 +632,6 @@ class CodeGenerator: public AstVisitor {
  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
  bool CheckForInlineRuntimeCall(CallRuntime* node);
-  static bool PatchInlineRuntimeEntry(Handle<String> name,
-                                      const InlineRuntimeLUT& new_entry,
-                                      InlineRuntimeLUT* old_entry);

  void ProcessDeclarations(ZoneList<Declaration*>* declarations);

@@ -699,8 +701,14 @@ class CodeGenerator: public AstVisitor {
  // Support for direct calls from JavaScript to native RegExp code.
  void GenerateRegExpExec(ZoneList<Expression*>* args);

+  // Construct a RegExp exec result with two in-object properties.
  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);

+  // Clone the result of a regexp function.
+  // Must be an object created by GenerateRegExpConstructResult with
+  // no extra properties.
+  void GenerateRegExpCloneResult(ZoneList<Expression*>* args);
+
  // Support for fast native caches.
  void GenerateGetFromCache(ZoneList<Expression*>* args);

@@ -724,6 +732,9 @@ class CodeGenerator: public AstVisitor {
  // Check whether two RegExps are equivalent
  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);

+  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+
  // Simple condition analysis.
  enum ConditionAnalysis {
    ALWAYS_TRUE,

@@ -797,327 +808,6 @@ class CodeGenerator: public AstVisitor {
};
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
explicit TranscendentalCacheStub(TranscendentalCache::Type type)
: type_(type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
Runtime::FunctionId RuntimeFunction();
void GenerateOperation(MacroAssembler* masm);
};
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return ToBoolean; }
int MinorKey() { return 0; }
};
// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
enum GenericBinaryFlags {
NO_GENERIC_BINARY_FLAGS = 0,
NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
};
class GenericBinaryOpStub: public CodeStub {
public:
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
GenericBinaryFlags flags,
TypeInfo operands_type)
: op_(op),
mode_(mode),
flags_(flags),
args_in_registers_(false),
args_reversed_(false),
static_operands_type_(operands_type),
runtime_operands_type_(BinaryOpIC::DEFAULT),
name_(NULL) {
if (static_operands_type_.IsSmi()) {
mode_ = NO_OVERWRITE;
}
use_sse3_ = CpuFeatures::IsSupported(SSE3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
flags_(FlagBits::decode(key)),
args_in_registers_(ArgsInRegistersBits::decode(key)),
args_reversed_(ArgsReversedBits::decode(key)),
use_sse3_(SSE3Bits::decode(key)),
static_operands_type_(TypeInfo::ExpandedRepresentation(
StaticTypeInfoBits::decode(key))),
runtime_operands_type_(runtime_operands_type),
name_(NULL) {
}
// Generate code to call the stub with the supplied arguments. This will add
// code at the call site to prepare arguments either in registers or on the
// stack together with the actual call.
void GenerateCall(MacroAssembler* masm, Register left, Register right);
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
Result GenerateCall(MacroAssembler* masm,
VirtualFrame* frame,
Result* left,
Result* right);
private:
Token::Value op_;
OverwriteMode mode_;
GenericBinaryFlags flags_;
bool args_in_registers_; // Arguments passed in registers not on the stack.
bool args_reversed_; // Left and right argument are swapped.
bool use_sse3_;
// Number type information of operands, determined by code generator.
TypeInfo static_operands_type_;
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo runtime_operands_type_;
char* name_;
const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("GenericBinaryOpStub %d (op %s), "
"(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
static_cast<int>(flags_),
static_cast<int>(args_in_registers_),
static_cast<int>(args_reversed_),
static_operands_type_.ToString());
}
#endif
// Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class SSE3Bits: public BitField<bool, 9, 1> {};
class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
class ArgsReversedBits: public BitField<bool, 11, 1> {};
class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
class StaticTypeInfoBits: public BitField<int, 13, 3> {};
class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
// Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| FlagBits::encode(flags_)
| SSE3Bits::encode(use_sse3_)
| ArgsInRegistersBits::encode(args_in_registers_)
| ArgsReversedBits::encode(args_reversed_)
| StaticTypeInfoBits::encode(
static_operands_type_.ThreeBitRepresentation())
| RuntimeTypeInfoBits::encode(runtime_operands_type_);
}
void Generate(MacroAssembler* masm);
void GenerateSmiCode(MacroAssembler* masm, Label* slow);
void GenerateLoadArguments(MacroAssembler* masm);
void GenerateReturn(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
void GenerateRegisterArgsPush(MacroAssembler* masm);
void GenerateTypeTransition(MacroAssembler* masm);
bool ArgsInRegistersSupported() {
return op_ == Token::ADD || op_ == Token::SUB
|| op_ == Token::MUL || op_ == Token::DIV;
}
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
}
void SetArgsInRegisters() { args_in_registers_ = true; }
void SetArgsReversed() { args_reversed_ = true; }
bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
bool HasArgsInRegisters() { return args_in_registers_; }
bool HasArgsReversed() { return args_reversed_; }
bool ShouldGenerateSmiCode() {
return HasSmiCodeInStub() &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS;
}
bool ShouldGenerateFPCode() {
return runtime_operands_type_ != BinaryOpIC::STRINGS;
}
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(runtime_operands_type_);
}
};
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
// overhead. Copying of overlapping regions is not supported.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
bool ascii);
// Generate code for copying characters using the rep movs instruction.
// Copies ecx characters from esi to edi. Copying of overlapping regions is
// not supported.
static void GenerateCopyCharactersREP(MacroAssembler* masm,
Register dest, // Must be edi.
Register src, // Must be esi.
Register count, // Must be ecx.
Register scratch, // Neither of above.
bool ascii);
// Probe the symbol table for a two character string. If the string is
// not found by probing a jump to the label not_found is performed. This jump
// does not guarantee that the string is not in the symbol table. If the
// string is found the code falls through with the string in register eax.
static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
Register scratch2,
Register scratch3,
Label* not_found);
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
Register hash,
Register character,
Register scratch);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character,
Register scratch);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
  NO_STRING_ADD_FLAGS = 0,
  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
};


class StringAddStub: public CodeStub {
 public:
  explicit StringAddStub(StringAddFlags flags) {
    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
  }

 private:
  Major MajorKey() { return StringAdd; }
  int MinorKey() { return string_check_ ? 0 : 1; }

  void Generate(MacroAssembler* masm);

  // Should the stub check whether arguments are strings?
  bool string_check_;
};


class SubStringStub: public CodeStub {
 public:
  SubStringStub() {}

 private:
  Major MajorKey() { return SubString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


class StringCompareStub: public CodeStub {
 public:
  explicit StringCompareStub() {}

  // Compares two flat ascii strings and returns the result in eax after
  // popping two arguments from the stack.
  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                              Register left,
                                              Register right,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3);

 private:
  Major MajorKey() { return StringCompare; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);
};


class NumberToStringStub: public CodeStub {
 public:
  NumberToStringStub() { }

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache, the generated code falls
  // through with the result in the result register. The object and the result
  // register can be the same. If the number is not found in the cache, the
  // code jumps to the label not_found with only the content of register
  // object unchanged.
  static void GenerateLookupNumberStringCache(MacroAssembler* masm,
                                              Register object,
                                              Register result,
                                              Register scratch1,
                                              Register scratch2,
                                              bool object_is_smi,
                                              Label* not_found);

 private:
  Major MajorKey() { return NumberToString; }
  int MinorKey() { return 0; }

  void Generate(MacroAssembler* masm);

  const char* GetName() { return "NumberToStringStub"; }

#ifdef DEBUG
  void Print() {
    PrintF("NumberToStringStub\n");
  }
#endif
};

} }  // namespace v8::internal

#endif  // V8_IA32_CODEGEN_IA32_H_
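The lookup described in the NumberToStringStub comment is essentially a small direct-mapped cache keyed on the number. A minimal C++ sketch of the smi fast path; the cache size and the mask-based smi hash are illustrative assumptions, not the real heap layout:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    // Direct-mapped number->string cache: slot i holds a key/value pair.
    // On a hit the cached string is returned; on a miss the caller falls
    // back to the slow path (the not_found label in the stub).
    struct NumberStringCache {
      static const int kSize = 64;          // assumed power-of-two size
      int32_t keys[kSize];
      std::string values[kSize];

      bool Lookup(int32_t smi, std::string* out) {
        int index = smi & (kSize - 1);      // hash a smi by masking
        if (keys[index] == smi) { *out = values[index]; return true; }
        return false;                       // miss: jump to not_found
      }
      void Insert(int32_t smi, const std::string& str) {
        int index = smi & (kSize - 1);
        keys[index] = smi;
        values[index] = str;
      }
    };

    int main() {
      NumberStringCache cache{};
      cache.Insert(42, "42");
      std::string s;
      printf("hit=%d s=%s\n", cache.Lookup(42, &s) ? 1 : 0, s.c_str());
      return 0;
    }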

87
deps/v8/src/ia32/debug-ia32.cc

@@ -94,22 +94,33 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() {
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
-                                          RegList pointer_regs,
+                                          RegList object_regs,
+                                          RegList non_object_regs,
                                           bool convert_call_to_jmp) {
-  // Save the content of all general purpose registers in memory. This copy in
-  // memory is later pushed onto the JS expression stack for the fake JS frame
-  // generated and also to the C frame generated on top of that. In the JS
-  // frame ONLY the registers containing pointers will be pushed on the
-  // expression stack. This causes the GC to update these pointers so that
-  // they will have the correct value when returning from the debugger.
-  __ SaveRegistersToMemory(kJSCallerSaved);
-
   // Enter an internal frame.
   __ EnterInternalFrame();
 
-  // Store the registers containing object pointers on the expression stack to
-  // make sure that these are correctly updated during GC.
-  __ PushRegistersFromMemory(pointer_regs);
+  // Store the registers containing live values on the expression stack to
+  // make sure that these are correctly updated during GC. Non object values
+  // are stored as a smi causing it to be untouched by GC.
+  ASSERT((object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((object_regs & non_object_regs) == 0);
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    Register reg = { r };
+    if ((object_regs & (1 << r)) != 0) {
+      __ push(reg);
+    }
+    if ((non_object_regs & (1 << r)) != 0) {
+      if (FLAG_debug_code) {
+        __ test(reg, Immediate(0xc0000000));
+        __ Assert(zero, "Unable to encode value as smi");
+      }
+      __ SmiTag(reg);
+      __ push(reg);
+    }
+  }
 
 #ifdef DEBUG
   __ RecordComment("// Calling from debug break to runtime - come in - over");
@@ -117,12 +128,25 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   __ Set(eax, Immediate(0));  // no arguments
   __ mov(ebx, Immediate(ExternalReference::debug_break()));
 
-  CEntryStub ceb(1, ExitFrame::MODE_DEBUG);
+  CEntryStub ceb(1);
   __ CallStub(&ceb);
 
   // Restore the register values containing object pointers from the expression
-  // stack in the reverse order as they where pushed.
-  __ PopRegistersToMemory(pointer_regs);
+  // stack.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    Register reg = { r };
+    if (FLAG_debug_code) {
+      __ Set(reg, Immediate(kDebugZapValue));
+    }
+    if ((object_regs & (1 << r)) != 0) {
+      __ pop(reg);
+    }
+    if ((non_object_regs & (1 << r)) != 0) {
+      __ pop(reg);
+      __ SmiUntag(reg);
+    }
+  }
 
   // Get rid of the internal frame.
   __ LeaveInternalFrame();
@@ -130,12 +154,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
-    __ pop(eax);
+    __ add(Operand(esp), Immediate(kPointerSize));
   }
 
-  // Finally restore all registers.
-  __ RestoreRegistersFromMemory(kJSCallerSaved);
-
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
   // overwritten by the address of DebugBreakXXX.
@@ -151,7 +172,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   //  -- eax    : receiver
   //  -- ecx    : name
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false);
+  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false);
 }
@@ -162,7 +183,8 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   //  -- ecx    : name
   //  -- edx    : receiver
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
+  Generate_DebugBreakCallHelper(
+      masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
 }
@@ -172,7 +194,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
   //  -- edx    : receiver
   //  -- eax    : key
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), false);
+  Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false);
 }
@@ -183,19 +205,17 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
   //  -- ecx    : key
   //  -- edx    : receiver
   // -----------------------------------
-  // Register eax contains an object that needs to be pushed on the
-  // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false);
+  Generate_DebugBreakCallHelper(
+      masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
 }
 
 void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
   // Register state for keyed IC call call (from ic-ia32.cc)
   // ----------- S t a t e -------------
-  //  -- eax: number of arguments
+  //  -- ecx: name
   // -----------------------------------
-  // The number of arguments in eax is not smi encoded.
-  Generate_DebugBreakCallHelper(masm, 0, false);
+  Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false);
 }
@@ -204,10 +224,11 @@ void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
   // eax is the actual number of arguments not encoded as a smi see comment
   // above IC call.
   // ----------- S t a t e -------------
-  //  -- eax: number of arguments
+  //  -- eax: number of arguments (not smi)
+  //  -- edi: constructor function
   // -----------------------------------
   // The number of arguments in eax is not smi encoded.
-  Generate_DebugBreakCallHelper(masm, 0, false);
+  Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false);
 }
@@ -216,7 +237,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax: return value
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit(), true);
+  Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
 }
@@ -225,7 +246,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  No registers used on entry.
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, 0, false);
+  Generate_DebugBreakCallHelper(masm, 0, 0, false);
 }
@@ -245,7 +266,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
 void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
   // In the places where a debug break slot is inserted no registers can
   // contain object pointers.
-  Generate_DebugBreakCallHelper(masm, 0, true);
+  Generate_DebugBreakCallHelper(masm, 0, 0, true);
 }
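The new helper smi-tags caller-saved registers that hold raw integers before pushing them, so the GC only ever sees valid tagged words on the expression stack. A standalone sketch of that 31-bit encoding, including the 0xc0000000 range check the helper performs under --debug-code (the constants mirror the ia32 smi layout and are stated here as assumptions):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // On ia32 a smi is a 31-bit payload shifted left by one; tag bit 0 is 0.
    // Values using either of the top two bits cannot round-trip through the
    // signed 31-bit payload, which is exactly what the
    // `test reg, 0xc0000000` assertion in the helper checks.
    static bool FitsInSmi(uint32_t value) {
      return (value & 0xc0000000u) == 0;
    }

    static uint32_t SmiTag(uint32_t value) {
      assert(FitsInSmi(value) && "Unable to encode value as smi");
      return value << 1;
    }

    static uint32_t SmiUntag(uint32_t tagged) {
      return tagged >> 1;
    }

    int main() {
      uint32_t argc = 3;               // e.g. eax in the construct-call case
      uint32_t tagged = SmiTag(argc);  // pushed across the debugger call
      printf("tagged=0x%x untagged=%u\n", tagged, SmiUntag(tagged));
      return 0;
    }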

64
deps/v8/src/ia32/frames-ia32.cc

@@ -35,21 +35,6 @@ namespace v8 {
 namespace internal {
 
-StackFrame::Type StackFrame::ComputeType(State* state) {
-  ASSERT(state->fp != NULL);
-  if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
-    return ARGUMENTS_ADAPTOR;
-  }
-  // The marker and function offsets overlap. If the marker isn't a
-  // smi then the frame is a JavaScript frame -- and the marker is
-  // really the function.
-  const int offset = StandardFrameConstants::kMarkerOffset;
-  Object* marker = Memory::Object_at(state->fp + offset);
-  if (!marker->IsSmi()) return JAVA_SCRIPT;
-  return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
 StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
   if (fp == 0) return NONE;
   // Compute the stack pointer.
@@ -58,58 +43,11 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
   state->fp = fp;
   state->sp = sp;
   state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  ASSERT(*state->pc_address != NULL);
   return EXIT;
 }
 
-void ExitFrame::Iterate(ObjectVisitor* v) const {
-  v->VisitPointer(&code_slot());
-  // The arguments are traversed as part of the expression stack of
-  // the calling frame.
-}
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
-  return ComputeParametersCount();
-}
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
-  int arguments;
-  if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
-    // The arguments for cooked frames are traversed as if they were
-    // expression stack elements of the calling frame. The reason for
-    // this rather strange decision is that we cannot access the
-    // function during mark-compact GCs when the stack is cooked.
-    // In fact accessing heap objects (like function->shared() below)
-    // at all during GC is problematic.
-    arguments = 0;
-  } else {
-    // Compute the number of arguments by getting the number of formal
-    // parameters of the function. We must remember to take the
-    // receiver into account (+1).
-    JSFunction* function = JSFunction::cast(this->function());
-    arguments = function->shared()->formal_parameter_count() + 1;
-  }
-  const int offset = StandardFrameConstants::kCallerSPOffset;
-  return fp() + offset + (arguments * kPointerSize);
-}
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
-  const int arguments = Smi::cast(GetExpression(0))->value();
-  const int offset = StandardFrameConstants::kCallerSPOffset;
-  return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
-Address InternalFrame::GetCallerStackPointer() const {
-  // Internal frames have no arguments. The stack pointer of the
-  // caller is at a fixed offset from the frame pointer.
-  return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
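The removed JavaScriptFrame::GetCallerStackPointer (now shared outside the ia32 port) recovers the caller's stack pointer from the frame pointer and the argument count. A simplified sketch of that address arithmetic; the offset constant is illustrative, not the real StandardFrameConstants value:

    #include <cstdint>
    #include <cstdio>

    const int kPointerSize = 4;                    // ia32
    const int kCallerSPOffset = 2 * kPointerSize;  // assumed for illustration

    // The caller pushed `formal_parameter_count` arguments plus the receiver
    // just above its own stack pointer, so walking back from fp recovers it.
    uintptr_t CallerStackPointer(uintptr_t fp, int formal_parameter_count) {
      int arguments = formal_parameter_count + 1;  // +1 for the receiver
      return fp + kCallerSPOffset + arguments * kPointerSize;
    }

    int main() {
      printf("caller sp = 0x%lx\n",
             static_cast<unsigned long>(CallerStackPointer(0x1000, 2)));
      return 0;
    }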

1224
deps/v8/src/ia32/full-codegen-ia32.cc

File diff suppressed because it is too large

54
deps/v8/src/ia32/ic-ia32.cc

@@ -452,6 +452,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
 // Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
 static void GenerateFastArrayLoad(MacroAssembler* masm,
                                   Register receiver,
                                   Register key,
@@ -468,8 +469,12 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
   // we fall through.
   __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
-  // Check that the object is in fast mode (not dictionary).
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode and writable.
   __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true);
+  } else {
+    __ AssertFastElements(scratch);
+  }
   // Check that the key (index) is within bounds.
   __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
   __ j(above_equal, out_of_range);
@@ -514,31 +519,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm,
 }
 
-// Picks out an array index from the hash field.
-static void GenerateIndexFromHash(MacroAssembler* masm,
-                                  Register key,
-                                  Register hash) {
-  // Register use:
-  //   key - holds the overwritten key on exit.
-  //   hash - holds the key's hash. Clobbered.
-
-  // The assert checks that the constants for the maximum number of digits
-  // for an array index cached in the hash field and the number of bits
-  // reserved for it does not conflict.
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
-
-  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
-  // the low kHashShift bits.
-  ASSERT(String::kHashShift >= kSmiTagSize);
-  __ and_(hash, String::kArrayIndexValueMask);
-  __ shr(hash, String::kHashShift - kSmiTagSize);
-
-  // Here we actually clobber the key which will be used if calling into
-  // runtime later. However as the new key is the numeric value of a string key
-  // there is no difference in using either key.
-  __ mov(key, hash);
-}
-
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : key
@@ -558,12 +538,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   GenerateKeyedLoadReceiverCheck(
       masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
 
+  // Check the "has fast elements" bit in the receiver's map which is
+  // now in ecx.
+  __ test_b(FieldOperand(ecx, Map::kBitField2Offset),
+            1 << Map::kHasFastElements);
+  __ j(zero, &check_pixel_array, not_taken);
+
   GenerateFastArrayLoad(masm,
                         edx,
                         eax,
                         ecx,
                         eax,
-                        &check_pixel_array,
+                        NULL,
                         &slow);
   __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
   __ ret(0);
@@ -572,7 +558,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // Check whether the elements is a pixel array.
   // edx: receiver
   // eax: key
-  // ecx: elements
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
   __ mov(ebx, eax);
   __ SmiUntag(ebx);
   __ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
@@ -693,7 +679,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ ret(0);
 
   __ bind(&index_string);
-  GenerateIndexFromHash(masm, eax, ebx);
+  __ IndexFromHash(ebx, eax);
   // Now jump to the place where smi keys are handled.
   __ jmp(&index_smi);
 }
@@ -967,7 +953,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // edx: JSObject
   // ecx: key (a smi)
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check that the object is in fast mode (not dictionary).
+  // Check that the object is in fast mode and writable.
   __ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
   __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
   __ j(below, &fast, taken);
@@ -1023,8 +1009,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ jmp(&fast);
 
   // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode; if it is the
-  // length is always a smi.
+  // array. Check that the array is in fast mode (and writable); if it
+  // is the length is always a smi.
   __ bind(&array);
   // eax: value
   // edx: receiver, a JSArray
@@ -1554,7 +1540,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   GenerateMiss(masm, argc);
 
   __ bind(&index_string);
-  GenerateIndexFromHash(masm, ecx, ebx);
+  __ IndexFromHash(ebx, ecx);
   // Now jump to the place where smi keys are handled.
   __ jmp(&index_smi);
 }
@@ -1872,6 +1858,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   __ j(not_equal, &miss, not_taken);
 
   // Check that elements are FixedArray.
+  // We rely on StoreIC_ArrayLength below to deal with all types of
+  // fast elements (including COW).
   __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
   __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
   __ j(not_equal, &miss, not_taken);
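GenerateFastArrayLoad, as extended above, either checks the elements map itself or merely asserts fast elements when the caller has already tested the map bit. The control flow reduces to something like this sketch; the types and the hole marker are simplified stand-ins, and the hole check corresponds to a part of the helper not shown in this hunk:

    #include <cstdio>
    #include <vector>

    // Simplified model of the fast-case keyed load: bounds check against
    // the backing store length, then load and test for the hole, bailing
    // out to the slow path on either failure.
    enum LoadResult { LOAD_OK, OUT_OF_RANGE, SLOW };

    const int kHole = -1;  // stand-in for the one-word hole marker

    LoadResult FastArrayLoad(const std::vector<int>& elements, size_t key,
                             int* result) {
      if (key >= elements.size()) return OUT_OF_RANGE;  // j(above_equal, ...)
      int value = elements[key];
      if (value == kHole) return SLOW;    // holey element: go to slow path
      *result = value;
      return LOAD_OK;
    }

    int main() {
      std::vector<int> elements = {10, kHole, 30};
      int v = 0;
      printf("load[0] -> %d (v=%d)\n", FastArrayLoad(elements, 0, &v), v);
      printf("load[1] -> %d\n", FastArrayLoad(elements, 1, &v));
      printf("load[9] -> %d\n", FastArrayLoad(elements, 9, &v));
      return 0;
    }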

259
deps/v8/src/ia32/macro-assembler-ia32.cc

@@ -191,81 +191,6 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
-void MacroAssembler::SaveRegistersToMemory(RegList regs) {
-  ASSERT((regs & ~kJSCallerSaved) == 0);
-  // Copy the content of registers to memory location.
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    if ((regs & (1 << r)) != 0) {
-      Register reg = { r };
-      ExternalReference reg_addr =
-          ExternalReference(Debug_Address::Register(i));
-      mov(Operand::StaticVariable(reg_addr), reg);
-    }
-  }
-}
-
-void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
-  ASSERT((regs & ~kJSCallerSaved) == 0);
-  // Copy the content of memory location to registers.
-  for (int i = kNumJSCallerSaved; --i >= 0;) {
-    int r = JSCallerSavedCode(i);
-    if ((regs & (1 << r)) != 0) {
-      Register reg = { r };
-      ExternalReference reg_addr =
-          ExternalReference(Debug_Address::Register(i));
-      mov(reg, Operand::StaticVariable(reg_addr));
-    }
-  }
-}
-
-void MacroAssembler::PushRegistersFromMemory(RegList regs) {
-  ASSERT((regs & ~kJSCallerSaved) == 0);
-  // Push the content of the memory location to the stack.
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    if ((regs & (1 << r)) != 0) {
-      ExternalReference reg_addr =
-          ExternalReference(Debug_Address::Register(i));
-      push(Operand::StaticVariable(reg_addr));
-    }
-  }
-}
-
-void MacroAssembler::PopRegistersToMemory(RegList regs) {
-  ASSERT((regs & ~kJSCallerSaved) == 0);
-  // Pop the content from the stack to the memory location.
-  for (int i = kNumJSCallerSaved; --i >= 0;) {
-    int r = JSCallerSavedCode(i);
-    if ((regs & (1 << r)) != 0) {
-      ExternalReference reg_addr =
-          ExternalReference(Debug_Address::Register(i));
-      pop(Operand::StaticVariable(reg_addr));
-    }
-  }
-}
-
-void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
-                                                    Register scratch,
-                                                    RegList regs) {
-  ASSERT((regs & ~kJSCallerSaved) == 0);
-  // Copy the content of the stack to the memory location and adjust base.
-  for (int i = kNumJSCallerSaved; --i >= 0;) {
-    int r = JSCallerSavedCode(i);
-    if ((regs & (1 << r)) != 0) {
-      mov(scratch, Operand(base, 0));
-      ExternalReference reg_addr =
-          ExternalReference(Debug_Address::Register(i));
-      mov(Operand::StaticVariable(reg_addr), scratch);
-      lea(base, Operand(base, kPointerSize));
-    }
-  }
-}
-
 void MacroAssembler::DebugBreak() {
   Set(eax, Immediate(0));
   mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak)));
@@ -274,6 +199,7 @@ void MacroAssembler::DebugBreak() {
 }
 #endif
 
+
 void MacroAssembler::Set(Register dst, const Immediate& x) {
   if (x.is_zero()) {
     xor_(dst, Operand(dst));  // shorter than mov
@@ -377,6 +303,17 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
 }
 
+void MacroAssembler::AbortIfNotString(Register object) {
+  test(object, Immediate(kSmiTagMask));
+  Assert(not_equal, "Operand is not a string");
+  push(object);
+  mov(object, FieldOperand(object, HeapObject::kMapOffset));
+  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
+  pop(object);
+  Assert(below, "Operand is not a string");
+}
+
 void MacroAssembler::AbortIfSmi(Register object) {
   test(object, Immediate(kSmiTagMask));
   Assert(not_equal, "Operand is a smi");
@@ -405,7 +342,8 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   leave();
 }
 
-void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
+
+void MacroAssembler::EnterExitFramePrologue() {
   // Setup the frame structure on the stack.
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -413,7 +351,7 @@ void MacroAssembler::EnterExitFramePrologue() {
   push(ebp);
   mov(ebp, Operand(esp));
 
-  // Reserve room for entry stack pointer and push the debug marker.
+  // Reserve room for entry stack pointer and push the code object.
   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   push(Immediate(0));  // Saved entry sp, patched before call.
   push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.
@@ -425,21 +363,8 @@ void MacroAssembler::EnterExitFramePrologue() {
   mov(Operand::StaticVariable(context_address), esi);
 }
 
-void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Save the state of all registers to the stack from the memory
-  // location. This is needed to allow nested break points.
-  if (mode == ExitFrame::MODE_DEBUG) {
-    // TODO(1243899): This should be symmetric to
-    // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
-    // correct here, but computed for the other call. Very error
-    // prone! FIX THIS. Actually there are deeper problems with
-    // register saving than this asymmetry (see the bug report
-    // associated with this issue).
-    PushRegistersFromMemory(kJSCallerSaved);
-  }
-#endif
-
+void MacroAssembler::EnterExitFrameEpilogue(int argc) {
   // Reserve space for arguments.
   sub(Operand(esp), Immediate(argc * kPointerSize));
@@ -455,44 +380,30 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc) {
 }
 
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
-  EnterExitFramePrologue(mode);
+
+void MacroAssembler::EnterExitFrame() {
+  EnterExitFramePrologue();
 
   // Setup argc and argv in callee-saved registers.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
   mov(edi, Operand(eax));
   lea(esi, Operand(ebp, eax, times_4, offset));
 
-  EnterExitFrameEpilogue(mode, 2);
+  EnterExitFrameEpilogue(2);
 }
 
-void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
-                                       int stack_space,
+
+void MacroAssembler::EnterApiExitFrame(int stack_space,
                                        int argc) {
-  EnterExitFramePrologue(mode);
+  EnterExitFramePrologue();
 
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
   lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
 
-  EnterExitFrameEpilogue(mode, argc);
+  EnterExitFrameEpilogue(argc);
 }
 
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Restore the memory copy of the registers by digging them out from
-  // the stack. This is needed to allow nested break points.
-  if (mode == ExitFrame::MODE_DEBUG) {
-    // It's okay to clobber register ebx below because we don't need
-    // the function pointer after this.
-    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
-    int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
-    lea(ebx, Operand(ebp, kOffset));
-    CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
-  }
-#endif
-
+
+void MacroAssembler::LeaveExitFrame() {
   // Get the return address from the stack and restore the frame pointer.
   mov(ecx, Operand(ebp, 1 * kPointerSize));
   mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -871,6 +782,31 @@ void MacroAssembler::AllocateAsciiString(Register result,
 }
 
+
+void MacroAssembler::AllocateAsciiString(Register result,
+                                         int length,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Label* gc_required) {
+  ASSERT(length > 0);
+
+  // Allocate ascii string in new space.
+  AllocateInNewSpace(SeqAsciiString::SizeFor(length),
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  mov(FieldOperand(result, HeapObject::kMapOffset),
+      Immediate(Factory::ascii_string_map()));
+  mov(FieldOperand(result, String::kLengthOffset),
+      Immediate(Smi::FromInt(length)));
+  mov(FieldOperand(result, String::kHashFieldOffset),
+      Immediate(String::kEmptyHashField));
+}
+
+
 void MacroAssembler::AllocateConsString(Register result,
                                         Register scratch1,
                                         Register scratch2,
@@ -1040,6 +976,25 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
 }
 
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+  // The assert checks that the constants for the maximum number of digits
+  // for an array index cached in the hash field and the number of bits
+  // reserved for it does not conflict.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << String::kArrayIndexValueBits));
+  // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
+  // the low kHashShift bits.
+  and_(hash, String::kArrayIndexValueMask);
+  STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0);
+  if (String::kHashShift > kSmiTagSize) {
+    shr(hash, String::kHashShift - kSmiTagSize);
+  }
+  if (!index.is(hash)) {
+    mov(index, hash);
+  }
+}
+
+
 void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
   CallRuntime(Runtime::FunctionForId(id), num_arguments);
 }
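IndexFromHash, added above, pulls a cached array index out of a string's hash field and leaves it smi-tagged by shifting one bit less than the full hash shift. In scalar form, with the field-layout constants as illustrative assumptions mirroring the kArrayIndexValueMask/kHashShift names:

    #include <cstdint>
    #include <cstdio>

    // Assumed layout of the hash field: low bits are flags, the cached
    // array index sits above them. Masking and then shifting so that
    // exactly one low bit remains yields the index already smi-tagged,
    // as the comment in the assembly notes.
    const uint32_t kHashShift = 2;                       // illustrative
    const uint32_t kArrayIndexValueMask = 0xfffffffcu;   // zeros in low bits
    const uint32_t kSmiTagSize = 1;

    uint32_t IndexFromHash(uint32_t hash) {
      hash &= kArrayIndexValueMask;          // isolate the cached index bits
      hash >>= (kHashShift - kSmiTagSize);   // leave one shift bit: smi tag
      return hash;                           // smi-tagged index
    }

    int main() {
      uint32_t hash = 7u << kHashShift;      // a hash field caching index 7
      printf("smi-tagged index = %u (value %u)\n",
             IndexFromHash(hash), IndexFromHash(hash) >> kSmiTagSize);
      return 0;
    }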
@@ -1298,11 +1253,10 @@ void MacroAssembler::InvokeFunction(Register fun,
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
   mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   SmiUntag(ebx);
-  mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
-  lea(edx, FieldOperand(edx, Code::kHeaderSize));
 
   ParameterCount expected(ebx);
-  InvokeCode(Operand(edx), expected, actual, flag);
+  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+             expected, actual, flag);
 }
@@ -1313,7 +1267,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
   // Get the function and setup the context.
   mov(edi, Immediate(Handle<JSFunction>(function)));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
   // Invoke the cached code.
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1329,33 +1282,26 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
   // arguments match the expected number of arguments. Fake a
   // parameter count to avoid emitting code to do the check.
   ParameterCount expected(0);
-  GetBuiltinEntry(edx, id);
-  InvokeCode(Operand(edx), expected, expected, flag);
+  GetBuiltinFunction(edi, id);
+  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+             expected, expected, flag);
 }
 
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  ASSERT(!target.is(edi));
-
-  // Load the builtins object into target register.
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+                                        Builtins::JavaScript id) {
+  // Load the JavaScript builtin function from the builtins object.
   mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
+  mov(target, FieldOperand(target,
+                           JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  ASSERT(!target.is(edi));
   // Load the JavaScript builtin function from the builtins object.
-  mov(edi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)));
-
-  // Load the code entry point from the builtins object.
-  mov(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id)));
-  if (FLAG_debug_code) {
-    // Make sure the code objects in the builtins object and in the
-    // builtin function are the same.
-    push(target);
-    mov(target, FieldOperand(edi, JSFunction::kCodeOffset));
-    cmp(target, Operand(esp, 0));
-    Assert(equal, "Builtin code object changed");
-    pop(target);
-  }
-  lea(target, FieldOperand(target, Code::kHeaderSize));
+  GetBuiltinFunction(edi, id);
+  // Load the code entry point from the function into the target register.
+  mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
 }
@@ -1378,6 +1324,30 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
 }
 
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+  // Load the global or builtins object from the current context.
+  mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  mov(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  mov(function, Operand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+                                                  Register map) {
+  // Load the initial map. The global functions all have initial maps.
+  mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  if (FLAG_debug_code) {
+    Label ok, fail;
+    CheckMap(map, Factory::meta_map(), &fail, false);
+    jmp(&ok);
+    bind(&fail);
+    Abort("Global functions must have initial map");
+    bind(&ok);
+  }
+}
+
+
 void MacroAssembler::Ret() {
   ret(0);
@@ -1464,6 +1434,21 @@ void MacroAssembler::Assert(Condition cc, const char* msg) {
 }
 
+
+void MacroAssembler::AssertFastElements(Register elements) {
+  if (FLAG_debug_code) {
+    Label ok;
+    cmp(FieldOperand(elements, HeapObject::kMapOffset),
+        Immediate(Factory::fixed_array_map()));
+    j(equal, &ok);
+    cmp(FieldOperand(elements, HeapObject::kMapOffset),
+        Immediate(Factory::fixed_cow_array_map()));
+    j(equal, &ok);
+    Abort("JSObject with fast elements map has slow elements");
+    bind(&ok);
+  }
+}
+
+
 void MacroAssembler::Check(Condition cc, const char* msg) {
   Label L;
   j(cc, &L, taken);
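AssertFastElements accepts either the ordinary fixed-array map or the new copy-on-write variant, while the store paths elsewhere in this commit keep checking fixed_array_map alone ("fast mode and writable"). A sketch of the same two-map distinction; the enum values are stand-ins for the real map pointers:

    #include <cassert>
    #include <cstdio>

    // Fast elements come in two flavors: a plain FixedArray backing store
    // and a shared copy-on-write one. Anything else (e.g. a dictionary)
    // must not reach code that assumes fast elements.
    enum ElementsMap { FIXED_ARRAY_MAP, FIXED_COW_ARRAY_MAP, DICTIONARY_MAP };

    void AssertFastElements(ElementsMap map) {
      assert((map == FIXED_ARRAY_MAP || map == FIXED_COW_ARRAY_MAP) &&
             "JSObject with fast elements map has slow elements");
    }

    // Writes are only safe on the non-COW map; a COW store must first copy.
    bool IsWritableFastElements(ElementsMap map) {
      return map == FIXED_ARRAY_MAP;
    }

    int main() {
      AssertFastElements(FIXED_COW_ARRAY_MAP);  // fine for reads
      printf("cow writable? %d\n",
             IsWritableFastElements(FIXED_COW_ARRAY_MAP) ? 1 : 0);
      return 0;
    }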

43
deps/v8/src/ia32/macro-assembler-ia32.h

@@ -99,13 +99,6 @@ class MacroAssembler: public Assembler {
   // ---------------------------------------------------------------------------
   // Debugger Support
 
-  void SaveRegistersToMemory(RegList regs);
-  void RestoreRegistersFromMemory(RegList regs);
-  void PushRegistersFromMemory(RegList regs);
-  void PopRegistersToMemory(RegList regs);
-  void CopyRegistersFromStackToMemory(Register base,
-                                      Register scratch,
-                                      RegList regs);
   void DebugBreak();
 #endif
@@ -128,18 +121,25 @@ class MacroAssembler: public Assembler {
   // Expects the number of arguments in register eax and
   // sets up the number of arguments in register edi and the pointer
   // to the first argument in register esi.
-  void EnterExitFrame(ExitFrame::Mode mode);
+  void EnterExitFrame();
 
-  void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);
+  void EnterApiExitFrame(int stack_space, int argc);
 
   // Leave the current exit frame. Expects the return value in
   // register eax:edx (untouched) and the pointer to the first
   // argument in register esi.
-  void LeaveExitFrame(ExitFrame::Mode mode);
+  void LeaveExitFrame();
 
   // Find the function context up the context chain.
   void LoadContext(Register dst, int context_chain_length);
 
+  // Load the global function with the given index.
+  void LoadGlobalFunction(int index, Register function);
+
+  // Load the initial map from the global function. The registers
+  // function and map can be the same.
+  void LoadGlobalFunctionInitialMap(Register function, Register map);
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
@@ -169,6 +169,9 @@ class MacroAssembler: public Assembler {
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
 
+  // Store the function for the given builtin in the target register.
+  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
   // Store the code object for the given builtin in the target register.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
@@ -264,6 +267,9 @@ class MacroAssembler: public Assembler {
   // Abort execution if argument is a smi. Used in debug code.
   void AbortIfSmi(Register object);
 
+  // Abort execution if argument is a string. Used in debug code.
+  void AbortIfNotString(Register object);
+
   // ---------------------------------------------------------------------------
   // Exception handling
@@ -350,6 +356,11 @@ class MacroAssembler: public Assembler {
                            Register scratch2,
                            Register scratch3,
                            Label* gc_required);
+  void AllocateAsciiString(Register result,
+                           int length,
+                           Register scratch1,
+                           Register scratch2,
+                           Label* gc_required);
 
   // Allocate a raw cons string object. Only the map field of the result is
   // initialized.
@@ -393,6 +404,12 @@ class MacroAssembler: public Assembler {
   // occurred.
   void IllegalOperation(int num_arguments);
 
+  // Picks out an array index from the hash field.
+  // Register use:
+  //   hash - holds the index's hash. Clobbered.
+  //   index - holds the overwritten index on exit.
+  void IndexFromHash(Register hash, Register index);
+
   // ---------------------------------------------------------------------------
   // Runtime calls
@@ -508,6 +525,8 @@ class MacroAssembler: public Assembler {
   // Use --debug_code to enable.
   void Assert(Condition cc, const char* msg);
 
+  void AssertFastElements(Register elements);
+
   // Like Assert(), but always enabled.
   void Check(Condition cc, const char* msg);
@@ -559,8 +578,8 @@ class MacroAssembler: public Assembler {
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
 
-  void EnterExitFramePrologue(ExitFrame::Mode mode);
-  void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
+  void EnterExitFramePrologue();
+  void EnterExitFrameEpilogue(int argc);
 
   // Allocation support helpers.
   void LoadAllocationTopHelper(Register result,

2
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@@ -31,11 +31,9 @@
 #include "unicode.h"
 #include "log.h"
-#include "ast.h"
 #include "regexp-stack.h"
 #include "macro-assembler.h"
 #include "regexp-macro-assembler.h"
-#include "ia32/macro-assembler-ia32.h"
 #include "ia32/regexp-macro-assembler-ia32.h"
 
 namespace v8 {

27
deps/v8/src/ia32/stub-cache-ia32.cc

@@ -257,16 +257,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
                                                        int index,
                                                        Register prototype) {
-  // Load the global or builtins object from the current context.
-  __ mov(prototype, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  // Load the global context from the global or builtins object.
-  __ mov(prototype,
-         FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
-  // Load the function from the global context.
-  __ mov(prototype, Operand(prototype, Context::SlotOffset(index)));
-  // Load the initial map. The global functions all have initial maps.
-  __ mov(prototype,
-         FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  __ LoadGlobalFunction(index, prototype);
+  __ LoadGlobalFunctionInitialMap(prototype, prototype);
   // Load the prototype from the initial map.
   __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
 }
@@ -1366,16 +1358,18 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
     __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
     __ ret((argc + 1) * kPointerSize);
   } else {
+    Label call_builtin;
+
     // Get the elements array of the object.
     __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
 
-    // Check that the elements are in fast mode (not dictionary).
+    // Check that the elements are in fast mode and writable.
     __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
            Immediate(Factory::fixed_array_map()));
-    __ j(not_equal, &miss);
+    __ j(not_equal, &call_builtin);
 
     if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
+      Label exit, with_write_barrier, attempt_to_grow_elements;
 
       // Get the array's length into eax and calculate new length.
       __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1456,10 +1450,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
 
       // Elements are in new space, so write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
-
-      __ bind(&call_builtin);
     }
 
+    __ bind(&call_builtin);
     __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush),
                                  argc + 1,
                                  1);
@@ -1511,10 +1504,10 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
   // Get the elements array of the object.
   __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
 
-  // Check that the elements are in fast mode (not dictionary).
+  // Check that the elements are in fast mode and writable.
   __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
          Immediate(Factory::fixed_array_map()));
-  __ j(not_equal, &miss);
+  __ j(not_equal, &call_builtin);
 
   // Get the array's length into ecx and calculate new length.
   __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));

4
deps/v8/src/ia32/virtual-frame-ia32.cc

@@ -1143,9 +1143,9 @@ Result VirtualFrame::CallConstructor(int arg_count) {
   // and receiver on the stack.
   Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
   // Duplicate the function before preparing the frame.
-  PushElementAt(arg_count + 1);
+  PushElementAt(arg_count);
   Result function = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill args and receiver.
+  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill function and args.
   function.ToRegister(edi);
 
   // Constructors are called with the number of arguments in register

4
deps/v8/src/ic-inl.h

@@ -108,10 +108,10 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
 }
 
-Map* IC::GetCodeCacheMap(Object* object, InlineCacheHolderFlag holder) {
+JSObject* IC::GetCodeCacheHolder(Object* object, InlineCacheHolderFlag holder) {
   Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
   ASSERT(map_owner->IsJSObject());
-  return JSObject::cast(map_owner)->map();
+  return JSObject::cast(map_owner);
 }

6
deps/v8/src/ic.cc

@@ -165,14 +165,14 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
   if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
     // The stub was generated for JSObject but called for non-JSObject.
-    // IC::GetCodeCacheMap is not applicable.
+    // IC::GetCodeCacheHolder is not applicable.
     return MONOMORPHIC;
   } else if (cache_holder == PROTOTYPE_MAP &&
              receiver->GetPrototype()->IsNull()) {
-    // IC::GetCodeCacheMap is not applicable.
+    // IC::GetCodeCacheHolder is not applicable.
    return MONOMORPHIC;
   }
-  Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
+  Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map();
 
   // Decide whether the inline cache failed because of changes to the
   // receiver itself or changes to one of its prototypes.

2
deps/v8/src/ic.h

@@ -123,7 +123,7 @@ class IC {
                                                     JSObject* holder);
   static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
                                                             JSObject* holder);
-  static inline Map* GetCodeCacheMap(Object* object,
-                                     InlineCacheHolderFlag holder);
+  static inline JSObject* GetCodeCacheHolder(Object* object,
+                                             InlineCacheHolderFlag holder);
 
  protected:

16
deps/v8/src/json.js

@@ -68,15 +68,13 @@ function JSONParse(text, reviver) {
 }
 
 var characterQuoteCache = {
+  '\b': '\\b',  // ASCII 8, Backspace
+  '\t': '\\t',  // ASCII 9, Tab
+  '\n': '\\n',  // ASCII 10, Newline
+  '\f': '\\f',  // ASCII 12, Formfeed
+  '\r': '\\r',  // ASCII 13, Carriage Return
   '\"': '\\"',
-  '\\': '\\\\',
-  '/': '\\/',
-  '\b': '\\b',
-  '\f': '\\f',
-  '\n': '\\n',
-  '\r': '\\r',
-  '\t': '\\t',
-  '\x0B': '\\u000b'
+  '\\': '\\\\'
 };
 
 function QuoteSingleJSONCharacter(c) {
@@ -95,7 +93,7 @@ function QuoteSingleJSONCharacter(c) {
 }
 
 function QuoteJSONString(str) {
-  var quotable = /[\\\"\x00-\x1f\x80-\uffff]/g;
+  var quotable = /[\\\"\x00-\x1f]/g;
   return '"' + str.replace(quotable, QuoteSingleJSONCharacter) + '"';
 }
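The json.js change narrows the quotable class to backslash, quote, and ASCII control characters, and drops the unnecessary '/' and '\x0B' cache entries on the assumption that QuoteSingleJSONCharacter falls back to a \u00xx escape for uncached control characters. A C++ sketch of the same escaping policy (the fallback formatting is this sketch's assumption about what the JS helper produces):

    #include <cstdio>
    #include <string>

    // Escape a string for JSON output: named escapes for the common
    // control characters, \u00xx for the rest of 0x00-0x1f, and
    // backslash-escapes for quote and backslash. Everything else,
    // including non-ASCII, passes through untouched.
    std::string QuoteJSONString(const std::string& str) {
      std::string out = "\"";
      for (unsigned char c : str) {
        switch (c) {
          case '\b': out += "\\b"; break;
          case '\t': out += "\\t"; break;
          case '\n': out += "\\n"; break;
          case '\f': out += "\\f"; break;
          case '\r': out += "\\r"; break;
          case '"':  out += "\\\""; break;
          case '\\': out += "\\\\"; break;
          default:
            if (c < 0x20) {                // remaining control characters
              char buf[8];
              snprintf(buf, sizeof(buf), "\\u%04x", c);
              out += buf;
            } else {
              out += static_cast<char>(c);
            }
        }
      }
      return out + "\"";
    }

    int main() {
      printf("%s\n", QuoteJSONString("a\tb\"c\x0b").c_str());
      return 0;
    }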

11
deps/v8/src/jsregexp.cc

@@ -380,10 +380,11 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
 }
 
-RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(Handle<JSRegExp> regexp,
+RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
+    Handle<JSRegExp> regexp,
     Handle<String> subject,
     int index,
-    Vector<int> output) {
+    Vector<int32_t> output) {
   Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
 
   ASSERT(index >= 0);
@@ -478,10 +479,8 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
 
     OffsetsVector registers(required_registers);
 
-    IrregexpResult res = IrregexpExecOnce(jsregexp,
-                                          subject,
-                                          previous_index,
-                                          Vector<int>(registers.vector(),
+    IrregexpResult res = RegExpImpl::IrregexpExecOnce(
+        jsregexp, subject, previous_index, Vector<int32_t>(registers.vector(),
                                                       registers.length()));
     if (res == RE_SUCCESS) {
       int capture_register_count =

4
deps/v8/src/jump-target-heavy.h

@@ -117,7 +117,7 @@ class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
   // the target and the fall-through.
   virtual void Branch(Condition cc, Hint hint = no_hint);
   virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-  virtual void Branch(Condition cc,
+  void Branch(Condition cc,
               Result* arg0,
              Result* arg1,
              Hint hint = no_hint);
@@ -127,7 +127,7 @@ class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
   // jump.
   virtual void Bind();
   virtual void Bind(Result* arg);
-  virtual void Bind(Result* arg0, Result* arg1);
+  void Bind(Result* arg0, Result* arg1);
 
   // Emit a call to a jump target. There must be a current frame at
   // the call. The frame at the target is the same as the current

50
deps/v8/src/liveedit.cc

@@ -739,7 +739,7 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
     Handle<String> name_handle(String::cast(info->name()));
     info_wrapper.SetProperties(name_handle, info->start_position(),
                                info->end_position(), info);
-    array->SetElement(i, *(info_wrapper.GetJSArray()));
+    SetElement(array, i, info_wrapper.GetJSArray());
   }
 }
@@ -750,7 +750,7 @@ void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
 class ReferenceCollectorVisitor : public ObjectVisitor {
  public:
   explicit ReferenceCollectorVisitor(Code* original)
-      : original_(original), rvalues_(10), reloc_infos_(10) {
+      : original_(original), rvalues_(10), reloc_infos_(10), code_entries_(10) {
   }
 
   virtual void VisitPointers(Object** start, Object** end) {
@@ -761,7 +761,13 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
     }
   }
 
-  void VisitCodeTarget(RelocInfo* rinfo) {
+  virtual void VisitCodeEntry(Address entry) {
+    if (Code::GetObjectFromEntryAddress(entry) == original_) {
+      code_entries_.Add(entry);
+    }
+  }
+
+  virtual void VisitCodeTarget(RelocInfo* rinfo) {
     if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
         Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
       reloc_infos_.Add(*rinfo);
@@ -778,8 +784,13 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
     for (int i = 0; i < rvalues_.length(); i++) {
       *(rvalues_[i]) = substitution;
     }
+    Address substitution_entry = substitution->instruction_start();
     for (int i = 0; i < reloc_infos_.length(); i++) {
-      reloc_infos_[i].set_target_address(substitution->instruction_start());
+      reloc_infos_[i].set_target_address(substitution_entry);
+    }
+    for (int i = 0; i < code_entries_.length(); i++) {
+      Address entry = code_entries_[i];
+      Memory::Address_at(entry) = substitution_entry;
     }
   }
@@ -787,28 +798,10 @@ class ReferenceCollectorVisitor : public ObjectVisitor {
   Code* original_;
   ZoneList<Object**> rvalues_;
   ZoneList<RelocInfo> reloc_infos_;
+  ZoneList<Address> code_entries_;
 };
 
-class FrameCookingThreadVisitor : public ThreadVisitor {
- public:
-  void VisitThread(ThreadLocalTop* top) {
-    StackFrame::CookFramesForThread(top);
-  }
-};
-
-class FrameUncookingThreadVisitor : public ThreadVisitor {
- public:
-  void VisitThread(ThreadLocalTop* top) {
-    StackFrame::UncookFramesForThread(top);
-  }
-};
-
-static void IterateAllThreads(ThreadVisitor* visitor) {
-  Top::IterateThread(visitor);
-  ThreadManager::IterateArchivedThreads(visitor);
-}
-
 // Finds all references to original and replaces them with substitution.
 static void ReplaceCodeObject(Code* original, Code* substitution) {
   ASSERT(!Heap::InNewSpace(substitution));
@@ -824,13 +817,7 @@ static void ReplaceCodeObject(Code* original, Code* substitution) {
   // so temporary replace the pointers with offset numbers
   // in prologue/epilogue.
   {
-    FrameCookingThreadVisitor cooking_visitor;
-    IterateAllThreads(&cooking_visitor);
-
     Heap::IterateStrongRoots(&visitor, VISIT_ALL);
-
-    FrameUncookingThreadVisitor uncooking_visitor;
-    IterateAllThreads(&uncooking_visitor);
   }
 
   // Now iterate over all pointers of all objects, including code_target
@@ -1372,8 +1359,9 @@ static const char* DropActivationsInActiveThread(
   for (int i = 0; i < array_len; i++) {
     if (result->GetElement(i) ==
         Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
-      result->SetElement(i, Smi::FromInt(
-          LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
+      Handle<Object> replaced(
+          Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
+      SetElement(result, i, replaced);
     }
   }
   return NULL;
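ReferenceCollectorVisitor now also collects raw code-entry slots and later patches each of them to the replacement's instruction start, alongside the relocated call targets. In miniature, with addresses modeled as plain pointers rather than heap words:

    #include <cstdio>
    #include <vector>

    // Minimal model: every collected slot holds the entry address of the
    // old code object; patching writes the substitution's entry over each.
    typedef const char* Address;  // stand-in for a code entry address

    struct Collector {
      std::vector<Address*> code_entries_;

      void VisitCodeEntry(Address* slot, Address original) {
        if (*slot == original) code_entries_.push_back(slot);
      }
      void PatchTo(Address substitution_entry) {
        for (Address* slot : code_entries_) *slot = substitution_entry;
      }
    };

    int main() {
      Address old_entry = "old-code";
      Address new_entry = "new-code";
      Address slot1 = old_entry, slot2 = old_entry;

      Collector c;
      c.VisitCodeEntry(&slot1, old_entry);
      c.VisitCodeEntry(&slot2, old_entry);
      c.PatchTo(new_entry);
      printf("slot1=%s slot2=%s\n", slot1, slot2);
      return 0;
    }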

4
deps/v8/src/log.cc

@@ -30,6 +30,7 @@
 #include "v8.h"
 
 #include "bootstrapper.h"
+#include "code-stubs.h"
 #include "global-handles.h"
 #include "log.h"
 #include "macro-assembler.h"
@@ -1266,7 +1267,8 @@ void Logger::LogCodeObject(Object* object) {
       case Code::BINARY_OP_IC:
         // fall through
       case Code::STUB:
-        description = CodeStub::MajorName(code_object->major_key(), true);
+        description =
+            CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
         if (description == NULL)
           description = "A stub from the snapshot";
         tag = Logger::STUB_TAG;

27
deps/v8/src/macro-assembler.h

@@ -83,4 +83,31 @@ const int kInvalidProtoDepth = -1;
 #error Unsupported target architecture.
 #endif
 
+namespace v8 {
+namespace internal {
+
+// Support for "structured" code comments.
+#ifdef DEBUG
+
+class Comment {
+ public:
+  Comment(MacroAssembler* masm, const char* msg);
+  ~Comment();
+
+ private:
+  MacroAssembler* masm_;
+  const char* msg_;
+};
+
+#else
+
+class Comment {
+ public:
+  Comment(MacroAssembler*, const char*) {}
+};
+
+#endif  // DEBUG
+
+} }  // namespace v8::internal
+
 #endif  // V8_MACRO_ASSEMBLER_H_
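The Comment helper hoisted into macro-assembler.h is a small RAII wrapper: constructing it records an opening marker in the generated code and the destructor records a closing one, so a C++ scope brackets the emitted instructions. A usage sketch against a stubbed-out assembler; the MacroAssembler here and the "]" end marker are stand-ins for illustration (compile with -DDEBUG to see the markers):

    #include <cstdio>

    // Stand-in assembler with just enough surface for the example.
    struct MacroAssembler {
      void RecordComment(const char* msg) { printf("%s\n", msg); }
    };

    #ifdef DEBUG
    class Comment {
     public:
      Comment(MacroAssembler* masm, const char* msg)
          : masm_(masm), msg_(msg) {
        masm_->RecordComment(msg_);              // emitted on scope entry
      }
      ~Comment() { masm_->RecordComment("]"); }  // emitted on scope exit
     private:
      MacroAssembler* masm_;
      const char* msg_;
    };
    #else
    class Comment {
     public:
      Comment(MacroAssembler*, const char*) {}   // compiles away in release
    };
    #endif

    int main() {
      MacroAssembler masm;
      {
        Comment cmnt(&masm, "[ Allocate string");
        // ... code generation for this section would go here ...
      }
      return 0;
    }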

2
deps/v8/src/macros.py

@@ -120,7 +120,7 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
 # Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
 macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
-macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
+macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
 macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
 macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
 macro TO_UINT32(arg) = (arg >>> 0);
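The macros.py change routes the non-smi case of TO_INTEGER through %NumberToInteger(ToNumber(arg)) instead of the generic ToInteger. The numeric conversion itself is the usual ECMA-262 ToInteger: truncate toward zero with NaN mapped to 0. Sketched here under the assumption that the runtime call matches that spec behavior:

    #include <cmath>
    #include <cstdio>

    // ToInteger on an already-converted number: NaN becomes 0, everything
    // else is truncated toward zero (infinities pass through unchanged).
    double NumberToInteger(double number) {
      if (std::isnan(number)) return 0.0;
      return std::trunc(number);
    }

    int main() {
      printf("%g %g %g %g\n",
             NumberToInteger(4.9),     // 4
             NumberToInteger(-4.9),    // -4
             NumberToInteger(NAN),     // 0
             NumberToInteger(-0.0));   // -0
      return 0;
    }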

315 deps/v8/src/mark-compact.cc

@@ -27,6 +27,7 @@
 #include "v8.h"

+#include "compilation-cache.h"
 #include "execution.h"
 #include "heap-profiler.h"
 #include "global-handles.h"
@@ -84,11 +85,15 @@ void MarkCompactCollector::CollectGarbage() {
     GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);

     EncodeForwardingAddresses();

+    Heap::MarkMapPointersAsEncoded(true);
     UpdatePointers();
+    Heap::MarkMapPointersAsEncoded(false);
+    PcToCodeCache::FlushPcToCodeCache();

     RelocateObjects();
   } else {
     SweepSpaces();
+    PcToCodeCache::FlushPcToCodeCache();
   }

   Finish();
@@ -252,6 +257,14 @@ class StaticMarkingVisitor : public StaticVisitorBase {
     table_.GetVisitor(map)(map, obj);
   }

+  static void EnableCodeFlushing(bool enabled) {
+    if (enabled) {
+      table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
+    } else {
+      table_.Register(kVisitJSFunction, &VisitJSFunction);
+    }
+  }
+
   static void Initialize() {
     table_.Register(kVisitShortcutCandidate,
                     &FixedBodyVisitor<StaticMarkingVisitor,
@@ -289,6 +302,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
     table_.Register(kVisitCode, &VisitCode);

+    table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
+
     table_.Register(kVisitPropertyCell,
                     &FixedBodyVisitor<StaticMarkingVisitor,
                                       JSGlobalPropertyCell::BodyDescriptor,
@@ -405,6 +420,160 @@ class StaticMarkingVisitor : public StaticVisitorBase {
     reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
   }

+  // Code flushing support.
+
+  // How many collections a newly compiled code object will survive before
+  // being flushed.
+  static const int kCodeAgeThreshold = 5;
+
+  inline static bool HasSourceCode(SharedFunctionInfo* info) {
+    Object* undefined = Heap::raw_unchecked_undefined_value();
+    return (info->script() != undefined) &&
+        (reinterpret_cast<Script*>(info->script())->source() != undefined);
+  }
+
+  inline static bool IsCompiled(JSFunction* function) {
+    return
+        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+  }
+
+  inline static bool IsCompiled(SharedFunctionInfo* function) {
+    return
+        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
+  }
+
+  static void FlushCodeForFunction(JSFunction* function) {
+    SharedFunctionInfo* shared_info = function->unchecked_shared();
+
+    if (shared_info->IsMarked()) return;
+
+    // Special handling if the function and shared info objects
+    // have different code objects.
+    if (function->unchecked_code() != shared_info->unchecked_code()) {
+      // If the shared function has been flushed but the function has not,
+      // we flush the function if possible.
+      if (!IsCompiled(shared_info) &&
+          IsCompiled(function) &&
+          !function->unchecked_code()->IsMarked()) {
+        function->set_code(shared_info->unchecked_code());
+      }
+      return;
+    }
+
+    // Code is either on stack or in compilation cache.
+    if (shared_info->unchecked_code()->IsMarked()) {
+      shared_info->set_code_age(0);
+      return;
+    }
+
+    // The function must be compiled and have the source code available,
+    // to be able to recompile it in case we need the function again.
+    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return;
+
+    // We never flush code for Api functions.
+    Object* function_data = shared_info->function_data();
+    if (function_data->IsHeapObject() &&
+        (SafeMap(function_data)->instance_type() ==
+         FUNCTION_TEMPLATE_INFO_TYPE)) {
+      return;
+    }
+
+    // Only flush code for functions.
+    if (shared_info->code()->kind() != Code::FUNCTION) return;
+
+    // Function must be lazy compilable.
+    if (!shared_info->allows_lazy_compilation()) return;
+
+    // If this is a full script wrapped in a function we do not flush the code.
+    if (shared_info->is_toplevel()) return;
+
+    // Age this shared function info.
+    if (shared_info->code_age() < kCodeAgeThreshold) {
+      shared_info->set_code_age(shared_info->code_age() + 1);
+      return;
+    }
+
+    // Compute the lazy compilable version of the code.
+    Code* code = Builtins::builtin(Builtins::LazyCompile);
+    shared_info->set_code(code);
+    function->set_code(code);
+  }
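
The aging policy in FlushCodeForFunction above reduces to a small state machine: if the code was reached during marking, reset the age; otherwise age it once per collection until kCodeAgeThreshold is hit, then drop back to the lazy-compile stub. A compilable sketch of just that counter (toy types; only the threshold value and the reset/age/flush branches mirror the diff):

#include <cstdio>

static const int kCodeAgeThreshold = 5;  // value taken from the diff above

struct SharedInfoToy {
  bool code_marked;  // was the code reached during this marking phase?
  int code_age;
  bool flushed;
};

// Mirrors only the reset/age/flush branches of FlushCodeForFunction.
void AgeAndMaybeFlush(SharedInfoToy* s) {
  if (s->flushed) return;    // already pointing at the lazy stub
  if (s->code_marked) {      // code on the stack or in the compilation cache
    s->code_age = 0;
    return;
  }
  if (s->code_age < kCodeAgeThreshold) {
    s->code_age++;           // not old enough yet: just age it
    return;
  }
  s->flushed = true;         // would set code to Builtins::LazyCompile
}

int main() {
  SharedInfoToy s = {false, 0, false};
  for (int gc = 1; gc <= 6; ++gc) {
    AgeAndMaybeFlush(&s);
    std::printf("gc %d: age=%d flushed=%d\n", gc, s.code_age, s.flushed);
  }
  return 0;
}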
+  static inline Map* SafeMap(Object* obj) {
+    MapWord map_word = HeapObject::cast(obj)->map_word();
+    map_word.ClearMark();
+    map_word.ClearOverflow();
+    return map_word.ToMap();
+  }
+
+  static inline bool IsJSBuiltinsObject(Object* obj) {
+    return obj->IsHeapObject() &&
+        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
+  }
+
+  static inline bool IsValidNotBuiltinContext(Object* ctx) {
+    if (!ctx->IsHeapObject()) return false;
+
+    Map* map = SafeMap(ctx);
+    if (!(map == Heap::raw_unchecked_context_map() ||
+          map == Heap::raw_unchecked_catch_context_map() ||
+          map == Heap::raw_unchecked_global_context_map())) {
+      return false;
+    }
+
+    Context* context = reinterpret_cast<Context*>(ctx);
+
+    if (IsJSBuiltinsObject(context->global())) {
+      return false;
+    }
+
+    return true;
+  }
+
+  static void VisitCodeEntry(Address entry_address) {
+    Object* code = Code::GetObjectFromEntryAddress(entry_address);
+    Object* old_code = code;
+    VisitPointer(&code);
+    if (code != old_code) {
+      Memory::Address_at(entry_address) =
+          reinterpret_cast<Code*>(code)->entry();
+    }
+  }
+
+  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
+    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
+    // The function must have a valid context and not be a builtin.
+    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
+      FlushCodeForFunction(jsfunction);
+    }
+    VisitJSFunction(map, object);
+  }
+
+  static void VisitJSFunction(Map* map, HeapObject* object) {
+#define SLOT_ADDR(obj, offset) \
+    reinterpret_cast<Object**>((obj)->address() + offset)
+
+    VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
+                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
+
+    VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+
+    VisitPointers(SLOT_ADDR(object,
+                            JSFunction::kCodeEntryOffset + kPointerSize),
+                  SLOT_ADDR(object, JSFunction::kSize));
+#undef SLOT_ADDR
+  }
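
VisitJSFunction above deliberately walks the function's fields in two runs on either side of kCodeEntryOffset: that slot, as VisitCodeEntry's use of Code::GetObjectFromEntryAddress suggests, holds a raw entry address into the generated code rather than a tagged object pointer, so it must be translated to the Code object before marking and rewritten if the code moved. A toy of that translation (the header size is invented):

#include <cassert>
#include <cstdint>

// Assumed for illustration: the entry point sits kHeaderSize bytes past the
// start of the Code object (the real offset is V8-internal).
static const std::uintptr_t kHeaderSize = 16;

std::uintptr_t ObjectFromEntry(std::uintptr_t entry) {
  return entry - kHeaderSize;   // analogous to GetObjectFromEntryAddress
}

std::uintptr_t EntryFromObject(std::uintptr_t object) {
  return object + kHeaderSize;  // inverse, used to rewrite the slot
}

int main() {
  std::uintptr_t code = 0x1000;         // address of the Code object
  std::uintptr_t entry_slot = EntryFromObject(code);
  assert(ObjectFromEntry(entry_slot) == code);

  std::uintptr_t moved = 0x2000;        // the collector relocated the code
  entry_slot = EntryFromObject(moved);  // VisitCodeEntry's write-back step
  assert(ObjectFromEntry(entry_slot) == moved);
  return 0;
}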
   typedef void (*Callback)(Map* map, HeapObject* object);

   static VisitorDispatchTable<Callback> table_;
@@ -435,6 +604,66 @@ class MarkingVisitor : public ObjectVisitor {
 };

+class CodeMarkingVisitor : public ThreadVisitor {
+ public:
+  void VisitThread(ThreadLocalTop* top) {
+    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
+      MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
+    }
+  }
+};
+
+class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) VisitPointer(p);
+  }
+
+  void VisitPointer(Object** slot) {
+    Object* obj = *slot;
+    if (obj->IsHeapObject()) {
+      MarkCompactCollector::MarkObject(HeapObject::cast(obj));
+    }
+  }
+};
+
+void MarkCompactCollector::PrepareForCodeFlushing() {
+  if (!FLAG_flush_code) {
+    StaticMarkingVisitor::EnableCodeFlushing(false);
+    return;
+  }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  if (Debug::IsLoaded() || Debug::has_break_points()) {
+    StaticMarkingVisitor::EnableCodeFlushing(false);
+    return;
+  }
+#endif
+  StaticMarkingVisitor::EnableCodeFlushing(true);
+
+  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
+  // relies on it being marked before any other descriptor array.
+  MarkObject(Heap::raw_unchecked_empty_descriptor_array());
+
+  // Make sure we are not referencing the code from the stack.
+  for (StackFrameIterator it; !it.done(); it.Advance()) {
+    MarkObject(it.frame()->unchecked_code());
+  }
+
+  // Iterate the archived stacks in all threads to check if
+  // the code is referenced.
+  CodeMarkingVisitor code_marking_visitor;
+  ThreadManager::IterateArchivedThreads(&code_marking_visitor);
+
+  SharedFunctionInfoMarkingVisitor visitor;
+  CompilationCache::IterateFunctions(&visitor);
+
+  ProcessMarkingStack();
+}
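
Note how EnableCodeFlushing and PrepareForCodeFlushing above switch behavior: rather than testing a flag on every object visit, they re-register which callback the static dispatch table holds for kVisitJSFunction. The same trick in miniature:

#include <cstdio>

typedef void (*Callback)(const char* name);

void VisitPlain(const char* name) {
  std::printf("visit %s\n", name);
}

void VisitAndFlushCode(const char* name) {
  std::printf("visit %s and consider flushing its code\n", name);
}

// One slot standing in for the kVisitJSFunction entry of table_.
Callback table[1] = {&VisitPlain};

void EnableCodeFlushing(bool enabled) {
  table[0] = enabled ? &VisitAndFlushCode : &VisitPlain;
}

int main() {
  table[0]("f");              // plain visit
  EnableCodeFlushing(true);   // swap the registered callback once...
  table[0]("f");              // ...so per-object visits never test a flag
  return 0;
}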
 // Visitor class for marking heap roots.
 class RootMarkingVisitor : public ObjectVisitor {
  public:
@@ -793,6 +1022,8 @@ void MarkCompactCollector::MarkLiveObjects() {
   ASSERT(!marking_stack.overflowed());

+  PrepareForCodeFlushing();
+
   RootMarkingVisitor root_visitor;
   MarkRoots(&root_visitor);
@@ -962,8 +1193,6 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
 // pair of distinguished invalid map encodings (for single word and multiple
 // words) to indicate free regions in the page found during computation of
 // forwarding addresses and skipped over in subsequent sweeps.
-static const uint32_t kSingleFreeEncoding = 0;
-static const uint32_t kMultiFreeEncoding = 1;

 // Encode a free region, defined by the given start address and size, in the
@@ -971,10 +1200,10 @@ static const uint32_t kMultiFreeEncoding = 1;
 void EncodeFreeRegion(Address free_start, int free_size) {
   ASSERT(free_size >= kIntSize);
   if (free_size == kIntSize) {
-    Memory::uint32_at(free_start) = kSingleFreeEncoding;
+    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
   } else {
     ASSERT(free_size >= 2 * kIntSize);
-    Memory::uint32_at(free_start) = kMultiFreeEncoding;
+    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
     Memory::int_at(free_start + kIntSize) = free_size;
   }
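
The two encodings referenced above mark free holes found during forwarding-address computation: a hole of exactly one word stores only kSingleFreeEncoding, while a larger hole stores kMultiFreeEncoding followed by its total size so later sweeps can skip it. A compilable sketch, simulating page memory with an array and assuming a 4-byte kIntSize:

#include <cassert>
#include <cstdint>

static const std::uint32_t kSingleFreeEncoding = 0;
static const std::uint32_t kMultiFreeEncoding = 1;
static const int kIntSize = 4;  // assumed 32-bit word, as in the original

void EncodeFreeRegion(std::uint32_t* free_start, int free_size) {
  assert(free_size >= kIntSize);
  if (free_size == kIntSize) {
    free_start[0] = kSingleFreeEncoding;   // one-word hole: marker only
  } else {
    assert(free_size >= 2 * kIntSize);
    free_start[0] = kMultiFreeEncoding;    // bigger hole: marker + total size
    free_start[1] = static_cast<std::uint32_t>(free_size);
  }
}

int main() {
  std::uint32_t page[4] = {0, 0, 0, 0};
  EncodeFreeRegion(page, 16);              // 16-byte hole -> {1, 16, ...}
  assert(page[0] == kMultiFreeEncoding && page[1] == 16u);

  EncodeFreeRegion(page, 4);               // 4-byte hole -> {0, ...}
  assert(page[0] == kSingleFreeEncoding);
  return 0;
}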
@@ -1404,7 +1633,7 @@ static void SweepNewSpace(NewSpace* space) {
 }

-static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
+static void SweepSpace(PagedSpace* space) {
   PageIterator it(space, PageIterator::PAGES_IN_USE);

   // During sweeping of paged space we are trying to find longest sequences
@@ -1445,10 +1674,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
         MarkCompactCollector::tracer()->decrement_marked_count();

       if (!is_previous_alive) {  // Transition from free to live.
-        dealloc(free_start,
-                static_cast<int>(current - free_start),
-                true,
-                false);
+        space->DeallocateBlock(free_start,
+                               static_cast<int>(current - free_start),
+                               true);
         is_previous_alive = true;
       }
     } else {
@@ -1478,7 +1706,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
       // without putting anything into free list.
       int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
       if (size_in_bytes > 0) {
-        dealloc(free_start, size_in_bytes, false, true);
+        space->DeallocateBlock(free_start, size_in_bytes, false);
       }
     }
   } else {
@@ -1494,7 +1722,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
       if (last_free_size > 0) {
         Page::FromAddress(last_free_start)->
             SetAllocationWatermark(last_free_start);
-        dealloc(last_free_start, last_free_size, true, true);
+        space->DeallocateBlock(last_free_start, last_free_size, true);
         last_free_start = NULL;
         last_free_size = 0;
       }
@@ -1525,7 +1753,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
       // There was a free ending area on the previous page.
       // Deallocate it without putting it into freelist and move allocation
       // top to the beginning of this free area.
-      dealloc(last_free_start, last_free_size, false, true);
+      space->DeallocateBlock(last_free_start, last_free_size, false);
       new_allocation_top = last_free_start;
     }
@@ -1546,61 +1774,6 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
 }

-void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
-                                                     int size_in_bytes,
-                                                     bool add_to_freelist,
-                                                     bool last_on_page) {
-  Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-void MarkCompactCollector::DeallocateOldDataBlock(Address start,
-                                                  int size_in_bytes,
-                                                  bool add_to_freelist,
-                                                  bool last_on_page) {
-  Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-void MarkCompactCollector::DeallocateCodeBlock(Address start,
-                                               int size_in_bytes,
-                                               bool add_to_freelist,
-                                               bool last_on_page) {
-  Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-void MarkCompactCollector::DeallocateMapBlock(Address start,
-                                              int size_in_bytes,
-                                              bool add_to_freelist,
-                                              bool last_on_page) {
-  // Objects in map space are assumed to have size Map::kSize and a
-  // valid map in their first word. Thus, we break the free block up into
-  // chunks and free them separately.
-  ASSERT(size_in_bytes % Map::kSize == 0);
-  Address end = start + size_in_bytes;
-  for (Address a = start; a < end; a += Map::kSize) {
-    Heap::map_space()->Free(a, add_to_freelist);
-  }
-}
-
-void MarkCompactCollector::DeallocateCellBlock(Address start,
-                                               int size_in_bytes,
-                                               bool add_to_freelist,
-                                               bool last_on_page) {
-  // Free-list elements in cell space are assumed to have a fixed size.
-  // We break the free block into chunks and add them to the free list
-  // individually.
-  int size = Heap::cell_space()->object_size_in_bytes();
-  ASSERT(size_in_bytes % size == 0);
-  Address end = start + size_in_bytes;
-  for (Address a = start; a < end; a += size) {
-    Heap::cell_space()->Free(a, add_to_freelist);
-  }
-}
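
All five deleted callbacks above funnel into a space-specific Free, differing only in whether the block is handed over whole or chopped into fixed-size cells (the map- and cell-space variants). The SweepSpace hunks show their replacement: a DeallocateBlock method on the space object itself. Its real body is not in this excerpt, so the sketch below is only an assumed shape, showing how virtual dispatch can absorb the per-space variants:

#include <cstdio>

typedef char* Address;

struct PagedSpace {
  virtual ~PagedSpace() {}
  // Default: hand the block to the space's free list in one piece, as the
  // deleted old-pointer/old-data/code variants did.
  virtual void DeallocateBlock(Address start, int size_in_bytes,
                               bool add_to_freelist) {
    Free(start, size_in_bytes, add_to_freelist);
  }
  virtual void Free(Address start, int size_in_bytes, bool add_to_freelist) {
    (void)start;
    std::printf("free %d bytes (freelist=%d)\n", size_in_bytes,
                static_cast<int>(add_to_freelist));
  }
};

struct MapSpaceToy : PagedSpace {
  static const int kObjectSize = 88;  // stand-in for Map::kSize
  // Fixed-size spaces split the block into object-sized cells, mirroring
  // the deleted DeallocateMapBlock/DeallocateCellBlock loops above.
  virtual void DeallocateBlock(Address start, int size_in_bytes,
                               bool add_to_freelist) {
    for (Address a = start; a < start + size_in_bytes; a += kObjectSize)
      Free(a, kObjectSize, add_to_freelist);
  }
};

int main() {
  char block[176];
  MapSpaceToy maps;
  PagedSpace* space = &maps;
  // SweepSpace no longer needs a callback; virtual dispatch picks the
  // right per-space policy.
  space->DeallocateBlock(block, static_cast<int>(sizeof(block)), true);
  return 0;
}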
 void MarkCompactCollector::EncodeForwardingAddresses() {
   ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
   // Objects in the active semispace of the young generation may be
@@ -1865,14 +2038,14 @@ void MarkCompactCollector::SweepSpaces() {
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
-  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
-  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
-  SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
+  SweepSpace(Heap::old_pointer_space());
+  SweepSpace(Heap::old_data_space());
+  SweepSpace(Heap::code_space());
+  SweepSpace(Heap::cell_space());
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
     SweepNewSpace(Heap::new_space());
   }
-  SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+  SweepSpace(Heap::map_space());

   Heap::IterateDirtyRegions(Heap::map_space(),
                             &Heap::IteratePointersInDirtyMapsRegion,

Some files were not shown because too many files changed in this diff
