Browse Source

Upgrade V8 to 3.0.8

v0.7.4-release
Ryan Dahl 14 years ago
parent
commit
cf2e4f44af
  1. 971
      deps/v8/0001-Patch-for-Cygwin.patch
  2. 103
      deps/v8/0003-Fix-Solaris-build.patch
  3. 56
      deps/v8/ChangeLog
  4. 4
      deps/v8/SConstruct
  5. 69
      deps/v8/include/v8.h
  6. 11
      deps/v8/src/SConscript
  7. 6
      deps/v8/src/accessors.cc
  8. 56
      deps/v8/src/api.cc
  9. 6
      deps/v8/src/arm/assembler-arm.cc
  10. 21
      deps/v8/src/arm/assembler-arm.h
  11. 4
      deps/v8/src/arm/builtins-arm.cc
  12. 74
      deps/v8/src/arm/code-stubs-arm.cc
  13. 43
      deps/v8/src/arm/code-stubs-arm.h
  14. 21
      deps/v8/src/arm/codegen-arm.cc
  15. 5
      deps/v8/src/arm/deoptimizer-arm.cc
  16. 13
      deps/v8/src/arm/full-codegen-arm.cc
  17. 2
      deps/v8/src/arm/ic-arm.cc
  18. 466
      deps/v8/src/arm/lithium-arm.cc
  19. 416
      deps/v8/src/arm/lithium-arm.h
  20. 1262
      deps/v8/src/arm/lithium-codegen-arm.cc
  21. 39
      deps/v8/src/arm/lithium-codegen-arm.h
  22. 84
      deps/v8/src/arm/macro-assembler-arm.cc
  23. 66
      deps/v8/src/arm/macro-assembler-arm.h
  24. 10
      deps/v8/src/arm/regexp-macro-assembler-arm.cc
  25. 12
      deps/v8/src/arm/regexp-macro-assembler-arm.h
  26. 12
      deps/v8/src/arm/simulator-arm.cc
  27. 6
      deps/v8/src/arm/stub-cache-arm.cc
  28. 50
      deps/v8/src/array.js
  29. 12
      deps/v8/src/assembler.cc
  30. 5
      deps/v8/src/assembler.h
  31. 34
      deps/v8/src/ast.cc
  32. 10
      deps/v8/src/ast.h
  33. 67
      deps/v8/src/builtins.cc
  34. 34
      deps/v8/src/code-stubs.cc
  35. 60
      deps/v8/src/code-stubs.h
  36. 23
      deps/v8/src/compiler.cc
  37. 6
      deps/v8/src/compiler.h
  38. 3
      deps/v8/src/cpu-profiler.cc
  39. 19
      deps/v8/src/d8-debug.cc
  40. 6
      deps/v8/src/d8-debug.h
  41. 3
      deps/v8/src/d8.cc
  42. 611
      deps/v8/src/d8.js
  43. 7
      deps/v8/src/data-flow.h
  44. 2
      deps/v8/src/date.js
  45. 33
      deps/v8/src/debug-agent.cc
  46. 6
      deps/v8/src/debug-agent.h
  47. 167
      deps/v8/src/debug-debugger.js
  48. 11
      deps/v8/src/debug.cc
  49. 10
      deps/v8/src/debug.h
  50. 14
      deps/v8/src/deoptimizer.cc
  51. 6
      deps/v8/src/deoptimizer.h
  52. 11
      deps/v8/src/disassembler.cc
  53. 50
      deps/v8/src/extensions/experimental/experimental.gyp
  54. 8
      deps/v8/src/factory.cc
  55. 2
      deps/v8/src/factory.h
  56. 12
      deps/v8/src/flag-definitions.h
  57. 50
      deps/v8/src/frames.cc
  58. 6
      deps/v8/src/frames.h
  59. 13
      deps/v8/src/globals.h
  60. 13
      deps/v8/src/handles.cc
  61. 11
      deps/v8/src/handles.h
  62. 78
      deps/v8/src/heap-inl.h
  63. 76
      deps/v8/src/heap.cc
  64. 19
      deps/v8/src/heap.h
  65. 5
      deps/v8/src/hydrogen-instructions.cc
  66. 176
      deps/v8/src/hydrogen-instructions.h
  67. 246
      deps/v8/src/hydrogen.cc
  68. 124
      deps/v8/src/hydrogen.h
  69. 45
      deps/v8/src/ia32/assembler-ia32.cc
  70. 4
      deps/v8/src/ia32/assembler-ia32.h
  71. 8
      deps/v8/src/ia32/builtins-ia32.cc
  72. 196
      deps/v8/src/ia32/code-stubs-ia32.cc
  73. 11
      deps/v8/src/ia32/code-stubs-ia32.h
  74. 351
      deps/v8/src/ia32/codegen-ia32.cc
  75. 2
      deps/v8/src/ia32/debug-ia32.cc
  76. 23
      deps/v8/src/ia32/deoptimizer-ia32.cc
  77. 28
      deps/v8/src/ia32/disasm-ia32.cc
  78. 373
      deps/v8/src/ia32/full-codegen-ia32.cc
  79. 15
      deps/v8/src/ia32/ic-ia32.cc
  80. 512
      deps/v8/src/ia32/lithium-codegen-ia32.cc
  81. 34
      deps/v8/src/ia32/lithium-codegen-ia32.h
  82. 678
      deps/v8/src/ia32/lithium-ia32.cc
  83. 859
      deps/v8/src/ia32/lithium-ia32.h
  84. 98
      deps/v8/src/ia32/macro-assembler-ia32.cc
  85. 23
      deps/v8/src/ia32/macro-assembler-ia32.h
  86. 6
      deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
  87. 1
      deps/v8/src/ia32/stub-cache-ia32.cc
  88. 93
      deps/v8/src/json.js
  89. 6
      deps/v8/src/jsregexp.cc
  90. 2
      deps/v8/src/jsregexp.h
  91. 57
      deps/v8/src/lithium-allocator.cc
  92. 57
      deps/v8/src/lithium-allocator.h
  93. 93
      deps/v8/src/lithium.cc
  94. 169
      deps/v8/src/lithium.h
  95. 12
      deps/v8/src/liveedit-debugger.js
  96. 123
      deps/v8/src/liveedit.cc
  97. 9
      deps/v8/src/liveedit.h
  98. 3
      deps/v8/src/log.cc
  99. 2
      deps/v8/src/macros.py
  100. 42
      deps/v8/src/math.js

971
deps/v8/0001-Patch-for-Cygwin.patch

@@ -1,971 +0,0 @@
From 0ed25321cf3d2ca54dd148cc58e79712b73b9b3e Mon Sep 17 00:00:00 2001
From: unknown <cyg_server@.(none)>
Date: Fri, 12 Nov 2010 00:15:30 +0100
Subject: [PATCH] Patch for Cygwin
---
SConstruct | 3 +-
src/SConscript | 1 +
src/platform-cygwin.cc | 865 ++++++++++++++++++++++++++++++++++++++++++++++++
src/platform.h | 5 +
src/utils.h | 2 +-
tools/utils.py | 2 +
6 files changed, 875 insertions(+), 3 deletions(-)
create mode 100644 src/platform-cygwin.cc
diff --git a/SConstruct b/SConstruct
index 820c1a1..7107e91 100644
--- a/SConstruct
+++ b/SConstruct
@@ -292,7 +292,6 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
- '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@@ -666,7 +665,7 @@ SIMPLE_OPTIONS = {
'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
},
'os': {
- 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
+ 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
'default': OS_GUESS,
'help': 'the os to build for (%s)' % OS_GUESS
},
diff --git a/src/SConscript b/src/SConscript
index 8995d48..ef5485d 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -204,6 +204,7 @@ SOURCES = {
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
+ 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
new file mode 100644
index 0000000..ad0ad8c
--- /dev/null
+++ b/src/platform-cygwin.cc
@@ -0,0 +1,865 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for Cygwin goes here. For the POSIX compatible parts
+// the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <stdlib.h>
+
+// Ubuntu Dapper requires memory pages to be marked as
+// executable. Otherwise, OS raises an exception when executing code
+// in that page.
+#include <sys/types.h> // mmap & munmap
+#include <sys/mman.h> // mmap & munmap
+#include <sys/stat.h> // open
+#include <fcntl.h> // open
+#include <unistd.h> // sysconf
+#ifdef __GLIBC__
+#include <execinfo.h> // backtrace, backtrace_symbols
+#endif // def __GLIBC__
+#include <strings.h> // index
+#include <errno.h>
+#include <stdarg.h>
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+#include "top.h"
+#include "v8threads.h"
+
+
+namespace v8 {
+namespace internal {
+
+// 0 is never a valid thread id on Linux since tids and pids share a
+// name space and pid 0 is reserved (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+ return ceil(x);
+}
+
+
+void OS::Setup() {
+ // Seed the random number generator.
+ // Convert the current time to a 64-bit integer first, before converting it
+ // to an unsigned. Going directly can cause an overflow and the seed to be
+ // set to all ones. The seed will be identical for different instances that
+ // call this setup code within the same millisecond.
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+ srandom(static_cast<unsigned int>(seed));
+}
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+#if (defined(__VFP_FP__) && !defined(__SOFTFP__))
+ // Here gcc is telling us that we are on an ARM and gcc is assuming that we
+ // have VFP3 instructions. If gcc can assume it then so can we.
+ return 1u << VFP3;
+#elif CAN_USE_ARMV7_INSTRUCTIONS
+ return 1u << ARMv7;
+#else
+ return 0; // Linux runs on anything.
+#endif
+}
+
+
+#ifdef __arm__
+bool OS::ArmCpuHasFeature(CpuFeature feature) {
+ const char* search_string = NULL;
+ const char* file_name = "/proc/cpuinfo";
+ // Simple detection of VFP at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to ARM (mid 2009), no similar
+ // facility is universally available on the ARM architectures,
+ // so it's up to individual OSes to provide such.
+ //
+ // This is written as a straight shot one pass parser
+ // and not using STL string and ifstream because,
+ // on Linux, it's reading from a (non-mmap-able)
+ // character special device.
+ switch (feature) {
+ case VFP3:
+ search_string = "vfp";
+ break;
+ case ARMv7:
+ search_string = "ARMv7";
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ FILE* f = NULL;
+ const char* what = search_string;
+
+ if (NULL == (f = fopen(file_name, "r")))
+ return false;
+
+ int k;
+ while (EOF != (k = fgetc(f))) {
+ if (k == *what) {
+ ++what;
+ while ((*what != '\0') && (*what == fgetc(f))) {
+ ++what;
+ }
+ if (*what == '\0') {
+ fclose(f);
+ return true;
+ } else {
+ what = search_string;
+ }
+ }
+ }
+ fclose(f);
+
+ // Did not find string in the proc file.
+ return false;
+}
+#endif // def __arm__
+
+
+int OS::ActivationFrameAlignment() {
+#ifdef V8_TARGET_ARCH_ARM
+ // On EABI ARM targets this is required for fp correctness in the
+ // runtime system.
+ return 8;
+#elif V8_TARGET_ARCH_MIPS
+ return 8;
+#endif
+ // With gcc 4.4 the tree vectorization optimizer can generate code
+ // that requires 16 byte alignment such as movdqa on x86.
+ return 16;
+}
+
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ // An x86 store acts as a release barrier.
+ *ptr = value;
+}
+
+const char* OS::LocalTimezone(double time) {
+ if (isnan(time)) return "";
+ time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+ struct tm* t = localtime(&tv);
+ if (NULL == t) return "";
+ return tzname[0]; // The location of the timezone string on Cygwin.
+}
+
+
+double OS::LocalTimeOffset() {
+ //
+ // On Cygwin, struct tm does not contain a tm_gmtoff field.
+ time_t utc = time(NULL);
+ ASSERT(utc != -1);
+ struct tm* loc = localtime(&utc);
+ ASSERT(loc != NULL);
+ return static_cast<double>((mktime(loc) - utc) * msPerSecond);
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification). The estimate is conservative, ie, not all addresses in
+// 'allocated' space are actually allocated to our heap. The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ lowest_ever_allocated = Min(lowest_ever_allocated, address);
+ highest_ever_allocated =
+ Max(highest_ever_allocated,
+ reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+ return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+ return sysconf(_SC_PAGESIZE);
+}
+
+
+void* OS::Allocate(const size_t requested,
+ size_t* allocated,
+ bool is_executable) {
+ const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (mbase == MAP_FAILED) {
+ LOG(StringEvent("OS::Allocate", "mmap failed"));
+ return NULL;
+ }
+ *allocated = msize;
+ UpdateAllocatedSpaceLimits(mbase, msize);
+ return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+ // TODO(1240712): munmap has a return value which is ignored here.
+ int result = munmap(address, size);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+ // TODO(1240712): mprotect has a return value which is ignored here.
+ mprotect(address, size, PROT_READ);
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+ // TODO(1240712): mprotect has a return value which is ignored here.
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ mprotect(address, size, prot);
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+ unsigned int ms = static_cast<unsigned int>(milliseconds);
+ usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+ // Redirect to std abort to signal abnormal program termination.
+ abort();
+}
+
+
+void OS::DebugBreak() {
+// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
+// which is the architecture of generated code).
+#if (defined(__arm__) || defined(__thumb__)) && \
+ defined(CAN_USE_ARMV5_INSTRUCTIONS)
+ asm("bkpt 0");
+#elif defined(__mips__)
+ asm("break");
+#else
+ asm("int $3");
+#endif
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+ PosixMemoryMappedFile(FILE* file, void* memory, int size)
+ : file_(file), memory_(memory), size_(size) { }
+ virtual ~PosixMemoryMappedFile();
+ virtual void* memory() { return memory_; }
+ private:
+ FILE* file_;
+ void* memory_;
+ int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+ void* initial) {
+ FILE* file = fopen(name, "w+");
+ if (file == NULL) return NULL;
+ int result = fwrite(initial, size, 1, file);
+ if (result < 1) {
+ fclose(file);
+ return NULL;
+ }
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+ if (memory_) munmap(memory_, size_);
+ fclose(file_);
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // This function assumes that the layout of the file is as follows:
+ // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+ // If we encounter an unexpected situation we abort scanning further entries.
+ FILE* fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) return;
+
+ // Allocate enough room to be able to store a full file name.
+ const int kLibNameLen = FILENAME_MAX + 1;
+ char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+ // This loop will terminate once the scanning hits an EOF.
+ while (true) {
+ uintptr_t start, end;
+ char attr_r, attr_w, attr_x, attr_p;
+ // Parse the addresses and permission bits at the beginning of the line.
+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+ int c;
+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+ // Found a read-only executable entry. Skip characters until we reach
+ // the beginning of the filename or the end of the line.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n') && (c != '/'));
+ if (c == EOF) break; // EOF: Was unexpected, just exit.
+
+ // Process the filename if found.
+ if (c == '/') {
+ ungetc(c, fp); // Push the '/' back into the stream to be read below.
+
+ // Read to the end of the line. Exit if the read fails.
+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+ // Drop the newline character read by fgets. We do not need to check
+ // for a zero-length string because we know that we at least read the
+ // '/' character.
+ lib_name[strlen(lib_name) - 1] = '\0';
+ } else {
+ // No library name found, just record the raw address range.
+ snprintf(lib_name, kLibNameLen,
+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ }
+ LOG(SharedLibraryEvent(lib_name, start, end));
+ } else {
+ // Entry not describing executable data. Skip to end of line to setup
+ // reading the next entry.
+ do {
+ c = getc(fp);
+ } while ((c != EOF) && (c != '\n'));
+ if (c == EOF) break;
+ }
+ }
+ free(lib_name);
+ fclose(fp);
+#endif
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+ // backtrace is a glibc extension.
+#ifdef __GLIBC__
+ int frames_size = frames.length();
+ ScopedVector<void*> addresses(frames_size);
+
+ int frames_count = backtrace(addresses.start(), frames_size);
+
+ char** symbols = backtrace_symbols(addresses.start(), frames_count);
+ if (symbols == NULL) {
+ return kStackWalkError;
+ }
+
+ for (int i = 0; i < frames_count; i++) {
+ frames[i].address = addresses[i];
+ // Format a text representation of the frame based on the information
+ // available.
+ SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+ "%s",
+ symbols[i]);
+ // Make sure line termination is in place.
+ frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+ }
+
+ free(symbols);
+
+ return frames_count;
+#else // ndef __GLIBC__
+ return 0;
+#endif // ndef __GLIBC__
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = mmap(NULL, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+ kMmapFd, kMmapFdOffset);
+ size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+ if (IsReserved()) {
+ if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+ }
+}
+
+
+bool VirtualMemory::IsReserved() {
+ return address_ != MAP_FAILED;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+
+#ifdef HAS_MAP_FIXED
+ if (MAP_FAILED == mmap(address, size, prot,
+ MAP_PRIVATE | MAP_ANONYMOUS, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED
+ kMmapFd, kMmapFdOffset)) {
+ return false;
+ }
+#else
+ if (mprotect(address, size, prot) != 0) {
+ return false;
+ }
+#endif
+
+ UpdateAllocatedSpaceLimits(address, size);
+ return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return mmap(address, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED
+ kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+ explicit PlatformData(ThreadHandle::Kind kind) {
+ Initialize(kind);
+ }
+
+ void Initialize(ThreadHandle::Kind kind) {
+ switch (kind) {
+ case ThreadHandle::SELF: thread_ = pthread_self(); break;
+ case ThreadHandle::INVALID: thread_ = kNoThread; break;
+ }
+ }
+
+ pthread_t thread_; // Thread handle for pthread.
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+ data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+ data_->Initialize(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+ delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+ return pthread_equal(data_->thread_, pthread_self());
+}
+
+
+bool ThreadHandle::IsValid() const {
+ return data_->thread_ != kNoThread;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+}
+
+
+Thread::~Thread() {
+}
+
+
+static void* ThreadEntry(void* arg) {
+ Thread* thread = reinterpret_cast<Thread*>(arg);
+ // This is also initialized by the first argument to pthread_create() but we
+ // don't know which thread will run first (the original thread or the new
+ // one) so we initialize it here too.
+ thread->thread_handle_data()->thread_ = pthread_self();
+ ASSERT(thread->IsValid());
+ thread->Run();
+ return NULL;
+}
+
+
+void Thread::Start() {
+ pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+ ASSERT(IsValid());
+}
+
+
+void Thread::Join() {
+ pthread_join(thread_handle_data()->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+ pthread_key_t key;
+ int result = pthread_key_create(&key, NULL);
+ USE(result);
+ ASSERT(result == 0);
+ return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ int result = pthread_key_delete(pthread_key);
+ USE(result);
+ ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+ pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+ pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+ sched_yield();
+}
+
+
+class CygwinMutex : public Mutex {
+ public:
+
+ CygwinMutex() {
+ pthread_mutexattr_t attrs;
+ memset(&attrs, 0, sizeof(attrs));
+
+ int result = pthread_mutexattr_init(&attrs);
+ ASSERT(result == 0);
+ result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT(result == 0);
+ result = pthread_mutex_init(&mutex_, &attrs);
+ ASSERT(result == 0);
+ }
+
+ virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); }
+
+ virtual int Lock() {
+ int result = pthread_mutex_lock(&mutex_);
+ return result;
+ }
+
+ virtual int Unlock() {
+ int result = pthread_mutex_unlock(&mutex_);
+ return result;
+ }
+
+ private:
+ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
+};
+
+
+Mutex* OS::CreateMutex() {
+ return new CygwinMutex();
+}
+
+
+class CygwinSemaphore : public Semaphore {
+ public:
+ explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); }
+ virtual ~CygwinSemaphore() { sem_destroy(&sem_); }
+
+ virtual void Wait();
+ virtual bool Wait(int timeout);
+ virtual void Signal() { sem_post(&sem_); }
+ private:
+ sem_t sem_;
+};
+
+
+void CygwinSemaphore::Wait() {
+ while (true) {
+ int result = sem_wait(&sem_);
+ if (result == 0) return; // Successfully got semaphore.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
+ (ts)->tv_sec = (tv)->tv_sec; \
+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \
+} while (false)
+#endif
+
+
+bool CygwinSemaphore::Wait(int timeout) {
+ const long kOneSecondMicros = 1000000; // NOLINT
+
+ // Split timeout into second and nanosecond parts.
+ struct timeval delta;
+ delta.tv_usec = timeout % kOneSecondMicros;
+ delta.tv_sec = timeout / kOneSecondMicros;
+
+ struct timeval current_time;
+ // Get the current time.
+ if (gettimeofday(&current_time, NULL) == -1) {
+ return false;
+ }
+
+ // Calculate time for end of timeout.
+ struct timeval end_time;
+ timeradd(&current_time, &delta, &end_time);
+
+ struct timespec ts;
+ TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+ // Wait for semaphore signalled or timeout.
+ while (true) {
+ int result = sem_timedwait(&sem_, &ts);
+ if (result == 0) return true; // Successfully got semaphore.
+ if (result > 0) {
+ // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1.
+ errno = result;
+ result = -1;
+ }
+ if (result == -1 && errno == ETIMEDOUT) return false; // Timeout.
+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup.
+ }
+}
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+ return new CygwinSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Sampler* active_sampler_ = NULL;
+static pthread_t vm_thread_ = 0;
+
+
+#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
+// Android runs a fairly new Linux kernel, so signal info is there,
+// but the C library doesn't have the structs defined.
+
+struct sigcontext {
+ uint32_t trap_no;
+ uint32_t error_code;
+ uint32_t oldmask;
+ uint32_t gregs[16];
+ uint32_t arm_cpsr;
+ uint32_t fault_address;
+};
+typedef uint32_t __sigset_t;
+typedef struct sigcontext mcontext_t;
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ __sigset_t uc_sigmask;
+} ucontext_t;
+enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
+
+#endif
+
+
+// A function that determines if a signal handler is called in the context
+// of a VM thread.
+//
+// The problem is that SIGPROF signal can be delivered to an arbitrary thread
+// (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2)
+// So, if the signal is being handled in the context of a non-VM thread,
+// it means that the VM thread is running, and trying to sample its stack can
+// cause a crash.
+static inline bool IsVmThread() {
+ // In the case of a single VM thread, this check is enough.
+ if (pthread_equal(pthread_self(), vm_thread_)) return true;
+ // If there are multiple threads that use VM, they must have a thread id
+ // stored in TLS. To verify that the thread is really executing VM,
+ // we check Top's data. Having that ThreadManager::RestoreThread first
+ // restores ThreadLocalTop from TLS, and only then erases the TLS value,
+ // reading Top::thread_id() should not be affected by races.
+ if (ThreadManager::HasId() && !ThreadManager::IsArchived() &&
+ ThreadManager::CurrentId() == Top::thread_id()) {
+ return true;
+ }
+ return false;
+}
+
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+#ifndef V8_HOST_ARCH_MIPS
+ USE(info);
+ if (signal != SIGPROF) return;
+ if (active_sampler_ == NULL) return;
+
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent();
+ if (sample == NULL) sample = &sample_obj;
+
+ // We always sample the VM state.
+ sample->state = VMState::current_state();
+
+#if 0
+ // If profiling, we extract the current pc and sp.
+ if (active_sampler_->IsProfiling()) {
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+#elif V8_HOST_ARCH_ARM
+// An undefined macro evaluates to 0, so this applies to Android's Bionic also.
+#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+#else
+ sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
+ sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
+ sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
+#endif
+#elif V8_HOST_ARCH_MIPS
+ // Implement this on MIPS.
+ UNIMPLEMENTED();
+#endif
+ if (IsVmThread()) {
+ active_sampler_->SampleStack(sample);
+ }
+ }
+#endif
+
+ active_sampler_->Tick(sample);
+#endif
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData() {
+ signal_handler_installed_ = false;
+ }
+
+ bool signal_handler_installed_;
+ struct sigaction old_signal_handler_;
+ struct itimerval old_timer_value_;
+};
+
+
+Sampler::Sampler(int interval, bool profiling)
+ : interval_(interval),
+ profiling_(profiling),
+ synchronous_(profiling),
+ active_(false) {
+ data_ = new PlatformData();
+}
+
+
+Sampler::~Sampler() {
+ delete data_;
+}
+
+
+void Sampler::Start() {
+ // There can only be one active sampler at the time on POSIX
+ // platforms.
+ if (active_sampler_ != NULL) return;
+
+ vm_thread_ = pthread_self();
+
+ // Request profiling signals.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
+ data_->signal_handler_installed_ = true;
+
+ // Set the itimer to generate a tick for each interval.
+ itimerval itimer;
+ itimer.it_interval.tv_sec = interval_ / 1000;
+ itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
+ itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
+ itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
+ setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
+
+ // Set this sampler as the active sampler.
+ active_sampler_ = this;
+ active_ = true;
+}
+
+
+void Sampler::Stop() {
+ // Restore old signal handler
+ if (data_->signal_handler_installed_) {
+ setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
+ sigaction(SIGPROF, &data_->old_signal_handler_, 0);
+ data_->signal_handler_installed_ = false;
+ }
+
+ // This sampler is no longer the active sampler.
+ active_sampler_ = NULL;
+ active_ = false;
+}
+
+
+#endif // ENABLE_LOGGING_AND_PROFILING
+
+} } // namespace v8::internal
diff --git a/src/platform.h b/src/platform.h
index 42e6eae..c4ef230 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -367,6 +367,7 @@ class ThreadHandle {
class Thread: public ThreadHandle {
public:
+#ifndef __CYGWIN__
// Opaque data type for thread-local storage keys.
// LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
// to ensure that enumeration type has correct value range (see Issue 830 for
@@ -375,6 +376,10 @@ class Thread: public ThreadHandle {
LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
};
+#else
+ typedef void *LocalStorageKey;
+#endif
+
// Create new thread.
Thread();
diff --git a/src/utils.h b/src/utils.h
index ffdb639..0521767 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -963,7 +963,7 @@ inline Dest BitCast(const Source& source) {
}
template <class Dest, class Source>
-inline Dest BitCast(Source* source) {
+inline Dest BitCast(Source*& source) {
return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
}
diff --git a/tools/utils.py b/tools/utils.py
index 8083091..7733157 100644
--- a/tools/utils.py
+++ b/tools/utils.py
@@ -59,6 +59,8 @@ def GuessOS():
return 'openbsd'
elif id == 'SunOS':
return 'solaris'
+ elif id.find('CYGWIN') >= 0:
+ return 'cygwin'
else:
return None
--
1.7.2

103
deps/v8/0003-Fix-Solaris-build.patch

@ -1,103 +0,0 @@
From 2a40926a1e1c8d385afefc1fe5255e21cc1a8f1e Mon Sep 17 00:00:00 2001
From: Ryan Dahl <ry@tinyclouds.org>
Date: Thu, 16 Dec 2010 22:20:44 +0000
Subject: [PATCH] Fix Solaris build
---
src/platform-solaris.cc | 49 ++++++++++++++++++++++++++++++++++++----------
src/v8utils.h | 1 +
2 files changed, 39 insertions(+), 11 deletions(-)
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index f84e80d..b302c59 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -45,6 +45,7 @@
#include <errno.h>
#include <ieeefp.h> // finite()
#include <signal.h> // sigemptyset(), etc
+#include <sys/kdi_regs.h>
#undef MAP_TYPE
@@ -481,6 +482,16 @@ class SolarisMutex : public Mutex {
int Unlock() { return pthread_mutex_unlock(&mutex_); }
+ virtual bool TryLock() {
+ int result = pthread_mutex_trylock(&mutex_);
+ // Return false if the lock is busy and locking failed.
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT(result == 0); // Verify no other errors.
+ return true;
+ }
+
private:
pthread_mutex_t mutex_;
};
@@ -572,21 +583,37 @@ Semaphore* OS::CreateSemaphore(int count) {
#ifdef ENABLE_LOGGING_AND_PROFILING
static Sampler* active_sampler_ = NULL;
+static pthread_t vm_tid_ = 0;
+
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
USE(info);
if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
-
- TickSample sample;
- sample.pc = 0;
- sample.sp = 0;
- sample.fp = 0;
-
- // We always sample the VM state.
- sample.state = VMState::current_state();
-
- active_sampler_->Tick(&sample);
+ if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
+ if (vm_tid_ != pthread_self()) return;
+
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent();
+ if (sample == NULL) sample = &sample_obj;
+
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+ sample->state = Top::current_vm_state();
+
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_ESP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EBP]);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RSP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RBP]);
+#else
+ UNIMPLEMENTED();
+#endif
+ active_sampler_->SampleStack(sample);
+ active_sampler_->Tick(sample);
}
diff --git a/src/v8utils.h b/src/v8utils.h
index f6ed520..59b3898 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -29,6 +29,7 @@
#define V8_V8UTILS_H_
#include "utils.h"
+#include <stdarg.h>
namespace v8 {
namespace internal {
--
1.6.3.3

56
deps/v8/ChangeLog

@ -1,3 +1,57 @@
2011-01-17: Version 3.0.8
Exposed heap size limit to the heap statistics gathered by
the GetHeapStatistics API.
Wrapped external pointers more carefully (issue 1037).
Hardened the implementation of error objects to avoid setters
intercepting the properties set then throwing an error.
Avoided trashing the FPSCR when calculating Math.floor on ARM.
Performance improvements on the IA32 platform.
2011-01-10: Version 3.0.7
Stopped calling inherited setters when creating object literals
(issue 1015).
Changed interpretation of malformed \c? escapes in RegExp to match
JSC.
Enhanced the command-line debugger interface and fixed some minor
bugs in the debugger.
Performance improvements on the IA32 platform.
2011-01-05: Version 3.0.6
Allowed getters and setters on JSArray elements (issue 900).
Stopped JSON objects from hitting inherited setters (part of
issue 1015).
Allowed numbers and strings as names of getters/setters in object
initializer (issue 820).
Added use_system_v8 option to gyp (off by default), to make it easier
for Linux distributions to ship with system-provided V8 library.
Exported external array data accessors (issue 1016).
Added labelled thread names to help with debugging (on Linux).
2011-01-03: Version 3.0.5
Fixed a couple of cast errors for gcc-3.4.3.
Performance improvements in GC and IA32 code generator.
2010-12-21: Version 3.0.4
Added Date::ResetCache() to the API so that the cached values in the
@ -152,7 +206,7 @@
Allowed forcing the use of a simulator from the build script
independently of the host architecture.
Fixed FreeBSD port (Issue 912).
Fixed FreeBSD port (issue 912).
Made windows-tick-processor respect D8_PATH.

4
deps/v8/SConstruct

@ -662,6 +662,8 @@ def GuessVisibility(os, toolchain):
if os == 'win32' and toolchain == 'gcc':
# MinGW can't do it.
return 'default'
elif os == 'solaris':
return 'default'
else:
return 'hidden'
@ -679,7 +681,7 @@ SIMPLE_OPTIONS = {
'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'],
'default': OS_GUESS,
'help': 'the os to build for (%s)' % OS_GUESS
},

69
deps/v8/include/v8.h

@ -1651,9 +1651,9 @@ class Object : public Value {
* the backing store is preserved while V8 has a reference.
*/
V8EXPORT void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
bool HasIndexedPropertiesInPixelData();
uint8_t* GetIndexedPropertiesPixelData();
int GetIndexedPropertiesPixelDataLength();
V8EXPORT bool HasIndexedPropertiesInPixelData();
V8EXPORT uint8_t* GetIndexedPropertiesPixelData();
V8EXPORT int GetIndexedPropertiesPixelDataLength();
/**
* Set the backing store of the indexed properties to be managed by the
@ -1666,10 +1666,10 @@ class Object : public Value {
void* data,
ExternalArrayType array_type,
int number_of_elements);
bool HasIndexedPropertiesInExternalArrayData();
void* GetIndexedPropertiesExternalArrayData();
ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
int GetIndexedPropertiesExternalArrayDataLength();
V8EXPORT bool HasIndexedPropertiesInExternalArrayData();
V8EXPORT void* GetIndexedPropertiesExternalArrayData();
V8EXPORT ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
V8EXPORT int GetIndexedPropertiesExternalArrayDataLength();
V8EXPORT static Local<Object> New();
static inline Object* Cast(Value* obj);
@ -2515,6 +2515,7 @@ class V8EXPORT HeapStatistics {
size_t total_heap_size() { return total_heap_size_; }
size_t total_heap_size_executable() { return total_heap_size_executable_; }
size_t used_heap_size() { return used_heap_size_; }
size_t heap_size_limit() { return heap_size_limit_; }
private:
void set_total_heap_size(size_t size) { total_heap_size_ = size; }
@ -2522,10 +2523,12 @@ class V8EXPORT HeapStatistics {
total_heap_size_executable_ = size;
}
void set_used_heap_size(size_t size) { used_heap_size_ = size; }
void set_heap_size_limit(size_t size) { heap_size_limit_ = size; }
size_t total_heap_size_;
size_t total_heap_size_executable_;
size_t used_heap_size_;
size_t heap_size_limit_;
friend class V8;
};
@ -3076,6 +3079,18 @@ class V8EXPORT Context {
* Returns a persistent handle to the newly allocated context. This
* persistent handle has to be disposed when the context is no
* longer used so the context can be garbage collected.
*
* \param extensions An optional extension configuration containing
* the extensions to be installed in the newly created context.
*
* \param global_template An optional object template from which the
* global object for the newly created context will be created.
*
* \param global_object An optional global object to be reused for
* the newly created context. This global object must have been
* created by a previous call to Context::New with the same global
* template. The state of the global object will be completely reset
* and only object identify will remain.
*/
static Persistent<Context> New(
ExtensionConfiguration* extensions = NULL,
@ -3338,10 +3353,10 @@ const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
template <size_t ptr_size> struct SmiConstants;
template <size_t ptr_size> struct SmiTagging;
// Smi constants for 32-bit systems.
template <> struct SmiConstants<4> {
template <> struct SmiTagging<4> {
static const int kSmiShiftSize = 0;
static const int kSmiValueSize = 31;
static inline int SmiToInt(internal::Object* value) {
@ -3349,10 +3364,15 @@ template <> struct SmiConstants<4> {
// Throw away top 32 bits and shift down (requires >> to be sign extending).
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
}
// For 32-bit systems any 2 bytes aligned pointer can be encoded as smi
// with a plain reinterpret_cast.
static const intptr_t kEncodablePointerMask = 0x1;
static const int kPointerToSmiShift = 0;
};
// Smi constants for 64-bit systems.
template <> struct SmiConstants<8> {
template <> struct SmiTagging<8> {
static const int kSmiShiftSize = 31;
static const int kSmiValueSize = 32;
static inline int SmiToInt(internal::Object* value) {
@ -3360,10 +3380,26 @@ template <> struct SmiConstants<8> {
// Shift down and throw away top 32 bits.
return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
}
// To maximize the range of pointers that can be encoded
// in the available 32 bits, we require them to be 8 bytes aligned.
// This gives 2 ^ (32 + 3) = 32G address space covered.
// It might be not enough to cover stack allocated objects on some platforms.
static const int kPointerAlignment = 3;
static const intptr_t kEncodablePointerMask =
~(intptr_t(0xffffffff) << kPointerAlignment);
static const int kPointerToSmiShift =
kSmiTagSize + kSmiShiftSize - kPointerAlignment;
};
const int kSmiShiftSize = SmiConstants<kApiPointerSize>::kSmiShiftSize;
const int kSmiValueSize = SmiConstants<kApiPointerSize>::kSmiValueSize;
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
const intptr_t kEncodablePointerMask =
PlatformSmiTagging::kEncodablePointerMask;
const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
template <size_t ptr_size> struct InternalConstants;
@ -3411,7 +3447,7 @@ class Internals {
}
static inline int SmiValue(internal::Object* value) {
return SmiConstants<kApiPointerSize>::SmiToInt(value);
return PlatformSmiTagging::SmiToInt(value);
}
static inline int GetInstanceType(internal::Object* obj) {
@ -3420,9 +3456,14 @@ class Internals {
return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
}
static inline void* GetExternalPointerFromSmi(internal::Object* value) {
const intptr_t address = reinterpret_cast<intptr_t>(value);
return reinterpret_cast<void*>(address >> kPointerToSmiShift);
}
static inline void* GetExternalPointer(internal::Object* obj) {
if (HasSmiTag(obj)) {
return obj;
return GetExternalPointerFromSmi(obj);
} else if (GetInstanceType(obj) == kProxyType) {
return ReadField<void*>(obj, kProxyProxyOffset);
} else {

11
deps/v8/src/SConscript

@ -1,4 +1,4 @@
# Copyright 2008 the V8 project authors. All rights reserved.
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@ -85,6 +85,7 @@ SOURCES = {
jsregexp.cc
jump-target.cc
lithium-allocator.cc
lithium.cc
liveedit.cc
log-utils.cc
log.cc
@ -211,6 +212,8 @@ SOURCES = {
x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
x64/lithium-x64.cc
x64/lithium-codegen-x64.cc
x64/macro-assembler-x64.cc
x64/regexp-macro-assembler-x64.cc
x64/register-allocator-x64.cc
@ -225,14 +228,14 @@ SOURCES = {
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
'os:macos': ['platform-macos.cc', 'platform-posix.cc'],
'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'],
'os:nullos': ['platform-nullos.cc'],
'os:win32': ['platform-win32.cc'],
'mode:release': [],
'mode:debug': [
'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
'objects-debug.cc', 'objects-printer.cc', 'prettyprinter.cc',
'regexp-macro-assembler-tracer.cc'
],
'objectprint:on': ['objects-debug.cc']
'objectprint:on': ['objects-printer.cc']
}

6
deps/v8/src/accessors.cc

@ -126,8 +126,8 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
// This means one of the object's prototypes is a JSArray and
// the object does not have a 'length' property.
// Calling SetProperty causes an infinite loop.
return object->IgnoreAttributesAndSetLocalProperty(Heap::length_symbol(),
value, NONE);
return object->SetLocalPropertyIgnoreAttributes(Heap::length_symbol(),
value, NONE);
}
}
return Top::Throw(*Factory::NewRangeError("invalid_array_length",
@ -775,7 +775,7 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
if (index >= 0) {
Handle<Object> arguments =
Handle<Object>(frame->GetExpression(index));
if (!arguments->IsTheHole()) return *arguments;
if (!arguments->IsArgumentsMarker()) return *arguments;
}
// If there isn't an arguments variable in the stack, we need to

56
deps/v8/src/api.cc

@ -3266,18 +3266,35 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
}
static bool CanBeEncodedAsSmi(void* ptr) {
const intptr_t address = reinterpret_cast<intptr_t>(ptr);
return ((address & i::kEncodablePointerMask) == 0);
}
static i::Smi* EncodeAsSmi(void* ptr) {
ASSERT(CanBeEncodedAsSmi(ptr));
const intptr_t address = reinterpret_cast<intptr_t>(ptr);
i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
ASSERT(i::Internals::HasSmiTag(result));
ASSERT_EQ(result, i::Smi::FromInt(result->value()));
ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
return result;
}
void v8::Object::SetPointerInInternalField(int index, void* value) {
ENTER_V8;
i::Object* as_object = reinterpret_cast<i::Object*>(value);
if (as_object->IsSmi()) {
Utils::OpenHandle(this)->SetInternalField(index, as_object);
return;
if (CanBeEncodedAsSmi(value)) {
Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
} else {
HandleScope scope;
i::Handle<i::Proxy> proxy =
i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
if (!proxy.is_null())
Utils::OpenHandle(this)->SetInternalField(index, *proxy);
}
HandleScope scope;
i::Handle<i::Proxy> proxy =
i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
if (!proxy.is_null())
Utils::OpenHandle(this)->SetInternalField(index, *proxy);
ASSERT_EQ(value, GetPointerFromInternalField(index));
}
@ -3299,7 +3316,8 @@ bool v8::V8::Dispose() {
HeapStatistics::HeapStatistics(): total_heap_size_(0),
total_heap_size_executable_(0),
used_heap_size_(0) { }
used_heap_size_(0),
heap_size_limit_(0) { }
void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
@ -3307,6 +3325,7 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->set_total_heap_size_executable(
i::Heap::CommittedMemoryExecutable());
heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects());
heap_statistics->set_heap_size_limit(i::Heap::MaxReserved());
}
@ -3560,11 +3579,13 @@ Local<Value> v8::External::Wrap(void* data) {
LOG_API("External::Wrap");
EnsureInitialized("v8::External::Wrap()");
ENTER_V8;
i::Object* as_object = reinterpret_cast<i::Object*>(data);
if (as_object->IsSmi()) {
return Utils::ToLocal(i::Handle<i::Object>(as_object));
}
return ExternalNewImpl(data);
v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
: v8::Local<v8::Value>(ExternalNewImpl(data));
ASSERT_EQ(data, Unwrap(result));
return result;
}
@ -3572,7 +3593,7 @@ void* v8::Object::SlowGetPointerFromInternalField(int index) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Object* value = obj->GetInternalField(index);
if (value->IsSmi()) {
return value;
return i::Internals::GetExternalPointerFromSmi(value);
} else if (value->IsProxy()) {
return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
} else {
@ -3586,8 +3607,7 @@ void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
void* result;
if (obj->IsSmi()) {
// The external value was an aligned pointer.
result = *obj;
result = i::Internals::GetExternalPointerFromSmi(*obj);
} else if (obj->IsProxy()) {
result = ExternalValueImpl(obj);
} else {

6
deps/v8/src/arm/assembler-arm.cc

@ -2337,12 +2337,11 @@ void Assembler::vdiv(const DwVfpRegister dst,
void Assembler::vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
const SBit s,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
@ -2351,12 +2350,11 @@ void Assembler::vcmp(const DwVfpRegister src1,
void Assembler::vcmp(const DwVfpRegister src1,
const double src2,
const SBit s,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
// Instruction details available in ARM DDI 0406A, A8-570.
// cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | 0000(3-0)
// Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT(src2 == 0.0);
emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |

21
deps/v8/src/arm/assembler-arm.h

@ -66,13 +66,14 @@ namespace internal {
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
//
// Core register
struct Register {
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 8;
static int ToAllocationIndex(Register reg) {
ASSERT(reg.code() < kNumAllocatableRegisters);
return reg.code();
}
@ -132,7 +133,7 @@ const Register r5 = { 5 };
const Register r6 = { 6 };
const Register r7 = { 7 };
const Register r8 = { 8 }; // Used as context register.
const Register r9 = { 9 };
const Register r9 = { 9 }; // Used as lithium codegen scratch register.
const Register r10 = { 10 }; // Used as roots register.
const Register fp = { 11 };
const Register ip = { 12 };
@ -166,6 +167,9 @@ struct SwVfpRegister {
struct DwVfpRegister {
// d0 has been excluded from allocation. This is following ia32
// where xmm0 is excluded. This should be revisited.
// Currently d0 is used as a scratch register.
// d1 has also been excluded from allocation to be used as a scratch
// register as well.
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 15;
@ -297,11 +301,18 @@ const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// VFP FPSCR constants.
static const uint32_t kVFPExceptionMask = 0xf;
static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
static const uint32_t kVFPZConditionFlagBit = 1 << 30;
static const uint32_t kVFPCConditionFlagBit = 1 << 29;
static const uint32_t kVFPVConditionFlagBit = 1 << 28;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
static const uint32_t kVFPRoundingModeMask = 3 << 22;
static const uint32_t kVFPRoundToMinusInfinityBits = 2 << 22;
static const uint32_t kVFPExceptionMask = 0xf;
// Coprocessor register
struct CRegister {
bool is_valid() const { return 0 <= code_ && code_ < 16; }
@ -1144,11 +1155,9 @@ class Assembler : public Malloced {
const Condition cond = al);
void vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
const SBit s = LeaveCC,
const Condition cond = al);
void vcmp(const DwVfpRegister src1,
const double src2,
const SBit s = LeaveCC,
const Condition cond = al);
void vmrs(const Register dst,
const Condition cond = al);

4
deps/v8/src/arm/builtins-arm.cc

@ -502,7 +502,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the first arguments in r0 and get rid of the rest.
Label no_arguments;
__ cmp(r0, Operand(0));
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(eq, &no_arguments);
// First args = sp[(argc - 1) * 4].
__ sub(r0, r0, Operand(1));
@ -546,7 +546,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
__ Assert(eq, "Unexpected string wrapper instance size");
__ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
__ cmp(r4, Operand(0));
__ cmp(r4, Operand(0, RelocInfo::NONE));
__ Assert(eq, "Unexpected unused properties of string wrapper");
}
__ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));

74
deps/v8/src/arm/code-stubs-arm.cc

@ -866,8 +866,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ vldr(d0, scratch2, HeapNumber::kValueOffset);
__ sub(probe, probe, Operand(kHeapObjectTag));
__ vldr(d1, probe, HeapNumber::kValueOffset);
__ vcmp(d0, d1);
__ vmrs(pc);
__ VFPCompareAndSetFlags(d0, d1);
__ b(ne, not_found); // The cache did not contain this value.
__ b(&load_result_from_cache);
} else {
@ -917,13 +916,6 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
void RecordWriteStub::Generate(MacroAssembler* masm) {
__ add(offset_, object_, Operand(offset_));
__ RecordWriteHelper(object_, offset_, scratch_);
__ Ret();
}
// On entry lhs_ and rhs_ are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
@ -982,8 +974,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope scope(VFP3);
Label no_nan;
// ARMv7 VFP3 instructions to implement double precision comparison.
__ vcmp(d7, d6);
__ vmrs(pc); // Move vector status bits to normal status bits.
__ VFPCompareAndSetFlags(d7, d6);
Label nan;
__ b(vs, &nan);
__ mov(r0, Operand(EQUAL), LeaveCC, eq);
@ -1103,8 +1094,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ sub(ip, tos_, Operand(kHeapObjectTag));
__ vldr(d1, ip, HeapNumber::kValueOffset);
__ vcmp(d1, 0.0);
__ vmrs(pc);
__ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non zero value by default.
// Hence we only need to overwrite "tos_" with zero to return false for
// FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
@ -1229,16 +1219,22 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
bool generate_code_to_calculate_answer = true;
if (ShouldGenerateFPCode()) {
// DIV has neither SmiSmi fast code nor specialized slow code.
// So don't try to patch a DIV Stub.
if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
GenerateTypeTransition(masm); // Tail call.
generate_code_to_calculate_answer = false;
break;
case Token::DIV:
// DIV has neither SmiSmi fast code nor specialized slow code.
// So don't try to patch a DIV Stub.
break;
default:
break;
}
@ -1299,7 +1295,8 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
// HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
// r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
Label r1_is_not_smi;
if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) {
if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
HasSmiSmiFastPath()) {
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &r1_is_not_smi);
GenerateTypeTransition(masm); // Tail call.
@ -2519,7 +2516,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
ExternalReference external_caught(Top::k_external_caught_exception_address);
__ mov(r0, Operand(false));
__ mov(r0, Operand(false, RelocInfo::NONE));
__ mov(r2, Operand(external_caught));
__ str(r0, MemOperand(r2));
@ -2894,45 +2891,45 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Uses registers r0 to r4. Expected input is
// function in r0 (or at sp+1*ptrsz) and object in
// object in r0 (or at sp+1*kPointerSize) and function in
// r1 (or at sp), depending on whether or not
// args_in_registers() is true.
void InstanceofStub::Generate(MacroAssembler* masm) {
// Fixed register usage throughout the stub:
const Register object = r1; // Object (lhs).
const Register object = r0; // Object (lhs).
const Register map = r3; // Map of the object.
const Register function = r0; // Function (rhs).
const Register function = r1; // Function (rhs).
const Register prototype = r4; // Prototype of the function.
const Register scratch = r2;
Label slow, loop, is_instance, is_not_instance, not_js_object;
if (!args_in_registers()) {
__ ldr(function, MemOperand(sp, 1 * kPointerSize));
__ ldr(object, MemOperand(sp, 0));
if (!HasArgsInRegisters()) {
__ ldr(object, MemOperand(sp, 1 * kPointerSize));
__ ldr(function, MemOperand(sp, 0));
}
// Check that the left hand is a JS object and load map.
__ BranchOnSmi(object, &slow);
__ IsObjectJSObjectType(object, map, scratch, &slow);
__ BranchOnSmi(object, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// Look up the function and the map in the instanceof cache.
Label miss;
__ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
__ cmp(object, ip);
__ cmp(function, ip);
__ b(ne, &miss);
__ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
__ cmp(map, ip);
__ b(ne, &miss);
__ LoadRoot(function, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret(args_in_registers() ? 0 : 2);
__ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ bind(&miss);
__ TryGetFunctionPrototype(object, prototype, scratch, &slow);
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
__ BranchOnSmi(prototype, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
__ StoreRoot(object, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
// Register mapping: r3 is object map and r4 is function prototype.
@ -2953,11 +2950,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ bind(&is_instance);
__ mov(r0, Operand(Smi::FromInt(0)));
__ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret(args_in_registers() ? 0 : 2);
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ bind(&is_not_instance);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(args_in_registers() ? 0 : 2);
__ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret(HasArgsInRegisters() ? 0 : 2);
Label object_not_null, object_not_null_or_smi;
__ bind(&not_js_object);
@ -2971,22 +2969,25 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ cmp(scratch, Operand(Factory::null_value()));
__ b(ne, &object_not_null);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(args_in_registers() ? 0 : 2);
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ BranchOnNotSmi(object, &object_not_null_or_smi);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(args_in_registers() ? 0 : 2);
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
__ IsObjectJSStringType(object, scratch, &slow);
__ mov(r0, Operand(Smi::FromInt(1)));
__ Ret(args_in_registers() ? 0 : 2);
__ Ret(HasArgsInRegisters() ? 0 : 2);
// Slow-case. Tail call builtin.
__ bind(&slow);
if (HasArgsInRegisters()) {
__ Push(r0, r1);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}
@ -3012,7 +3013,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// through register r0. Use unsigned comparison to get negative
// check for free.
__ cmp(r1, r0);
__ b(cs, &slow);
__ b(hs, &slow);
// Read the argument from the stack and return it.
__ sub(r3, r0, r1);
@ -4911,8 +4912,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ vldr(d1, r2, HeapNumber::kValueOffset);
// Compare operands
__ vcmp(d0, d1);
__ vmrs(pc); // Move vector status bits to normal status bits.
__ VFPCompareAndSetFlags(d0, d1);
// Don't base result on status bits when a NaN is involved.
__ b(vs, &unordered);

43
deps/v8/src/arm/code-stubs-arm.h

@ -77,7 +77,7 @@ class GenericBinaryOpStub : public CodeStub {
rhs_(rhs),
constant_rhs_(constant_rhs),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
runtime_operands_type_(BinaryOpIC::DEFAULT),
runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
name_(NULL) { }
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
@ -178,6 +178,10 @@ class GenericBinaryOpStub : public CodeStub {
return lhs_is_r0 ? r1 : r0;
}
bool HasSmiSmiFastPath() {
return op_ != Token::DIV;
}
bool ShouldGenerateSmiCode() {
return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
@ -437,43 +441,6 @@ class NumberToStringStub: public CodeStub {
};
class RecordWriteStub : public CodeStub {
public:
RecordWriteStub(Register object, Register offset, Register scratch)
: object_(object), offset_(offset), scratch_(scratch) { }
void Generate(MacroAssembler* masm);
private:
Register object_;
Register offset_;
Register scratch_;
// Minor key encoding in 12 bits. 4 bits for each of the three
// registers (object, offset and scratch) OOOOAAAASSSS.
class ScratchBits: public BitField<uint32_t, 0, 4> {};
class OffsetBits: public BitField<uint32_t, 4, 4> {};
class ObjectBits: public BitField<uint32_t, 8, 4> {};
Major MajorKey() { return RecordWrite; }
int MinorKey() {
// Encode the registers.
return ObjectBits::encode(object_.code()) |
OffsetBits::encode(offset_.code()) |
ScratchBits::encode(scratch_.code());
}
#ifdef DEBUG
void Print() {
PrintF("RecordWriteStub (object reg %d), (offset reg %d),"
" (scratch reg %d)\n",
object_.code(), offset_.code(), scratch_.code());
}
#endif
};
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.

21
deps/v8/src/arm/codegen-arm.cc

@ -596,7 +596,7 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
// When using lazy arguments allocation, we store the hole value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
} else {
frame_->SpillAll();
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
@ -623,7 +623,7 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
// has a local variable named 'arguments'.
LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
Register arguments = frame_->PopToRegister();
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
__ cmp(arguments, ip);
done.Branch(ne);
}
@ -1748,7 +1748,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// named 'arguments' has been introduced.
JumpTarget slow;
Label done;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
__ cmp(ip, arguments_reg);
slow.Branch(ne);
@ -3255,7 +3255,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// If the loaded value is the sentinel that indicates that we
// haven't loaded the arguments object yet, we need to do it now.
JumpTarget exit;
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
__ cmp(tos, ip);
exit.Branch(ne);
frame_->Drop();
@ -4667,8 +4667,7 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
__ mov(scratch2, Operand(0x7FF00000));
__ mov(scratch1, Operand(0, RelocInfo::NONE));
__ vmov(d1, scratch1, scratch2); // Load infinity into d1.
__ vcmp(d0, d1);
__ vmrs(pc);
__ VFPCompareAndSetFlags(d0, d1);
runtime.Branch(eq); // d0 reached infinity.
__ vdiv(d0, d2, d0);
__ b(&allocate_return);
@ -5618,12 +5617,10 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// (or them and test against Smi mask.)
__ mov(tmp2, tmp1);
RecordWriteStub recordWrite1(tmp1, index1, tmp3);
__ CallStub(&recordWrite1);
RecordWriteStub recordWrite2(tmp2, index2, tmp3);
__ CallStub(&recordWrite2);
__ add(index1, index1, tmp1);
__ add(index2, index2, tmp1);
__ RecordWriteHelper(tmp1, index1, tmp3);
__ RecordWriteHelper(tmp2, index2, tmp3);
__ bind(&done);
deferred->BindExit();

5
deps/v8/src/arm/deoptimizer-arm.cc

@ -55,8 +55,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
int deoptimization_index = table.GetDeoptimizationIndex(i);
int gap_code_size = table.GetGapCodeSize(i);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past next safepoint.
// TODO(srdjan): How do we guarantee that safepoint code does not
// overlap other safepoint patching code?

13
deps/v8/src/arm/full-codegen-arm.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -38,6 +38,8 @@
#include "scopes.h"
#include "stub-cache.h"
#include "arm/code-stubs-arm.h"
namespace v8 {
namespace internal {
@ -219,10 +221,17 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
__ b(hs, &ok);
StackCheckStub stub;
__ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
RecordStackCheck(stmt->OsrEntryId());
}

2
deps/v8/src/arm/ic-arm.cc

@ -1591,7 +1591,7 @@ void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
__ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(r1, Operand(0x00));
__ teq(r1, Operand(0x00, RelocInfo::NONE));
__ b(eq, &exponent_rebiased);
__ teq(r1, Operand(0xff));

466
deps/v8/src/arm/lithium-arm.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -93,32 +93,6 @@ void LLabel::PrintDataTo(StringStream* stream) const {
}
bool LParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsRedundant()) return false;
}
return true;
}
void LParallelMove::PrintDataTo(StringStream* stream) const {
for (int i = move_operands_.length() - 1; i >= 0; --i) {
if (!move_operands_[i].IsEliminated()) {
LOperand* from = move_operands_[i].from();
LOperand* to = move_operands_[i].to();
if (from->Equals(to)) {
to->PrintTo(stream);
} else {
to->PrintTo(stream);
stream->Add(" = ");
from->PrintTo(stream);
}
stream->Add("; ");
}
}
}
bool LGap::IsRedundant() const {
for (int i = 0; i < 4; i++) {
if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
@ -270,6 +244,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
stream->Add("(%d, %d)", context_chain_length(), slot_index());
}
void LCallKeyed::PrintDataTo(StringStream* stream) const {
stream->Add("[r2] #%d / ", arity());
}
@ -472,151 +451,6 @@ void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
}
class LGapNode: public ZoneObject {
public:
explicit LGapNode(LOperand* operand)
: operand_(operand), resolved_(false), visited_id_(-1) { }
LOperand* operand() const { return operand_; }
bool IsResolved() const { return !IsAssigned() || resolved_; }
void MarkResolved() {
ASSERT(!IsResolved());
resolved_ = true;
}
int visited_id() const { return visited_id_; }
void set_visited_id(int id) {
ASSERT(id > visited_id_);
visited_id_ = id;
}
bool IsAssigned() const { return assigned_from_.is_set(); }
LGapNode* assigned_from() const { return assigned_from_.get(); }
void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
private:
LOperand* operand_;
SetOncePointer<LGapNode> assigned_from_;
bool resolved_;
int visited_id_;
};
LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand)
: nodes_(4),
identified_cycles_(4),
result_(4),
marker_operand_(marker_operand),
next_visited_id_(0) {
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) RegisterMove(move);
}
}
const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
for (int i = 0; i < identified_cycles_.length(); ++i) {
ResolveCycle(identified_cycles_[i]);
}
int unresolved_nodes;
do {
unresolved_nodes = 0;
for (int j = 0; j < nodes_.length(); j++) {
LGapNode* node = nodes_[j];
if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
AddResultMove(node->assigned_from(), node);
node->MarkResolved();
}
if (!node->IsResolved()) ++unresolved_nodes;
}
} while (unresolved_nodes > 0);
return &result_;
}
void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
AddResultMove(from->operand(), to->operand());
}
void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
result_.Add(LMoveOperands(from, to));
}
void LGapResolver::ResolveCycle(LGapNode* start) {
ZoneList<LOperand*> circle_operands(8);
circle_operands.Add(marker_operand_);
LGapNode* cur = start;
do {
cur->MarkResolved();
circle_operands.Add(cur->operand());
cur = cur->assigned_from();
} while (cur != start);
circle_operands.Add(marker_operand_);
for (int i = circle_operands.length() - 1; i > 0; --i) {
LOperand* from = circle_operands[i];
LOperand* to = circle_operands[i - 1];
AddResultMove(from, to);
}
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
ASSERT(a != b);
LGapNode* cur = a;
while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
cur->set_visited_id(visited_id);
cur = cur->assigned_from();
}
return cur == b;
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
ASSERT(a != b);
return CanReach(a, b, next_visited_id_++);
}
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.from()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.from(), move.to());
} else {
LGapNode* from = LookupNode(move.from());
LGapNode* to = LookupNode(move.to());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
}
ASSERT(!to->IsAssigned());
if (CanReach(from, to)) {
// This introduces a circle. Save.
identified_cycles_.Add(from);
}
to->set_assigned_from(from);
}
}
LGapNode* LGapResolver::LookupNode(LOperand* operand) {
for (int i = 0; i < nodes_.length(); ++i) {
if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
}
// No node found => create a new one.
LGapNode* result = new LGapNode(operand);
nodes_.Add(result);
return result;
}
Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
}
@ -767,11 +601,6 @@ LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
}
LInstruction* LChunkBuilder::DefineSameAsAny(LInstruction* instr) {
return Define(instr, new LUnallocated(LUnallocated::SAME_AS_ANY_INPUT));
}
LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
@ -852,13 +681,6 @@ LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
}
LOperand* LChunkBuilder::Temp() {
LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
allocator_->RecordTemporary(operand);
return operand;
}
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
allocator_->RecordTemporary(operand);
@ -1016,9 +838,6 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
HInstruction* current = block->first();
int start = chunk_->instructions()->length();
while (current != NULL && !is_aborted()) {
if (FLAG_trace_environment) {
PrintF("Process instruction %d\n", current->id());
}
// Code for constants in registers is generated lazily.
if (!current->EmitAtUses()) {
VisitInstruction(current);
@ -1066,66 +885,13 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
}
void LEnvironment::WriteTranslation(LCodeGen* cgen,
Translation* translation) const {
if (this == NULL) return;
// The translation includes one command per value in the environment.
int translation_size = values()->length();
// The output frame height does not include the parameters.
int height = translation_size - parameter_count();
outer()->WriteTranslation(cgen, translation);
int closure_id = cgen->DefineDeoptimizationLiteral(closure());
translation->BeginFrame(ast_id(), closure_id, height);
for (int i = 0; i < translation_size; ++i) {
LOperand* value = values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
// both NULL or both set.
if (spilled_registers_ != NULL && value != NULL) {
if (value->IsRegister() &&
spilled_registers_[value->index()] != NULL) {
translation->MarkDuplicate();
cgen->AddToTranslation(translation,
spilled_registers_[value->index()],
HasTaggedValueAt(i));
} else if (value->IsDoubleRegister() &&
spilled_double_registers_[value->index()] != NULL) {
translation->MarkDuplicate();
cgen->AddToTranslation(translation,
spilled_double_registers_[value->index()],
false);
}
}
cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
}
}
void LEnvironment::PrintTo(StringStream* stream) const {
stream->Add("[id=%d|", ast_id());
stream->Add("[parameters=%d|", parameter_count());
stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
for (int i = 0; i < values_.length(); ++i) {
if (i != 0) stream->Add(";");
if (values_[i] == NULL) {
stream->Add("[hole]");
} else {
values_[i]->PrintTo(stream);
}
}
stream->Add("]");
}
LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
if (hydrogen_env == NULL) return NULL;
LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->values()->length();
int value_count = hydrogen_env->length();
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
ast_id,
hydrogen_env->parameter_count(),
@ -1176,7 +942,6 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
ASSERT(compare->value()->representation().IsTagged());
return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
TempRegister(),
TempRegister(),
first_id,
second_id);
@ -1185,22 +950,21 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
Token::Value op = compare->token();
HValue* left = compare->left();
HValue* right = compare->right();
if (left->representation().IsInteger32()) {
Representation r = compare->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(op,
UseRegisterAtStart(left),
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseOrConstantAtStart(right),
first_id,
second_id,
false);
} else if (left->representation().IsDouble()) {
second_id);
} else if (r.IsDouble()) {
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
return new LCmpIDAndBranch(op,
UseRegisterAtStart(left),
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseRegisterAtStart(right),
first_id,
second_id,
true);
second_id);
} else {
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
@ -1225,7 +989,6 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
ASSERT(compare->value()->representation().IsTagged());
return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
TempRegister(),
first_id,
second_id);
} else if (v->IsHasCachedArrayIndex()) {
@ -1238,11 +1001,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HIsNull* compare = HIsNull::cast(v);
ASSERT(compare->value()->representation().IsTagged());
// We only need a temp register for non-strict compare.
LOperand* temp = compare->is_strict() ? NULL : TempRegister();
return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
compare->is_strict(),
temp,
first_id,
second_id);
} else if (v->IsIsObject()) {
@ -1295,17 +1054,13 @@ LInstruction* LChunkBuilder::DoCompareMapAndBranch(
HCompareMapAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
HBasicBlock* first = instr->FirstSuccessor();
HBasicBlock* second = instr->SecondSuccessor();
return new LCmpMapAndBranch(value,
instr->map(),
first->block_id(),
second->block_id());
LOperand* temp = TempRegister();
return new LCmpMapAndBranch(value, temp);
}
LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
return DefineAsRegister(new LArgumentsLength(Use(length->value())));
return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
}
@ -1316,8 +1071,16 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
LInstruction* result =
new LInstanceOf(UseFixed(instr->left(), r1),
UseFixed(instr->right(), r0));
new LInstanceOf(UseFixed(instr->left(), r0),
UseFixed(instr->right(), r1));
return MarkAsCall(DefineFixed(result, r0), instr);
}
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstruction* result =
new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@ -1362,7 +1125,8 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
LOperand* input = UseRegisterAtStart(instr->value());
LInstruction* result = new LUnaryMathOperation(input);
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
LInstruction* result = new LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@ -1370,6 +1134,9 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathRound:
Abort("MathRound LUnaryMathOperation not implemented");
return NULL;
case kMathPowHalf:
Abort("MathPowHalf LUnaryMathOperation not implemented");
return NULL;
@ -1476,12 +1243,15 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
FixedTemp(r1);
// TODO(1042) The fixed register allocation
// is needed because we call GenericBinaryOpStub from
// the generated code, which requires registers r0
// and r1 to be used. We should remove that
// when we provide a native implementation.
LOperand* value = UseFixed(instr->left(), r0);
LOperand* divisor = UseRegister(instr->right());
return AssignEnvironment(DefineFixed(new LDivI(value, divisor), r0));
LOperand* divisor = UseFixed(instr->right(), r1);
return AssignEnvironment(AssignPointerMap(
DefineFixed(new LDivI(value, divisor), r0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
@ -1490,18 +1260,17 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
if (instr->representation().IsInteger32()) {
// TODO(1042) The fixed register allocation
// is needed because we call GenericBinaryOpStub from
// the generated code, which requires registers r0
// and r1 to be used. We should remove that
// when we provide a native implementation.
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
FixedTemp(r1);
LOperand* value = UseFixed(instr->left(), r0);
LOperand* divisor = UseRegister(instr->right());
LInstruction* result = DefineFixed(new LModI(value, divisor), r1);
if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
instr->CheckFlag(HValue::kCanBeDivByZero)) {
result = AssignEnvironment(result);
}
LOperand* divisor = UseFixed(instr->right(), r1);
LInstruction* result = DefineFixed(new LModI(value, divisor), r0);
result = AssignEnvironment(AssignPointerMap(result));
return result;
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
@ -1587,17 +1356,22 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
Token::Value op = instr->token();
if (instr->left()->representation().IsInteger32()) {
Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
return DefineAsRegister(new LCmpID(op, left, right, false));
} else if (instr->left()->representation().IsDouble()) {
return DefineAsRegister(new LCmpID(left, right));
} else if (r.IsDouble()) {
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(op, left, right, true));
return DefineAsRegister(new LCmpID(left, right));
} else {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
@ -1620,8 +1394,7 @@ LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LIsNull(value,
instr->is_strict()));
return DefineAsRegister(new LIsNull(value));
}
@ -1661,24 +1434,19 @@ LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseTempRegister(instr->value());
return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
return DefineSameAsFirst(new LClassOfTest(value));
}
LInstruction* LChunkBuilder::DoArrayLength(HArrayLength* instr) {
LOperand* array = NULL;
LOperand* temporary = NULL;
LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LJSArrayLength(array));
}
if (instr->value()->IsLoadElements()) {
array = UseRegisterAtStart(instr->value());
} else {
array = UseRegister(instr->value());
temporary = TempRegister();
}
LInstruction* result = new LArrayLength(array, temporary);
return AssignEnvironment(DefineAsRegister(result));
LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LFixedArrayLength(array));
}
@ -1691,7 +1459,7 @@ LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
Use(instr->length())));
UseRegister(instr->length())));
}
@ -1771,18 +1539,15 @@ LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
LInstruction* result = new LCheckInstanceType(value, temp);
LInstruction* result = new LCheckInstanceType(value);
return AssignEnvironment(result);
}
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LOperand* temp = TempRegister();
LInstruction* result =
new LCheckPrototypeMaps(temp,
instr->holder(),
instr->receiver_map());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
return AssignEnvironment(result);
}
@ -1822,7 +1587,7 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT(instr->handle()));
} else {
Abort("unsupported constant of type double");
UNREACHABLE();
return NULL;
}
}
@ -1841,6 +1606,11 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
}
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
return DefineAsRegister(new LLoadContextSlot);
}
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
return DefineAsRegister(
new LLoadNamedField(UseRegisterAtStart(instr->object())));
@ -1854,6 +1624,13 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
}
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
new LLoadFunctionPrototype(UseRegister(instr->function()))));
}
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineSameAsFirst(new LLoadElements(input));
@ -1862,23 +1639,12 @@ LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
HLoadKeyedFastElement* instr) {
Representation r = instr->representation();
LOperand* obj = UseRegisterAtStart(instr->object());
ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LOperand* load_result = NULL;
// Double needs an extra temp, because the result is converted from heap
// number to a double register.
if (r.IsDouble()) load_result = TempRegister();
LInstruction* result = new LLoadKeyedFastElement(obj,
key,
load_result);
if (r.IsDouble()) {
result = DefineAsRegister(result);
} else {
result = DefineSameAsFirst(result);
}
return AssignEnvironment(result);
LInstruction* result = new LLoadKeyedFastElement(obj, key);
return AssignEnvironment(DefineSameAsFirst(result));
}
@ -1925,7 +1691,7 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = !instr->value()->type().IsSmi();
bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* obj = needs_write_barrier
? UseTempRegister(instr->object())
@ -1935,19 +1701,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
? UseTempRegister(instr->value())
: UseRegister(instr->value());
// We only need a scratch register if we have a write barrier or we
// have a store into the properties array (not in-object-property).
LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
? TempRegister() : NULL;
return new LStoreNamedField(obj,
instr->name(),
val,
instr->is_in_object(),
instr->offset(),
temp,
needs_write_barrier,
instr->transition());
return new LStoreNamedField(obj, val);
}
@ -1955,7 +1709,7 @@ LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), r1);
LOperand* val = UseFixed(instr->value(), r0);
LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
LInstruction* result = new LStoreNamedGeneric(obj, val);
return MarkAsCall(result, instr);
}
@ -1981,8 +1735,9 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LInstruction* result = new LDeleteProperty(Use(instr->object()),
UseOrConstant(instr->key()));
LOperand* object = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LInstruction* result = new LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@ -2022,14 +1777,14 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = Use(instr->index());
LOperand* index = UseRegister(instr->index());
LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
return DefineAsRegister(AssignEnvironment(result));
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LInstruction* result = new LTypeof(Use(instr->value()));
LInstruction* result = new LTypeof(UseRegisterAtStart(instr->value()));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@ -2054,13 +1809,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
}
}
if (FLAG_trace_environment) {
PrintF("Reconstructed environment ast_id=%d, instr_id=%d\n",
instr->ast_id(),
instr->id());
env->PrintToStd();
}
ASSERT(env->values()->length() == instr->environment_height());
ASSERT(env->length() == instr->environment_length());
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
@ -2102,21 +1851,4 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
}
void LPointerMap::RecordPointer(LOperand* op) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
pointer_operands_.Add(op);
}
void LPointerMap::PrintTo(StringStream* stream) const {
stream->Add("{");
for (int i = 0; i < pointer_operands_.length(); ++i) {
if (i != 0) stream->Add(";");
pointer_operands_[i]->PrintTo(stream);
}
stream->Add("} @%d", position());
}
} } // namespace v8::internal

416
deps/v8/src/arm/lithium-arm.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -30,6 +30,7 @@
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
namespace v8 {
@ -37,8 +38,6 @@ namespace internal {
// Forward declarations.
class LCodeGen;
class LEnvironment;
class Translation;
// Type hierarchy:
@ -62,6 +61,7 @@ class Translation;
// LDivI
// LInstanceOf
// LInstanceOfAndBranch
// LInstanceOfKnownGlobal
// LLoadKeyedFastElement
// LLoadKeyedGeneric
// LModI
@ -76,6 +76,7 @@ class Translation;
// LCallNamed
// LCallRuntime
// LCallStub
// LCheckPrototypeMaps
// LConstant
// LConstantD
// LConstantI
@ -85,7 +86,8 @@ class Translation;
// LGlobalObject
// LGlobalReceiver
// LLabel
// LLayzBailout
// LLazyBailout
// LLoadContextSlot
// LLoadGlobal
// LMaterializedLiteral
// LArrayLiteral
@ -101,14 +103,14 @@ class Translation;
// LStoreNamedField
// LStoreNamedGeneric
// LUnaryOperation
// LArrayLength
// LJSArrayLength
// LFixedArrayLength
// LBitNotI
// LBranch
// LCallNew
// LCheckFunction
// LCheckInstanceType
// LCheckMap
// LCheckPrototypeMaps
// LCheckSmi
// LClassOfTest
// LClassOfTestAndBranch
@ -127,6 +129,7 @@ class Translation;
// LIsSmiAndBranch
// LLoadNamedField
// LLoadNamedGeneric
// LLoadFunctionPrototype
// LNumberTagD
// LNumberTagI
// LPushArgument
@ -161,7 +164,6 @@ class Translation;
V(ArgumentsLength) \
V(ArithmeticD) \
V(ArithmeticT) \
V(ArrayLength) \
V(ArrayLiteral) \
V(BitI) \
V(BitNotI) \
@ -195,6 +197,7 @@ class Translation;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(FixedArrayLength) \
V(FunctionLiteral) \
V(Gap) \
V(GlobalObject) \
@ -202,6 +205,7 @@ class Translation;
V(Goto) \
V(InstanceOf) \
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(Integer32ToDouble) \
V(IsNull) \
V(IsNullAndBranch) \
@ -209,6 +213,7 @@ class Translation;
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(JSArrayLength) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(HasCachedArrayIndex) \
@ -217,12 +222,14 @@ class Translation;
V(ClassOfTestAndBranch) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadFunctionPrototype) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@ -325,53 +332,6 @@ class LInstruction: public ZoneObject {
};
class LGapNode;
class LGapResolver BASE_EMBEDDED {
public:
LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
const ZoneList<LMoveOperands>* ResolveInReverseOrder();
private:
LGapNode* LookupNode(LOperand* operand);
bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
bool CanReach(LGapNode* a, LGapNode* b);
void RegisterMove(LMoveOperands move);
void AddResultMove(LOperand* from, LOperand* to);
void AddResultMove(LGapNode* from, LGapNode* to);
void ResolveCycle(LGapNode* start);
ZoneList<LGapNode*> nodes_;
ZoneList<LGapNode*> identified_cycles_;
ZoneList<LMoveOperands> result_;
LOperand* marker_operand_;
int next_visited_id_;
int bailout_after_ast_id_;
};
class LParallelMove : public ZoneObject {
public:
LParallelMove() : move_operands_(4) { }
void AddMove(LOperand* from, LOperand* to) {
move_operands_.Add(LMoveOperands(from, to));
}
bool IsRedundant() const;
const ZoneList<LMoveOperands>* move_operands() const {
return &move_operands_;
}
void PrintDataTo(StringStream* stream) const;
private:
ZoneList<LMoveOperands> move_operands_;
};
class LGap: public LInstruction {
public:
explicit LGap(HBasicBlock* block)
@ -485,6 +445,10 @@ class LCallStub: public LInstruction {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
TranscendentalCache::Type transcendental_type() {
return hydrogen()->transcendental_type();
}
};
@ -621,29 +585,26 @@ class LMulI: public LBinaryOperation {
class LCmpID: public LBinaryOperation {
public:
LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
: LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
LCmpID(LOperand* left, LOperand* right)
: LBinaryOperation(left, right) { }
Token::Value op() const { return op_; }
bool is_double() const { return is_double_; }
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
return hydrogen()->GetInputRepresentation().IsDouble();
}
DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
private:
Token::Value op_;
bool is_double_;
DECLARE_HYDROGEN_ACCESSOR(Compare)
};
class LCmpIDAndBranch: public LCmpID {
public:
LCmpIDAndBranch(Token::Value op,
LOperand* left,
LCmpIDAndBranch(LOperand* left,
LOperand* right,
int true_block_id,
int false_block_id,
bool is_double)
: LCmpID(op, left, right, is_double),
int false_block_id)
: LCmpID(left, right),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
@ -662,14 +623,18 @@ class LCmpIDAndBranch: public LCmpID {
class LUnaryMathOperation: public LUnaryOperation {
public:
explicit LUnaryMathOperation(LOperand* value)
: LUnaryOperation(value) { }
explicit LUnaryMathOperation(LOperand* value, LOperand* temp)
: LUnaryOperation(value), temp_(temp) { }
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
virtual void PrintDataTo(StringStream* stream) const;
BuiltinFunctionId op() const { return hydrogen()->op(); }
LOperand* temp() const { return temp_; }
private:
LOperand* temp_;
};
@ -706,27 +671,21 @@ class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
class LIsNull: public LUnaryOperation {
public:
LIsNull(LOperand* value, bool is_strict)
: LUnaryOperation(value), is_strict_(is_strict) {}
explicit LIsNull(LOperand* value) : LUnaryOperation(value) {}
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
DECLARE_HYDROGEN_ACCESSOR(IsNull);
bool is_strict() const { return is_strict_; }
private:
bool is_strict_;
bool is_strict() const { return hydrogen()->is_strict(); }
};
class LIsNullAndBranch: public LIsNull {
public:
LIsNullAndBranch(LOperand* value,
bool is_strict,
LOperand* temp,
int true_block_id,
int false_block_id)
: LIsNull(value, is_strict),
temp_(temp),
: LIsNull(value),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
@ -737,10 +696,7 @@ class LIsNullAndBranch: public LIsNull {
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
LOperand* temp() const { return temp_; }
private:
LOperand* temp_;
int true_block_id_;
int false_block_id_;
};
@ -835,11 +791,9 @@ class LHasInstanceType: public LUnaryOperation {
class LHasInstanceTypeAndBranch: public LHasInstanceType {
public:
LHasInstanceTypeAndBranch(LOperand* value,
LOperand* temporary,
int true_block_id,
int false_block_id)
: LHasInstanceType(value),
temp_(temporary),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
@ -851,10 +805,7 @@ class LHasInstanceTypeAndBranch: public LHasInstanceType {
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
LOperand* temp() { return temp_; }
private:
LOperand* temp_;
int true_block_id_;
int false_block_id_;
};
@ -894,18 +845,12 @@ class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
class LClassOfTest: public LUnaryOperation {
public:
LClassOfTest(LOperand* value, LOperand* temp)
: LUnaryOperation(value), temporary_(temp) {}
explicit LClassOfTest(LOperand* value) : LUnaryOperation(value) {}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
virtual void PrintDataTo(StringStream* stream) const;
LOperand* temporary() { return temporary_; }
private:
LOperand *temporary_;
};
@ -913,11 +858,10 @@ class LClassOfTestAndBranch: public LClassOfTest {
public:
LClassOfTestAndBranch(LOperand* value,
LOperand* temporary,
LOperand* temporary2,
int true_block_id,
int false_block_id)
: LClassOfTest(value, temporary),
temporary2_(temporary2),
: LClassOfTest(value),
temporary_(temporary),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
@ -928,10 +872,10 @@ class LClassOfTestAndBranch: public LClassOfTest {
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
LOperand* temporary2() { return temporary2_; }
LOperand* temporary() { return temporary_; }
private:
LOperand* temporary2_;
LOperand* temporary_;
int true_block_id_;
int false_block_id_;
};
@ -999,6 +943,19 @@ class LInstanceOfAndBranch: public LInstanceOf {
};
class LInstanceOfKnownGlobal: public LUnaryOperation {
public:
explicit LInstanceOfKnownGlobal(LOperand* left)
: LUnaryOperation(left) { }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
Handle<JSFunction> function() const { return hydrogen()->function(); }
};
class LBoundsCheck: public LBinaryOperation {
public:
LBoundsCheck(LOperand* index, LOperand* length)
@ -1117,42 +1074,43 @@ class LBranch: public LUnaryOperation {
class LCmpMapAndBranch: public LUnaryOperation {
public:
LCmpMapAndBranch(LOperand* value,
Handle<Map> map,
int true_block_id,
int false_block_id)
: LUnaryOperation(value),
map_(map),
true_block_id_(true_block_id),
false_block_id_(false_block_id) { }
LCmpMapAndBranch(LOperand* value, LOperand* temp)
: LUnaryOperation(value), temp_(temp) { }
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
virtual bool IsControl() const { return true; }
Handle<Map> map() const { return map_; }
int true_block_id() const { return true_block_id_; }
int false_block_id() const { return false_block_id_; }
LOperand* temp() const { return temp_; }
Handle<Map> map() const { return hydrogen()->map(); }
int true_block_id() const {
return hydrogen()->true_destination()->block_id();
}
int false_block_id() const {
return hydrogen()->false_destination()->block_id();
}
private:
Handle<Map> map_;
int true_block_id_;
int false_block_id_;
LOperand* temp_;
};
class LArrayLength: public LUnaryOperation {
class LJSArrayLength: public LUnaryOperation {
public:
LArrayLength(LOperand* input, LOperand* temporary)
: LUnaryOperation(input), temporary_(temporary) { }
explicit LJSArrayLength(LOperand* input) : LUnaryOperation(input) { }
LOperand* temporary() const { return temporary_; }
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array-length")
DECLARE_HYDROGEN_ACCESSOR(ArrayLength)
private:
LOperand* temporary_;
class LFixedArrayLength: public LUnaryOperation {
public:
explicit LFixedArrayLength(LOperand* input) : LUnaryOperation(input) { }
DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
};
@ -1256,6 +1214,18 @@ class LLoadNamedGeneric: public LUnaryOperation {
};
class LLoadFunctionPrototype: public LUnaryOperation {
public:
explicit LLoadFunctionPrototype(LOperand* function)
: LUnaryOperation(function) { }
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
LOperand* function() const { return input(); }
};
class LLoadElements: public LUnaryOperation {
public:
explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
@ -1266,21 +1236,14 @@ class LLoadElements: public LUnaryOperation {
class LLoadKeyedFastElement: public LBinaryOperation {
public:
LLoadKeyedFastElement(LOperand* elements,
LOperand* key,
LOperand* load_result)
: LBinaryOperation(elements, key),
load_result_(load_result) { }
LLoadKeyedFastElement(LOperand* elements, LOperand* key)
: LBinaryOperation(elements, key) { }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
LOperand* elements() const { return left(); }
LOperand* key() const { return right(); }
LOperand* load_result() const { return load_result_; }
private:
LOperand* load_result_;
};
@ -1312,6 +1275,20 @@ class LStoreGlobal: public LUnaryOperation {
};
class LLoadContextSlot: public LInstruction {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
int context_chain_length() const {
return hydrogen()->context_chain_length();
}
int slot_index() const { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
class LPushArgument: public LUnaryOperation {
public:
explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
@ -1516,67 +1493,46 @@ class LSmiUntag: public LUnaryOperation {
class LStoreNamed: public LInstruction {
public:
LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
: object_(obj), name_(name), value_(val) { }
LStoreNamed(LOperand* obj, LOperand* val)
: object_(obj), value_(val) { }
DECLARE_INSTRUCTION(StoreNamed)
DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
virtual void PrintDataTo(StringStream* stream) const;
LOperand* object() const { return object_; }
Handle<Object> name() const { return name_; }
Handle<Object> name() const { return hydrogen()->name(); }
LOperand* value() const { return value_; }
private:
LOperand* object_;
Handle<Object> name_;
LOperand* value_;
};
class LStoreNamedField: public LStoreNamed {
public:
LStoreNamedField(LOperand* obj,
Handle<Object> name,
LOperand* val,
bool in_object,
int offset,
LOperand* temp,
bool needs_write_barrier,
Handle<Map> transition)
: LStoreNamed(obj, name, val),
is_in_object_(in_object),
offset_(offset),
temp_(temp),
needs_write_barrier_(needs_write_barrier),
transition_(transition) { }
LStoreNamedField(LOperand* obj, LOperand* val)
: LStoreNamed(obj, val) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
bool is_in_object() { return is_in_object_; }
int offset() { return offset_; }
LOperand* temp() { return temp_; }
bool needs_write_barrier() { return needs_write_barrier_; }
Handle<Map> transition() const { return transition_; }
void set_transition(Handle<Map> map) { transition_ = map; }
private:
bool is_in_object_;
int offset_;
LOperand* temp_;
bool needs_write_barrier_;
Handle<Map> transition_;
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() { return hydrogen()->transition(); }
};
class LStoreNamedGeneric: public LStoreNamed {
public:
LStoreNamedGeneric(LOperand* obj,
Handle<Object> name,
LOperand* val)
: LStoreNamed(obj, name, val) { }
LStoreNamedGeneric(LOperand* obj, LOperand* val)
: LStoreNamed(obj, val) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
};
@ -1631,8 +1587,7 @@ class LCheckFunction: public LUnaryOperation {
class LCheckInstanceType: public LUnaryOperation {
public:
LCheckInstanceType(LOperand* use, LOperand* temp)
: LUnaryOperation(use), temp_(temp) { }
explicit LCheckInstanceType(LOperand* use) : LUnaryOperation(use) { }
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
@ -1655,23 +1610,21 @@ class LCheckMap: public LUnaryOperation {
class LCheckPrototypeMaps: public LInstruction {
public:
LCheckPrototypeMaps(LOperand* temp,
Handle<JSObject> holder,
Handle<Map> receiver_map)
: temp_(temp),
holder_(holder),
receiver_map_(receiver_map) { }
LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2)
: temp1_(temp1), temp2_(temp2) { }
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
LOperand* temp() const { return temp_; }
Handle<JSObject> holder() const { return holder_; }
Handle<Map> receiver_map() const { return receiver_map_; }
Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
Handle<JSObject> holder() const { return hydrogen()->holder(); }
LOperand* temp1() const { return temp1_; }
LOperand* temp2() const { return temp2_; }
private:
LOperand* temp_;
Handle<JSObject> holder_;
Handle<Map> receiver_map_;
LOperand* temp1_;
LOperand* temp2_;
};
@ -1811,108 +1764,6 @@ class LStackCheck: public LInstruction {
};
class LPointerMap: public ZoneObject {
public:
explicit LPointerMap(int position)
: pointer_operands_(8), position_(position), lithium_position_(-1) { }
const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
int position() const { return position_; }
int lithium_position() const { return lithium_position_; }
void set_lithium_position(int pos) {
ASSERT(lithium_position_ == -1);
lithium_position_ = pos;
}
void RecordPointer(LOperand* op);
void PrintTo(StringStream* stream) const;
private:
ZoneList<LOperand*> pointer_operands_;
int position_;
int lithium_position_;
};
class LEnvironment: public ZoneObject {
public:
LEnvironment(Handle<JSFunction> closure,
int ast_id,
int parameter_count,
int argument_count,
int value_count,
LEnvironment* outer)
: closure_(closure),
arguments_stack_height_(argument_count),
deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
translation_index_(-1),
ast_id_(ast_id),
parameter_count_(parameter_count),
values_(value_count),
representations_(value_count),
spilled_registers_(NULL),
spilled_double_registers_(NULL),
outer_(outer) {
}
Handle<JSFunction> closure() const { return closure_; }
int arguments_stack_height() const { return arguments_stack_height_; }
int deoptimization_index() const { return deoptimization_index_; }
int translation_index() const { return translation_index_; }
int ast_id() const { return ast_id_; }
int parameter_count() const { return parameter_count_; }
const ZoneList<LOperand*>* values() const { return &values_; }
LEnvironment* outer() const { return outer_; }
void AddValue(LOperand* operand, Representation representation) {
values_.Add(operand);
representations_.Add(representation);
}
bool HasTaggedValueAt(int index) const {
return representations_[index].IsTagged();
}
void Register(int deoptimization_index, int translation_index) {
ASSERT(!HasBeenRegistered());
deoptimization_index_ = deoptimization_index;
translation_index_ = translation_index;
}
bool HasBeenRegistered() const {
return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
}
void SetSpilledRegisters(LOperand** registers,
LOperand** double_registers) {
spilled_registers_ = registers;
spilled_double_registers_ = double_registers;
}
// Emit frame translation commands for this environment.
void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
void PrintTo(StringStream* stream) const;
private:
Handle<JSFunction> closure_;
int arguments_stack_height_;
int deoptimization_index_;
int translation_index_;
int ast_id_;
int parameter_count_;
ZoneList<LOperand*> values_;
ZoneList<Representation> representations_;
// Allocation index indexed arrays of spill slot operands for registers
// that are also in spill slots at an OSR entry. NULL for environments
// that do not correspond to an OSR entry.
LOperand** spilled_registers_;
LOperand** spilled_double_registers_;
LEnvironment* outer_;
};
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
@ -2051,7 +1902,6 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* Define(LInstruction* instr);
LInstruction* DefineAsRegister(LInstruction* instr);
LInstruction* DefineAsSpilled(LInstruction* instr, int index);
LInstruction* DefineSameAsAny(LInstruction* instr);
LInstruction* DefineSameAsFirst(LInstruction* instr);
LInstruction* DefineFixed(LInstruction* instr, Register reg);
LInstruction* DefineFixedDouble(LInstruction* instr, DoubleRegister reg);
@ -2074,8 +1924,6 @@ class LChunkBuilder BASE_EMBEDDED {
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
// Temporary operand that may be a memory location.
LOperand* Temp();
// Temporary operand that must be in a register.
LUnallocated* TempRegister();
LOperand* FixedTemp(Register reg);

1262
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

39
deps/v8/src/arm/lithium-codegen-arm.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -39,8 +39,30 @@ namespace internal {
// Forward declarations.
class LDeferredCode;
class LGapNode;
class SafepointGenerator;
class LGapResolver BASE_EMBEDDED {
public:
LGapResolver();
const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand);
private:
LGapNode* LookupNode(LOperand* operand);
bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
bool CanReach(LGapNode* a, LGapNode* b);
void RegisterMove(LMoveOperands move);
void AddResultMove(LOperand* from, LOperand* to);
void AddResultMove(LGapNode* from, LGapNode* to);
void ResolveCycle(LGapNode* start, LOperand* marker_operand);
ZoneList<LGapNode*> nodes_;
ZoneList<LGapNode*> identified_cycles_;
ZoneList<LMoveOperands> result_;
int next_visited_id_;
};
class LCodeGen BASE_EMBEDDED {
public:
@ -71,6 +93,7 @@ class LCodeGen BASE_EMBEDDED {
void FinishCode(Handle<Code> code);
// Deferred code support.
void DoDeferredGenericBinaryStub(LBinaryOperation* instr, Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
@ -80,6 +103,9 @@ class LCodeGen BASE_EMBEDDED {
// Parallel move support.
void DoParallelMove(LParallelMove* move);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@ -103,6 +129,9 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); }
MacroAssembler* masm() const { return masm_; }
Register scratch0() { return r9; }
DwVfpRegister double_scratch0() { return d0; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@ -147,7 +176,7 @@ class LCodeGen BASE_EMBEDDED {
int arity,
LInstruction* instr);
void LoadPrototype(Register result, Handle<JSObject> prototype);
void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
@ -192,6 +221,9 @@ class LCodeGen BASE_EMBEDDED {
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
int arguments,
int deoptimization_index);
void RecordPosition(int position);
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
@ -237,6 +269,9 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;

84
deps/v8/src/arm/macro-assembler-arm.cc

@ -466,6 +466,25 @@ void MacroAssembler::PopSafepointRegisters() {
}
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
kDoubleSize));
for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
kDoubleSize));
PopSafepointRegisters();
}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
@ -519,6 +538,49 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
const Register scratch,
const Condition cond) {
vmrs(scratch, cond);
bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
vmsr(scratch, cond);
}
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const double src2,
const Condition cond) {
// Compare and move FPSCR flags to the normal condition flags.
VFPCompareAndLoadFlags(src1, src2, pc, cond);
}
void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Register fpscr_flags,
const Condition cond) {
// Compare and load FPSCR.
vcmp(src1, src2, cond);
vmrs(fpscr_flags, cond);
}
void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
const double src2,
const Register fpscr_flags,
const Condition cond) {
// Compare and load FPSCR.
vcmp(src1, src2, cond);
vmrs(fpscr_flags, cond);
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
@ -675,7 +737,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> code_constant,
Register code_reg,
Label* done,
InvokeFlag flag) {
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
bool definitely_matches = false;
Label regular_invoke;
@ -731,6 +794,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
if (flag == CALL_FUNCTION) {
Call(adaptor, RelocInfo::CODE_TARGET);
if (post_call_generator != NULL) post_call_generator->Generate();
b(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
@ -743,12 +807,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
post_call_generator);
if (flag == CALL_FUNCTION) {
Call(code);
if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code);
@ -782,7 +849,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag) {
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
// Contract with called JS functions requires that function is passed in r1.
ASSERT(fun.is(r1));
@ -799,7 +867,7 @@ void MacroAssembler::InvokeFunction(Register fun,
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
InvokeCode(code_reg, expected, actual, flag);
InvokeCode(code_reg, expected, actual, flag, post_call_generator);
}
@ -1669,10 +1737,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags) {
InvokeJSFlags flags,
PostCallGenerator* post_call_generator) {
GetBuiltinEntry(r2, id);
if (flags == CALL_JS) {
Call(r2);
if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flags == JUMP_JS);
Jump(r2);
@ -1795,7 +1865,7 @@ void MacroAssembler::Abort(const char* msg) {
}
#endif
// Disable stub call restrictions to always allow calls to abort.
set_allow_stub_calls(true);
AllowStubCallsScope allow_scope(this, true);
mov(r0, Operand(p0));
push(r0);

66
deps/v8/src/arm/macro-assembler-arm.h

@ -33,6 +33,9 @@
namespace v8 {
namespace internal {
// Forward declaration.
class PostCallGenerator;
// ----------------------------------------------------------------------------
// Static helper functions
@ -229,6 +232,9 @@ class MacroAssembler: public Assembler {
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
void PopSafepointRegisters();
void PushSafepointRegistersAndDoubles();
void PopSafepointRegistersAndDoubles();
static int SafepointRegisterStackIndex(int reg_code);
// Load two consecutive registers with two consecutive memory locations.
@ -243,6 +249,30 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
// Clear specified FPSCR bits.
void ClearFPSCRBits(const uint32_t bits_to_clear,
const Register scratch,
const Condition cond = al);
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
void VFPCompareAndSetFlags(const DwVfpRegister src1,
const double src2,
const Condition cond = al);
// Compare double values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Register fpscr_flags,
const Condition cond = al);
void VFPCompareAndLoadFlags(const DwVfpRegister src1,
const double src2,
const Register fpscr_flags,
const Condition cond = al);
// ---------------------------------------------------------------------------
// Activation frames
@ -281,7 +311,8 @@ class MacroAssembler: public Assembler {
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag);
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
@ -293,7 +324,8 @@ class MacroAssembler: public Assembler {
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag);
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
@ -379,12 +411,13 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Allocation support
// Allocate an object in new space. The object_size is specified in words (not
// bytes). If the new space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as as a heap object. All
// registers are clobbered also when control continues at the gc_required
// label.
// Allocate an object in new space. The object_size is specified
// either in bytes or in words if the allocation flag SIZE_IN_WORDS
// is passed. If the new space is exhausted control continues at the
// gc_required label. The allocated object is returned in result. If
// the flag tag_allocated_object is true the result is tagged as as
// a heap object. All registers are clobbered also when control
// continues at the gc_required label.
void AllocateInNewSpace(int object_size,
Register result,
Register scratch1,
@ -633,7 +666,9 @@ class MacroAssembler: public Assembler {
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
PostCallGenerator* post_call_generator = NULL);
// Store the code object for the given builtin in the target register and
// setup the function in r1.
@ -684,6 +719,16 @@ class MacroAssembler: public Assembler {
add(reg, reg, Operand(reg), s);
}
// Try to convert int32 to smi. If the value is to large, preserve
// the original value and jump to not_a_smi. Destroys scratch and
// sets flags.
void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
mov(scratch, reg);
SmiTag(scratch, SetCC);
b(vs, not_a_smi);
mov(reg, scratch);
}
void SmiUntag(Register reg) {
mov(reg, Operand(reg, ASR, kSmiTagSize));
}
@ -741,7 +786,8 @@ class MacroAssembler: public Assembler {
Handle<Code> code_constant,
Register code_reg,
Label* done,
InvokeFlag flag);
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
// Activation support.
void EnterFrame(StackFrame::Type type);

10
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -417,8 +417,8 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
int reg2,
Label* on_not_equal) {
int reg2,
Label* on_not_equal) {
__ ldr(r0, register_location(reg1));
__ ldr(r1, register_location(reg2));
__ cmp(r0, r1);
@ -426,7 +426,7 @@ void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
}
void RegExpMacroAssemblerARM::CheckNotCharacter(uint32_t c,
void RegExpMacroAssemblerARM::CheckNotCharacter(unsigned c,
Label* on_not_equal) {
__ cmp(current_character(), Operand(c));
BranchOrBacktrack(ne, on_not_equal);
@ -442,8 +442,8 @@ void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
}
void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal) {
__ and_(r0, current_character(), Operand(mask));
__ cmp(r0, Operand(c));

12
deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -50,9 +50,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void Backtrack();
virtual void Bind(Label* label);
virtual void CheckAtStart(Label* on_at_start);
virtual void CheckCharacter(uint32_t c, Label* on_equal);
virtual void CheckCharacterAfterAnd(uint32_t c,
uint32_t mask,
virtual void CheckCharacter(unsigned c, Label* on_equal);
virtual void CheckCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_equal);
virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
virtual void CheckCharacterLT(uc16 limit, Label* on_less);
@ -68,9 +68,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
Label* on_no_match);
virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
uint32_t mask,
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
unsigned mask,
Label* on_not_equal);
virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
uc16 minus,

12
deps/v8/src/arm/simulator-arm.cc

@ -2600,11 +2600,6 @@ void Simulator::DecodeVCMP(Instr* instr) {
precision = kDoublePrecision;
}
if (instr->Bit(7) != 0) {
// Raising exceptions for quiet NaNs are not supported.
UNIMPLEMENTED(); // Not used by V8.
}
int d = instr->VFPDRegCode(precision);
int m = 0;
if (instr->Opc2Field() == 0x4) {
@ -2618,6 +2613,13 @@ void Simulator::DecodeVCMP(Instr* instr) {
dm_value = get_double_from_d_register(m);
}
// Raise exceptions for quiet NaNs if necessary.
if (instr->Bit(7) == 1) {
if (isnan(dd_value)) {
inv_op_vfp_flag_ = true;
}
}
Compute_FPSCR_Flags(dd_value, dm_value);
} else {
UNIMPLEMENTED(); // Not used by V8.

6
deps/v8/src/arm/stub-cache-arm.cc

@ -1952,7 +1952,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ cmp(r7, Operand(HeapNumber::kMantissaBits));
// If greater or equal, the argument is already round and in r0.
__ b(&restore_fpscr_and_return, ge);
__ b(&slow);
__ b(&wont_fit_smi);
__ bind(&no_vfp_exception);
// Move the result back to general purpose register r0.
@ -1965,7 +1965,7 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
// Check for -0.
__ cmp(r0, Operand(0));
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ b(&restore_fpscr_and_return, ne);
// r5 already holds the HeapNumber exponent.
__ tst(r5, Operand(HeapNumber::kSignMask));
@ -1980,10 +1980,10 @@ MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
__ Ret();
__ bind(&wont_fit_smi);
__ bind(&slow);
// Restore FPCSR and fall to slow case.
__ vmsr(r3);
__ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ InvokeFunction(function, arguments(), JUMP_FUNCTION);

50
deps/v8/src/array.js

@ -117,41 +117,59 @@ function Join(array, length, separator, convert) {
// Fast case for one-element arrays.
if (length == 1) {
var e = array[0];
if (!IS_UNDEFINED(e) || (0 in array)) {
if (IS_STRING(e)) return e;
return convert(e);
}
if (IS_STRING(e)) return e;
return convert(e);
}
// Construct an array for the elements.
var elements;
var elements_length = 0;
var elements = new $Array(length);
// We pull the empty separator check outside the loop for speed!
if (separator.length == 0) {
elements = new $Array(length);
var elements_length = 0;
for (var i = 0; i < length; i++) {
var e = array[i];
if (!IS_UNDEFINED(e) || (i in array)) {
if (!IS_UNDEFINED(e)) {
if (!IS_STRING(e)) e = convert(e);
elements[elements_length++] = e;
}
}
elements.length = elements_length;
var result = %_FastAsciiArrayJoin(elements, '');
if (!IS_UNDEFINED(result)) return result;
return %StringBuilderConcat(elements, elements_length, '');
}
// Non-empty separator case.
// If the first element is a number then use the heuristic that the
// remaining elements are also likely to be numbers.
if (!IS_NUMBER(array[0])) {
for (var i = 0; i < length; i++) {
var e = array[i];
if (!IS_STRING(e)) e = convert(e);
elements[i] = e;
}
} else {
elements = new $Array(length << 1);
for (var i = 0; i < length; i++) {
var e = array[i];
if (i != 0) elements[elements_length++] = separator;
if (!IS_UNDEFINED(e) || (i in array)) {
if (IS_NUMBER(e)) elements[i] = %_NumberToString(e);
else {
if (!IS_STRING(e)) e = convert(e);
elements[elements_length++] = e;
elements[i] = e;
}
}
}
elements.length = elements_length;
var result = %_FastAsciiArrayJoin(elements, "");
var result = %_FastAsciiArrayJoin(elements, separator);
if (!IS_UNDEFINED(result)) return result;
return %StringBuilderConcat(elements, elements_length, '');
var length2 = (length << 1) - 1;
var j = length2;
var i = length;
elements[--j] = elements[--i];
while (i > 0) {
elements[--j] = separator;
elements[--j] = elements[--i];
}
return %StringBuilderConcat(elements, length2, '');
} finally {
// Make sure to pop the visited array no matter what happens.
if (is_array) visited_arrays.pop();
@ -160,7 +178,7 @@ function Join(array, length, separator, convert) {
function ConvertToString(x) {
if (IS_STRING(x)) return x;
// Assumes x is a non-string.
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x));

12
deps/v8/src/assembler.cc

@ -66,6 +66,7 @@ namespace internal {
const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
const double DoubleConstant::minus_zero = -0.0;
const double DoubleConstant::negative_infinity = -V8_INFINITY;
@ -647,6 +648,11 @@ ExternalReference ExternalReference::the_hole_value_location() {
}
ExternalReference ExternalReference::arguments_marker_location() {
return ExternalReference(Factory::arguments_marker().location());
}
ExternalReference ExternalReference::roots_address() {
return ExternalReference(Heap::roots_address());
}
@ -724,6 +730,12 @@ ExternalReference ExternalReference::address_of_one_half() {
}
ExternalReference ExternalReference::address_of_minus_zero() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::minus_zero)));
}
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::negative_infinity)));

5
deps/v8/src/assembler.h

@ -50,6 +50,7 @@ class DoubleConstant: public AllStatic {
public:
static const double min_int;
static const double one_half;
static const double minus_zero;
static const double negative_infinity;
};
@ -512,6 +513,9 @@ class ExternalReference BASE_EMBEDDED {
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location();
// Static variable Factory::arguments_marker.location()
static ExternalReference arguments_marker_location();
// Static variable Heap::roots_address()
static ExternalReference roots_address();
@ -552,6 +556,7 @@ class ExternalReference BASE_EMBEDDED {
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
static ExternalReference address_of_minus_zero();
static ExternalReference address_of_negative_infinity();
Address address() const {return reinterpret_cast<Address>(address_);}

34
deps/v8/src/ast.cc

@ -166,12 +166,6 @@ bool FunctionLiteral::AllowsLazyCompilation() {
}
bool FunctionLiteral::AllowOptimize() {
// We can't deal with heap-allocated locals.
return scope()->num_heap_slots() == 0;
}
ObjectLiteral::Property::Property(Literal* key, Expression* value) {
emit_store_ = true;
key_ = key;
@ -215,12 +209,16 @@ bool ObjectLiteral::Property::emit_store() {
// HashMap match callback for tables keyed by String** handle locations:
// two keys match when the underlying strings have equal contents (value
// equality, not pointer identity).
bool IsEqualString(void* first, void* second) {
ASSERT((*reinterpret_cast<String**>(first))->IsString());
ASSERT((*reinterpret_cast<String**>(second))->IsString());
// Re-wrap the raw locations as handles before dereferencing.
// NOTE(review): assumes both void*s are live handle locations (String**)
// -- confirm against the table users in ObjectLiteral::CalculateEmitStore.
Handle<String> h1(reinterpret_cast<String**>(first));
Handle<String> h2(reinterpret_cast<String**>(second));
return (*h1)->Equals(*h2);
}
bool IsEqualSmi(void* first, void* second) {
ASSERT((*reinterpret_cast<Smi**>(first))->IsSmi());
ASSERT((*reinterpret_cast<Smi**>(second))->IsSmi());
Handle<Smi> h1(reinterpret_cast<Smi**>(first));
Handle<Smi> h2(reinterpret_cast<Smi**>(second));
return (*h1)->value() == (*h2)->value();
@ -266,12 +264,12 @@ void ObjectLiteral::CalculateEmitStore() {
// If the key of a computed property is in the table, do not emit
// a store for the property later.
if (property->kind() == ObjectLiteral::Property::COMPUTED) {
if (table->Lookup(literal, hash, false) != NULL) {
if (table->Lookup(key, hash, false) != NULL) {
property->set_emit_store(false);
}
}
// Add key to the table.
table->Lookup(literal, hash, true);
table->Lookup(key, hash, true);
}
}
@ -517,6 +515,9 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
if (key()->IsPropertyName()) {
if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
is_array_length_ = true;
} else if (oracle->LoadIsBuiltin(this,
Builtins::LoadIC_FunctionPrototype)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
ASSERT(lit_key != NULL && lit_key->handle()->IsString());
@ -638,10 +639,19 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
}
#endif
if (receiver_types_ != NULL && receiver_types_->length() > 0) {
Handle<Map> type = receiver_types_->at(0);
is_monomorphic_ = oracle->CallIsMonomorphic(this);
if (is_monomorphic_) is_monomorphic_ = ComputeTarget(type, name);
is_monomorphic_ = oracle->CallIsMonomorphic(this);
check_type_ = oracle->GetCallCheckType(this);
if (is_monomorphic_) {
Handle<Map> map;
if (receiver_types_ != NULL && receiver_types_->length() > 0) {
ASSERT(check_type_ == RECEIVER_MAP_CHECK);
map = receiver_types_->at(0);
} else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK);
map = Handle<Map>(
oracle->GetPrototypeForPrimitiveCheck(check_type_)->map());
}
is_monomorphic_ = ComputeTarget(map, name);
}
}

10
deps/v8/src/ast.h

@ -1208,6 +1208,7 @@ class Property: public Expression {
is_monomorphic_(false),
receiver_types_(NULL),
is_array_length_(false),
is_function_prototype_(false),
is_arguments_access_(false) { }
DECLARE_NODE_TYPE(Property)
@ -1220,6 +1221,8 @@ class Property: public Expression {
int position() const { return pos_; }
bool is_synthetic() const { return type_ == SYNTHETIC; }
bool IsFunctionPrototype() const { return is_function_prototype_; }
// Marks that this is actually an argument rewritten to a keyed property
// accessing the argument through the arguments shadow object.
void set_is_arguments_access(bool is_arguments_access) {
@ -1249,6 +1252,7 @@ class Property: public Expression {
bool is_monomorphic_;
ZoneMapList* receiver_types_;
bool is_array_length_;
bool is_function_prototype_;
bool is_arguments_access_;
Handle<Map> monomorphic_receiver_type_;
@ -1264,6 +1268,7 @@ class Call: public Expression {
arguments_(arguments),
pos_(pos),
is_monomorphic_(false),
check_type_(RECEIVER_MAP_CHECK),
receiver_types_(NULL),
return_id_(GetNextId()) {
}
@ -1279,6 +1284,7 @@ class Call: public Expression {
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
virtual bool IsMonomorphic() { return is_monomorphic_; }
CheckType check_type() const { return check_type_; }
Handle<JSFunction> target() { return target_; }
Handle<JSObject> holder() { return holder_; }
Handle<JSGlobalPropertyCell> cell() { return cell_; }
@ -1302,6 +1308,7 @@ class Call: public Expression {
int pos_;
bool is_monomorphic_;
CheckType check_type_;
ZoneMapList* receiver_types_;
Handle<JSFunction> target_;
Handle<JSObject> holder_;
@ -1391,7 +1398,7 @@ class BinaryOperation: public Expression {
: op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
ASSERT(Token::IsBinaryOp(op));
right_id_ = (op == Token::AND || op == Token::OR)
? GetNextId()
? static_cast<int>(GetNextId())
: AstNode::kNoNumber;
}
@ -1710,7 +1717,6 @@ class FunctionLiteral: public Expression {
int num_parameters() { return num_parameters_; }
bool AllowsLazyCompilation();
bool AllowOptimize();
Handle<String> debug_name() const {
if (name_->length() > 0) return name_;

67
deps/v8/src/builtins.cc

@ -380,7 +380,7 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Object* receiver) {
if (!receiver->IsJSArray()) return NULL;
JSArray* array = JSArray::cast(receiver);
HeapObject* elms = HeapObject::cast(array->elements());
HeapObject* elms = array->elements();
if (elms->map() == Heap::fixed_array_map()) return elms;
if (elms->map() == Heap::fixed_cow_array_map()) {
return array->EnsureWritableFastElements();
@ -613,41 +613,42 @@ BUILTIN(ArraySlice) {
Object* receiver = *args.receiver();
FixedArray* elms;
int len = -1;
{ MaybeObject* maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(receiver);
Object* elms_obj;
if (maybe_elms_obj != NULL && maybe_elms_obj->ToObject(&elms_obj)) {
if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
return CallJsBuiltin("ArraySlice", args);
}
elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
ASSERT(array->HasFastElements());
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
if (!array->HasFastElements() ||
!IsJSArrayFastElementMovingAllowed(array)) {
return CallJsBuiltin("ArraySlice", args);
}
len = Smi::cast(array->length())->value();
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Treat it in C++ as well.
Map* arguments_map =
Top::context()->global_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastElements();
if (!is_arguments_object_with_fast_elements) {
elms = FixedArray::cast(array->elements());
len = Smi::cast(array->length())->value();
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
// than 50% of invocations in Web apps). Treat it in C++ as well.
Map* arguments_map =
Top::context()->global_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
receiver->IsJSObject()
&& JSObject::cast(receiver)->map() == arguments_map
&& JSObject::cast(receiver)->HasFastElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin("ArraySlice", args);
}
elms = FixedArray::cast(JSObject::cast(receiver)->elements());
Object* len_obj = JSObject::cast(receiver)
->InObjectPropertyAt(Heap::arguments_length_index);
if (!len_obj->IsSmi()) {
return CallJsBuiltin("ArraySlice", args);
}
len = Smi::cast(len_obj)->value();
if (len > elms->length()) {
return CallJsBuiltin("ArraySlice", args);
}
for (int i = 0; i < len; i++) {
if (elms->get(i) == Heap::the_hole_value()) {
return CallJsBuiltin("ArraySlice", args);
}
elms = FixedArray::cast(JSObject::cast(receiver)->elements());
len = elms->length();
#ifdef DEBUG
// Arguments object by construction should have no holes, check it.
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < len; i++) {
ASSERT(elms->get(i) != Heap::the_hole_value());
}
}
#endif
}
}
ASSERT(len >= 0);

34
deps/v8/src/code-stubs.cc

@ -49,8 +49,10 @@ bool CodeStub::FindCodeInCache(Code** code_out) {
void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
Counters::code_stubs.Increment();
// Nested stubs are not allowed for leafs.
masm->set_allow_stub_calls(AllowsStubCalls());
AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
// Generate the code for the stub.
masm->set_generating_stub(true);
Generate(masm);
@ -197,4 +199,34 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
}
// Returns a human-readable name for this instanceof stub, encoding its
// flags as suffixes (e.g. "InstanceofStub_REGS_INLINE_TRUEFALSE").
// The string is computed once and cached in name_; the buffer comes from
// Bootstrapper::AllocateAutoDeletedArray, which by its name presumably
// frees it automatically at teardown -- callers must not free it.
const char* InstanceofStub::GetName() {
// Return the cached name if it has already been built.
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
// Fall back to a static string if the allocation failed.
if (name_ == NULL) return "OOM";
// Each flag contributes one suffix; empty when the flag is not set.
const char* args = "";
if (HasArgsInRegisters()) {
args = "_REGS";
}
const char* inline_check = "";
if (HasCallSiteInlineCheck()) {
inline_check = "_INLINE";
}
const char* return_true_false_object = "";
if (ReturnTrueFalseObject()) {
return_true_false_object = "_TRUEFALSE";
}
// SNPrintF bounds the write to kMaxNameLength, so the fixed-size buffer
// cannot overflow.
OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"InstanceofStub%s%s%s",
args,
inline_check,
return_true_false_object);
return name_;
}
} } // namespace v8::internal

60
deps/v8/src/code-stubs.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -34,7 +34,7 @@ namespace v8 {
namespace internal {
// List of code stubs used on all platforms. The order in this list is important
// as only the stubs up to and including RecordWrite allows nested stub calls.
// as only the stubs up to and including Instanceof allows nested stub calls.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
V(GenericBinaryOp) \
@ -48,7 +48,7 @@ namespace internal {
V(CompareIC) \
V(MathPow) \
V(TranscendentalCache) \
V(RecordWrite) \
V(Instanceof) \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(IntegerMod) \
@ -59,7 +59,6 @@ namespace internal {
V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
V(Instanceof) \
V(CounterOp) \
V(ArgumentsAccess) \
V(RegExpExec) \
@ -180,7 +179,7 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
bool AllowsStubCalls() { return MajorKey() <= RecordWrite; }
bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
@ -327,22 +326,38 @@ class InstanceofStub: public CodeStub {
public:
enum Flags {
kNoFlags = 0,
kArgsInRegisters = 1 << 0
kArgsInRegisters = 1 << 0,
kCallSiteInlineCheck = 1 << 1,
kReturnTrueFalseObject = 1 << 2
};
explicit InstanceofStub(Flags flags) : flags_(flags) { }
explicit InstanceofStub(Flags flags) : flags_(flags), name_(NULL) { }
static Register left();
static Register right();
void Generate(MacroAssembler* masm);
private:
Major MajorKey() { return Instanceof; }
int MinorKey() { return args_in_registers() ? 1 : 0; }
int MinorKey() { return static_cast<int>(flags_); }
bool args_in_registers() {
bool HasArgsInRegisters() const {
return (flags_ & kArgsInRegisters) != 0;
}
bool HasCallSiteInlineCheck() const {
return (flags_ & kCallSiteInlineCheck) != 0;
}
bool ReturnTrueFalseObject() const {
return (flags_ & kReturnTrueFalseObject) != 0;
}
const char* GetName();
Flags flags_;
char* name_;
};
@ -707,6 +722,10 @@ class CallFunctionStub: public CodeStub {
void Generate(MacroAssembler* masm);
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
private:
int argc_;
InLoopFlag in_loop_;
@ -738,11 +757,6 @@ class CallFunctionStub: public CodeStub {
bool ReceiverMightBeValue() {
return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
}
public:
static int ExtractArgcFromMinorKey(int minor_key) {
return ArgcBits::decode(minor_key);
}
};
@ -902,6 +916,24 @@ class StringCharAtGenerator {
DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
};
class AllowStubCallsScope {
public:
AllowStubCallsScope(MacroAssembler* masm, bool allow)
: masm_(masm), previous_allow_(masm->allow_stub_calls()) {
masm_->set_allow_stub_calls(allow);
}
~AllowStubCallsScope() {
masm_->set_allow_stub_calls(previous_allow_);
}
private:
MacroAssembler* masm_;
bool previous_allow_;
DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
};
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_

23
deps/v8/src/compiler.cc

@ -92,6 +92,25 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
}
void CompilationInfo::DisableOptimization() {
if (FLAG_optimize_closures) {
// If we allow closures optimizations and it's an optimizable closure
// mark it correspondingly.
bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
if (is_closure) {
bool is_optimizable_closure =
!scope_->outer_scope_calls_eval() && !scope_->inside_with();
if (is_optimizable_closure) {
SetMode(BASE);
return;
}
}
}
SetMode(NONOPT);
}
// Determine whether to use the full compiler for all code. If the flag
// --always-full-compiler is specified this is the case. For the virtual frame
// based compiler the full compiler is also used if a debugger is connected, as
@ -262,7 +281,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
HTracer::Instance()->TraceCompilation(info->function());
}
TypeFeedbackOracle oracle(Handle<Code>(info->shared_info()->code()));
TypeFeedbackOracle oracle(
Handle<Code>(info->shared_info()->code()),
Handle<Context>(info->closure()->context()->global_context()));
HGraphBuilder builder(&oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(info);

6
deps/v8/src/compiler.h

@ -114,7 +114,7 @@ class CompilationInfo BASE_EMBEDDED {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
}
void DisableOptimization() { SetMode(NONOPT); }
void DisableOptimization();
// Deoptimization support.
bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
@ -125,9 +125,7 @@ class CompilationInfo BASE_EMBEDDED {
// Determine whether or not we can adaptively optimize.
bool AllowOptimize() {
return V8::UseCrankshaft() &&
!closure_.is_null() &&
function_->AllowOptimize();
return V8::UseCrankshaft() && !closure_.is_null();
}
private:

3
deps/v8/src/cpu-profiler.cc

@ -47,7 +47,8 @@ static const int kTickSamplesBufferChunksCount = 16;
ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
: generator_(generator),
: Thread("v8:ProfEvntProc"),
generator_(generator),
running_(true),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,

19
deps/v8/src/d8-debug.cc

@ -34,12 +34,21 @@
namespace v8 {
void PrintPrompt() {
printf("dbg> ");
static bool was_running = true;
void PrintPrompt(bool is_running) {
const char* prompt = is_running? "> " : "dbg> ";
was_running = is_running;
printf("%s", prompt);
fflush(stdout);
}
void PrintPrompt() {
PrintPrompt(was_running);
}
void HandleDebugEvent(DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
@ -91,7 +100,7 @@ void HandleDebugEvent(DebugEvent event,
bool running = false;
while (!running) {
char command[kBufferSize];
PrintPrompt();
PrintPrompt(running);
char* str = fgets(command, kBufferSize, stdin);
if (str == NULL) break;
@ -284,7 +293,9 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
} else {
printf("???\n");
}
PrintPrompt();
bool is_running = details->Get(String::New("running"))->ToBoolean()->Value();
PrintPrompt(is_running);
}

6
deps/v8/src/d8-debug.h

@ -98,7 +98,8 @@ class RemoteDebugger {
class ReceiverThread: public i::Thread {
public:
explicit ReceiverThread(RemoteDebugger* remote_debugger)
: remote_debugger_(remote_debugger) {}
: Thread("d8:ReceiverThrd"),
remote_debugger_(remote_debugger) {}
~ReceiverThread() {}
void Run();
@ -112,7 +113,8 @@ class ReceiverThread: public i::Thread {
class KeyboardThread: public i::Thread {
public:
explicit KeyboardThread(RemoteDebugger* remote_debugger)
: remote_debugger_(remote_debugger) {}
: Thread("d8:KeyboardThrd"),
remote_debugger_(remote_debugger) {}
~KeyboardThread() {}
void Run();

3
deps/v8/src/d8.cc

@ -599,7 +599,8 @@ void Shell::RunShell() {
class ShellThread : public i::Thread {
public:
ShellThread(int no, i::Vector<const char> files)
: no_(no), files_(files) { }
: Thread("d8:ShellThread"),
no_(no), files_(files) { }
virtual void Run();
private:
int no_;

611
deps/v8/src/d8.js

@ -110,17 +110,32 @@ Debug.ScopeType = { Global: 0,
const kNoFrame = -1;
Debug.State = {
currentFrame: kNoFrame,
displaySourceStartLine: -1,
displaySourceEndLine: -1,
currentSourceLine: -1
}
var trace_compile = false; // Tracing all compile events?
var trace_debug_json = false; // Tracing all debug json packets?
var last_cmd_line = '';
var repeat_cmd_line = '';
var is_running = true;
// Copied from debug-delay.js. This is needed below:
function ScriptTypeFlag(type) {
return (1 << type);
}
// Process a debugger JSON message into a display text and a running status.
// This function returns an object with properties "text" and "running" holding
// this information.
function DebugMessageDetails(message) {
if (trace_debug_json) {
print("received: '" + message + "'");
}
// Convert the JSON string to an object.
var response = new ProtocolPackage(message);
is_running = response.running();
if (response.type() == 'event') {
return DebugEventDetails(response);
@ -161,6 +176,8 @@ function DebugEventDetails(response) {
result += '\n';
result += SourceUnderline(body.sourceLineText, body.sourceColumn);
Debug.State.currentSourceLine = body.sourceLine;
Debug.State.displaySourceStartLine = -1;
Debug.State.displaySourceEndLine = -1;
Debug.State.currentFrame = 0;
details.text = result;
break;
@ -180,10 +197,14 @@ function DebugEventDetails(response) {
result += '\n';
result += SourceUnderline(body.sourceLineText, body.sourceColumn);
Debug.State.currentSourceLine = body.sourceLine;
Debug.State.displaySourceStartLine = -1;
Debug.State.displaySourceEndLine = -1;
Debug.State.currentFrame = 0;
} else {
result += ' (empty stack)';
Debug.State.currentSourceLine = -1;
Debug.State.displaySourceStartLine = -1;
Debug.State.displaySourceEndLine = -1;
Debug.State.currentFrame = kNoFrame;
}
details.text = result;
@ -202,6 +223,10 @@ function DebugEventDetails(response) {
details.text = result;
break;
case 'scriptCollected':
details.text = result;
break;
default:
details.text = 'Unknown debug event ' + response.event();
}
@ -254,7 +279,11 @@ function SourceUnderline(source_text, position) {
// Converts a text command to a JSON request.
function DebugCommandToJSONRequest(cmd_line) {
return new DebugRequest(cmd_line).JSONRequest();
var result = new DebugRequest(cmd_line).JSONRequest();
if (trace_debug_json && result) {
print("sending: '" + result + "'");
}
return result;
};
@ -266,6 +295,20 @@ function DebugRequest(cmd_line) {
return;
}
// Check for a simple carriage return to repeat the last command:
var is_repeating = false;
if (cmd_line == '\n') {
if (is_running) {
cmd_line = 'break'; // Not in debugger mode, break with a frame request.
} else {
cmd_line = repeat_cmd_line; // use command to repeat.
is_repeating = true;
}
}
if (!is_running) { // Only save the command if in debugger mode.
repeat_cmd_line = cmd_line; // save last command.
}
// Trim string for leading and trailing whitespace.
cmd_line = cmd_line.replace(/^\s+|\s+$/g, '');
@ -281,6 +324,13 @@ function DebugRequest(cmd_line) {
args = cmd_line.slice(pos).replace(/^\s+|\s+$/g, '');
}
if ((cmd === undefined) || !cmd) {
this.request_ = void 0;
return;
}
last_cmd = cmd;
// Switch on command.
switch (cmd) {
case 'continue':
@ -290,7 +340,22 @@ function DebugRequest(cmd_line) {
case 'step':
case 's':
this.request_ = this.stepCommandToJSONRequest_(args);
this.request_ = this.stepCommandToJSONRequest_(args, 'in');
break;
case 'stepi':
case 'si':
this.request_ = this.stepCommandToJSONRequest_(args, 'min');
break;
case 'next':
case 'n':
this.request_ = this.stepCommandToJSONRequest_(args, 'next');
break;
case 'finish':
case 'fin':
this.request_ = this.stepCommandToJSONRequest_(args, 'out');
break;
case 'backtrace':
@ -311,6 +376,26 @@ function DebugRequest(cmd_line) {
this.request_ = this.scopeCommandToJSONRequest_(args);
break;
case 'disconnect':
case 'exit':
case 'quit':
this.request_ = this.disconnectCommandToJSONRequest_(args);
break;
case 'up':
this.request_ =
this.frameCommandToJSONRequest_('' +
(Debug.State.currentFrame + 1));
break;
case 'down':
case 'do':
this.request_ =
this.frameCommandToJSONRequest_('' +
(Debug.State.currentFrame - 1));
break;
case 'set':
case 'print':
case 'p':
this.request_ = this.printCommandToJSONRequest_(args);
@ -328,11 +413,17 @@ function DebugRequest(cmd_line) {
this.request_ = this.instancesCommandToJSONRequest_(args);
break;
case 'list':
case 'l':
this.request_ = this.listCommandToJSONRequest_(args);
break;
case 'source':
this.request_ = this.sourceCommandToJSONRequest_(args);
break;
case 'scripts':
case 'script':
case 'scr':
this.request_ = this.scriptsCommandToJSONRequest_(args);
break;
@ -347,6 +438,8 @@ function DebugRequest(cmd_line) {
break;
case 'clear':
case 'delete':
case 'd':
this.request_ = this.clearCommandToJSONRequest_(args);
break;
@ -354,7 +447,42 @@ function DebugRequest(cmd_line) {
this.request_ = this.threadsCommandToJSONRequest_(args);
break;
case 'cond':
this.request_ = this.changeBreakpointCommandToJSONRequest_(args, 'cond');
break;
case 'enable':
case 'en':
this.request_ =
this.changeBreakpointCommandToJSONRequest_(args, 'enable');
break;
case 'disable':
case 'dis':
this.request_ =
this.changeBreakpointCommandToJSONRequest_(args, 'disable');
break;
case 'ignore':
this.request_ =
this.changeBreakpointCommandToJSONRequest_(args, 'ignore');
break;
case 'info':
case 'inf':
this.request_ = this.infoCommandToJSONRequest_(args);
break;
case 'flags':
this.request_ = this.v8FlagsToJSONRequest_(args);
break;
case 'gc':
this.request_ = this.gcToJSONRequest_(args);
break;
case 'trace':
case 'tr':
// Return undefined to indicate command handled internally (no JSON).
this.request_ = void 0;
this.traceCommand_(args);
@ -370,8 +498,6 @@ function DebugRequest(cmd_line) {
default:
throw new Error('Unknown command "' + cmd + '"');
}
last_cmd = cmd;
}
DebugRequest.prototype.JSONRequest = function() {
@ -465,59 +591,73 @@ DebugRequest.prototype.continueCommandToJSONRequest_ = function(args) {
// Create a JSON request for the step command.
DebugRequest.prototype.stepCommandToJSONRequest_ = function(args) {
DebugRequest.prototype.stepCommandToJSONRequest_ = function(args, type) {
// Requesting a step is through the continue command with additional
// arguments.
var request = this.createRequest('continue');
request.arguments = {};
// Process arguments if any.
// Only process args if the command is 'step' which is indicated by type being
// set to 'in'. For all other commands, ignore the args.
if (args && args.length > 0) {
args = args.split(/\s*[ ]+\s*/g);
args = args.split(/\s+/g);
if (args.length > 2) {
throw new Error('Invalid step arguments.');
}
if (args.length > 0) {
// Get step count argument if any.
if (args.length == 2) {
var stepcount = parseInt(args[1]);
if (isNaN(stepcount) || stepcount <= 0) {
throw new Error('Invalid step count argument "' + args[0] + '".');
// Check if we have a gdb-style step command. If so, the 1st arg would
// be the step count. If it's not a number, then assume that we're
// parsing for the legacy v8 step command.
var stepcount = Number(args[0]);
if (stepcount == Number.NaN) {
// No step count at arg 1. Process as legacy d8 step command:
if (args.length == 2) {
var stepcount = parseInt(args[1]);
if (isNaN(stepcount) || stepcount <= 0) {
throw new Error('Invalid step count argument "' + args[0] + '".');
}
request.arguments.stepcount = stepcount;
}
request.arguments.stepcount = stepcount;
}
// Get the step action.
switch (args[0]) {
case 'in':
case 'i':
request.arguments.stepaction = 'in';
break;
// Get the step action.
switch (args[0]) {
case 'in':
case 'i':
request.arguments.stepaction = 'in';
break;
case 'min':
case 'm':
request.arguments.stepaction = 'min';
break;
case 'min':
case 'm':
request.arguments.stepaction = 'min';
break;
case 'next':
case 'n':
request.arguments.stepaction = 'next';
break;
case 'next':
case 'n':
request.arguments.stepaction = 'next';
break;
case 'out':
case 'o':
request.arguments.stepaction = 'out';
break;
case 'out':
case 'o':
request.arguments.stepaction = 'out';
break;
default:
throw new Error('Invalid step argument "' + args[0] + '".');
default:
throw new Error('Invalid step argument "' + args[0] + '".');
}
} else {
// gdb style step commands:
request.arguments.stepaction = type;
request.arguments.stepcount = stepcount;
}
}
} else {
// Default is step next.
request.arguments.stepaction = 'next';
// Default is step of the specified type.
request.arguments.stepaction = type;
}
return request.toJSONProtocol();
@ -648,6 +788,41 @@ DebugRequest.prototype.instancesCommandToJSONRequest_ = function(args) {
};
// Create a JSON request for the list command.  With no arguments this pages
// forward ten lines from the last listed location; 'list -' pages backwards;
// 'list <from>,<to>' lists an explicit inclusive line range.  The chosen
// range is remembered in Debug.State so consecutive 'list' commands continue
// where the previous one stopped.
DebugRequest.prototype.listCommandToJSONRequest_ = function(args) {
  // First 'list' after a break: seed the paging state around the current
  // source line (defaults to ten lines starting five lines before it).
  if (Debug.State.displaySourceEndLine == -1) {
    // If we list forwards, we will start listing after the last source end
    // line. Set it to start from 5 lines before the current location.
    Debug.State.displaySourceEndLine = Debug.State.currentSourceLine - 5;
    // If we list backwards, we will start listing backwards from the last
    // source start line. Set it to start from 1 line past the current
    // location.
    Debug.State.displaySourceStartLine = Debug.State.currentSourceLine + 1;
  }

  var from = Debug.State.displaySourceEndLine + 1;
  var lines = 10;

  // Parse the arguments.
  args = args.split(/\s*,\s*/g);
  if (args.length == 1 && args[0] == '') {
    // No arguments: keep the forward-paging defaults computed above.
  } else if ((args.length == 1) && (args[0] == '-')) {
    from = Debug.State.displaySourceStartLine - lines;
  } else if (args.length == 2) {
    // Explicit range; radix 10 so e.g. '010' is not parsed as octal.
    from = parseInt(args[0], 10);
    lines = parseInt(args[1], 10) - from + 1;  // Inclusive of the ending line.
    // Reject non-numeric bounds instead of emitting a 'NaN NaN' request.
    if (isNaN(from) || isNaN(lines)) {
      throw new Error('Invalid list arguments.');
    }
  } else {
    throw new Error('Invalid list arguments.');
  }
  Debug.State.displaySourceStartLine = from;
  Debug.State.displaySourceEndLine = from + lines - 1;
  var sourceArgs = '' + from + ' ' + lines;
  return this.sourceCommandToJSONRequest_(sourceArgs);
};
// Create a JSON request for the source command.
DebugRequest.prototype.sourceCommandToJSONRequest_ = function(args) {
// Build a evaluate request from the text command.
@ -709,7 +884,10 @@ DebugRequest.prototype.scriptsCommandToJSONRequest_ = function(args) {
break;
default:
throw new Error('Invalid argument "' + args[0] + '".');
// If the arg is not one of the known ones above, then it must be a
// filter used for filtering the results:
request.arguments.filter = args[0];
break;
}
}
@ -731,6 +909,8 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
var request = this.createRequest('setbreakpoint');
// Break the args into target spec and condition if appropriate.
// Check for breakpoint condition.
pos = args.indexOf(' ');
if (pos > 0) {
@ -801,6 +981,178 @@ DebugRequest.prototype.clearCommandToJSONRequest_ = function(args) {
};
// Create a JSON request for the commands that modify an existing breakpoint:
// 'cond', 'enable', 'disable' and 'ignore'.  'enable'/'disable' may instead
// target exception breaks, e.g. 'enable exceptions uncaught' or
// 'disable all exceptions', which produce a setexceptionbreak request.
DebugRequest.prototype.changeBreakpointCommandToJSONRequest_ =
    function(args, command) {
  var request;

  // Check for exception breaks first:
  //   en[able]  exc[eptions] [all|unc[aught]]
  //   en[able]  [all|unc[aught]] exc[eptions]
  //   dis[able] exc[eptions] [all|unc[aught]]
  //   dis[able] [all|unc[aught]] exc[eptions]
  if ((command == 'enable' || command == 'disable') &&
      args && args.length > 1) {
    var nextPos = args.indexOf(' ');
    var arg1 = (nextPos > 0) ? args.substring(0, nextPos) : args;
    var excType = null;

    // Check for:
    //   en[able]  exc[eptions] [all|unc[aught]]
    //   dis[able] exc[eptions] [all|unc[aught]]
    if (arg1 == 'exc' || arg1 == 'exception' || arg1 == 'exceptions') {
      var arg2 = (nextPos > 0) ?
          args.substring(nextPos + 1, args.length) : 'all';
      if (!arg2) {
        arg2 = 'all';  // If unspecified, set for all exception types.
      }
      if (arg2 == 'unc') {  // Expand the shortcut.
        arg2 = 'uncaught';
      }
      excType = arg2;

    // Check for:
    //   en[able]  [all|unc[aught]] exc[eptions]
    //   dis[able] [all|unc[aught]] exc[eptions]
    } else if (arg1 == 'all' || arg1 == 'unc' || arg1 == 'uncaught') {
      var arg2 = (nextPos > 0) ?
          args.substring(nextPos + 1, args.length) : null;
      // BUG FIX: the original tested arg1 for 'exception'/'exceptions',
      // which can never match inside this branch; test arg2 instead so
      // e.g. 'enable all exceptions' is recognized.
      if (arg2 == 'exc' || arg2 == 'exception' || arg2 == 'exceptions') {
        excType = arg1;
        if (excType == 'unc') {
          excType = 'uncaught';
        }
      }
    }

    // If we matched one of the command formats, then excType will be non-null:
    if (excType) {
      // Build a setexceptionbreak request from the text command.
      request = this.createRequest('setexceptionbreak');
      request.arguments = {};
      request.arguments.type = excType;
      request.arguments.enabled = (command == 'enable');
      return request.toJSONProtocol();
    }
  }

  // Build a changebreakpoint request from the text command.
  request = this.createRequest('changebreakpoint');

  // Process arguments if any.
  if (args && args.length > 0) {
    request.arguments = {};
    var pos = args.indexOf(' ');
    var breakpointArg = args;
    var otherArgs;
    if (pos > 0) {
      breakpointArg = args.substring(0, pos);
      otherArgs = args.substring(pos + 1, args.length);
    }

    // Radix 10 so breakpoint ids with leading zeros are not read as octal.
    request.arguments.breakpoint = parseInt(breakpointArg, 10);

    switch (command) {
      case 'cond':
        request.arguments.condition = otherArgs ? otherArgs : null;
        break;
      case 'enable':
        request.arguments.enabled = true;
        break;
      case 'disable':
        request.arguments.enabled = false;
        break;
      case 'ignore':
        request.arguments.ignoreCount = parseInt(otherArgs, 10);
        break;
      default:
        throw new Error('Invalid arguments.');
    }
  } else {
    throw new Error('Invalid arguments.');
  }

  return request.toJSONProtocol();
};
// Create a JSON request for the disconnect command.
// The disconnect request carries no arguments.
DebugRequest.prototype.disconnectCommandToJSONRequest_ = function(args) {
  return this.createRequest('disconnect').toJSONProtocol();
};
// Create a JSON request for the info command.
// Supported sub-commands: break/br, locals/lo, args/ar.  The response
// handler uses the global last_cmd to decide how to render the result.
DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) {
  var request;
  switch (args) {
    case 'break':
    case 'br':
      // List the breakpoints currently in use.
      request = this.createRequest('listbreakpoints');
      last_cmd = 'info break';
      break;
    case 'locals':
    case 'lo':
      // Locals are extracted from a frame response.
      request = this.createRequest('frame');
      last_cmd = 'info locals';
      break;
    case 'args':
    case 'ar':
      // Arguments are extracted from a frame response.
      request = this.createRequest('frame');
      last_cmd = 'info args';
      break;
    default:
      throw new Error('Invalid info arguments.');
  }
  return request.toJSONProtocol();
};
// Create a JSON request for the v8flags command.  The raw argument string
// is passed through unchanged for the VM to parse.
DebugRequest.prototype.v8FlagsToJSONRequest_ = function(args) {
  var request = this.createRequest('v8flags');
  request.arguments = { flags: args };
  return request.toJSONProtocol();
};
// Create a JSON request for the gc command.  The first word of the
// arguments selects the collection type; 'young'/'old' are aliases for
// 'quick'/'full'.  Defaults to 'all' when no arguments are given.
DebugRequest.prototype.gcToJSONRequest_ = function(args) {
  var tokens = (args ? args : 'all').split(/\s+/g);
  var cmd = tokens[0];

  var knownTypes = ['all', 'quick', 'full', 'young', 'old',
                    'compact', 'sweep', 'scavenge'];
  if (knownTypes.indexOf(cmd) < 0) {
    throw new Error('Missing arguments after ' + cmd + '.');
  }

  // Map the aliases onto the type names the backend understands.
  if (cmd == 'young') {
    cmd = 'quick';
  } else if (cmd == 'old') {
    cmd = 'full';
  }

  var request = this.createRequest('gc');
  request.arguments = {};
  request.arguments.type = cmd;
  return request.toJSONProtocol();
};
// Create a JSON request for the threads command.
DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
// Build a threads request from the text command.
@ -816,6 +1168,10 @@ DebugRequest.prototype.traceCommand_ = function(args) {
if (args == 'compile') {
trace_compile = !trace_compile;
print('Tracing of compiled scripts ' + (trace_compile ? 'on' : 'off'));
} else if (args === 'debug json' || args === 'json' || args === 'packets') {
trace_debug_json = !trace_debug_json;
print('Tracing of debug json packets ' +
(trace_debug_json ? 'on' : 'off'));
} else {
throw new Error('Invalid trace arguments.');
}
@ -831,24 +1187,63 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('warning: arguments to \'help\' are ignored');
}
print('break');
print('break location [condition]');
print(' break on named function: location is a function name');
print(' break on function: location is #<id>#');
print(' break on script position: location is name:line[:column]');
print('clear <breakpoint #>');
print('backtrace [n] | [-n] | [from to]');
print('frame <frame #>');
print('Note: <> denotes symbollic values to be replaced with real values.');
print('Note: [] denotes optional parts of commands, or optional options / arguments.');
print(' e.g. d[elete] - you get the same command if you type d or delete.');
print('');
print('[break] - break as soon as possible');
print('b[reak] location [condition]');
print(' - break on named function: location is a function name');
print(' - break on function: location is #<id>#');
print(' - break on script position: location is name:line[:column]');
print('');
print('clear <breakpoint #> - deletes the specified user defined breakpoint');
print('d[elete] <breakpoint #> - deletes the specified user defined breakpoint');
print('dis[able] <breakpoint #> - disables the specified user defined breakpoint');
print('dis[able] exc[eptions] [[all] | unc[aught]]');
print(' - disables breaking on exceptions');
print('en[able] <breakpoint #> - enables the specified user defined breakpoint');
print('en[able] exc[eptions] [[all] | unc[aught]]');
print(' - enables breaking on exceptions');
print('');
print('b[ack]t[race] [n] | [-n] | [from to]');
print(' - prints the stack back trace');
print('f[rame] - prints info about the current frame context');
print('f[rame] <frame #> - set context to specified frame #');
print('scopes');
print('scope <scope #>');
print('');
print('up - set context to caller of current frame');
print('do[wn] - set context to callee of current frame');
print('inf[o] br[eak] - prints info about breakpoints in use');
print('inf[o] ar[gs] - prints info about arguments of the current function');
print('inf[o] lo[cals] - prints info about locals in the current function');
print('inf[o] liveobjectlist|lol - same as \'lol info\'');
print('');
print('step [in | next | out| min [step count]]');
print('print <expression>');
print('dir <expression>');
print('c[ontinue] - continue executing after a breakpoint');
print('s[tep] [<N>] - step into the next N callees (default N is 1)');
print('s[tep]i [<N>] - step into the next N callees (default N is 1)');
print('n[ext] [<N>] - step over the next N callees (default N is 1)');
print('fin[ish] [<N>] - step out of N frames (default N is 1)');
print('');
print('p[rint] <expression> - prints the result of the specified expression');
print('dir <expression> - prints the object structure of the result');
print('set <var> = <expression> - executes the specified statement');
print('');
print('l[ist] - list the source code around for the current pc');
print('l[ist] [- | <start>,<end>] - list the specified range of source code');
print('source [from line [num lines]]');
print('scripts');
print('continue');
print('scr[ipts] [native|extensions|all]');
print('scr[ipts] [<filter text>] - list scripts with the specified text in its description');
print('');
print('gc - runs the garbage collector');
print('');
print('trace compile');
print('help');
// hidden command: trace debug json - toggles tracing of debug json packets
print('');
print('disconnect|exit|quit - disconnects and quits the debugger');
print('help - prints this help information');
}
@ -930,6 +1325,27 @@ function formatScope_(scope) {
}
// Look up the protocol value for 'handle' in 'protocolPackage' and render
// it as display text: strings are quoted, other primitives use their text
// representation, and objects are expanded via formatObject_.
function refObjectToString_(protocolPackage, handle) {
  var value = protocolPackage.lookup(handle);
  if (value.isString()) {
    return '"' + value.value() + '"';
  }
  if (value.isPrimitive()) {
    return value.valueString();
  }
  if (value.isObject()) {
    return formatObject_(value, true);
  }
  // Unknown value kinds render as the empty string.
  return '';
}
// Rounds number 'num' to 'length' decimal places by scaling, rounding to
// the nearest integer, and scaling back.
function roundNumber(num, length) {
  var scale = Math.pow(10, length);
  return Math.round(num * scale) / scale;
}
// Convert a JSON response to text for display in a text based debugger.
function DebugResponseDetails(response) {
details = {text:'', running:false}
@ -962,6 +1378,11 @@ function DebugResponseDetails(response) {
details.text = result;
break;
case 'changebreakpoint':
result = 'successfully changed breakpoint';
details.text = result;
break;
case 'listbreakpoints':
result = 'breakpoints: (' + body.breakpoints.length + ')';
for (var i = 0; i < body.breakpoints.length; i++) {
@ -974,9 +1395,9 @@ function DebugResponseDetails(response) {
if (breakpoint.script_name) {
result += ' script_name=' + breakpoint.script_name;
}
result += ' line=' + breakpoint.line;
result += ' line=' + (breakpoint.line + 1);
if (breakpoint.column != null) {
result += ' column=' + breakpoint.column;
result += ' column=' + (breakpoint.column + 1);
}
if (breakpoint.groupId) {
result += ' groupId=' + breakpoint.groupId;
@ -992,6 +1413,24 @@ function DebugResponseDetails(response) {
}
result += ' hit_count=' + breakpoint.hit_count;
}
if (body.breakpoints.length === 0) {
result = "No user defined breakpoints\n";
} else {
result += '\n';
}
if (body.breakOnExceptions) {
result += '* breaking on ALL exceptions is enabled\n';
} else if (body.breakOnUncaughtExceptions) {
result += '* breaking on UNCAUGHT exceptions is enabled\n';
} else {
result += '* all exception breakpoints are disabled\n';
}
details.text = result;
break;
case 'setexceptionbreak':
result = 'Break on ' + body.type + ' exceptions: ';
result += body.enabled ? 'enabled' : 'disabled';
details.text = result;
break;
@ -1010,10 +1449,39 @@ function DebugResponseDetails(response) {
break;
case 'frame':
details.text = SourceUnderline(body.sourceLineText,
body.column);
Debug.State.currentSourceLine = body.line;
Debug.State.currentFrame = body.index;
if (last_cmd === 'info locals') {
var locals = body.locals;
if (locals.length === 0) {
result = 'No locals';
} else {
for (var i = 0; i < locals.length; i++) {
var local = locals[i];
result += local.name + ' = ';
result += refObjectToString_(response, local.value.ref);
result += '\n';
}
}
} else if (last_cmd === 'info args') {
var args = body.arguments;
if (args.length === 0) {
result = 'No arguments';
} else {
for (var i = 0; i < args.length; i++) {
var arg = args[i];
result += arg.name + ' = ';
result += refObjectToString_(response, arg.value.ref);
result += '\n';
}
}
} else {
result = SourceUnderline(body.sourceLineText,
body.column);
Debug.State.currentSourceLine = body.line;
Debug.State.currentFrame = body.index;
Debug.State.displaySourceStartLine = -1;
Debug.State.displaySourceEndLine = -1;
}
details.text = result;
break;
case 'scopes':
@ -1132,7 +1600,9 @@ function DebugResponseDetails(response) {
if (body[i].name) {
result += body[i].name;
} else {
if (body[i].compilationType == Debug.ScriptCompilationType.Eval) {
if (body[i].compilationType == Debug.ScriptCompilationType.Eval
&& body[i].evalFromScript
) {
result += 'eval from ';
var script_value = response.lookup(body[i].evalFromScript.ref);
result += ' ' + script_value.field('name');
@ -1162,6 +1632,9 @@ function DebugResponseDetails(response) {
result += sourceStart;
result += ']';
}
if (body.length == 0) {
result = "no matching scripts found";
}
details.text = result;
break;
@ -1181,6 +1654,23 @@ function DebugResponseDetails(response) {
details.text = "(running)";
break;
case 'v8flags':
details.text = "flags set";
break;
case 'gc':
details.text = "GC " + body.before + " => " + body.after;
if (body.after > (1024*1024)) {
details.text +=
" (" + roundNumber(body.before/(1024*1024), 1) + "M => " +
roundNumber(body.after/(1024*1024), 1) + "M)";
} else if (body.after > 1024) {
details.text +=
" (" + roundNumber(body.before/1024, 1) + "K => " +
roundNumber(body.after/1024, 1) + "K)";
}
break;
default:
details.text =
'Response for unknown command \'' + response.command() + '\'' +
@ -1467,6 +1957,11 @@ ProtocolValue.prototype.value = function() {
}
// Get the display-text representation of this protocol value (the 'text'
// field of the underlying value descriptor).
ProtocolValue.prototype.valueString = function() {
return this.value_.text;
}
// Lightweight wrapper around a protocol handle (a numeric reference id
// used to look objects up in a protocol packet).
function ProtocolReference(handle) {
  this.handle_ = handle;
}
@ -1613,7 +2108,9 @@ function SimpleObjectToJSON_(object) {
var property_value_json;
switch (typeof property_value) {
case 'object':
if (typeof property_value.toJSONProtocol == 'function') {
if (property_value === null) {
property_value_json = 'null';
} else if (typeof property_value.toJSONProtocol == 'function') {
property_value_json = property_value.toJSONProtocol(true)
} else if (property_value.constructor.name == 'Array'){
property_value_json = SimpleArrayToJSON_(property_value);

7
deps/v8/src/data-flow.h

@ -112,10 +112,13 @@ class BitVector: public ZoneObject {
}
void CopyFrom(const BitVector& other) {
ASSERT(other.length() == length());
for (int i = 0; i < data_length_; i++) {
ASSERT(other.length() <= length());
for (int i = 0; i < other.data_length_; i++) {
data_[i] = other.data_[i];
}
for (int i = other.data_length_; i < data_length_; i++) {
data_[i] = 0;
}
}
bool Contains(int i) const {

2
deps/v8/src/date.js

@ -1000,7 +1000,7 @@ function DateToISOString() {
function DateToJSON(key) {
var o = ToObject(this);
var tv = DefaultNumber(o);
if (IS_NUMBER(tv) && !$isFinite(tv)) {
if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
return null;
}
return o.toISOString();

33
deps/v8/src/debug-agent.cc

@ -27,9 +27,11 @@
#include "v8.h"
#include "debug.h"
#include "debug-agent.h"
#ifdef ENABLE_DEBUGGER_SUPPORT
namespace v8 {
namespace internal {
@ -167,22 +169,33 @@ void DebuggerAgentSession::Run() {
while (true) {
// Read data from the debugger front end.
SmartPointer<char> message = DebuggerAgentUtil::ReceiveMessage(client_);
if (*message == NULL) {
// Session is closed.
agent_->OnSessionClosed(this);
return;
const char* msg = *message;
bool is_closing_session = (msg == NULL);
if (msg == NULL) {
// If we lost the connection, then simulate a disconnect msg:
msg = "{\"seq\":1,\"type\":\"request\",\"command\":\"disconnect\"}";
} else {
// Check if we're getting a disconnect request:
const char* disconnectRequestStr =
"\"type\":\"request\",\"command\":\"disconnect\"}";
const char* result = strstr(msg, disconnectRequestStr);
if (result != NULL) {
is_closing_session = true;
}
}
// Convert UTF-8 to UTF-16.
unibrow::Utf8InputBuffer<> buf(*message,
StrLength(*message));
unibrow::Utf8InputBuffer<> buf(msg, StrLength(msg));
int len = 0;
while (buf.has_more()) {
buf.GetNext();
len++;
}
ScopedVector<int16_t> temp(len + 1);
buf.Reset(*message, StrLength(*message));
buf.Reset(msg, StrLength(msg));
for (int i = 0; i < len; i++) {
temp[i] = buf.GetNext();
}
@ -190,6 +203,12 @@ void DebuggerAgentSession::Run() {
// Send the request received to the debugger.
v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
len);
if (is_closing_session) {
// Session is closed.
agent_->OnSessionClosed(this);
return;
}
}
}

6
deps/v8/src/debug-agent.h

@ -44,7 +44,8 @@ class DebuggerAgentSession;
class DebuggerAgent: public Thread {
public:
explicit DebuggerAgent(const char* name, int port)
: name_(StrDup(name)), port_(port),
: Thread(name),
name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL),
terminate_now_(OS::CreateSemaphore(0)),
@ -90,7 +91,8 @@ class DebuggerAgent: public Thread {
class DebuggerAgentSession: public Thread {
public:
DebuggerAgentSession(DebuggerAgent* agent, Socket* client)
: agent_(agent), client_(client) {}
: Thread("v8:DbgAgntSessn"),
agent_(agent), client_(client) {}
void DebuggerMessage(Vector<uint16_t> message);
void Shutdown();

167
deps/v8/src/debug-debugger.js

@ -112,8 +112,8 @@ var debugger_flags = {
// Create a new break point object and add it to the list of break points.
function MakeBreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
var break_point = new BreakPoint(source_position, opt_line, opt_column, opt_script_break_point);
function MakeBreakPoint(source_position, opt_script_break_point) {
var break_point = new BreakPoint(source_position, opt_script_break_point);
break_points.push(break_point);
return break_point;
}
@ -123,10 +123,8 @@ function MakeBreakPoint(source_position, opt_line, opt_column, opt_script_break_
// NOTE: This object does not have a reference to the function having break
// point as this would cause function not to be garbage collected when it is
// not used any more. We do not want break points to keep functions alive.
function BreakPoint(source_position, opt_line, opt_column, opt_script_break_point) {
function BreakPoint(source_position, opt_script_break_point) {
this.source_position_ = source_position;
this.source_line_ = opt_line;
this.source_column_ = opt_column;
if (opt_script_break_point) {
this.script_break_point_ = opt_script_break_point;
} else {
@ -424,7 +422,7 @@ ScriptBreakPoint.prototype.set = function (script) {
if (position === null) return;
// Create a break point object and set the break point.
break_point = MakeBreakPoint(position, this.line(), this.column(), this);
break_point = MakeBreakPoint(position, this);
break_point.setIgnoreCount(this.ignoreCount());
var actual_position = %SetScriptBreakPoint(script, position, break_point);
if (IS_UNDEFINED(actual_position)) {
@ -639,7 +637,7 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
opt_condition);
} else {
// Set a break point directly on the function.
var break_point = MakeBreakPoint(source_position, opt_line, opt_column);
var break_point = MakeBreakPoint(source_position);
var actual_position =
%SetFunctionBreakPoint(func, source_position, break_point);
actual_position += this.sourcePosition(func);
@ -652,15 +650,40 @@ Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
};
// Set a break point in the script with the given id at the given source
// position, with an optional condition, and return the new break point.
// NOTE(review): 'break_point' is assigned without 'var' and therefore
// leaks into the global scope - presumably unintentional; confirm.
Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
condition, enabled)
{
break_point = MakeBreakPoint(position);
break_point.setCondition(condition);
if (!enabled)
break_point.disable();
// Find the script with the matching id and install the break point on it.
var scripts = this.scripts();
for (var i = 0; i < scripts.length; i++) {
if (script_id == scripts[i].id) {
// %SetScriptBreakPoint returns the position actually used (may differ
// from the requested position).
break_point.actual_position = %SetScriptBreakPoint(scripts[i], position,
break_point);
break;
}
}
return break_point;
};
Debug.enableBreakPoint = function(break_point_number) {
var break_point = this.findBreakPoint(break_point_number, false);
break_point.enable();
// Only enable if the breakpoint hasn't been deleted:
if (break_point) {
break_point.enable();
}
};
Debug.disableBreakPoint = function(break_point_number) {
var break_point = this.findBreakPoint(break_point_number, false);
break_point.disable();
// Only disable if the breakpoint hasn't been deleted:
if (break_point) {
break_point.disable();
}
};
@ -701,6 +724,17 @@ Debug.clearAllBreakPoints = function() {
};
// Disable every breakpoint: all user-defined breakpoints and both kinds of
// exception breaks.  Breakpoints are disabled, not deleted.
Debug.disableAllBreakPoints = function() {
// Disable all user defined breakpoints:
// (breakpoint numbers are allocated from 1 up to next_break_point_number).
for (var i = 1; i < next_break_point_number; i++) {
Debug.disableBreakPoint(i);
}
// Disable all exception breakpoints:
%ChangeBreakOnException(Debug.ExceptionBreak.Caught, false);
%ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false);
};
Debug.findScriptBreakPoint = function(break_point_number, remove) {
var script_break_point;
for (var i = 0; i < script_break_points.length; i++) {
@ -1341,6 +1375,10 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
this.clearBreakPointRequest_(request, response);
} else if (request.command == 'clearbreakpointgroup') {
this.clearBreakPointGroupRequest_(request, response);
} else if (request.command == 'disconnect') {
this.disconnectRequest_(request, response);
} else if (request.command == 'setexceptionbreak') {
this.setExceptionBreakRequest_(request, response);
} else if (request.command == 'listbreakpoints') {
this.listBreakpointsRequest_(request, response);
} else if (request.command == 'backtrace') {
@ -1373,6 +1411,13 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
this.changeLiveRequest_(request, response);
} else if (request.command == 'flags') {
this.debuggerFlagsRequest_(request, response);
} else if (request.command == 'v8flags') {
this.v8FlagsRequest_(request, response);
// GC tools:
} else if (request.command == 'gc') {
this.gcRequest_(request, response);
} else {
throw new Error('Unknown command "' + request.command + '" in request');
}
@ -1690,7 +1735,63 @@ DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, resp
array.push(description);
}
response.body = { breakpoints: array }
response.body = {
breakpoints: array,
breakOnExceptions: Debug.isBreakOnException(),
breakOnUncaughtExceptions: Debug.isBreakOnUncaughtException()
}
}
// Handle a 'disconnect' request: disable all breakpoints so the debuggee
// runs unimpeded, then resume execution as for a 'continue' request.
DebugCommandProcessor.prototype.disconnectRequest_ =
function(request, response) {
Debug.disableAllBreakPoints();
this.continueRequest_(request, response);
}
DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
function(request, response) {
// Check for legal request.
if (!request.arguments) {
response.failed('Missing arguments');
return;
}
// Pull out and check the 'type' argument:
var type = request.arguments.type;
if (!type) {
response.failed('Missing argument "type"');
return;
}
// Initialize the default value of enable:
var enabled;
if (type == 'all') {
enabled = !Debug.isBreakOnException();
} else if (type == 'uncaught') {
enabled = !Debug.isBreakOnUncaughtException();
}
// Pull out and check the 'enabled' argument if present:
if (!IS_UNDEFINED(request.arguments.enabled)) {
enabled = request.arguments.enabled;
if ((enabled != true) && (enabled != false)) {
response.failed('Illegal value for "enabled":"' + enabled + '"');
}
}
// Now set the exception break state:
if (type == 'all') {
%ChangeBreakOnException(Debug.ExceptionBreak.Caught, enabled);
} else if (type == 'uncaught') {
%ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, enabled);
} else {
response.failed('Unknown "type":"' + type + '"');
}
// Add the cleared break point number to the response.
response.body = { 'type': type, 'enabled': enabled };
}
@ -2047,6 +2148,16 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
idsToInclude[ids[i]] = true;
}
}
var filterStr = null;
var filterNum = null;
if (!IS_UNDEFINED(request.arguments.filter)) {
var num = %ToNumber(request.arguments.filter);
if (!isNaN(num)) {
filterNum = num;
}
filterStr = request.arguments.filter;
}
}
// Collect all scripts in the heap.
@ -2058,6 +2169,21 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
if (idsToInclude && !idsToInclude[scripts[i].id]) {
continue;
}
if (filterStr || filterNum) {
var script = scripts[i];
var found = false;
if (filterNum && !found) {
if (script.id && script.id === filterNum) {
found = true;
}
}
if (filterStr && !found) {
if (script.name && script.name.indexOf(filterStr) >= 0) {
found = true;
}
}
if (!found) continue;
}
if (types & ScriptTypeFlag(scripts[i].type)) {
response.body.push(MakeMirror(scripts[i]));
}
@ -2196,6 +2322,27 @@ DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
}
// Handle a 'v8flags' request: pass the flag string from the request to the
// VM's flag parser.  An absent flags argument is treated as an empty string.
DebugCommandProcessor.prototype.v8FlagsRequest_ = function(request, response) {
var flags = request.arguments.flags;
if (!flags) flags = '';
%SetFlags(flags);
};
// Handle a 'gc' request: run a garbage collection of the requested type
// (defaulting to 'all') and report heap usage before and after.
DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
var type = request.arguments.type;
if (!type) type = 'all';
var before = %GetHeapUsage();
%CollectGarbage(type);
var after = %GetHeapUsage();
response.body = { "before": before, "after": after };
};
// Check whether the previously processed command caused the VM to become
// running.
DebugCommandProcessor.prototype.isRunning = function() {

11
deps/v8/src/debug.cc

@ -622,7 +622,7 @@ bool Debug::disable_break_ = false;
// Default call debugger on uncaught exception.
bool Debug::break_on_exception_ = false;
bool Debug::break_on_uncaught_exception_ = true;
bool Debug::break_on_uncaught_exception_ = false;
Handle<Context> Debug::debug_context_ = Handle<Context>();
Code* Debug::debug_break_return_ = NULL;
@ -2740,8 +2740,10 @@ bool Debugger::StartAgent(const char* name, int port,
}
if (Socket::Setup()) {
agent_ = new DebuggerAgent(name, port);
agent_->Start();
if (agent_ == NULL) {
agent_ = new DebuggerAgent(name, port);
agent_->Start();
}
return true;
}
@ -3037,7 +3039,8 @@ void LockingCommandMessageQueue::Clear() {
MessageDispatchHelperThread::MessageDispatchHelperThread()
: sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
: Thread("v8:MsgDispHelpr"),
sem_(OS::CreateSemaphore(0)), mutex_(OS::CreateMutex()),
already_signalled_(false) {
}

10
deps/v8/src/debug.h

@ -32,6 +32,7 @@
#include "debug-agent.h"
#include "execution.h"
#include "factory.h"
#include "flags.h"
#include "hashmap.h"
#include "platform.h"
#include "string-stream.h"
@ -772,6 +773,15 @@ class Debugger {
}
}
if (((event == v8::BeforeCompile) || (event == v8::AfterCompile)) &&
!FLAG_debug_compile_events) {
return false;
} else if ((event == v8::ScriptCollected) &&
!FLAG_debug_script_collected_events) {
return false;
}
// Currently argument event is not used.
return !compiling_natives_ && Debugger::IsDebuggerActive();
}

14
deps/v8/src/deoptimizer.cc

@ -309,9 +309,9 @@ void Deoptimizer::TearDown() {
}
unsigned Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
unsigned id,
SharedFunctionInfo* shared) {
int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
unsigned id,
SharedFunctionInfo* shared) {
// TODO(kasperl): For now, we do a simple linear search for the PC
// offset associated with the given node id. This should probably be
// changed to a binary search.
@ -618,17 +618,17 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
case Translation::ARGUMENTS_OBJECT: {
// Use the hole value as a sentinel and fill in the arguments object
// after the deoptimized frame is built.
// Use the arguments marker value as a sentinel and fill in the arguments
// object after the deoptimized frame is built.
ASSERT(frame_index == 0); // Only supported for first frame.
if (FLAG_trace_deopt) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
Heap::the_hole_value()->ShortPrint();
Heap::arguments_marker()->ShortPrint();
PrintF(" ; arguments object\n");
}
intptr_t value = reinterpret_cast<intptr_t>(Heap::the_hole_value());
intptr_t value = reinterpret_cast<intptr_t>(Heap::arguments_marker());
output_[frame_index]->SetFrameSlot(output_offset, value);
return;
}

6
deps/v8/src/deoptimizer.h

@ -145,9 +145,9 @@ class Deoptimizer : public Malloced {
static Address GetDeoptimizationEntry(int id, BailoutType type);
static int GetDeoptimizationId(Address addr, BailoutType type);
static unsigned GetOutputInfo(DeoptimizationOutputData* data,
unsigned node_id,
SharedFunctionInfo* shared);
static int GetOutputInfo(DeoptimizationOutputData* data,
unsigned node_id,
SharedFunctionInfo* shared);
static void Setup();
static void TearDown();

11
deps/v8/src/disassembler.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -268,10 +268,13 @@ static int DecodeIt(FILE* f,
Code::Kind2String(kind),
CodeStub::MajorName(major_key, false));
switch (major_key) {
case CodeStub::CallFunction:
out.AddFormatted("argc = %d", minor_key);
case CodeStub::CallFunction: {
int argc =
CallFunctionStub::ExtractArgcFromMinorKey(minor_key);
out.AddFormatted("argc = %d", argc);
break;
default:
}
default:
out.AddFormatted("minor: %d", minor_key);
}
}

50
deps/v8/src/extensions/experimental/experimental.gyp

@ -0,0 +1,50 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'icu_src_dir%': '',
},
'targets': [
{
'target_name': 'i18n_api',
'type': 'static_library',
'sources': [
'i18n-extension.cc',
'i18n-extension.h',
],
'include_dirs': [
'<(icu_src_dir)/public/common',
'../..',
],
'dependencies': [
'<(icu_src_dir)/icu.gyp:*',
'../../../tools/gyp/v8.gyp:v8',
],
},
], # targets
}

8
deps/v8/src/factory.cc

@ -99,6 +99,14 @@ Handle<String> Factory::LookupSymbol(Vector<const char> string) {
CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
}
Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
CALL_HEAP_FUNCTION(Heap::LookupAsciiSymbol(string), String);
}
Handle<String> Factory::LookupTwoByteSymbol(Vector<const uc16> string) {
CALL_HEAP_FUNCTION(Heap::LookupTwoByteSymbol(string), String);
}
Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
PretenureFlag pretenure) {

2
deps/v8/src/factory.h

@ -61,6 +61,8 @@ class Factory : public AllStatic {
PretenureFlag pretenure);
static Handle<String> LookupSymbol(Vector<const char> str);
static Handle<String> LookupAsciiSymbol(Vector<const char> str);
static Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
static Handle<String> LookupAsciiSymbol(const char* str) {
return LookupSymbol(CStrVector(str));
}

12
deps/v8/src/flag-definitions.h

@ -122,7 +122,6 @@ DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_environment, false, "trace lithium environments")
DEFINE_bool(trace_representation, false, "trace representation types")
DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
DEFINE_bool(stress_environments, false, "environment for every instruction")
@ -142,6 +141,7 @@ DEFINE_bool(use_osr, false, "use on-stack replacement")
#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_bool(debug_code, false,
@ -356,6 +356,16 @@ DEFINE_string(map_counters, NULL, "Map counters to a file")
DEFINE_args(js_arguments, JSArguments(),
"Pass all remaining arguments to the script. Alias for \"--\".")
#if defined(WEBOS__)
DEFINE_bool(debug_compile_events, false, "Enable debugger compile events")
DEFINE_bool(debug_script_collected_events, false,
"Enable debugger script collected events")
#else
DEFINE_bool(debug_compile_events, true, "Enable debugger compile events")
DEFINE_bool(debug_script_collected_events, true,
"Enable debugger script collected events")
#endif
//
// Debug only flags
//

50
deps/v8/src/frames.cc

@ -329,21 +329,20 @@ void SafeStackTraceFrameIterator::Advance() {
Code* StackFrame::GetSafepointData(Address pc,
uint8_t** safepoint_entry,
SafepointEntry* safepoint_entry,
unsigned* stack_slots) {
PcToCodeCache::PcToCodeCacheEntry* entry = PcToCodeCache::GetCacheEntry(pc);
uint8_t* cached_safepoint_entry = entry->safepoint_entry;
if (cached_safepoint_entry == NULL) {
cached_safepoint_entry = entry->code->GetSafepointEntry(pc);
ASSERT(cached_safepoint_entry != NULL); // No safepoint found.
entry->safepoint_entry = cached_safepoint_entry;
SafepointEntry cached_safepoint_entry = entry->safepoint_entry;
if (!entry->safepoint_entry.is_valid()) {
entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
ASSERT(entry->safepoint_entry.is_valid());
} else {
ASSERT(cached_safepoint_entry == entry->code->GetSafepointEntry(pc));
ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
}
// Fill in the results and return the code.
Code* code = entry->code;
*safepoint_entry = cached_safepoint_entry;
*safepoint_entry = entry->safepoint_entry;
*stack_slots = code->stack_slots();
return code;
}
@ -536,7 +535,7 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
// Compute the safepoint information.
unsigned stack_slots = 0;
uint8_t* safepoint_entry = NULL;
SafepointEntry safepoint_entry;
Code* code = StackFrame::GetSafepointData(
pc(), &safepoint_entry, &stack_slots);
unsigned slot_space = stack_slots * kPointerSize;
@ -548,10 +547,22 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
Object** parameters_limit = &Memory::Object_at(
fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
// Visit the parameters that may be on top of the saved registers.
if (safepoint_entry.argument_count() > 0) {
v->VisitPointers(parameters_base,
parameters_base + safepoint_entry.argument_count());
parameters_base += safepoint_entry.argument_count();
}
if (safepoint_entry.has_doubles()) {
parameters_base += DoubleRegister::kNumAllocatableRegisters *
kDoubleSize / kPointerSize;
}
// Visit the registers that contain pointers if any.
if (SafepointTable::HasRegisters(safepoint_entry)) {
if (safepoint_entry.HasRegisters()) {
for (int i = kNumSafepointRegisters - 1; i >=0; i--) {
if (SafepointTable::HasRegisterAt(safepoint_entry, i)) {
if (safepoint_entry.HasRegisterAt(i)) {
int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
v->VisitPointer(parameters_base + reg_stack_index);
}
@ -561,7 +572,8 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
}
// We're done dealing with the register bits.
safepoint_entry += kNumSafepointRegisters >> kBitsPerByteLog2;
uint8_t* safepoint_bits = safepoint_entry.bits();
safepoint_bits += kNumSafepointRegisters >> kBitsPerByteLog2;
// Visit the rest of the parameters.
v->VisitPointers(parameters_base, parameters_limit);
@ -570,7 +582,7 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
for (unsigned index = 0; index < stack_slots; index++) {
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
if ((safepoint_entry[byte_index] & (1U << bit_index)) != 0) {
if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
v->VisitPointer(parameters_limit + index);
}
}
@ -778,14 +790,8 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
ASSERT(code != NULL);
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
SafepointTable table(code);
unsigned pc_offset = static_cast<unsigned>(pc() - code->instruction_start());
for (unsigned i = 0; i < table.length(); i++) {
if (table.GetPcOffset(i) == pc_offset) {
*deopt_index = table.GetDeoptimizationIndex(i);
break;
}
}
SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
*deopt_index = safepoint_entry.deoptimization_index();
ASSERT(*deopt_index != AstNode::kNoNumber);
return DeoptimizationInputData::cast(code->deoptimization_data());
@ -1150,7 +1156,7 @@ PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
// been set. Otherwise, we risk trying to use a cache entry before
// the code has been computed.
entry->code = GcSafeFindCodeForPc(pc);
entry->safepoint_entry = NULL;
entry->safepoint_entry.Reset();
entry->pc = pc;
}
return entry;

6
deps/v8/src/frames.h

@ -28,6 +28,8 @@
#ifndef V8_FRAMES_H_
#define V8_FRAMES_H_
#include "safepoint-table.h"
namespace v8 {
namespace internal {
@ -51,7 +53,7 @@ class PcToCodeCache : AllStatic {
struct PcToCodeCacheEntry {
Address pc;
Code* code;
uint8_t* safepoint_entry;
SafepointEntry safepoint_entry;
};
static PcToCodeCacheEntry* cache(int index) {
@ -208,7 +210,7 @@ class StackFrame BASE_EMBEDDED {
// safepoint entry and the number of stack slots. The pc must be at
// a safepoint.
static Code* GetSafepointData(Address pc,
uint8_t** safepoint_entry,
SafepointEntry* safepoint_entry,
unsigned* stack_slots);
virtual void Iterate(ObjectVisitor* v) const = 0;

13
deps/v8/src/globals.h

@ -181,10 +181,6 @@ typedef byte* Address;
#define USING_BSD_ABI
#endif
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
// -----------------------------------------------------------------------------
// Constants
@ -228,6 +224,15 @@ const int kBinary32MinExponent = 0x01;
const int kBinary32MantissaBits = 23;
const int kBinary32ExponentShift = 23;
// ASCII/UC16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
const int kASCIISize = kCharSize;
const int kUC16Size = sizeof(uc16); // NOLINT
const uc32 kMaxAsciiCharCode = 0x7f;
const uint32_t kMaxAsciiCharCodeU = 0x7fu;
// The expression OFFSET_OF(type, field) computes the byte-offset
// of the specified field relative to the containing type. This

13
deps/v8/src/handles.cc

@ -280,13 +280,13 @@ Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
}
Handle<Object> IgnoreAttributesAndSetLocalProperty(
Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
CALL_HEAP_FUNCTION(object->
IgnoreAttributesAndSetLocalProperty(*key, *value, attributes), Object);
SetLocalPropertyIgnoreAttributes(*key, *value, attributes), Object);
}
@ -422,6 +422,15 @@ Handle<Object> SetElement(Handle<JSObject> object,
}
Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value) {
ASSERT(!object->HasPixelElements());
ASSERT(!object->HasExternalArrayElements());
CALL_HEAP_FUNCTION(object->SetElement(index, *value, false), Object);
}
Handle<JSObject> Copy(Handle<JSObject> obj) {
CALL_HEAP_FUNCTION(Heap::CopyJSObject(*obj), JSObject);
}

11
deps/v8/src/handles.h

@ -217,9 +217,10 @@ Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key);
Handle<Object> IgnoreAttributesAndSetLocalProperty(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes);
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
@ -231,6 +232,10 @@ Handle<Object> SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value);
Handle<Object> SetOwnElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value);
Handle<Object> GetProperty(Handle<JSObject> obj,
const char* name);

78
deps/v8/src/heap-inl.h

@ -40,6 +40,19 @@ int Heap::MaxObjectSizeInPagedSpace() {
}
MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
PretenureFlag pretenure) {
// Check for ASCII first since this is the common case.
if (String::IsAscii(str.start(), str.length())) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
return AllocateStringFromAscii(str, pretenure);
}
// Non-ASCII and we need to decode.
return AllocateStringFromUtf8Slow(str, pretenure);
}
MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
int chars,
uint32_t hash_field) {
@ -49,6 +62,71 @@ MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
}
MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
uint32_t hash_field) {
if (str.length() > SeqAsciiString::kMaxLength) {
return Failure::OutOfMemoryException();
}
// Compute map and object size.
Map* map = ascii_symbol_map();
int size = SeqAsciiString::SizeFor(str.length());
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<HeapObject*>(result)->set_map(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(str.length());
answer->set_hash_field(hash_field);
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
memcpy(answer->address() + SeqAsciiString::kHeaderSize,
str.start(), str.length());
return answer;
}
MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
uint32_t hash_field) {
if (str.length() > SeqTwoByteString::kMaxLength) {
return Failure::OutOfMemoryException();
}
// Compute map and object size.
Map* map = symbol_map();
int size = SeqTwoByteString::SizeFor(str.length());
// Allocate string.
Object* result;
{ MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
? lo_space_->AllocateRaw(size)
: old_data_space_->AllocateRaw(size);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
reinterpret_cast<HeapObject*>(result)->set_map(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(str.length());
answer->set_hash_field(hash_field);
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
memcpy(answer->address() + SeqTwoByteString::kHeaderSize,
str.start(), str.length() * kUC16Size);
return answer;
}
MaybeObject* Heap::CopyFixedArray(FixedArray* src) {
return CopyFixedArrayWithMap(src, src->map());
}

76
deps/v8/src/heap.cc

@ -2011,6 +2011,12 @@ bool Heap::CreateInitialObjects() {
}
set_the_hole_value(obj);
{ MaybeObject* maybe_obj = CreateOddball("arguments_marker",
Smi::FromInt(-4));
if (!maybe_obj->ToObject(&obj)) return false;
}
set_arguments_marker(obj);
{ MaybeObject* maybe_obj =
CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
if (!maybe_obj->ToObject(&obj)) return false;
@ -2549,20 +2555,10 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
}
// For small strings we check whether the resource contains only
// ascii characters. If yes, we use a different string map.
bool is_ascii = true;
if (length >= static_cast<size_t>(String::kMinNonFlatLength)) {
is_ascii = false;
} else {
const uc16* data = resource->data();
for (size_t i = 0; i < length; i++) {
if (data[i] > String::kMaxAsciiCharCode) {
is_ascii = false;
break;
}
}
}
// ASCII characters. If yes, we use a different string map.
static const size_t kAsciiCheckLengthLimit = 32;
bool is_ascii = length <= kAsciiCheckLengthLimit &&
String::IsAscii(resource->data(), static_cast<int>(length));
Map* map = is_ascii ?
Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
Object* result;
@ -2728,6 +2724,9 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(ByteArray::cast(reloc_info));
code->set_flags(flags);
if (code->is_call_stub() || code->is_keyed_call_stub()) {
code->set_check_type(RECEIVER_MAP_CHECK);
}
code->set_deoptimization_data(empty_fixed_array());
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
@ -3307,8 +3306,8 @@ MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
}
MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> string,
PretenureFlag pretenure) {
MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
PretenureFlag pretenure) {
// V8 only supports characters in the Basic Multilingual Plane.
const uc32 kMaxSupportedChar = 0xFFFF;
// Count the number of characters in the UTF-8 string and check if
@ -3317,17 +3316,11 @@ MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> string,
decoder(ScannerConstants::utf8_decoder());
decoder->Reset(string.start(), string.length());
int chars = 0;
bool is_ascii = true;
while (decoder->has_more()) {
uc32 r = decoder->GetNext();
if (r > String::kMaxAsciiCharCode) is_ascii = false;
decoder->GetNext();
chars++;
}
// If the string is ascii, we do not need to convert the characters
// since UTF8 is backwards compatible with ascii.
if (is_ascii) return AllocateStringFromAscii(string, pretenure);
Object* result;
{ MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
@ -3348,11 +3341,8 @@ MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> string,
MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
PretenureFlag pretenure) {
// Check if the string is an ASCII string.
int i = 0;
while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
MaybeObject* maybe_result;
if (i == string.length()) { // It's an ASCII string.
if (String::IsAscii(string.start(), string.length())) {
maybe_result = AllocateRawAsciiString(string.length(), pretenure);
} else { // It's not an ASCII string.
maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
@ -4032,6 +4022,36 @@ MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
}
MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
Object* symbol = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
symbol_table()->LookupAsciiSymbol(string, &symbol);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
// Can't use set_symbol_table because SymbolTable::cast knows that
// SymbolTable is a singleton and checks for identity.
roots_[kSymbolTableRootIndex] = new_table;
ASSERT(symbol != NULL);
return symbol;
}
MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
Object* symbol = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
symbol_table()->LookupTwoByteSymbol(string, &symbol);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
// Can't use set_symbol_table because SymbolTable::cast knows that
// SymbolTable is a singleton and checks for identity.
roots_[kSymbolTableRootIndex] = new_table;
ASSERT(symbol != NULL);
return symbol;
}
MaybeObject* Heap::LookupSymbol(String* string) {
if (string->IsSymbol()) return string;
Object* symbol = NULL;
@ -5012,7 +5032,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
obj->SetMark();
}
UnmarkingVisitor visitor;
Heap::IterateRoots(&visitor, VISIT_ONLY_STRONG);
Heap::IterateRoots(&visitor, VISIT_ALL);
while (visitor.can_process())
visitor.ProcessNext();
}

19
deps/v8/src/heap.h

@ -53,6 +53,7 @@ namespace internal {
V(Object, null_value, NullValue) \
V(Object, true_value, TrueValue) \
V(Object, false_value, FalseValue) \
V(Object, arguments_marker, ArgumentsMarker) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, global_context_map, GlobalContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
@ -412,7 +413,10 @@ class Heap : public AllStatic {
MUST_USE_RESULT static MaybeObject* AllocateStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT static MaybeObject* AllocateStringFromUtf8(
MUST_USE_RESULT static inline MaybeObject* AllocateStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT static MaybeObject* AllocateStringFromUtf8Slow(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT static MaybeObject* AllocateStringFromTwoByte(
@ -428,6 +432,14 @@ class Heap : public AllStatic {
int chars,
uint32_t hash_field);
MUST_USE_RESULT static inline MaybeObject* AllocateAsciiSymbol(
Vector<const char> str,
uint32_t hash_field);
MUST_USE_RESULT static inline MaybeObject* AllocateTwoByteSymbol(
Vector<const uc16> str,
uint32_t hash_field);
MUST_USE_RESULT static MaybeObject* AllocateInternalSymbol(
unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
@ -683,6 +695,9 @@ class Heap : public AllStatic {
// failed.
// Please note this function does not perform a garbage collection.
MUST_USE_RESULT static MaybeObject* LookupSymbol(Vector<const char> str);
MUST_USE_RESULT static MaybeObject* LookupAsciiSymbol(Vector<const char> str);
MUST_USE_RESULT static MaybeObject* LookupTwoByteSymbol(
Vector<const uc16> str);
MUST_USE_RESULT static MaybeObject* LookupAsciiSymbol(const char* str) {
return LookupSymbol(CStrVector(str));
}
@ -1850,7 +1865,7 @@ class GCTracer BASE_EMBEDDED {
}
~Scope() {
ASSERT((0 <= scope_) && (scope_ < kNumberOfScopes));
ASSERT(scope_ < kNumberOfScopes); // scope_ is unsigned.
tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
}

5
deps/v8/src/hydrogen-instructions.cc

@ -1190,6 +1190,11 @@ void HStoreGlobal::PrintDataTo(StringStream* stream) const {
}
void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
stream->Add("(%d, %d)", context_chain_length(), slot_index());
}
// Implementation of type inference and type conversions. Calculates
// the inferred type of this instruction based on the input operands.

176
deps/v8/src/hydrogen-instructions.h

@ -73,10 +73,10 @@ class LChunkBuilder;
// HCompare
// HCompareJSObjectEq
// HInstanceOf
// HInstanceOfKnownGlobal
// HLoadKeyed
// HLoadKeyedFastElement
// HLoadKeyedGeneric
// HLoadNamedGeneric
// HPower
// HStoreNamed
// HStoreNamedField
@ -92,6 +92,7 @@ class LChunkBuilder;
// HCallNew
// HCallRuntime
// HCallStub
// HCheckPrototypeMaps
// HConstant
// HControlInstruction
// HDeoptimize
@ -106,6 +107,7 @@ class LChunkBuilder;
// HGlobalObject
// HGlobalReceiver
// HLeaveInlined
// HLoadContextSlot
// HLoadGlobal
// HMaterializedLiteral
// HArrayLiteral
@ -119,19 +121,21 @@ class LChunkBuilder;
// HStoreKeyedFastElement
// HStoreKeyedGeneric
// HUnaryOperation
// HArrayLength
// HBitNot
// HChange
// HCheckFunction
// HCheckInstanceType
// HCheckMap
// HCheckNonSmi
// HCheckPrototypeMaps
// HCheckSmi
// HDeleteProperty
// HFixedArrayLength
// HJSArrayLength
// HLoadElements
// HTypeofIs
// HLoadNamedField
// HLoadNamedGeneric
// HLoadFunctionPrototype
// HPushArgument
// HTypeof
// HUnaryMathOperation
@ -170,7 +174,6 @@ class LChunkBuilder;
V(ArgumentsElements) \
V(ArgumentsLength) \
V(ArgumentsObject) \
V(ArrayLength) \
V(ArrayLiteral) \
V(BitAnd) \
V(BitNot) \
@ -203,24 +206,29 @@ class LChunkBuilder;
V(Deoptimize) \
V(Div) \
V(EnterInlined) \
V(FixedArrayLength) \
V(FunctionLiteral) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
V(IsNull) \
V(IsObject) \
V(IsSmi) \
V(HasInstanceType) \
V(HasCachedArrayIndex) \
V(JSArrayLength) \
V(ClassOfTest) \
V(LeaveInlined) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadGlobal) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadFunctionPrototype) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
@ -256,6 +264,7 @@ class LChunkBuilder;
V(GlobalVars) \
V(Maps) \
V(ArrayLengths) \
V(FunctionPrototypes) \
V(OsrEntries)
#define DECLARE_INSTRUCTION(type) \
@ -905,6 +914,9 @@ class HCompareMapAndBranch: public HUnaryControlInstruction {
virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
HBasicBlock* true_destination() const { return true_destination_; }
HBasicBlock* false_destination() const { return false_destination_; }
virtual void PrintDataTo(StringStream* stream) const;
Handle<Map> map() const { return map_; }
@ -1015,10 +1027,10 @@ class HChange: public HUnaryOperation {
class HSimulate: public HInstruction {
public:
HSimulate(int ast_id, int pop_count, int environment_height)
HSimulate(int ast_id, int pop_count, int environment_length)
: ast_id_(ast_id),
pop_count_(pop_count),
environment_height_(environment_height),
environment_length_(environment_length),
values_(2),
assigned_indexes_(2) {}
virtual ~HSimulate() {}
@ -1032,7 +1044,7 @@ class HSimulate: public HInstruction {
ast_id_ = id;
}
int environment_height() const { return environment_height_; }
int environment_length() const { return environment_length_; }
int pop_count() const { return pop_count_; }
const ZoneList<HValue*>* values() const { return &values_; }
int GetAssignedIndexAt(int index) const {
@ -1074,7 +1086,7 @@ class HSimulate: public HInstruction {
}
int ast_id_;
int pop_count_;
int environment_height_;
int environment_length_;
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
};
@ -1336,9 +1348,9 @@ class HCallRuntime: public HCall {
};
class HArrayLength: public HUnaryOperation {
class HJSArrayLength: public HUnaryOperation {
public:
explicit HArrayLength(HValue* value) : HUnaryOperation(value) {
explicit HJSArrayLength(HValue* value) : HUnaryOperation(value) {
// The length of an array is stored as a tagged value in the array
// object. It is guaranteed to be 32 bit integer, but it can be
// represented as either a smi or heap number.
@ -1351,7 +1363,23 @@ class HArrayLength: public HUnaryOperation {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array_length")
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js_array_length")
};
class HFixedArrayLength: public HUnaryOperation {
public:
explicit HFixedArrayLength(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Tagged());
SetFlag(kDependsOnArrayLengths);
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed_array_length")
};
@ -1596,42 +1624,40 @@ class HCheckNonSmi: public HUnaryOperation {
};
class HCheckPrototypeMaps: public HUnaryOperation {
class HCheckPrototypeMaps: public HInstruction {
public:
HCheckPrototypeMaps(HValue* value,
Handle<JSObject> holder,
Handle<Map> receiver_map)
: HUnaryOperation(value),
holder_(holder),
receiver_map_(receiver_map) {
set_representation(Representation::Tagged());
HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
: prototype_(prototype), holder_(holder) {
SetFlag(kUseGVN);
SetFlag(kDependsOnMaps);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
#ifdef DEBUG
virtual void Verify() const;
#endif
Handle<JSObject> prototype() const { return prototype_; }
Handle<JSObject> holder() const { return holder_; }
Handle<Map> receiver_map() const { return receiver_map_; }
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
virtual intptr_t Hashcode() const {
ASSERT(!Heap::IsAllocationAllowed());
intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
return hash;
}
protected:
virtual bool DataEquals(HValue* other) const {
HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
return holder_.is_identical_to(b->holder()) &&
receiver_map_.is_identical_to(b->receiver_map());
return prototype_.is_identical_to(b->prototype()) &&
holder_.is_identical_to(b->holder());
}
private:
Handle<JSObject> prototype_;
Handle<JSObject> holder_;
Handle<Map> receiver_map_;
};
@ -1766,6 +1792,8 @@ class HConstant: public HInstruction {
Handle<Object> handle() const { return handle_; }
bool InOldSpace() const { return !Heap::InNewSpace(*handle_); }
virtual bool EmitAtUses() const { return !representation().IsDouble(); }
virtual void PrintDataTo(StringStream* stream) const;
virtual HType CalculateInferredType() const;
@ -2236,6 +2264,28 @@ class HInstanceOf: public HBinaryOperation {
};
class HInstanceOfKnownGlobal: public HUnaryOperation {
public:
HInstanceOfKnownGlobal(HValue* left, Handle<JSFunction> right)
: HUnaryOperation(left), function_(right) {
set_representation(Representation::Tagged());
SetFlagMask(AllSideEffects());
}
Handle<JSFunction> function() { return function_; }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance_of_known_global")
private:
Handle<JSFunction> function_;
};
class HPower: public HBinaryOperation {
public:
HPower(HValue* left, HValue* right)
@ -2551,6 +2601,39 @@ class HStoreGlobal: public HUnaryOperation {
};
class HLoadContextSlot: public HInstruction {
public:
HLoadContextSlot(int context_chain_length , int slot_index)
: context_chain_length_(context_chain_length), slot_index_(slot_index) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnCalls);
}
int context_chain_length() const { return context_chain_length_; }
int slot_index() const { return slot_index_; }
virtual void PrintDataTo(StringStream* stream) const;
virtual intptr_t Hashcode() const {
return context_chain_length() * 29 + slot_index();
}
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load_context_slot")
protected:
virtual bool DataEquals(HValue* other) const {
HLoadContextSlot* b = HLoadContextSlot::cast(other);
return (context_chain_length() == b->context_chain_length())
&& (slot_index() == b->slot_index());
}
private:
int context_chain_length_;
int slot_index_;
};
class HLoadNamedField: public HUnaryOperation {
public:
HLoadNamedField(HValue* object, bool is_in_object, int offset)
@ -2617,6 +2700,27 @@ class HLoadNamedGeneric: public HUnaryOperation {
};
class HLoadFunctionPrototype: public HUnaryOperation {
public:
explicit HLoadFunctionPrototype(HValue* function)
: HUnaryOperation(function) {
set_representation(Representation::Tagged());
SetFlagMask(kDependsOnFunctionPrototypes);
}
HValue* function() const { return OperandAt(0); }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load_function_prototype")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HLoadKeyed: public HBinaryOperation {
public:
HLoadKeyed(HValue* obj, HValue* key) : HBinaryOperation(obj, key) {
@ -2663,6 +2767,12 @@ class HLoadKeyedGeneric: public HLoadKeyed {
};
static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
return !value->type().IsSmi() &&
!(value->IsConstant() && HConstant::cast(value)->InOldSpace());
}
class HStoreNamed: public HBinaryOperation {
public:
HStoreNamed(HValue* obj, Handle<Object> name, HValue* val)
@ -2680,6 +2790,10 @@ class HStoreNamed: public HBinaryOperation {
HValue* value() const { return OperandAt(1); }
void set_value(HValue* value) { SetOperandAt(1, value); }
bool NeedsWriteBarrier() const {
return StoringValueNeedsWriteBarrier(value());
}
DECLARE_INSTRUCTION(StoreNamed)
protected:
@ -2760,6 +2874,10 @@ class HStoreKeyed: public HInstruction {
HValue* key() const { return OperandAt(1); }
HValue* value() const { return OperandAt(2); }
bool NeedsWriteBarrier() const {
return StoringValueNeedsWriteBarrier(value());
}
DECLARE_INSTRUCTION(StoreKeyed)
protected:
@ -2779,10 +2897,6 @@ class HStoreKeyedFastElement: public HStoreKeyed {
SetFlag(kChangesArrayElements);
}
bool NeedsWriteBarrier() const {
return !value()->type().IsSmi();
}
virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32.
return (index == 1) ? Representation::Integer32()

246
deps/v8/src/hydrogen.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -128,7 +128,7 @@ HSimulate* HBasicBlock::CreateSimulate(int id) {
int push_count = environment->push_count();
int pop_count = environment->pop_count();
int length = environment->values()->length();
int length = environment->length();
HSimulate* instr = new HSimulate(id, pop_count, length);
for (int i = push_count - 1; i >= 0; --i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
@ -222,7 +222,7 @@ void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
ASSERT(IsLoopHeader() || first_ == NULL);
HEnvironment* incoming_env = pred->last_environment();
if (IsLoopHeader()) {
ASSERT(phis()->length() == incoming_env->values()->length());
ASSERT(phis()->length() == incoming_env->length());
for (int i = 0; i < phis_.length(); ++i) {
phis_[i]->AddInput(incoming_env->values()->at(i));
}
@ -1982,7 +1982,7 @@ AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
: owner_(owner), kind_(kind), outer_(owner->ast_context()) {
owner->set_ast_context(this); // Push.
#ifdef DEBUG
original_count_ = owner->environment()->total_count();
original_length_ = owner->environment()->length();
#endif
}
@ -1995,14 +1995,14 @@ AstContext::~AstContext() {
EffectContext::~EffectContext() {
ASSERT(owner()->HasStackOverflow() ||
!owner()->subgraph()->HasExit() ||
owner()->environment()->total_count() == original_count_);
owner()->environment()->length() == original_length_);
}
ValueContext::~ValueContext() {
ASSERT(owner()->HasStackOverflow() ||
!owner()->subgraph()->HasExit() ||
owner()->environment()->total_count() == original_count_ + 1);
owner()->environment()->length() == original_length_ + 1);
}
@ -2343,7 +2343,7 @@ void HGraphBuilder::SetupScope(Scope* scope) {
}
// Set the initial values of stack-allocated locals.
for (int i = count; i < environment()->values()->length(); ++i) {
for (int i = count; i < environment()->length(); ++i) {
environment()->Bind(i, undefined_constant);
}
@ -2702,7 +2702,7 @@ void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) {
int osr_entry_id = statement->OsrEntryId();
// We want the correct environment at the OsrEntry instruction. Build
// it explicitly. The expression stack should be empty.
int count = osr_entry->last_environment()->total_count();
int count = osr_entry->last_environment()->length();
ASSERT(count == (osr_entry->last_environment()->parameter_count() +
osr_entry->last_environment()->local_count()));
for (int i = 0; i < count; ++i) {
@ -2940,6 +2940,21 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
BAILOUT("unsupported context for arguments object");
}
ast_context()->ReturnValue(environment()->Lookup(variable));
} else if (variable->IsContextSlot()) {
if (variable->mode() == Variable::CONST) {
BAILOUT("reference to const context slot");
}
Slot* slot = variable->AsSlot();
CompilationInfo* info = graph()->info();
int context_chain_length = info->function()->scope()->
ContextChainLength(slot->var()->scope());
ASSERT(context_chain_length >= 0);
// TODO(antonm): if slot's value is not modified by closures, instead
// of reading it out of context, we could just embed the value as
// a constant.
HLoadContextSlot* instr =
new HLoadContextSlot(context_chain_length, slot->index());
ast_context()->ReturnInstruction(instr, expr->id());
} else if (variable->is_global()) {
LookupResult lookup;
LookupGlobalPropertyCell(variable, &lookup, false);
@ -2956,7 +2971,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
ast_context()->ReturnInstruction(instr, expr->id());
} else {
BAILOUT("reference to non-stack-allocated/non-global variable");
BAILOUT("reference to a variable which requires dynamic lookup");
}
}
@ -3103,8 +3118,15 @@ HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps,
// this basic block the current basic block.
HBasicBlock* join_block = graph_->CreateBasicBlock();
for (int i = 0; i < subgraphs->length(); ++i) {
if (subgraphs->at(i)->HasExit()) {
subgraphs->at(i)->exit_block()->Goto(join_block);
HSubgraph* subgraph = subgraphs->at(i);
if (subgraph->HasExit()) {
// In an effect context the value of the type switch is not needed.
// There is no need to merge it at the join block only to discard it.
HBasicBlock* subgraph_exit = subgraph->exit_block();
if (ast_context()->IsEffect()) {
subgraph_exit->last_environment()->Drop(1);
}
subgraph_exit->Goto(join_block);
}
}
@ -3242,7 +3264,8 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->id());
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
ast_context()->ReturnValue(Pop());
} else {
// Build subgraph for generic store through IC.
{
@ -3260,11 +3283,14 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
}
HBasicBlock* new_exit_block =
BuildTypeSwitch(&maps, &subgraphs, object, expr->AssignmentId());
BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialized the value in the
// predecessor environments so there's no need to handle it here.
if (subgraph()->HasExit() && !ast_context()->IsEffect()) {
ast_context()->ReturnValue(Pop());
}
}
if (subgraph()->HasExit()) ast_context()->ReturnValue(Pop());
}
@ -3471,7 +3497,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
Top(),
expr->position(),
expr->AssignmentId());
} else {
} else if (var->IsStackAllocated()) {
// We allow reference to the arguments object only in assignemtns
// to local variables to make sure that the arguments object does
// not escape and is not modified.
@ -3484,6 +3510,8 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
VISIT_FOR_VALUE(expr->value());
}
Bind(proxy->var(), Top());
} else {
BAILOUT("Assigning to no non-stack-allocated/non-global variable");
}
// Return the value.
ast_context()->ReturnValue(Pop());
@ -3548,8 +3576,7 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
if (maps.length() == 0) {
HInstruction* instr = BuildLoadNamedGeneric(object, expr);
instr->set_position(expr->position());
PushAndAdd(instr);
if (instr->HasSideEffects()) AddSimulate(expr->id());
ast_context()->ReturnInstruction(instr, expr->id());
} else {
// Build subgraph for generic load through IC.
{
@ -3568,9 +3595,12 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
HBasicBlock* new_exit_block =
BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialized the value in the
// predecessor environments so there's no need to handle it here.
if (subgraph()->HasExit() && !ast_context()->IsEffect()) {
ast_context()->ReturnValue(Pop());
}
}
if (subgraph()->HasExit()) ast_context()->ReturnValue(Pop());
}
@ -3643,9 +3673,18 @@ HInstruction* HGraphBuilder::BuildLoadKeyedFastElement(HValue* object,
Handle<Map> map = expr->GetMonomorphicReceiverType();
ASSERT(map->has_fast_elements());
AddInstruction(new HCheckMap(object, map));
HInstruction* elements = AddInstruction(new HLoadElements(object));
HInstruction* length = AddInstruction(new HArrayLength(elements));
AddInstruction(new HBoundsCheck(key, length));
bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
HLoadElements* elements = new HLoadElements(object);
HInstruction* length = NULL;
if (is_array) {
length = AddInstruction(new HJSArrayLength(object));
AddInstruction(new HBoundsCheck(key, length));
AddInstruction(elements);
} else {
AddInstruction(elements);
length = AddInstruction(new HFixedArrayLength(elements));
AddInstruction(new HBoundsCheck(key, length));
}
return new HLoadKeyedFastElement(elements, key);
}
@ -3671,9 +3710,9 @@ HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object,
bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
HInstruction* length = NULL;
if (is_array) {
length = AddInstruction(new HArrayLength(object));
length = AddInstruction(new HJSArrayLength(object));
} else {
length = AddInstruction(new HArrayLength(elements));
length = AddInstruction(new HFixedArrayLength(elements));
}
AddInstruction(new HBoundsCheck(key, length));
return new HStoreKeyedFastElement(elements, key, val);
@ -3720,7 +3759,13 @@ void HGraphBuilder::VisitProperty(Property* expr) {
if (expr->IsArrayLength()) {
HValue* array = Pop();
AddInstruction(new HCheckNonSmi(array));
instr = new HArrayLength(array);
AddInstruction(new HCheckInstanceType(array, JS_ARRAY_TYPE, JS_ARRAY_TYPE));
instr = new HJSArrayLength(array);
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
AddInstruction(new HCheckNonSmi(function));
instr = new HLoadFunctionPrototype(function);
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
@ -3767,9 +3812,9 @@ void HGraphBuilder::AddCheckConstantFunction(Call* expr,
AddInstruction(new HCheckMap(receiver, receiver_map));
}
if (!expr->holder().is_null()) {
AddInstruction(new HCheckPrototypeMaps(receiver,
expr->holder(),
receiver_map));
AddInstruction(new HCheckPrototypeMaps(
Handle<JSObject>(JSObject::cast(receiver_map->prototype())),
expr->holder()));
}
}
@ -3841,7 +3886,11 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
HBasicBlock* new_exit_block =
BuildTypeSwitch(&maps, &subgraphs, receiver, expr->id());
subgraph()->set_exit_block(new_exit_block);
if (new_exit_block != NULL) ast_context()->ReturnValue(Pop());
// In an effect context, we did not materialized the value in the
// predecessor environments so there's no need to handle it here.
if (new_exit_block != NULL && !ast_context()->IsEffect()) {
ast_context()->ReturnValue(Pop());
}
}
}
@ -3977,7 +4026,9 @@ bool HGraphBuilder::TryInline(Call* expr) {
function_return_->MarkAsInlineReturnTarget();
}
call_context_ = ast_context();
TypeFeedbackOracle new_oracle(Handle<Code>(shared->code()));
TypeFeedbackOracle new_oracle(
Handle<Code>(shared->code()),
Handle<Context>(target->context()->global_context()));
oracle_ = &new_oracle;
graph()->info()->SetOsrAstId(AstNode::kNoNumber);
@ -4179,7 +4230,8 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
HValue* arg_two_value = environment()->Lookup(arg_two->var());
if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
if (!expr->IsMonomorphic()) return false;
if (!expr->IsMonomorphic() ||
expr->check_type() != RECEIVER_MAP_CHECK) return false;
// Found pattern f.apply(receiver, arguments).
VisitForValue(prop->obj());
@ -4248,7 +4300,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
expr->RecordTypeFeedback(oracle());
ZoneMapList* types = expr->GetReceiverTypes();
if (expr->IsMonomorphic()) {
if (expr->IsMonomorphic() && expr->check_type() == RECEIVER_MAP_CHECK) {
AddCheckConstantFunction(expr, receiver, types->first(), true);
if (TryMathFunctionInline(expr)) {
@ -4273,6 +4325,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
}
} else if (types != NULL && types->length() > 1) {
ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
HandlePolymorphicCallNamed(expr, receiver, types, name);
return;
@ -4847,14 +4900,49 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
TypeInfo info = oracle()->CompareType(expr, TypeFeedbackOracle::RESULT);
HInstruction* instr = NULL;
if (op == Token::INSTANCEOF) {
instr = new HInstanceOf(left, right);
// Check to see if the rhs of the instanceof is a global function not
// residing in new space. If it is we assume that the function will stay the
// same.
Handle<JSFunction> target = Handle<JSFunction>::null();
Variable* var = expr->right()->AsVariableProxy()->AsVariable();
bool global_function = (var != NULL) && var->is_global() && !var->is_this();
CompilationInfo* info = graph()->info();
if (global_function &&
info->has_global_object() &&
!info->global_object()->IsAccessCheckNeeded()) {
Handle<String> name = var->name();
Handle<GlobalObject> global(info->global_object());
LookupResult lookup;
global->Lookup(*name, &lookup);
if (lookup.IsProperty() &&
lookup.type() == NORMAL &&
lookup.GetValue()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
if (!Heap::InNewSpace(*candidate)) {
target = candidate;
}
}
}
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
instr = new HInstanceOf(left, right);
} else {
AddInstruction(new HCheckFunction(right, target));
instr = new HInstanceOfKnownGlobal(left, target);
}
} else if (op == Token::IN) {
BAILOUT("Unsupported comparison: in");
} else if (info.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
AddInstruction(new HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(left));
AddInstruction(new HCheckNonSmi(right));
AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(right));
instr = new HCompareJSObjectEq(left, right);
break;
@ -5262,6 +5350,19 @@ void HEnvironment::Initialize(int parameter_count,
}
void HEnvironment::Initialize(const HEnvironment* other) {
closure_ = other->closure();
values_.AddAll(other->values_);
assigned_variables_.AddAll(other->assigned_variables_);
parameter_count_ = other->parameter_count_;
local_count_ = other->local_count_;
if (other->outer_ != NULL) outer_ = other->outer_->Copy(); // Deep copy.
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
ast_id_ = other->ast_id_;
}
void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
ASSERT(!block->IsLoopHeader());
ASSERT(values_.length() == other->values_.length());
@ -5292,26 +5393,45 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
}
void HEnvironment::Initialize(const HEnvironment* other) {
closure_ = other->closure();
values_.AddAll(other->values_);
assigned_variables_.AddAll(other->assigned_variables_);
parameter_count_ = other->parameter_count_;
local_count_ = other->local_count_;
if (other->outer_ != NULL) outer_ = other->outer_->Copy(); // Deep copy.
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
ast_id_ = other->ast_id_;
void HEnvironment::Bind(int index, HValue* value) {
ASSERT(value != NULL);
if (!assigned_variables_.Contains(index)) {
assigned_variables_.Add(index);
}
values_[index] = value;
}
int HEnvironment::IndexFor(Variable* variable) const {
Slot* slot = variable->AsSlot();
ASSERT(slot != NULL && slot->IsStackAllocated());
if (slot->type() == Slot::PARAMETER) {
return slot->index() + 1;
} else {
return parameter_count_ + slot->index();
bool HEnvironment::HasExpressionAt(int index) const {
return index >= parameter_count_ + local_count_;
}
bool HEnvironment::ExpressionStackIsEmpty() const {
int first_expression = parameter_count() + local_count();
ASSERT(length() >= first_expression);
return length() == first_expression;
}
void HEnvironment::SetExpressionStackAt(int index_from_top, HValue* value) {
int count = index_from_top + 1;
int index = values_.length() - count;
ASSERT(HasExpressionAt(index));
// The push count must include at least the element in question or else
// the new value will not be included in this environment's history.
if (push_count_ < count) {
// This is the same effect as popping then re-pushing 'count' elements.
pop_count_ += (count - push_count_);
push_count_ = count;
}
values_[index] = value;
}
void HEnvironment::Drop(int count) {
for (int i = 0; i < count; ++i) {
Pop();
}
}
@ -5376,7 +5496,7 @@ HEnvironment* HEnvironment::CopyForInlining(Handle<JSFunction> target,
void HEnvironment::PrintTo(StringStream* stream) {
for (int i = 0; i < total_count(); i++) {
for (int i = 0; i < length(); i++) {
if (i == 0) stream->Add("parameters\n");
if (i == parameter_count()) stream->Add("locals\n");
if (i == parameter_count() + local_count()) stream->Add("expressions");
@ -5614,31 +5734,40 @@ void HStatistics::Print() {
PrintF("%30s", names_[i]);
double ms = static_cast<double>(timing_[i]) / 1000;
double percent = static_cast<double>(timing_[i]) * 100 / sum;
PrintF(" - %0.3f ms / %0.3f %% \n", ms, percent);
PrintF(" - %7.3f ms / %4.1f %% ", ms, percent);
unsigned size = sizes_[i];
double size_percent = static_cast<double>(size) * 100 / total_size_;
PrintF(" %8u bytes / %4.1f %%\n", size, size_percent);
}
PrintF("%30s - %0.3f ms \n", "Sum", static_cast<double>(sum) / 1000);
PrintF("%30s - %7.3f ms %8u bytes\n", "Sum",
static_cast<double>(sum) / 1000,
total_size_);
PrintF("---------------------------------------------------------------\n");
PrintF("%30s - %0.3f ms (%0.1f times slower than full code gen)\n",
PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n",
"Total",
static_cast<double>(total_) / 1000,
static_cast<double>(total_) / full_code_gen_);
}
void HStatistics::SaveTiming(const char* name, int64_t ticks) {
void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
if (name == HPhase::kFullCodeGen) {
full_code_gen_ += ticks;
} else if (name == HPhase::kTotal) {
total_ += ticks;
} else {
total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
if (names_[i] == name) {
timing_[i] += ticks;
sizes_[i] += size;
return;
}
}
names_.Add(name);
timing_.Add(ticks);
sizes_.Add(size);
}
}
@ -5659,13 +5788,15 @@ void HPhase::Begin(const char* name,
chunk_ = allocator->chunk();
}
if (FLAG_time_hydrogen) start_ = OS::Ticks();
start_allocation_size_ = Zone::allocation_size_;
}
void HPhase::End() const {
if (FLAG_time_hydrogen) {
int64_t end = OS::Ticks();
HStatistics::Instance()->SaveTiming(name_, end - start_);
unsigned size = Zone::allocation_size_ - start_allocation_size_;
HStatistics::Instance()->SaveTiming(name_, end - start_, size);
}
if (FLAG_trace_hydrogen) {
@ -5678,7 +5809,6 @@ void HPhase::End() const {
#ifdef DEBUG
if (graph_ != NULL) graph_->Verify();
if (chunk_ != NULL) chunk_->Verify();
if (allocator_ != NULL) allocator_->Verify();
#endif
}

124
deps/v8/src/hydrogen.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -401,27 +401,33 @@ class HEnvironment: public ZoneObject {
Scope* scope,
Handle<JSFunction> closure);
// Simple accessors.
Handle<JSFunction> closure() const { return closure_; }
const ZoneList<HValue*>* values() const { return &values_; }
const ZoneList<int>* assigned_variables() const {
return &assigned_variables_;
}
int parameter_count() const { return parameter_count_; }
int local_count() const { return local_count_; }
HEnvironment* outer() const { return outer_; }
int pop_count() const { return pop_count_; }
int push_count() const { return push_count_; }
int ast_id() const { return ast_id_; }
void set_ast_id(int id) { ast_id_ = id; }
int length() const { return values_.length(); }
void Bind(Variable* variable, HValue* value) {
Bind(IndexFor(variable), value);
if (FLAG_trace_environment) {
PrintF("Slot index=%d name=%s\n",
variable->AsSlot()->index(),
*variable->name()->ToCString());
}
}
void Bind(int index, HValue* value) {
ASSERT(value != NULL);
if (!assigned_variables_.Contains(index)) {
assigned_variables_.Add(index);
}
values_[index] = value;
}
void Bind(int index, HValue* value);
HValue* Lookup(Variable* variable) const {
return Lookup(IndexFor(variable));
}
HValue* Lookup(int index) const {
HValue* result = values_[index];
ASSERT(result != NULL);
@ -434,53 +440,28 @@ class HEnvironment: public ZoneObject {
values_.Add(value);
}
HValue* Top() const { return ExpressionStackAt(0); }
HValue* ExpressionStackAt(int index_from_top) const {
int index = values_.length() - index_from_top - 1;
ASSERT(IsExpressionStackIndex(index));
return values_[index];
}
void SetExpressionStackAt(int index_from_top, HValue* value) {
int index = values_.length() - index_from_top - 1;
ASSERT(IsExpressionStackIndex(index));
values_[index] = value;
}
HValue* Pop() {
ASSERT(!IsExpressionStackEmpty());
ASSERT(!ExpressionStackIsEmpty());
if (push_count_ > 0) {
--push_count_;
ASSERT(push_count_ >= 0);
} else {
++pop_count_;
}
return values_.RemoveLast();
}
void Drop(int count) {
for (int i = 0; i < count; ++i) {
Pop();
}
}
Handle<JSFunction> closure() const { return closure_; }
void Drop(int count);
// ID of the original AST node to identify deoptimization points.
int ast_id() const { return ast_id_; }
void set_ast_id(int id) { ast_id_ = id; }
HValue* Top() const { return ExpressionStackAt(0); }
const ZoneList<HValue*>* values() const { return &values_; }
const ZoneList<int>* assigned_variables() const {
return &assigned_variables_;
HValue* ExpressionStackAt(int index_from_top) const {
int index = length() - index_from_top - 1;
ASSERT(HasExpressionAt(index));
return values_[index];
}
int parameter_count() const { return parameter_count_; }
int local_count() const { return local_count_; }
int push_count() const { return push_count_; }
int pop_count() const { return pop_count_; }
int total_count() const { return values_.length(); }
HEnvironment* outer() const { return outer_; }
void SetExpressionStackAt(int index_from_top, HValue* value);
HEnvironment* Copy() const;
HEnvironment* CopyWithoutHistory() const;
HEnvironment* CopyAsLoopHeader(HBasicBlock* block) const;
@ -496,13 +477,15 @@ class HEnvironment: public ZoneObject {
HConstant* undefined) const;
void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
void ClearHistory() {
pop_count_ = 0;
push_count_ = 0;
assigned_variables_.Clear();
}
void SetValueAt(int index, HValue* value) {
ASSERT(index < total_count());
ASSERT(index < length());
values_[index] = value;
}
@ -512,19 +495,23 @@ class HEnvironment: public ZoneObject {
private:
explicit HEnvironment(const HEnvironment* other);
bool IsExpressionStackIndex(int index) const {
return index >= parameter_count_ + local_count_;
}
bool IsExpressionStackEmpty() const {
int length = values_.length();
int first_expression = parameter_count() + local_count();
ASSERT(length >= first_expression);
return length == first_expression;
}
// True if index is included in the expression stack part of the environment.
bool HasExpressionAt(int index) const;
bool ExpressionStackIsEmpty() const;
void Initialize(int parameter_count, int local_count, int stack_height);
void Initialize(const HEnvironment* other);
int VariableToIndex(Variable* var);
int IndexFor(Variable* variable) const;
// Map a variable to an environment index. Parameter indices are shifted
// by 1 (receiver is parameter index -1 but environment index 0).
// Stack-allocated local indices are shifted by the number of parameters.
int IndexFor(Variable* variable) const {
Slot* slot = variable->AsSlot();
ASSERT(slot != NULL && slot->IsStackAllocated());
int shift = (slot->type() == Slot::PARAMETER) ? 1 : parameter_count_;
return slot->index() + shift;
}
Handle<JSFunction> closure_;
// Value array [parameters] [locals] [temporaries].
@ -567,7 +554,7 @@ class AstContext {
// We want to be able to assert, in a context-specific way, that the stack
// height makes sense when the context is filled.
#ifdef DEBUG
int original_count_;
int original_length_;
#endif
private:
@ -919,7 +906,7 @@ class HValueMap: public ZoneObject {
class HStatistics: public Malloced {
public:
void Print();
void SaveTiming(const char* name, int64_t ticks);
void SaveTiming(const char* name, int64_t ticks, unsigned size);
static HStatistics* Instance() {
static SetOncePointer<HStatistics> instance;
if (!instance.is_set()) {
@ -930,11 +917,19 @@ class HStatistics: public Malloced {
private:
HStatistics() : timing_(5), names_(5), total_(0), full_code_gen_(0) { }
HStatistics()
: timing_(5),
names_(5),
sizes_(5),
total_(0),
total_size_(0),
full_code_gen_(0) { }
List<int64_t> timing_;
List<const char*> names_;
List<unsigned> sizes_;
int64_t total_;
unsigned total_size_;
int64_t full_code_gen_;
};
@ -971,6 +966,7 @@ class HPhase BASE_EMBEDDED {
HGraph* graph_;
LChunk* chunk_;
LAllocator* allocator_;
unsigned start_allocation_size_;
};

45
deps/v8/src/ia32/assembler-ia32.cc

@ -2465,6 +2465,17 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) {
}
void Assembler::por(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xEB);
emit_sse_operand(dst, src);
}
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@ -2489,6 +2500,40 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) {
}
void Assembler::psllq(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xF3);
emit_sse_operand(dst, src);
}
void Assembler::psrlq(XMMRegister reg, int8_t shift) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x73);
emit_sse_operand(edx, reg); // edx == 2
EMIT(shift);
}
void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0xD3);
emit_sse_operand(dst, src);
}
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);

4
deps/v8/src/ia32/assembler-ia32.h

@ -919,9 +919,13 @@ class Assembler : public Malloced {
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
void por(XMMRegister dst, XMMRegister src);
void ptest(XMMRegister dst, XMMRegister src);
void psllq(XMMRegister reg, int8_t shift);
void psllq(XMMRegister dst, XMMRegister src);
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);

8
deps/v8/src/ia32/builtins-ia32.cc

@ -399,7 +399,7 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Clear the context before we push it when entering the JS frame.
__ xor_(esi, Operand(esi)); // clear esi
__ Set(esi, Immediate(0));
// Enter an internal frame.
__ EnterInternalFrame();
@ -421,7 +421,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Copy arguments to the stack in a loop.
Label loop, entry;
__ xor_(ecx, Operand(ecx)); // clear ecx
__ Set(ecx, Immediate(0));
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
@ -644,7 +644,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&non_function);
__ mov(Operand(esp, eax, times_4, 0), edi);
// Clear edi to indicate a non-function being called.
__ xor_(edi, Operand(edi));
__ Set(edi, Immediate(0));
// 4. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@ -665,7 +665,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{ Label function;
__ test(edi, Operand(edi));
__ j(not_zero, &function, taken);
__ xor_(ebx, Operand(ebx));
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);

196
deps/v8/src/ia32/code-stubs-ia32.cc

@ -104,7 +104,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ xor_(ebx, Operand(ebx)); // Set to NULL.
__ Set(ebx, Immediate(0)); // Set to NULL.
__ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
__ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
__ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
@ -1772,7 +1772,6 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
}
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
Label call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::STRING);
@ -2016,8 +2015,7 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER ||
operands_type_ == TRBinaryOpIC::INT32);
ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
// Floating point case.
switch (op_) {
@ -4303,7 +4301,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// that contains the exponent and high bit of the mantissa.
STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
__ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ xor_(eax, Operand(eax));
__ Set(eax, Immediate(0));
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost
// bits.
__ add(edx, Operand(edx));
@ -4433,7 +4431,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ j(below, &below_label, not_taken);
__ j(above, &above_label, not_taken);
__ xor_(eax, Operand(eax));
__ Set(eax, Immediate(0));
__ ret(0);
__ bind(&below_label);
@ -4646,7 +4644,7 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of
// a JS entry frame.
__ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
__ Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
NearLabel skip;
__ cmp(ebp, 0);
__ j(equal, &skip, not_taken);
@ -4799,7 +4797,7 @@ void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
}
// Clear the context pointer.
__ xor_(esi, Operand(esi));
__ Set(esi, Immediate(0));
// Restore fp from handler and discard handler state.
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
@ -4973,7 +4971,26 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
// Generate stub code for instanceof.
// This code can patch a call site inlined cache of the instance of check,
// which looks like this.
//
// 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
// 75 0a jne <some near label>
// b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
//
// If call site patching is requested the stack will have the delta from the
// return address to the cmp instruction just below the return address. This
// also means that call site patching can only take place with arguments in
// registers. TOS looks like this when call site patching is requested
//
// esp[0] : return address
// esp[4] : delta from return address to cmp instruction
//
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
// Fixed register usage throughout the stub.
Register object = eax; // Object (lhs).
Register map = ebx; // Map of the object.
@ -4981,9 +4998,22 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Register prototype = edi; // Prototype of the function.
Register scratch = ecx;
// Constants describing the call site code to patch.
static const int kDeltaToCmpImmediate = 2;
static const int kDeltaToMov = 8;
static const int kDeltaToMovImmediate = 9;
static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
ExternalReference roots_address = ExternalReference::roots_address();
ASSERT_EQ(object.code(), InstanceofStub::left().code());
ASSERT_EQ(function.code(), InstanceofStub::right().code());
// Get the object and function - they are always both needed.
Label slow, not_js_object;
if (!args_in_registers()) {
if (!HasArgsInRegisters()) {
__ mov(object, Operand(esp, 2 * kPointerSize));
__ mov(function, Operand(esp, 1 * kPointerSize));
}
@ -4993,22 +5023,26 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(zero, &not_js_object, not_taken);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// Look up the function and the map in the instanceof cache.
NearLabel miss;
ExternalReference roots_address = ExternalReference::roots_address();
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(function,
Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss);
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ cmp(map, Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss);
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(eax, Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ IncrementCounter(&Counters::instance_of_cache, 1);
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
// If there is a call site cache don't look in the global cache, but do the
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck()) {
// Look up the function and the map in the instanceof cache.
NearLabel miss;
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ cmp(function,
Operand::StaticArray(scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss);
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ cmp(map, Operand::StaticArray(
scratch, times_pointer_size, roots_address));
__ j(not_equal, &miss);
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(eax, Operand::StaticArray(
scratch, times_pointer_size, roots_address));
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&miss);
}
__ bind(&miss);
// Get the prototype of the function.
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
@ -5017,13 +5051,29 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ j(zero, &slow, not_taken);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
// Update the golbal instanceof cache with the current map and function. The
// cached answer will be set when it is known.
// Update the global instanceof or call site inlined cache with the current
// map and function. The cached answer will be set when it is known below.
if (!HasCallSiteInlineCheck()) {
__ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
__ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
function);
} else {
// The constants for the code patching are based on no push instructions
// at the call site.
ASSERT(HasArgsInRegisters());
// Get return address and delta to inlined map check.
__ mov(scratch, Operand(esp, 0 * kPointerSize));
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
__ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
__ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
__ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
}
__ mov(Operand(scratch, kDeltaToCmpImmediate), map);
}
// Loop through the prototype chain of the object looking for the function
// prototype.
@ -5039,18 +5089,48 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ jmp(&loop);
__ bind(&is_instance);
__ IncrementCounter(&Counters::instance_of_stub_true, 1);
__ Set(eax, Immediate(0));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
if (!HasCallSiteInlineCheck()) {
__ Set(eax, Immediate(0));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(scratch,
times_pointer_size, roots_address), eax);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, Factory::true_value());
__ mov(scratch, Operand(esp, 0 * kPointerSize));
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
__ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
__ Set(eax, Immediate(0));
}
}
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&is_not_instance);
__ IncrementCounter(&Counters::instance_of_stub_false, 1);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
if (!HasCallSiteInlineCheck()) {
__ Set(eax, Immediate(Smi::FromInt(1)));
__ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
__ mov(Operand::StaticArray(
scratch, times_pointer_size, roots_address), eax);
} else {
// Get return address and delta to inlined map check.
__ mov(eax, Factory::false_value());
__ mov(scratch, Operand(esp, 0 * kPointerSize));
__ sub(scratch, Operand(esp, 1 * kPointerSize));
if (FLAG_debug_code) {
__ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
__ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
}
__ mov(Operand(scratch, kDeltaToMovImmediate), eax);
if (!ReturnTrueFalseObject()) {
__ Set(eax, Immediate(Smi::FromInt(1)));
}
}
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
Label object_not_null, object_not_null_or_smi;
__ bind(&not_js_object);
@ -5064,39 +5144,61 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Null is not instance of anything.
__ cmp(object, Factory::null_value());
__ j(not_equal, &object_not_null);
__ IncrementCounter(&Counters::instance_of_stub_false_null, 1);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null);
// Smi values is not instance of anything.
__ test(object, Immediate(kSmiTagMask));
__ j(not_zero, &object_not_null_or_smi, not_taken);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
__ bind(&object_not_null_or_smi);
// String values is not instance of anything.
Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
__ j(NegateCondition(is_string), &slow);
__ IncrementCounter(&Counters::instance_of_stub_false_string, 1);
__ Set(eax, Immediate(Smi::FromInt(1)));
__ ret((args_in_registers() ? 0 : 2) * kPointerSize);
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
// Slow-case: Go through the JavaScript implementation.
__ bind(&slow);
if (args_in_registers()) {
// Push arguments below return address.
__ pop(scratch);
if (!ReturnTrueFalseObject()) {
// Tail call the builtin which returns 0 or 1.
if (HasArgsInRegisters()) {
// Push arguments below return address.
__ pop(scratch);
__ push(object);
__ push(function);
__ push(scratch);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
// Call the builtin and convert 0/1 to true/false.
__ EnterInternalFrame();
__ push(object);
__ push(function);
__ push(scratch);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
__ LeaveInternalFrame();
NearLabel true_value, done;
__ test(eax, Operand(eax));
__ j(zero, &true_value);
__ mov(eax, Factory::false_value());
__ jmp(&done);
__ bind(&true_value);
__ mov(eax, Factory::true_value());
__ bind(&done);
__ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
}
__ IncrementCounter(&Counters::instance_of_slow, 1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
Register InstanceofStub::left() { return eax; }
Register InstanceofStub::right() { return edx; }
int CompareStub::MinorKey() {
// Encode the three parameters in a unique 16 bit value. To avoid duplicate
// stubs the never NaN NaN condition is only taken into account if the

11
deps/v8/src/ia32/code-stubs-ia32.h

@ -250,13 +250,6 @@ class TypeRecordingBinaryOpStub: public CodeStub {
result_type_(result_type),
name_(NULL) { }
// Generate code to call the stub with the supplied arguments. This will add
// code at the call site to prepare arguments either in registers or on the
// stack together with the actual call.
void GenerateCall(MacroAssembler* masm, Register left, Register right);
void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
private:
enum SmiCodeGenerateHeapNumberResults {
ALLOW_HEAPNUMBER_RESULTS,
@ -321,10 +314,6 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
bool IsOperationCommutative() {
return (op_ == Token::ADD) || (op_ == Token::MUL);
}
virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
virtual InlineCacheState GetICState() {

351
deps/v8/src/ia32/codegen-ia32.cc

@ -745,10 +745,10 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
Comment cmnt(masm_, "[ store arguments object");
if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
// When using lazy arguments allocation, we store the hole value
// When using lazy arguments allocation, we store the arguments marker value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
frame_->Push(Factory::the_hole_value());
frame_->Push(Factory::arguments_marker());
} else {
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
frame_->PushFunction();
@ -773,9 +773,9 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
if (probe.is_constant()) {
// We have to skip updating the arguments object if it has
// been assigned a proper value.
skip_arguments = !probe.handle()->IsTheHole();
skip_arguments = !probe.handle()->IsArgumentsMarker();
} else {
__ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
__ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
probe.Unuse();
done.Branch(not_equal);
}
@ -3294,9 +3294,9 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
Label slow, done;
bool try_lazy = true;
if (probe.is_constant()) {
try_lazy = probe.handle()->IsTheHole();
try_lazy = probe.handle()->IsArgumentsMarker();
} else {
__ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
__ cmp(Operand(probe.reg()), Immediate(Factory::arguments_marker()));
probe.Unuse();
__ j(not_equal, &slow);
}
@ -5068,7 +5068,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// object has been lazily loaded yet.
Result result = frame()->Pop();
if (result.is_constant()) {
if (result.handle()->IsTheHole()) {
if (result.handle()->IsArgumentsMarker()) {
result = StoreArgumentsObject(false);
}
frame()->Push(&result);
@ -5079,7 +5079,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// indicates that we haven't loaded the arguments object yet, we
// need to do it now.
JumpTarget exit;
__ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
__ cmp(Operand(result.reg()), Immediate(Factory::arguments_marker()));
frame()->Push(&result);
exit.Branch(not_equal);
@ -6649,38 +6649,41 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop, loop_condition,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
ASSERT(args->length() == 2);
// We will leave the separator on the stack until the end of the function.
Load(args->at(1));
// Load this to eax (= array)
Load(args->at(0));
Result array_result = frame_->Pop();
array_result.ToRegister(eax);
frame_->SpillAll();
Label bailout;
Label done;
// All aliases of the same register have disjoint lifetimes.
Register array = eax;
Register result_pos = no_reg;
Register elements = no_reg; // Will be eax.
Register index = edi;
Register index = edx;
Register current_string_length = ecx; // Will be ecx when live.
Register string_length = ecx;
Register current_string = edx;
Register string = esi;
Register scratch = ebx;
Register scratch_2 = esi;
Register new_padding_chars = scratch_2;
Operand separator = Operand(esp, 4 * kPointerSize); // Already pushed.
Operand elements = Operand(esp, 3 * kPointerSize);
Operand result = Operand(esp, 2 * kPointerSize);
Operand padding_chars = Operand(esp, 1 * kPointerSize);
Operand array_length = Operand(esp, 0);
__ sub(Operand(esp), Immediate(4 * kPointerSize));
Register array_length = edi;
Register result_pos = no_reg; // Will be edi.
// Check that eax is a JSArray
// Separator operand is already pushed.
Operand separator_operand = Operand(esp, 2 * kPointerSize);
Operand result_operand = Operand(esp, 1 * kPointerSize);
Operand array_length_operand = Operand(esp, 0);
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ test(array, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
@ -6691,140 +6694,226 @@ void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
1 << Map::kHasFastElements);
__ j(zero, &bailout);
// If the array is empty, return the empty string.
__ mov(scratch, FieldOperand(array, JSArray::kLengthOffset));
__ sar(scratch, 1);
Label non_trivial;
__ j(not_zero, &non_trivial);
__ mov(result, Factory::empty_string());
// If the array has length zero, return the empty string.
__ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ sar(array_length, 1);
__ j(not_zero, &non_trivial_array);
__ mov(result_operand, Factory::empty_string());
__ jmp(&done);
__ bind(&non_trivial);
__ mov(array_length, scratch);
__ mov(scratch, FieldOperand(array, JSArray::kElementsOffset));
__ mov(elements, scratch);
// Save the array length.
__ bind(&non_trivial_array);
__ mov(array_length_operand, array_length);
// Save the FixedArray containing array's elements.
// End of array's live range.
result_pos = array;
elements = array;
__ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
// Check that the separator is a flat ascii string.
__ mov(current_string, separator);
__ test(current_string, Immediate(kSmiTagMask));
// Check that all array elements are sequential ASCII strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
__ Set(index, Immediate(0));
__ Set(string_length, Immediate(0));
// Loop condition: while (index < length).
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
__ jmp(&loop_condition);
__ bind(&loop);
__ cmp(index, Operand(array_length));
__ j(greater_equal, &done);
__ mov(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
__ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
// If the separator is the empty string, replace it with NULL.
// The test for NULL is quicker than the empty string test, in a loop.
__ cmp(FieldOperand(current_string, SeqAsciiString::kLengthOffset),
Immediate(0));
Label separator_checked;
__ j(not_zero, &separator_checked);
__ mov(separator, Immediate(0));
__ bind(&separator_checked);
// Check that elements[0] is a flat ascii string, and copy it in new space.
__ mov(scratch, elements);
__ mov(current_string, FieldOperand(scratch, FixedArray::kHeaderSize));
__ test(current_string, Immediate(kSmiTagMask));
__ add(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
__ add(Operand(index), Immediate(1));
__ bind(&loop_condition);
__ cmp(index, Operand(array_length));
__ j(less, &loop);
// If array_length is 1, return elements[0], a string.
__ cmp(array_length, 1);
__ j(not_equal, &not_size_one_array);
__ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
__ mov(result_operand, scratch);
__ jmp(&done);
__ bind(&not_size_one_array);
// End of array_length live range.
result_pos = array_length;
array_length = no_reg;
// Live registers:
// string_length: Sum of string lengths, as a smi.
// elements: FixedArray of strings.
// Check that the separator is a flat ASCII string.
__ mov(string, separator_operand);
__ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
__ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
// Allocate space to copy it. Round up the size to the alignment granularity.
__ mov(current_string_length,
FieldOperand(current_string, String::kLengthOffset));
__ shr(current_string_length, 1);
// Add (separator length times array_length) - separator length
// to string_length.
__ mov(scratch, separator_operand);
__ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
__ sub(string_length, Operand(scratch)); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
__ add(string_length, Operand(scratch));
__ j(overflow, &bailout);
__ shr(string_length, 1);
// Live registers and stack values:
// current_string_length: length of elements[0].
// New string result in new space = elements[0]
__ AllocateAsciiString(result_pos, current_string_length, scratch_2,
index, no_reg, &bailout);
__ mov(result, result_pos);
// Adjust current_string_length to include padding bytes at end of string.
// Keep track of the number of padding bytes.
__ mov(new_padding_chars, current_string_length);
__ add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
__ and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
__ sub(new_padding_chars, Operand(current_string_length));
__ neg(new_padding_chars);
__ mov(padding_chars, new_padding_chars);
Label copy_loop_1_done;
Label copy_loop_1;
__ test(current_string_length, Operand(current_string_length));
__ j(zero, &copy_loop_1_done);
__ bind(&copy_loop_1);
__ sub(Operand(current_string_length), Immediate(kPointerSize));
__ mov(scratch, FieldOperand(current_string, current_string_length,
times_1, SeqAsciiString::kHeaderSize));
__ mov(FieldOperand(result_pos, current_string_length,
times_1, SeqAsciiString::kHeaderSize),
scratch);
__ j(not_zero, &copy_loop_1);
__ bind(&copy_loop_1_done);
__ mov(index, Immediate(1));
// string_length
// elements
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ mov(result_operand, result_pos);
__ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
__ mov(string, separator_operand);
__ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
// Empty separator case
__ mov(index, Immediate(0));
__ jmp(&loop_1_condition);
// Loop condition: while (index < length).
Label loop;
__ bind(&loop);
__ cmp(index, array_length);
__ j(greater_equal, &done);
__ bind(&loop_1);
// Each iteration of the loop concatenates one string to the result.
// Live values in registers:
// index: which element of the elements array we are adding to the result.
// result_pos: the position to which we are currently copying characters.
// elements: the FixedArray of strings we are joining.
// Get string = array[index].
__ mov(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ mov(string_length,
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
__ bind(&loop_1_condition);
__ cmp(index, array_length_operand);
__ j(less, &loop_1); // End while (index < length).
__ jmp(&done);
// If the separator is the empty string, signalled by NULL, skip it.
Label separator_done;
__ mov(current_string, separator);
__ test(current_string, Operand(current_string));
__ j(zero, &separator_done);
// Append separator to result. It is known to be a flat ascii string.
__ AppendStringToTopOfNewSpace(current_string, current_string_length,
result_pos, scratch, scratch_2, result,
padding_chars, &bailout);
__ bind(&separator_done);
// Add next element of array to the end of the result.
// Get current_string = array[index].
__ mov(scratch, elements);
__ mov(current_string, FieldOperand(scratch, index,
times_pointer_size,
FixedArray::kHeaderSize));
// If current != flat ascii string drop result, return undefined.
__ test(current_string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
__ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
// Append current to the result.
__ AppendStringToTopOfNewSpace(current_string, current_string_length,
result_pos, scratch, scratch_2, result,
padding_chars, &bailout);
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ascii character value.
__ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
__ mov_b(separator_operand, scratch);
__ Set(index, Immediate(0));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&loop_2_entry);
// Loop condition: while (index < length).
__ bind(&loop_2);
// Each iteration of the loop concatenates one string to the result.
// Live values in registers:
// index: which element of the elements array we are adding to the result.
// result_pos: the position to which we are currently copying characters.
// Copy the separator character to the result.
__ mov_b(scratch, separator_operand);
__ mov_b(Operand(result_pos, 0), scratch);
__ inc(result_pos);
__ bind(&loop_2_entry);
// Get string = array[index].
__ mov(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ mov(string_length,
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
__ jmp(&loop); // End while (index < length).
__ cmp(index, array_length_operand);
__ j(less, &loop_2); // End while (index < length).
__ jmp(&done);
// Long separator case (separator is more than one character).
__ bind(&long_separator);
__ Set(index, Immediate(0));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&loop_3_entry);
// Loop condition: while (index < length).
__ bind(&loop_3);
// Each iteration of the loop concatenates one string to the result.
// Live values in registers:
// index: which element of the elements array we are adding to the result.
// result_pos: the position to which we are currently copying characters.
// Copy the separator to the result.
__ mov(string, separator_operand);
__ mov(string_length,
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&loop_3_entry);
// Get string = array[index].
__ mov(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ mov(string_length,
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_3); // End while (index < length).
__ jmp(&done);
__ bind(&bailout);
__ mov(result, Factory::undefined_value());
__ mov(result_operand, Factory::undefined_value());
__ bind(&done);
__ mov(eax, result);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
__ add(Operand(esp), Immediate(4 * kPointerSize));
__ add(Operand(esp), Immediate(2 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
frame_->Drop(1);

2
deps/v8/src/ia32/debug-ia32.cc

@ -125,7 +125,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ Set(eax, Immediate(0)); // no arguments
__ Set(eax, Immediate(0)); // No arguments.
__ mov(ebx, Immediate(ExternalReference::debug_break()));
CEntryStub ceb(1);

23
deps/v8/src/ia32/deoptimizer-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -27,6 +27,8 @@
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@ -56,8 +58,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
SafepointTable table(function->code());
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
int deoptimization_index = table.GetDeoptimizationIndex(i);
int gap_code_size = table.GetGapCodeSize(i);
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
unsigned instructions = pc_offset - last_pc_offset;
@ -105,23 +108,25 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
Code* replacement_code) {
// The stack check code matches the pattern (on ia32, for example):
// The stack check code matches the pattern:
//
// cmp esp, <limit>
// jae ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch the code to:
// We will patch away the branch so the code is:
//
// cmp esp, <limit> ;; Not changed
// nop
// nop
// call <on-stack replacment>
// test eax, <loop nesting depth>
// ok:
Address call_target_address = rinfo->pc();
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x05 && // offset
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x90; // nop
*(call_target_address - 2) = 0x90; // nop
@ -130,12 +135,14 @@ void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
Address call_target_address = rinfo->pc();
ASSERT(*(call_target_address - 3) == 0x90 && // nop
*(call_target_address - 2) == 0x90 && // nop
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x05; // offset
*(call_target_address - 2) = 0x07; // offset
rinfo->set_target_address(check_code->entry());
}
@ -613,3 +620,5 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

28
deps/v8/src/ia32/disasm-ia32.cc

@ -1182,15 +1182,33 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0xF3) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("psllq %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x73) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("psllq %s,%d",
ASSERT(regop == esi || regop == edx);
AppendToBuffer("%s %s,%d",
(regop == esi) ? "psllq" : "psrlq",
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0xD3) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("psrlq %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x7F) {
AppendToBuffer("movdqa ");
data++;
@ -1228,6 +1246,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0xEB) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("por %s,%s",
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else {
UnimplementedInstruction();
}

373
deps/v8/src/ia32/full-codegen-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -264,16 +264,24 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
__ j(above_equal, &ok, taken);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack
// replacement. In order to decide whether or not to perform OSR we
// embed the loop depth in a test instruction after the call so we
// can extract it from the OSR builtin.
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
// in a test instruction after the call so we can extract it from the OSR
// builtin.
ASSERT(loop_depth() > 0);
__ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
@ -379,7 +387,7 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
__ mov(result_register(), lit);
__ Set(result_register(), Immediate(lit));
}
@ -1497,7 +1505,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
if (property->is_arguments_access()) {
VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
__ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
MemOperand slot_operand =
EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
__ push(slot_operand);
__ mov(eax, Immediate(property->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(property->obj());
@ -1508,7 +1518,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
} else {
if (property->is_arguments_access()) {
VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
__ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
MemOperand slot_operand =
EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
__ push(slot_operand);
__ push(Immediate(property->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(property->obj());
@ -3339,39 +3351,37 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
Label bailout;
Label done;
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop, loop_condition,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
ASSERT(args->length() == 2);
// We will leave the separator on the stack until the end of the function.
VisitForStackValue(args->at(1));
// Load this to eax (= array)
VisitForAccumulatorValue(args->at(0));
// All aliases of the same register have disjoint lifetimes.
Register array = eax;
Register result_pos = no_reg;
Register elements = no_reg; // Will be eax.
Register index = edi;
Register index = edx;
Register current_string_length = ecx; // Will be ecx when live.
Register string_length = ecx;
Register current_string = edx;
Register string = esi;
Register scratch = ebx;
Register scratch_2 = esi;
Register new_padding_chars = scratch_2;
Operand separator = Operand(esp, 4 * kPointerSize); // Already pushed.
Operand elements = Operand(esp, 3 * kPointerSize);
Operand result = Operand(esp, 2 * kPointerSize);
Operand padding_chars = Operand(esp, 1 * kPointerSize);
Operand array_length = Operand(esp, 0);
__ sub(Operand(esp), Immediate(4 * kPointerSize));
Register array_length = edi;
Register result_pos = no_reg; // Will be edi.
// Check that eax is a JSArray
// Separator operand is already pushed.
Operand separator_operand = Operand(esp, 2 * kPointerSize);
Operand result_operand = Operand(esp, 1 * kPointerSize);
Operand array_length_operand = Operand(esp, 0);
__ sub(Operand(esp), Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ test(array, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
@ -3382,140 +3392,226 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
1 << Map::kHasFastElements);
__ j(zero, &bailout);
// If the array is empty, return the empty string.
__ mov(scratch, FieldOperand(array, JSArray::kLengthOffset));
__ sar(scratch, 1);
Label non_trivial;
__ j(not_zero, &non_trivial);
__ mov(result, Factory::empty_string());
// If the array has length zero, return the empty string.
__ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ sar(array_length, 1);
__ j(not_zero, &non_trivial_array);
__ mov(result_operand, Factory::empty_string());
__ jmp(&done);
__ bind(&non_trivial);
__ mov(array_length, scratch);
__ mov(scratch, FieldOperand(array, JSArray::kElementsOffset));
__ mov(elements, scratch);
// Save the array length.
__ bind(&non_trivial_array);
__ mov(array_length_operand, array_length);
// Save the FixedArray containing array's elements.
// End of array's live range.
result_pos = array;
elements = array;
__ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
array = no_reg;
// Check that the separator is a flat ascii string.
__ mov(current_string, separator);
__ test(current_string, Immediate(kSmiTagMask));
// Check that all array elements are sequential ASCII strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
__ Set(index, Immediate(0));
__ Set(string_length, Immediate(0));
// Loop condition: while (index < length).
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
__ jmp(&loop_condition);
__ bind(&loop);
__ cmp(index, Operand(array_length));
__ j(greater_equal, &done);
__ mov(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
__ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
// If the separator is the empty string, replace it with NULL.
// The test for NULL is quicker than the empty string test, in a loop.
__ cmp(FieldOperand(current_string, SeqAsciiString::kLengthOffset),
Immediate(0));
Label separator_checked;
__ j(not_zero, &separator_checked);
__ mov(separator, Immediate(0));
__ bind(&separator_checked);
// Check that elements[0] is a flat ascii string, and copy it in new space.
__ mov(scratch, elements);
__ mov(current_string, FieldOperand(scratch, FixedArray::kHeaderSize));
__ test(current_string, Immediate(kSmiTagMask));
__ add(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
__ add(Operand(index), Immediate(1));
__ bind(&loop_condition);
__ cmp(index, Operand(array_length));
__ j(less, &loop);
// If array_length is 1, return elements[0], a string.
__ cmp(array_length, 1);
__ j(not_equal, &not_size_one_array);
__ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
__ mov(result_operand, scratch);
__ jmp(&done);
__ bind(&not_size_one_array);
// End of array_length live range.
result_pos = array_length;
array_length = no_reg;
// Live registers:
// string_length: Sum of string lengths, as a smi.
// elements: FixedArray of strings.
// Check that the separator is a flat ASCII string.
__ mov(string, separator_operand);
__ test(string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
__ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
// Allocate space to copy it. Round up the size to the alignment granularity.
__ mov(current_string_length,
FieldOperand(current_string, String::kLengthOffset));
__ shr(current_string_length, 1);
// Add (separator length times array_length) - separator length
// to string_length.
__ mov(scratch, separator_operand);
__ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
__ sub(string_length, Operand(scratch)); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
__ add(string_length, Operand(scratch));
__ j(overflow, &bailout);
__ shr(string_length, 1);
// Live registers and stack values:
// current_string_length: length of elements[0].
// New string result in new space = elements[0]
__ AllocateAsciiString(result_pos, current_string_length, scratch_2,
index, no_reg, &bailout);
__ mov(result, result_pos);
// Adjust current_string_length to include padding bytes at end of string.
// Keep track of the number of padding bytes.
__ mov(new_padding_chars, current_string_length);
__ add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
__ and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
__ sub(new_padding_chars, Operand(current_string_length));
__ neg(new_padding_chars);
__ mov(padding_chars, new_padding_chars);
Label copy_loop_1_done;
Label copy_loop_1;
__ test(current_string_length, Operand(current_string_length));
__ j(zero, &copy_loop_1_done);
__ bind(&copy_loop_1);
__ sub(Operand(current_string_length), Immediate(kPointerSize));
__ mov(scratch, FieldOperand(current_string, current_string_length,
times_1, SeqAsciiString::kHeaderSize));
__ mov(FieldOperand(result_pos, current_string_length,
times_1, SeqAsciiString::kHeaderSize),
scratch);
__ j(not_zero, &copy_loop_1);
__ bind(&copy_loop_1_done);
__ mov(index, Immediate(1));
// string_length
// elements
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ mov(result_operand, result_pos);
__ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
__ mov(string, separator_operand);
__ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
// Empty separator case
__ mov(index, Immediate(0));
__ jmp(&loop_1_condition);
// Loop condition: while (index < length).
Label loop;
__ bind(&loop);
__ cmp(index, array_length);
__ j(greater_equal, &done);
__ bind(&loop_1);
// Each iteration of the loop concatenates one string to the result.
// Live values in registers:
// index: which element of the elements array we are adding to the result.
// result_pos: the position to which we are currently copying characters.
// elements: the FixedArray of strings we are joining.
// Get string = array[index].
__ mov(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ mov(string_length,
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
__ bind(&loop_1_condition);
__ cmp(index, array_length_operand);
__ j(less, &loop_1); // End while (index < length).
__ jmp(&done);
// If the separator is the empty string, signalled by NULL, skip it.
Label separator_done;
__ mov(current_string, separator);
__ test(current_string, Operand(current_string));
__ j(zero, &separator_done);
// Append separator to result. It is known to be a flat ascii string.
__ AppendStringToTopOfNewSpace(current_string, current_string_length,
result_pos, scratch, scratch_2, result,
padding_chars, &bailout);
__ bind(&separator_done);
// Add next element of array to the end of the result.
// Get current_string = array[index].
__ mov(scratch, elements);
__ mov(current_string, FieldOperand(scratch, index,
times_pointer_size,
FixedArray::kHeaderSize));
// If current != flat ascii string drop result, return undefined.
__ test(current_string, Immediate(kSmiTagMask));
__ j(zero, &bailout);
__ mov(scratch, FieldOperand(current_string, HeapObject::kMapOffset));
__ mov_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
// Append current to the result.
__ AppendStringToTopOfNewSpace(current_string, current_string_length,
result_pos, scratch, scratch_2, result,
padding_chars, &bailout);
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ascii character value.
__ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
__ mov_b(separator_operand, scratch);
__ Set(index, Immediate(0));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&loop_2_entry);
// Loop condition: while (index < length).
__ bind(&loop_2);
// Each iteration of the loop concatenates one string to the result.
// Live values in registers:
// index: which element of the elements array we are adding to the result.
// result_pos: the position to which we are currently copying characters.
// Copy the separator character to the result.
__ mov_b(scratch, separator_operand);
__ mov_b(Operand(result_pos, 0), scratch);
__ inc(result_pos);
__ bind(&loop_2_entry);
// Get string = array[index].
__ mov(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ mov(string_length,
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_2); // End while (index < length).
__ jmp(&done);
// Long separator case (separator is more than one character).
__ bind(&long_separator);
__ Set(index, Immediate(0));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&loop_3_entry);
// Loop condition: while (index < length).
__ bind(&loop_3);
// Each iteration of the loop concatenates one string to the result.
// Live values in registers:
// index: which element of the elements array we are adding to the result.
// result_pos: the position to which we are currently copying characters.
// Copy the separator to the result.
__ mov(string, separator_operand);
__ mov(string_length,
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&loop_3_entry);
// Get string = array[index].
__ mov(string, FieldOperand(elements, index,
times_pointer_size,
FixedArray::kHeaderSize));
__ mov(string_length,
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(Operand(index), Immediate(1));
__ jmp(&loop); // End while (index < length).
__ cmp(index, array_length_operand);
__ j(less, &loop_3); // End while (index < length).
__ jmp(&done);
__ bind(&bailout);
__ mov(result, Factory::undefined_value());
__ mov(result_operand, Factory::undefined_value());
__ bind(&done);
__ mov(eax, result);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
__ add(Operand(esp), Immediate(5 * kPointerSize));
__ add(Operand(esp), Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->Plug(eax);
@ -3739,7 +3835,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
if (prop->is_arguments_access()) {
VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
__ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
MemOperand slot_operand =
EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx);
__ push(slot_operand);
__ mov(eax, Immediate(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
@ -4042,7 +4140,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForStackValue(expr->right());
__ IncrementCounter(&Counters::instance_of_full, 1);
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);

15
deps/v8/src/ia32/ic-ia32.cc

@ -1199,7 +1199,7 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
break;
case kExternalShortArray:
case kExternalUnsignedShortArray:
__ xor_(ecx, Operand(ecx));
__ Set(ecx, Immediate(0));
__ mov_w(Operand(edi, ebx, times_2, 0), ecx);
break;
case kExternalIntArray:
@ -1219,9 +1219,6 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
}
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
@ -1567,9 +1564,6 @@ void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
}
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : receiver
@ -1795,10 +1789,6 @@ bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
}
// Defined in ic.cc.
Object* KeyedLoadIC_Miss(Arguments args);
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
@ -1982,9 +1972,6 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
}
// Defined in ic.cc.
Object* KeyedStoreIC_Miss(Arguments args);
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value

512
deps/v8/src/ia32/lithium-codegen-ia32.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -25,6 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_IA32)
#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "stub-cache.h"
@ -54,6 +58,157 @@ class SafepointGenerator : public PostCallGenerator {
};
class LGapNode: public ZoneObject {
public:
explicit LGapNode(LOperand* operand)
: operand_(operand), resolved_(false), visited_id_(-1) { }
LOperand* operand() const { return operand_; }
bool IsResolved() const { return !IsAssigned() || resolved_; }
void MarkResolved() {
ASSERT(!IsResolved());
resolved_ = true;
}
int visited_id() const { return visited_id_; }
void set_visited_id(int id) {
ASSERT(id > visited_id_);
visited_id_ = id;
}
bool IsAssigned() const { return assigned_from_.is_set(); }
LGapNode* assigned_from() const { return assigned_from_.get(); }
void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
private:
LOperand* operand_;
SetOncePointer<LGapNode> assigned_from_;
bool resolved_;
int visited_id_;
};
LGapResolver::LGapResolver()
: nodes_(32),
identified_cycles_(4),
result_(16),
next_visited_id_(0) {
}
const ZoneList<LMoveOperands>* LGapResolver::Resolve(
const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand) {
nodes_.Rewind(0);
identified_cycles_.Rewind(0);
result_.Rewind(0);
next_visited_id_ = 0;
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) RegisterMove(move);
}
for (int i = 0; i < identified_cycles_.length(); ++i) {
ResolveCycle(identified_cycles_[i], marker_operand);
}
int unresolved_nodes;
do {
unresolved_nodes = 0;
for (int j = 0; j < nodes_.length(); j++) {
LGapNode* node = nodes_[j];
if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
AddResultMove(node->assigned_from(), node);
node->MarkResolved();
}
if (!node->IsResolved()) ++unresolved_nodes;
}
} while (unresolved_nodes > 0);
return &result_;
}
void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
AddResultMove(from->operand(), to->operand());
}
void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
result_.Add(LMoveOperands(from, to));
}
void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
ZoneList<LOperand*> cycle_operands(8);
cycle_operands.Add(marker_operand);
LGapNode* cur = start;
do {
cur->MarkResolved();
cycle_operands.Add(cur->operand());
cur = cur->assigned_from();
} while (cur != start);
cycle_operands.Add(marker_operand);
for (int i = cycle_operands.length() - 1; i > 0; --i) {
LOperand* from = cycle_operands[i];
LOperand* to = cycle_operands[i - 1];
AddResultMove(from, to);
}
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
ASSERT(a != b);
LGapNode* cur = a;
while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
cur->set_visited_id(visited_id);
cur = cur->assigned_from();
}
return cur == b;
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
ASSERT(a != b);
return CanReach(a, b, next_visited_id_++);
}
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.from()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.from(), move.to());
} else {
LGapNode* from = LookupNode(move.from());
LGapNode* to = LookupNode(move.to());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
}
ASSERT(!to->IsAssigned());
if (CanReach(from, to)) {
// This introduces a cycle. Save.
identified_cycles_.Add(from);
}
to->set_assigned_from(from);
}
}
LGapNode* LGapResolver::LookupNode(LOperand* operand) {
for (int i = 0; i < nodes_.length(); ++i) {
if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
}
// No node found => create a new one.
LGapNode* result = new LGapNode(operand);
nodes_.Add(result);
return result;
}
#define __ masm()->
bool LCodeGen::GenerateCode() {
@ -135,6 +290,17 @@ bool LCodeGen::GeneratePrologue() {
__ j(not_zero, &loop);
} else {
__ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
// On windows, you may not access the stack more than one page below
// the most recently mapped page. To make the allocated area randomly
// accessible, we write to each page in turn (the value is irrelevant).
const int kPageSize = 4 * KB;
for (int offset = slots * kPointerSize - kPageSize;
offset > 0;
offset -= kPageSize) {
__ mov(Operand(esp, offset), eax);
}
#endif
}
}
@ -261,6 +427,45 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
int translation_size = environment->values()->length();
// The output frame height does not include the parameters.
int height = translation_size - environment->parameter_count();
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
translation->BeginFrame(environment->ast_id(), closure_id, height);
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
// both NULL or both set.
if (environment->spilled_registers() != NULL && value != NULL) {
if (value->IsRegister() &&
environment->spilled_registers()[value->index()] != NULL) {
translation->MarkDuplicate();
AddToTranslation(translation,
environment->spilled_registers()[value->index()],
environment->HasTaggedValueAt(i));
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
translation->MarkDuplicate();
AddToTranslation(
translation,
environment->spilled_double_registers()[value->index()],
false);
}
}
AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
}
}
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged) {
@ -385,7 +590,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
++frame_count;
}
Translation translation(&translations_, frame_count);
environment->WriteTranslation(this, &translation);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
environment->Register(deoptimization_index, translation.index());
deoptimizations_.Add(environment);
@ -564,8 +769,8 @@ void LCodeGen::DoParallelMove(LParallelMove* move) {
Register cpu_scratch = esi;
bool destroys_cpu_scratch = false;
LGapResolver resolver(move->move_operands(), &marker_operand);
const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
const ZoneList<LMoveOperands>* moves =
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.from();
@ -940,7 +1145,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
void LCodeGen::DoConstantI(LConstantI* instr) {
ASSERT(instr->result()->IsRegister());
__ mov(ToRegister(instr->result()), instr->value());
__ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
@ -973,27 +1178,21 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
ASSERT(instr->result()->IsRegister());
__ mov(ToRegister(instr->result()), Immediate(instr->value()));
__ Set(ToRegister(instr->result()), Immediate(instr->value()));
}
void LCodeGen::DoArrayLength(LArrayLength* instr) {
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->input());
__ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}
if (instr->hydrogen()->value()->IsLoadElements()) {
// We load the length directly from the elements array.
Register elements = ToRegister(instr->input());
__ mov(result, FieldOperand(elements, FixedArray::kLengthOffset));
} else {
// Check that the receiver really is an array.
Register array = ToRegister(instr->input());
Register temporary = ToRegister(instr->temporary());
__ CmpObjectType(array, JS_ARRAY_TYPE, temporary);
DeoptimizeIf(not_equal, instr->environment());
// Load length directly from the array.
__ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->input());
__ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
}
@ -1700,7 +1899,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
// Object and function are in fixed registers eax and edx.
// Object and function are in fixed registers defined by the stub.
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@ -1726,6 +1925,107 @@ void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
}
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
}
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
};
DeferredInstanceOfKnownGlobal* deferred;
deferred = new DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->input());
Register temp = ToRegister(instr->temp());
// A Smi is not instance of anything.
__ test(object, Immediate(kSmiTagMask));
__ j(zero, &false_result, not_taken);
// This is the inlined call site instanceof cache. The two occourences of the
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
NearLabel cache_miss;
Register map = ToRegister(instr->temp());
__ mov(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
__ cmp(map, Factory::the_hole_value()); // Patched to cached map.
__ j(not_equal, &cache_miss, not_taken);
__ mov(eax, Factory::the_hole_value()); // Patched to either true or false.
__ jmp(&done);
// The inlined call site cache did not match. Check null and string before
// calling the deferred code.
__ bind(&cache_miss);
// Null is not instance of anything.
__ cmp(object, Factory::null_value());
__ j(equal, &false_result);
// String values are not instances of anything.
Condition is_string = masm_->IsObjectStringType(object, temp, temp);
__ j(is_string, &false_result);
// Go to the deferred code.
__ jmp(deferred->entry());
__ bind(&false_result);
__ mov(ToRegister(instr->result()), Factory::false_value());
// Here result has either true or false. Deferred code also produces true or
// false object.
__ bind(deferred->exit());
__ bind(&done);
}
void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check) {
__ PushSafepointRegisters();
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kCallSiteInlineCheck);
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kReturnTrueFalseObject);
InstanceofStub stub(flags);
// Get the temp register reserved by the instruction. This needs to be edi as
// its slot of the pushing of safepoint registers is used to communicate the
// offset to the location of the map check.
Register temp = ToRegister(instr->temp());
ASSERT(temp.is(edi));
__ mov(InstanceofStub::right(), Immediate(instr->function()));
static const int kAdditionalDelta = 13;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
__ mov(temp, Immediate(delta));
__ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
ASSERT_EQ(kAdditionalDelta,
masm_->SizeOfCodeGeneratedSince(&before_push_delta));
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
// Put the result value into the eax slot and restore all registers.
__ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax);
__ PopSafepointRegisters();
}
static Condition ComputeCompareCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
@ -1815,6 +2115,14 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
}
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
// TODO(antonm): load a context with a separate instruction.
Register result = ToRegister(instr->result());
__ LoadContext(result, instr->context_chain_length());
__ mov(result, ContextOperand(result, instr->slot_index()));
}
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Register object = ToRegister(instr->input());
Register result = ToRegister(instr->result());
@ -1837,6 +2145,48 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
}
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register function = ToRegister(instr->function());
Register temp = ToRegister(instr->temporary());
Register result = ToRegister(instr->result());
// Check that the function really is a function.
__ CmpObjectType(function, JS_FUNCTION_TYPE, result);
DeoptimizeIf(not_equal, instr->environment());
// Check whether the function has an instance prototype.
NearLabel non_instance;
__ test_b(FieldOperand(result, Map::kBitFieldOffset),
1 << Map::kHasNonInstancePrototype);
__ j(not_zero, &non_instance);
// Get the prototype or initial map from the function.
__ mov(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
__ cmp(Operand(result), Immediate(Factory::the_hole_value()));
DeoptimizeIf(equal, instr->environment());
// If the function does not have an initial map, we're done.
NearLabel done;
__ CmpObjectType(result, MAP_TYPE, temp);
__ j(not_equal, &done);
// Get the prototype from the initial map.
__ mov(result, FieldOperand(result, Map::kPrototypeOffset));
__ jmp(&done);
// Non-instance prototype: Fetch prototype from constructor field
// in the function's map.
__ bind(&non_instance);
__ mov(result, FieldOperand(result, Map::kConstructorOffset));
// All done.
__ bind(&done);
}
void LCodeGen::DoLoadElements(LLoadElements* instr) {
ASSERT(instr->result()->Equals(instr->input()));
Register reg = ToRegister(instr->input());
@ -1863,6 +2213,8 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
__ sub(length, index);
DeoptimizeIf(below_equal, instr->environment());
// There are two words between the frame pointer and the last argument.
// Subtracting from length accounts for one of them add one more.
__ mov(result, Operand(arguments, length, times_4, kPointerSize));
}
@ -1870,32 +2222,15 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
Register elements = ToRegister(instr->elements());
Register key = ToRegister(instr->key());
Register result;
if (instr->load_result() != NULL) {
result = ToRegister(instr->load_result());
} else {
result = ToRegister(instr->result());
ASSERT(result.is(elements));
}
Register result = ToRegister(instr->result());
ASSERT(result.is(elements));
// Load the result.
__ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
Representation r = instr->hydrogen()->representation();
if (r.IsInteger32()) {
// Untag and check for smi.
__ SmiUntag(result);
DeoptimizeIf(carry, instr->environment());
} else if (r.IsDouble()) {
EmitNumberUntagD(result,
ToDoubleRegister(instr->result()),
instr->environment());
} else {
// Check for the hole value.
ASSERT(r.IsTagged());
__ cmp(result, Factory::the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
// Check for the hole value.
__ cmp(result, Factory::the_hole_value());
DeoptimizeIf(equal, instr->environment());
}
@ -1912,7 +2247,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
// Check for arguments adapter frame.
Label done, adapted;
NearLabel done, adapted;
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
__ cmp(Operand(result),
@ -1927,7 +2262,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ bind(&adapted);
__ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
// Done. Pointer to topmost argument is in result.
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ bind(&done);
}
@ -1936,9 +2272,9 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Operand elem = ToOperand(instr->input());
Register result = ToRegister(instr->result());
Label done;
NearLabel done;
// No arguments adaptor frame. Number of arguments is fixed.
// If no arguments adaptor frame the number of arguments is fixed.
__ cmp(ebp, elem);
__ mov(result, Immediate(scope()->num_parameters()));
__ j(equal, &done);
@ -1949,7 +2285,7 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(result);
// Done. Argument length is in result register.
// Argument length is in result register.
__ bind(&done);
}
@ -2498,7 +2834,6 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
value);
}
// Update the write barrier unless we're certain that we're storing a smi.
if (instr->hydrogen()->NeedsWriteBarrier()) {
// Compute address of modified element and store it into key register.
__ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
@ -2849,9 +3184,60 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
} else {
// This will bail out if the input was not in the int32 range (or,
// unfortunately, if the input was 0x80000000).
DeoptimizeIf(equal, instr->environment());
NearLabel done;
Register temp_reg = ToRegister(instr->temporary());
XMMRegister xmm_scratch = xmm0;
// If cvttsd2si succeeded, we're done. Otherwise, we attempt
// manual conversion.
__ j(not_equal, &done);
// Get high 32 bits of the input in result_reg and temp_reg.
__ pshufd(xmm_scratch, input_reg, 1);
__ movd(Operand(temp_reg), xmm_scratch);
__ mov(result_reg, temp_reg);
// Prepare negation mask in temp_reg.
__ sar(temp_reg, kBitsPerInt - 1);
// Extract the exponent from result_reg and subtract adjusted
// bias from it. The adjustment is selected in a way such that
// when the difference is zero, the answer is in the low 32 bits
// of the input, otherwise a shift has to be performed.
__ shr(result_reg, HeapNumber::kExponentShift);
__ and_(result_reg,
HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
__ sub(Operand(result_reg),
Immediate(HeapNumber::kExponentBias +
HeapNumber::kExponentBits +
HeapNumber::kMantissaBits));
// Don't handle big (> kMantissaBits + kExponentBits == 63) or
// special exponents.
DeoptimizeIf(greater, instr->environment());
// Zero out the sign and the exponent in the input (by shifting
// it to the left) and restore the implicit mantissa bit,
// i.e. convert the input to unsigned int64 shifted left by
// kExponentBits.
ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
// Minus zero has the most significant bit set and the other
// bits cleared.
__ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
__ psllq(input_reg, HeapNumber::kExponentBits);
__ por(input_reg, xmm_scratch);
// Get the amount to shift the input right in xmm_scratch.
__ neg(result_reg);
__ movd(xmm_scratch, Operand(result_reg));
// Shift the input right and extract low 32 bits.
__ psrlq(input_reg, xmm_scratch);
__ movd(Operand(result_reg), input_reg);
// Use the prepared mask in temp_reg to negate the result if necessary.
__ xor_(result_reg, Operand(temp_reg));
__ sub(result_reg, Operand(temp_reg));
__ bind(&done);
}
} else {
NearLabel done;
@ -2891,9 +3277,6 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
InstanceType first = instr->hydrogen()->first();
InstanceType last = instr->hydrogen()->last();
__ test(input, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr->environment());
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
__ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
static_cast<int8_t>(first));
@ -2931,13 +3314,13 @@ void LCodeGen::DoCheckMap(LCheckMap* instr) {
}
void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
if (Heap::InNewSpace(*prototype)) {
void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
if (Heap::InNewSpace(*object)) {
Handle<JSGlobalPropertyCell> cell =
Factory::NewJSGlobalPropertyCell(prototype);
Factory::NewJSGlobalPropertyCell(object);
__ mov(result, Operand::Cell(cell));
} else {
__ mov(result, prototype);
__ mov(result, object);
}
}
@ -2946,11 +3329,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->temp());
Handle<JSObject> holder = instr->holder();
Handle<Map> receiver_map = instr->receiver_map();
Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
LoadPrototype(reg, current_prototype);
LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@ -2960,7 +3342,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
LoadPrototype(reg, current_prototype);
LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
@ -3006,7 +3388,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
__ push(Immediate(instr->hydrogen()->constant_properties()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
// Pick the right runtime function or stub to call.
// Pick the right runtime function to call.
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else {
@ -3274,3 +3656,5 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

34
deps/v8/src/ia32/lithium-codegen-ia32.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -40,8 +40,30 @@ namespace internal {
// Forward declarations.
class LDeferredCode;
class LGapNode;
class SafepointGenerator;
class LGapResolver BASE_EMBEDDED {
public:
LGapResolver();
const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand);
private:
LGapNode* LookupNode(LOperand* operand);
bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
bool CanReach(LGapNode* a, LGapNode* b);
void RegisterMove(LMoveOperands move);
void AddResultMove(LOperand* from, LOperand* to);
void AddResultMove(LGapNode* from, LGapNode* to);
void ResolveCycle(LGapNode* start, LOperand* marker_operand);
ZoneList<LGapNode*> nodes_;
ZoneList<LGapNode*> identified_cycles_;
ZoneList<LMoveOperands> result_;
int next_visited_id_;
};
class LCodeGen BASE_EMBEDDED {
public:
@ -77,10 +99,15 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@ -148,7 +175,7 @@ class LCodeGen BASE_EMBEDDED {
int arity,
LInstruction* instr);
void LoadPrototype(Register result, Handle<JSObject> prototype);
void LoadHeapObject(Register result, Handle<HeapObject> object);
void RegisterLazyDeoptimization(LInstruction* instr);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
@ -228,6 +255,9 @@ class LCodeGen BASE_EMBEDDED {
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;
// Compiler from a set of parallel moves to a sequential list of moves.
LGapResolver resolver_;
friend class LDeferredCode;
friend class LEnvironment;
friend class SafepointGenerator;

678
deps/v8/src/ia32/lithium-ia32.cc

File diff suppressed because it is too large

859
deps/v8/src/ia32/lithium-ia32.h

File diff suppressed because it is too large

98
deps/v8/src/ia32/macro-assembler-ia32.cc

@ -877,55 +877,53 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Immediate(Factory::cons_ascii_string_map()));
}
// All registers must be distinct. Only current_string needs valid contents
// on entry. All registers may be invalid on exit. result_operand is
// unchanged, padding_chars is updated correctly.
void MacroAssembler::AppendStringToTopOfNewSpace(
Register current_string, // Tagged pointer to string to copy.
Register current_string_length,
Register result_pos,
Register scratch,
Register new_padding_chars,
Operand operand_result,
Operand operand_padding_chars,
Label* bailout) {
mov(current_string_length,
FieldOperand(current_string, String::kLengthOffset));
shr(current_string_length, 1);
sub(current_string_length, operand_padding_chars);
mov(new_padding_chars, current_string_length);
add(Operand(current_string_length), Immediate(kObjectAlignmentMask));
and_(Operand(current_string_length), Immediate(~kObjectAlignmentMask));
sub(new_padding_chars, Operand(current_string_length));
neg(new_padding_chars);
// We need an allocation even if current_string_length is 0, to fetch
// result_pos. Consider using a faster fetch of result_pos in that case.
AllocateInNewSpace(current_string_length, result_pos, scratch, no_reg,
bailout, NO_ALLOCATION_FLAGS);
sub(result_pos, operand_padding_chars);
mov(operand_padding_chars, new_padding_chars);
Register scratch_2 = new_padding_chars; // Used to compute total length.
// Copy string to the end of result.
mov(current_string_length,
FieldOperand(current_string, String::kLengthOffset));
mov(scratch, operand_result);
mov(scratch_2, current_string_length);
add(scratch_2, FieldOperand(scratch, String::kLengthOffset));
mov(FieldOperand(scratch, String::kLengthOffset), scratch_2);
shr(current_string_length, 1);
lea(current_string,
FieldOperand(current_string, SeqAsciiString::kHeaderSize));
// Loop condition: while (--current_string_length >= 0).
Label copy_loop;
Label copy_loop_entry;
jmp(&copy_loop_entry);
bind(&copy_loop);
mov_b(scratch, Operand(current_string, current_string_length, times_1, 0));
mov_b(Operand(result_pos, current_string_length, times_1, 0), scratch);
bind(&copy_loop_entry);
sub(Operand(current_string_length), Immediate(1));
j(greater_equal, &copy_loop);
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies. The contents of scratch and length are destroyed.
// Source and destination are incremented by length.
// Many variants of movsb, loop unrolling, word moves, and indexed operands
// have been tried here already, and this is fastest.
// A simpler loop is faster on small copies, but 30% slower on large ones.
// The cld() instruction must have been emitted, to set the direction flag(),
// before calling this function.
void MacroAssembler::CopyBytes(Register source,
Register destination,
Register length,
Register scratch) {
Label loop, done, short_string, short_loop;
// Experimentation shows that the short string loop is faster if length < 10.
cmp(Operand(length), Immediate(10));
j(less_equal, &short_string);
ASSERT(source.is(esi));
ASSERT(destination.is(edi));
ASSERT(length.is(ecx));
// Because source is 4-byte aligned in our uses of this function,
// we keep source aligned for the rep_movs call by copying the odd bytes
// at the end of the ranges.
mov(scratch, Operand(source, length, times_1, -4));
mov(Operand(destination, length, times_1, -4), scratch);
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
and_(Operand(scratch), Immediate(0x3));
add(destination, Operand(scratch));
jmp(&done);
bind(&short_string);
test(length, Operand(length));
j(zero, &done);
bind(&short_loop);
mov_b(scratch, Operand(source, 0));
mov_b(Operand(destination, 0), scratch);
inc(source);
inc(destination);
dec(length);
j(not_zero, &short_loop);
bind(&done);
}
@ -1715,7 +1713,7 @@ void MacroAssembler::Abort(const char* msg) {
}
#endif
// Disable stub call restrictions to always allow calls to abort.
set_allow_stub_calls(true);
AllowStubCallsScope allow_scope(this, true);
push(eax);
push(Immediate(p0));

23
deps/v8/src/ia32/macro-assembler-ia32.h

@ -386,22 +386,13 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);
// All registers must be distinct. Only current_string needs valid contents
// on entry. All registers may be invalid on exit. result_operand is
// unchanged, padding_chars is updated correctly.
// The top of new space must contain a sequential ascii string with
// padding_chars bytes free in its top word. The sequential ascii string
// current_string is concatenated to it, allocating the necessary amount
// of new memory.
void AppendStringToTopOfNewSpace(
Register current_string, // Tagged pointer to string to copy.
Register current_string_length,
Register result_pos,
Register scratch,
Register new_padding_chars,
Operand operand_result,
Operand operand_padding_chars,
Label* bailout);
// Copy memory, byte-by-byte, from source to destination. Not optimized for
// long or aligned copies.
// The contents of index and scratch are destroyed.
void CopyBytes(Register source,
Register destination,
Register length,
Register scratch);
// ---------------------------------------------------------------------------
// Support functions.

6
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -211,9 +211,7 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
// If input is ASCII, don't even bother calling here if the string to
// match contains a non-ascii character.
if (mode_ == ASCII) {
for (int i = 0; i < str.length(); i++) {
ASSERT(str[i] <= String::kMaxAsciiCharCodeU);
}
ASSERT(String::IsAscii(str.start(), str.length()));
}
#endif
int byte_length = str.length() * char_size();
@ -654,7 +652,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerIA32::Fail() {
ASSERT(FAILURE == 0); // Return value for failure is zero.
__ xor_(eax, Operand(eax)); // zero eax.
__ Set(eax, Immediate(0));
__ jmp(&exit_label_);
}

1
deps/v8/src/ia32/stub-cache-ia32.cc

@ -1686,6 +1686,7 @@ MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
Label miss;
Label index_out_of_range;
GenerateNameCheck(name, &miss);
// Check that the maps starting from the prototype haven't changed.

93
deps/v8/src/json.js

@ -27,11 +27,6 @@
var $JSON = global.JSON;
function ParseJSONUnfiltered(text) {
var s = $String(text);
return %ParseJson(s);
}
function Revive(holder, name, reviver) {
var val = holder[name];
if (IS_OBJECT(val)) {
@ -58,7 +53,7 @@ function Revive(holder, name, reviver) {
}
function JSONParse(text, reviver) {
var unfiltered = ParseJSONUnfiltered(text);
var unfiltered = %ParseJson(TO_STRING_INLINE(text));
if (IS_FUNCTION(reviver)) {
return Revive({'': unfiltered}, '', reviver);
} else {
@ -158,7 +153,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
if (IS_STRING(value)) {
return %QuoteJSONString(value);
} else if (IS_NUMBER(value)) {
return $isFinite(value) ? $String(value) : "null";
return NUMBER_IS_FINITE(value) ? $String(value) : "null";
} else if (IS_BOOLEAN(value)) {
return value ? "true" : "false";
} else if (IS_NULL(value)) {
@ -169,7 +164,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
return SerializeArray(value, replacer, stack, indent, gap);
} else if (IS_NUMBER_WRAPPER(value)) {
value = ToNumber(value);
return $isFinite(value) ? ToString(value) : "null";
return NUMBER_IS_FINITE(value) ? ToString(value) : "null";
} else if (IS_STRING_WRAPPER(value)) {
return %QuoteJSONString(ToString(value));
} else if (IS_BOOLEAN_WRAPPER(value)) {
@ -184,24 +179,60 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
function BasicSerializeArray(value, stack, builder) {
var len = value.length;
if (len == 0) {
builder.push("[]");
return;
}
if (!%PushIfAbsent(stack, value)) {
throw MakeTypeError('circular_structure', []);
}
builder.push("[");
var len = value.length;
for (var i = 0; i < len; i++) {
var val = value[0];
if (IS_STRING(val)) {
// First entry is a string. Remaining entries are likely to be strings too.
builder.push(%QuoteJSONString(val));
for (var i = 1; i < len; i++) {
val = value[i];
if (IS_STRING(val)) {
builder.push(%QuoteJSONStringComma(val));
} else {
builder.push(",");
var before = builder.length;
BasicJSONSerialize(i, value[i], stack, builder);
if (before == builder.length) builder[before - 1] = ",null";
}
}
} else if (IS_NUMBER(val)) {
// First entry is a number. Remaining entries are likely to be numbers too.
builder.push(NUMBER_IS_FINITE(val) ? %_NumberToString(val) : "null");
for (var i = 1; i < len; i++) {
builder.push(",");
val = value[i];
if (IS_NUMBER(val)) {
builder.push(NUMBER_IS_FINITE(val)
? %_NumberToString(val)
: "null");
} else {
var before = builder.length;
BasicJSONSerialize(i, value[i], stack, builder);
if (before == builder.length) builder[before - 1] = ",null";
}
}
} else {
var before = builder.length;
BasicJSONSerialize(i, value, stack, builder);
BasicJSONSerialize(0, val, stack, builder);
if (before == builder.length) builder.push("null");
builder.push(",");
for (var i = 1; i < len; i++) {
builder.push(",");
before = builder.length;
val = value[i];
BasicJSONSerialize(i, val, stack, builder);
if (before == builder.length) builder[before - 1] = ",null";
}
}
stack.pop();
if (builder.pop() != ",") {
builder.push("[]"); // Zero length array. Push "[" back on.
} else {
builder.push("]");
}
builder.push("]");
}
@ -210,31 +241,31 @@ function BasicSerializeObject(value, stack, builder) {
throw MakeTypeError('circular_structure', []);
}
builder.push("{");
var first = true;
for (var p in value) {
if (%HasLocalProperty(value, p)) {
builder.push(%QuoteJSONString(p));
if (!first) {
builder.push(%QuoteJSONStringComma(p));
} else {
builder.push(%QuoteJSONString(p));
}
builder.push(":");
var before = builder.length;
BasicJSONSerialize(p, value, stack, builder);
BasicJSONSerialize(p, value[p], stack, builder);
if (before == builder.length) {
builder.pop();
builder.pop();
} else {
builder.push(",");
first = false;
}
}
}
stack.pop();
if (builder.pop() != ",") {
builder.push("{}"); // Object has no own properties. Push "{" back on.
} else {
builder.push("}");
}
builder.push("}");
}
function BasicJSONSerialize(key, holder, stack, builder) {
var value = holder[key];
function BasicJSONSerialize(key, value, stack, builder) {
if (IS_SPEC_OBJECT(value)) {
var toJSON = value.toJSON;
if (IS_FUNCTION(toJSON)) {
@ -244,7 +275,7 @@ function BasicJSONSerialize(key, holder, stack, builder) {
if (IS_STRING(value)) {
builder.push(%QuoteJSONString(value));
} else if (IS_NUMBER(value)) {
builder.push(($isFinite(value) ? %_NumberToString(value) : "null"));
builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
} else if (IS_BOOLEAN(value)) {
builder.push(value ? "true" : "false");
} else if (IS_NULL(value)) {
@ -254,7 +285,7 @@ function BasicJSONSerialize(key, holder, stack, builder) {
// Unwrap value if necessary
if (IS_NUMBER_WRAPPER(value)) {
value = ToNumber(value);
builder.push(($isFinite(value) ? %_NumberToString(value) : "null"));
builder.push(NUMBER_IS_FINITE(value) ? %_NumberToString(value) : "null");
} else if (IS_STRING_WRAPPER(value)) {
builder.push(%QuoteJSONString(ToString(value)));
} else if (IS_BOOLEAN_WRAPPER(value)) {
@ -271,7 +302,7 @@ function BasicJSONSerialize(key, holder, stack, builder) {
function JSONStringify(value, replacer, space) {
if (%_ArgumentsLength() == 1) {
var builder = [];
BasicJSONSerialize('', {'': value}, [], builder);
BasicJSONSerialize('', value, [], builder);
if (builder.length == 0) return;
var result = %_FastAsciiArrayJoin(builder, "");
if (!IS_UNDEFINED(result)) return result;

6
deps/v8/src/jsregexp.cc

@ -425,7 +425,7 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(
Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
Vector<int32_t> output) {
Vector<int> output) {
Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()));
ASSERT(index >= 0);
@ -521,8 +521,8 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
OffsetsVector registers(required_registers);
IrregexpResult res = RegExpImpl::IrregexpExecOnce(
jsregexp, subject, previous_index, Vector<int32_t>(registers.vector(),
registers.length()));
jsregexp, subject, previous_index, Vector<int>(registers.vector(),
registers.length()));
if (res == RE_SUCCESS) {
int capture_register_count =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;

2
deps/v8/src/jsregexp.h

@ -114,7 +114,7 @@ class RegExpImpl {
static IrregexpResult IrregexpExecOnce(Handle<JSRegExp> regexp,
Handle<String> subject,
int index,
Vector<int32_t> registers);
Vector<int> registers);
// Execute an Irregexp bytecode pattern.
// On a successful match, the result is a JSArray containing

57
deps/v8/src/lithium-allocator.cc

@ -27,7 +27,6 @@
#include "lithium-allocator.h"
#include "data-flow.h"
#include "hydrogen.h"
#include "string-stream.h"
@ -107,9 +106,6 @@ void LOperand::PrintTo(StringStream* stream) {
case LUnallocated::SAME_AS_FIRST_INPUT:
stream->Add("(1)");
break;
case LUnallocated::SAME_AS_ANY_INPUT:
stream->Add("(A)");
break;
case LUnallocated::ANY:
stream->Add("(-)");
break;
@ -832,9 +828,19 @@ void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
AllocateFixed(cur_input, gap_index + 1, is_tagged);
AddConstraintsGapMove(gap_index, input_copy, cur_input);
} else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
// The live range of writable input registers always goes until the end
// of the instruction.
ASSERT(!cur_input->IsUsedAtStart());
LUnallocated* input_copy = cur_input->CopyUnconstrained();
cur_input->set_virtual_register(next_virtual_register_++);
second->AddTemp(cur_input);
if (RequiredRegisterKind(input_copy->virtual_register()) ==
DOUBLE_REGISTERS) {
double_artificial_registers_.Add(
cur_input->virtual_register() - first_artificial_register_);
}
AddConstraintsGapMove(gap_index, input_copy, cur_input);
}
}
@ -937,6 +943,9 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
curr_position.InstructionEnd());
}
}
}
if (summary->IsCall() || summary->IsSaveDoubles()) {
for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
@ -1036,6 +1045,7 @@ void LAllocator::Allocate(LChunk* chunk) {
void LAllocator::MeetRegisterConstraints() {
HPhase phase("Register constraints", chunk());
first_artificial_register_ = next_virtual_register_;
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); ++i) {
HBasicBlock* block = blocks->at(i);
@ -1564,16 +1574,47 @@ bool LAllocator::HasTaggedValue(int virtual_register) const {
RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
HValue* value = graph()->LookupValue(virtual_register);
if (value != NULL && value->representation().IsDouble()) {
if (virtual_register < first_artificial_register_) {
HValue* value = graph()->LookupValue(virtual_register);
if (value != NULL && value->representation().IsDouble()) {
return DOUBLE_REGISTERS;
}
} else if (double_artificial_registers_.Contains(
virtual_register - first_artificial_register_)) {
return DOUBLE_REGISTERS;
}
return GENERAL_REGISTERS;
}
void LAllocator::MarkAsCall() {
current_summary()->MarkAsCall();
// Call instructions can use only fixed registers as
// temporaries and outputs because all registers
// are blocked by the calling convention.
// Inputs can use either fixed register or have a short lifetime (be
// used at start of the instruction).
InstructionSummary* summary = current_summary();
#ifdef DEBUG
ASSERT(summary->Output() == NULL ||
LUnallocated::cast(summary->Output())->HasFixedPolicy() ||
!LUnallocated::cast(summary->Output())->HasRegisterPolicy());
for (int i = 0; i < summary->InputCount(); i++) {
ASSERT(LUnallocated::cast(summary->InputAt(i))->HasFixedPolicy() ||
LUnallocated::cast(summary->InputAt(i))->IsUsedAtStart() ||
!LUnallocated::cast(summary->InputAt(i))->HasRegisterPolicy());
}
for (int i = 0; i < summary->TempCount(); i++) {
ASSERT(LUnallocated::cast(summary->TempAt(i))->HasFixedPolicy() ||
!LUnallocated::cast(summary->TempAt(i))->HasRegisterPolicy());
}
#endif
summary->MarkAsCall();
}
void LAllocator::MarkAsSaveDoubles() {
current_summary()->MarkAsSaveDoubles();
}

57
deps/v8/src/lithium-allocator.h

@ -30,6 +30,7 @@
#include "v8.h"
#include "data-flow.h"
#include "zone.h"
namespace v8 {
@ -49,7 +50,6 @@ class LArgument;
class LChunk;
class LConstantOperand;
class LGap;
class LInstruction;
class LParallelMove;
class LPointerMap;
class LStackSlot;
@ -204,7 +204,6 @@ class LUnallocated: public LOperand {
MUST_HAVE_REGISTER,
WRITABLE_REGISTER,
SAME_AS_FIRST_INPUT,
SAME_AS_ANY_INPUT,
IGNORE
};
@ -275,7 +274,7 @@ class LUnallocated: public LOperand {
return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
}
bool HasSameAsInputPolicy() const {
return policy() == SAME_AS_FIRST_INPUT || policy() == SAME_AS_ANY_INPUT;
return policy() == SAME_AS_FIRST_INPUT;
}
Policy policy() const { return PolicyField::decode(value_); }
void set_policy(Policy policy) {
@ -482,7 +481,11 @@ class LDoubleRegister: public LOperand {
class InstructionSummary: public ZoneObject {
public:
InstructionSummary()
: output_operand_(NULL), input_count_(0), operands_(4), is_call_(false) {}
: output_operand_(NULL),
input_count_(0),
operands_(4),
is_call_(false),
is_save_doubles_(false) {}
// Output operands.
LOperand* Output() const { return output_operand_; }
@ -510,11 +513,15 @@ class InstructionSummary: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
bool IsCall() const { return is_call_; }
void MarkAsSaveDoubles() { is_save_doubles_ = true; }
bool IsSaveDoubles() const { return is_save_doubles_; }
private:
LOperand* output_operand_;
int input_count_;
ZoneList<LOperand*> operands_;
bool is_call_;
bool is_save_doubles_;
};
// Representation of the non-empty interval [start,end[.
@ -698,6 +705,7 @@ class LiveRange: public ZoneObject {
bool HasAllocatedSpillOperand() const {
return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
}
LOperand* GetSpillOperand() const { return spill_operand_; }
void SetSpillOperand(LOperand* operand) {
ASSERT(!operand->IsUnallocated());
@ -715,7 +723,6 @@ class LiveRange: public ZoneObject {
bool Covers(LifetimePosition position);
LifetimePosition FirstIntersection(LiveRange* other);
// Add a new interval or a new use position to this live range.
void EnsureInterval(LifetimePosition start, LifetimePosition end);
void AddUseInterval(LifetimePosition start, LifetimePosition end);
@ -754,6 +761,40 @@ class LiveRange: public ZoneObject {
};
class GrowableBitVector BASE_EMBEDDED {
public:
GrowableBitVector() : bits_(NULL) { }
bool Contains(int value) const {
if (!InBitsRange(value)) return false;
return bits_->Contains(value);
}
void Add(int value) {
EnsureCapacity(value);
bits_->Add(value);
}
private:
static const int kInitialLength = 1024;
bool InBitsRange(int value) const {
return bits_ != NULL && bits_->length() > value;
}
void EnsureCapacity(int value) {
if (InBitsRange(value)) return;
int new_length = bits_ == NULL ? kInitialLength : bits_->length();
while (new_length <= value) new_length *= 2;
BitVector* new_bits = new BitVector(new_length);
if (bits_ != NULL) new_bits->CopyFrom(*bits_);
bits_ = new_bits;
}
BitVector* bits_;
};
class LAllocator BASE_EMBEDDED {
public:
explicit LAllocator(int first_virtual_register, HGraph* graph)
@ -770,6 +811,7 @@ class LAllocator BASE_EMBEDDED {
inactive_live_ranges_(8),
reusable_slots_(8),
next_virtual_register_(first_virtual_register),
first_artificial_register_(first_virtual_register),
mode_(NONE),
num_registers_(-1),
graph_(graph),
@ -789,6 +831,9 @@ class LAllocator BASE_EMBEDDED {
// Marks the current instruction as a call.
void MarkAsCall();
// Marks the current instruction as requiring saving double registers.
void MarkAsSaveDoubles();
// Checks whether the value of a given virtual register is tagged.
bool HasTaggedValue(int virtual_register) const;
@ -972,6 +1017,8 @@ class LAllocator BASE_EMBEDDED {
// Next virtual register number to be assigned to temporaries.
int next_virtual_register_;
int first_artificial_register_;
GrowableBitVector double_artificial_registers_;
RegisterKind mode_;
int num_registers_;

93
deps/v8/src/lithium.cc

@ -0,0 +1,93 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "lithium.h"
namespace v8 {
namespace internal {
bool LParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsRedundant()) return false;
}
return true;
}
void LParallelMove::PrintDataTo(StringStream* stream) const {
for (int i = move_operands_.length() - 1; i >= 0; --i) {
if (!move_operands_[i].IsEliminated()) {
LOperand* from = move_operands_[i].from();
LOperand* to = move_operands_[i].to();
if (from->Equals(to)) {
to->PrintTo(stream);
} else {
to->PrintTo(stream);
stream->Add(" = ");
from->PrintTo(stream);
}
stream->Add("; ");
}
}
}
void LEnvironment::PrintTo(StringStream* stream) {
stream->Add("[id=%d|", ast_id());
stream->Add("[parameters=%d|", parameter_count());
stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
for (int i = 0; i < values_.length(); ++i) {
if (i != 0) stream->Add(";");
if (values_[i] == NULL) {
stream->Add("[hole]");
} else {
values_[i]->PrintTo(stream);
}
}
stream->Add("]");
}
void LPointerMap::RecordPointer(LOperand* op) {
// Do not record arguments as pointers.
if (op->IsStackSlot() && op->index() < 0) return;
ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
pointer_operands_.Add(op);
}
void LPointerMap::PrintTo(StringStream* stream) {
stream->Add("{");
for (int i = 0; i < pointer_operands_.length(); ++i) {
if (i != 0) stream->Add(";");
pointer_operands_[i]->PrintTo(stream);
}
stream->Add("} @%d", position());
}
} } // namespace v8::internal

169
deps/v8/src/lithium.h

@ -0,0 +1,169 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_LITHIUM_H_
#define V8_LITHIUM_H_
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "safepoint-table.h"
namespace v8 {
namespace internal {
class LCodeGen;
class Translation;
// A list of (from, to) operand moves recorded via AddMove() and treated
// as a single unit.  Redundancy detection and printing are implemented
// out of line (in lithium.cc).
class LParallelMove : public ZoneObject {
 public:
  LParallelMove() : move_operands_(4) { }  // Initial capacity of 4 moves.

  // Records a move of the value in |from| into |to|.
  void AddMove(LOperand* from, LOperand* to) {
    move_operands_.Add(LMoveOperands(from, to));
  }

  // Whether the whole move set has no effect; defined out of line.
  bool IsRedundant() const;

  const ZoneList<LMoveOperands>* move_operands() const {
    return &move_operands_;
  }

  void PrintDataTo(StringStream* stream) const;

 private:
  ZoneList<LMoveOperands> move_operands_;
};
// The set of operands containing pointers at a given position, plus the
// lithium instruction position assigned later via set_lithium_position().
// NOTE(review): presumably consumed when building the safepoint table
// (safepoint-table.h is included above) — confirm against the code
// generator.
class LPointerMap: public ZoneObject {
 public:
  explicit LPointerMap(int position)
      : pointer_operands_(8), position_(position), lithium_position_(-1) { }

  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
  int position() const { return position_; }
  int lithium_position() const { return lithium_position_; }

  // May only be assigned once; -1 means "not yet assigned".
  void set_lithium_position(int pos) {
    ASSERT(lithium_position_ == -1);
    lithium_position_ = pos;
  }

  void RecordPointer(LOperand* op);
  void PrintTo(StringStream* stream);

 private:
  ZoneList<LOperand*> pointer_operands_;
  int position_;
  int lithium_position_;
};
// Snapshot of the abstract execution environment (closure, parameter
// and expression-stack values with their representations) at a point
// where deoptimization may occur.  An environment is later assigned a
// deoptimization index and a translation index via Register().
class LEnvironment: public ZoneObject {
 public:
  LEnvironment(Handle<JSFunction> closure,
               int ast_id,
               int parameter_count,
               int argument_count,
               int value_count,
               LEnvironment* outer)
      : closure_(closure),
        arguments_stack_height_(argument_count),
        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
        translation_index_(-1),
        ast_id_(ast_id),
        parameter_count_(parameter_count),
        values_(value_count),
        representations_(value_count),
        spilled_registers_(NULL),
        spilled_double_registers_(NULL),
        outer_(outer) {
  }

  Handle<JSFunction> closure() const { return closure_; }
  int arguments_stack_height() const { return arguments_stack_height_; }
  int deoptimization_index() const { return deoptimization_index_; }
  int translation_index() const { return translation_index_; }
  int ast_id() const { return ast_id_; }
  int parameter_count() const { return parameter_count_; }
  LOperand** spilled_registers() const { return spilled_registers_; }
  LOperand** spilled_double_registers() const {
    return spilled_double_registers_;
  }
  const ZoneList<LOperand*>* values() const { return &values_; }
  LEnvironment* outer() const { return outer_; }

  // Appends a value and its representation; the two lists are kept in
  // lock step so HasTaggedValueAt() can index into both.
  void AddValue(LOperand* operand, Representation representation) {
    values_.Add(operand);
    representations_.Add(representation);
  }

  bool HasTaggedValueAt(int index) const {
    return representations_[index].IsTagged();
  }

  // Records the indices assigned to this environment.  May only be
  // called once (guarded by the ASSERT below).
  void Register(int deoptimization_index, int translation_index) {
    ASSERT(!HasBeenRegistered());
    deoptimization_index_ = deoptimization_index;
    translation_index_ = translation_index;
  }
  bool HasBeenRegistered() const {
    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
  }

  void SetSpilledRegisters(LOperand** registers,
                           LOperand** double_registers) {
    spilled_registers_ = registers;
    spilled_double_registers_ = double_registers;
  }

  void PrintTo(StringStream* stream);

 private:
  Handle<JSFunction> closure_;
  int arguments_stack_height_;
  int deoptimization_index_;
  int translation_index_;
  int ast_id_;
  int parameter_count_;
  ZoneList<LOperand*> values_;
  ZoneList<Representation> representations_;

  // Allocation index indexed arrays of spill slot operands for registers
  // that are also in spill slots at an OSR entry. NULL for environments
  // that do not correspond to an OSR entry.
  LOperand** spilled_registers_;
  LOperand** spilled_double_registers_;

  LEnvironment* outer_;

  // Fixed: previously declared "friend class LCodegen;", which does not
  // match the forward-declared LCodeGen above — a friend declaration
  // naming an undeclared class introduces a new (phantom) class, so the
  // intended access grant never applied to the real code generator.
  friend class LCodeGen;
};
} } // namespace v8::internal
#endif // V8_LITHIUM_H_

12
deps/v8/src/liveedit-debugger.js

@ -144,8 +144,8 @@ Debug.LiveEdit = new function() {
replace_code_list[i].live_shared_function_infos;
if (live_shared_function_infos) {
for (var i = 0; i < live_shared_function_infos.length; i++) {
replaced_function_infos.push(live_shared_function_infos[i]);
for (var j = 0; j < live_shared_function_infos.length; j++) {
replaced_function_infos.push(live_shared_function_infos[j]);
}
}
}
@ -980,15 +980,15 @@ Debug.LiveEdit = new function() {
// LiveEdit main entry point: changes a script text to a new string.
function SetScriptSource(script, new_source, preview_only, change_log) {
var old_source = script.source;
var diff = CompareStringsLinewise(old_source, new_source);
var diff = CompareStrings(old_source, new_source);
return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
change_log);
}
// Function is public.
this.SetScriptSource = SetScriptSource;
function CompareStringsLinewise(s1, s2) {
return %LiveEditCompareStringsLinewise(s1, s2);
function CompareStrings(s1, s2) {
return %LiveEditCompareStrings(s1, s2);
}
// Applies the change to the script.
@ -1076,7 +1076,7 @@ Debug.LiveEdit = new function() {
// Functions are public for tests.
this.TestApi = {
PosTranslator: PosTranslator,
CompareStringsLinewise: CompareStringsLinewise,
CompareStrings: CompareStrings,
ApplySingleChunkPatch: ApplySingleChunkPatch
}
}

123
deps/v8/src/liveedit.cc

@ -275,6 +275,82 @@ static bool CompareSubstrings(Handle<String> s1, int pos1,
}
// A helper class that writes chunk numbers into JSArray.
// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
class CompareOutputArrayWriter {
public:
CompareOutputArrayWriter()
: array_(Factory::NewJSArray(10)), current_size_(0) {}
Handle<JSArray> GetResult() {
return array_;
}
void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
SetElement(array_, current_size_ + 1,
Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
SetElement(array_, current_size_ + 2,
Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
current_size_ += 3;
}
private:
Handle<JSArray> array_;
int current_size_;
};
// Represents 2 strings as 2 arrays of tokens.
// TODO(LiveEdit): Currently it's actually an array of characters.
// Make array of tokens instead.
class TokensCompareInput : public Comparator::Input {
public:
TokensCompareInput(Handle<String> s1, int offset1, int len1,
Handle<String> s2, int offset2, int len2)
: s1_(s1), offset1_(offset1), len1_(len1),
s2_(s2), offset2_(offset2), len2_(len2) {
}
virtual int getLength1() {
return len1_;
}
virtual int getLength2() {
return len2_;
}
bool equals(int index1, int index2) {
return s1_->Get(offset1_ + index1) == s2_->Get(offset2_ + index2);
}
private:
Handle<String> s1_;
int offset1_;
int len1_;
Handle<String> s2_;
int offset2_;
int len2_;
};
// Stores compare result in JSArray. Converts substring positions
// to absolute positions.
class TokensCompareOutput : public Comparator::Output {
public:
TokensCompareOutput(CompareOutputArrayWriter* array_writer,
int offset1, int offset2)
: array_writer_(array_writer), offset1_(offset1), offset2_(offset2) {
}
void AddChunk(int pos1, int pos2, int len1, int len2) {
array_writer_->WriteChunk(pos1 + offset1_, pos2 + offset2_, len1, len2);
}
private:
CompareOutputArrayWriter* array_writer_;
int offset1_;
int offset2_;
};
// Wraps raw n-elements line_ends array as a list of n+1 lines. The last line
// never has terminating new line character.
class LineEndsWrapper {
@ -350,13 +426,14 @@ class LineArrayCompareInput : public Comparator::Input {
};
// Stores compare result in JSArray. Each chunk is stored as 3 array elements:
// (pos1_begin, pos1_end, pos2_end).
class LineArrayCompareOutput : public Comparator::Output {
// Stores compare result in JSArray. For each chunk tries to conduct
// a fine-grained nested diff token-wise.
class TokenizingLineArrayCompareOutput : public Comparator::Output {
public:
LineArrayCompareOutput(LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
: array_(Factory::NewJSArray(10)), current_size_(0),
line_ends1_(line_ends1), line_ends2_(line_ends2) {
TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
LineEndsWrapper line_ends2,
Handle<String> s1, Handle<String> s2)
: line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2) {
}
void AddChunk(int line_pos1, int line_pos2, int line_len1, int line_len2) {
@ -365,33 +442,43 @@ class LineArrayCompareOutput : public Comparator::Output {
int char_len1 = line_ends1_.GetLineStart(line_pos1 + line_len1) - char_pos1;
int char_len2 = line_ends2_.GetLineStart(line_pos2 + line_len2) - char_pos2;
SetElement(array_, current_size_, Handle<Object>(Smi::FromInt(char_pos1)));
SetElement(array_, current_size_ + 1,
Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
SetElement(array_, current_size_ + 2,
Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
current_size_ += 3;
if (char_len1 < CHUNK_LEN_LIMIT && char_len2 < CHUNK_LEN_LIMIT) {
// Chunk is small enough to conduct a nested token-level diff.
HandleScope subTaskScope;
TokensCompareInput tokens_input(s1_, char_pos1, char_len1,
s2_, char_pos2, char_len2);
TokensCompareOutput tokens_output(&array_writer_, char_pos1,
char_pos2);
Comparator::CalculateDifference(&tokens_input, &tokens_output);
} else {
array_writer_.WriteChunk(char_pos1, char_pos2, char_len1, char_len2);
}
}
Handle<JSArray> GetResult() {
return array_;
return array_writer_.GetResult();
}
private:
Handle<JSArray> array_;
int current_size_;
static const int CHUNK_LEN_LIMIT = 800;
CompareOutputArrayWriter array_writer_;
LineEndsWrapper line_ends1_;
LineEndsWrapper line_ends2_;
Handle<String> s1_;
Handle<String> s2_;
};
Handle<JSArray> LiveEdit::CompareStringsLinewise(Handle<String> s1,
Handle<String> s2) {
Handle<JSArray> LiveEdit::CompareStrings(Handle<String> s1,
Handle<String> s2) {
LineEndsWrapper line_ends1(s1);
LineEndsWrapper line_ends2(s2);
LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
LineArrayCompareOutput output(line_ends1, line_ends2);
TokenizingLineArrayCompareOutput output(line_ends1, line_ends2, s1, s2);
Comparator::CalculateDifference(&input, &output);

9
deps/v8/src/liveedit.h

@ -126,10 +126,11 @@ class LiveEdit : AllStatic {
FUNCTION_REPLACED_ON_ACTIVE_STACK = 5
};
// Compares 2 strings line-by-line and returns diff in form of array of
// triplets (pos1, pos1_end, pos2_end) describing list of diff chunks.
static Handle<JSArray> CompareStringsLinewise(Handle<String> s1,
Handle<String> s2);
// Compares 2 strings line-by-line, then token-wise and returns diff in form
// of array of triplets (pos1, pos1_end, pos2_end) describing list
// of diff chunks.
static Handle<JSArray> CompareStrings(Handle<String> s1,
Handle<String> s2);
};

3
deps/v8/src/log.cc

@ -276,7 +276,8 @@ void SlidingStateWindow::AddState(StateTag state) {
// Profiler implementation.
//
Profiler::Profiler()
: head_(0),
: Thread("v8:Profiler"),
head_(0),
tail_(0),
overflow_(false),
buffer_semaphore_(OS::CreateSemaphore(0)),

2
deps/v8/src/macros.py

@ -120,11 +120,13 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || arg - arg == 0);
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
macro TO_UINT32(arg) = (arg >>> 0);
macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
# Macros implemented in Python.

42
deps/v8/src/math.js

@ -44,26 +44,26 @@ $Math.__proto__ = global.Object.prototype;
// ECMA 262 - 15.8.2.1
function MathAbs(x) {
if (%_IsSmi(x)) return x >= 0 ? x : -x;
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
if (x === 0) return 0; // To handle -0.
return x > 0 ? x : -x;
}
// ECMA 262 - 15.8.2.2
function MathAcos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %Math_acos(x);
}
// ECMA 262 - 15.8.2.3
function MathAsin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %Math_asin(x);
}
// ECMA 262 - 15.8.2.4
function MathAtan(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %Math_atan(x);
}
@ -71,32 +71,32 @@ function MathAtan(x) {
// The naming of y and x matches the spec, as does the order in which
// ToNumber (valueOf) is called.
function MathAtan2(y, x) {
if (!IS_NUMBER(y)) y = ToNumber(y);
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %Math_atan2(y, x);
}
// ECMA 262 - 15.8.2.6
function MathCeil(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %Math_ceil(x);
}
// ECMA 262 - 15.8.2.7
function MathCos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %_MathCos(x);
}
// ECMA 262 - 15.8.2.8
function MathExp(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %Math_exp(x);
}
// ECMA 262 - 15.8.2.9
function MathFloor(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
// It's more common to call this with a positive number that's out
// of range than negative numbers; check the upper bound first.
if (x < 0x80000000 && x > 0) {
@ -112,7 +112,7 @@ function MathFloor(x) {
// ECMA 262 - 15.8.2.10
function MathLog(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %_MathLog(x);
}
@ -123,11 +123,11 @@ function MathMax(arg1, arg2) { // length == 2
return -1/0; // Compiler constant-folds this to -Infinity.
}
var r = arg1;
if (!IS_NUMBER(r)) r = ToNumber(r);
if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
if (NUMBER_IS_NAN(r)) return r;
for (var i = 1; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = ToNumber(n);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
// a Smi or heap number.
@ -143,11 +143,11 @@ function MathMin(arg1, arg2) { // length == 2
return 1/0; // Compiler constant-folds this to Infinity.
}
var r = arg1;
if (!IS_NUMBER(r)) r = ToNumber(r);
if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
if (NUMBER_IS_NAN(r)) return r;
for (var i = 1; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = ToNumber(n);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
if (NUMBER_IS_NAN(n)) return n;
// Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
// Smi or a heap number.
@ -158,8 +158,8 @@ function MathMin(arg1, arg2) { // length == 2
// ECMA 262 - 15.8.2.13
function MathPow(x, y) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(y)) y = ToNumber(y);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
if (!IS_NUMBER(y)) y = NonNumberToNumber(y);
return %_MathPow(x, y);
}
@ -170,25 +170,25 @@ function MathRandom() {
// ECMA 262 - 15.8.2.15
function MathRound(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %RoundNumber(x);
}
// ECMA 262 - 15.8.2.16
function MathSin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %_MathSin(x);
}
// ECMA 262 - 15.8.2.17
function MathSqrt(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %_MathSqrt(x);
}
// ECMA 262 - 15.8.2.18
function MathTan(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
return %Math_tan(x);
}

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save