diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 4c96de014b..95a3640604 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,76 @@ +2010-09-08: Version 2.4.2 + + Fixed GC crash bug. + + Fixed stack corruption bug. + + Fixed compilation for newer C++ compilers that found Operand(0) + ambiguous. + + +2010-09-06: Version 2.4.1 + + Added the ability for an embedding application to receive a callback + when V8 allocates (V8::AddMemoryAllocationCallback) or deallocates + (V8::RemoveMemoryAllocationCallback) from the OS. + + Fixed several JSON bugs (including issue 855). + + Fixed memory overrun crash bug triggered during V8's tick-based + profiling. + + Performance improvements on all platforms. + + +2010-09-01: Version 2.4.0 + + Fix bug in Object.freeze and Object.seal when Array.prototype or + Object.prototype is changed (issue 842). + + Update Array.splice to follow Safari and Firefox when called + with zero arguments. + + Fix a missing live register when breaking at keyed loads on ARM. + + Performance improvements on all platforms. + + +2010-08-25: Version 2.3.11 + + Fix bug in RegExp related to copy-on-write arrays. + + Refactoring of tools/test.py script, including the introduction of + VARIANT_FLAGS that allows specification of sets of flags with which + all tests should be run. + + Fix a bug in the handling of debug breaks in CallIC. + + Performance improvements on all platforms. + + +2010-08-23: Version 2.3.10 + + Fix bug in bitops on ARM. + + Build fixes for unusual compilers. + + Track high water mark for RWX memory. + + Performance improvements on all platforms. + + +2010-08-18: Version 2.3.9 + + Fix compilation for ARMv4 on OpenBSD/FreeBSD. + + Removed specialized handling of GCC 4.4 (issue 830). + + Fixed DST cache to take into account the suspension of DST in + Egypt during the 2010 Ramadan (issue http://crbug.com/51855). + + Performance improvements on all platforms. + + 2010-08-16: Version 2.3.8 Fixed build with strict aliasing on GCC 4.4 (issue 463). diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index 8fc192637c..2a39583f1c 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -54,15 +54,8 @@ if ARM_TARGET_LIB: else: ARM_LINK_FLAGS = [] -# TODO: Sort these issues out properly but as a temporary solution for gcc 4.4 -# on linux we need these compiler flags to avoid crashes in the v8 test suite -# and avoid dtoa.c strict aliasing issues -if os.environ.get('GCC_VERSION') == '44': - GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp'] - GCC_DTOA_EXTRA_CCFLAGS = [] -else: - GCC_EXTRA_CCFLAGS = [] - GCC_DTOA_EXTRA_CCFLAGS = [] +GCC_EXTRA_CCFLAGS = [] +GCC_DTOA_EXTRA_CCFLAGS = [] ANDROID_FLAGS = ['-march=armv7-a', '-mtune=cortex-a8', @@ -299,6 +292,7 @@ V8_EXTRA_FLAGS = { 'gcc': { 'all': { 'WARNINGFLAGS': ['-Wall', + '-Werror', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor'] @@ -672,7 +666,7 @@ SIMPLE_OPTIONS = { 'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')' }, 'os': { - 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'], + 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'], 'default': OS_GUESS, 'help': 'the os to build for (' + OS_GUESS + ')' }, diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index 9e3cb873c6..dd1b8caf7b 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -260,10 +260,17 @@ class V8EXPORT HeapGraphNode { /** * Returns node id. For the same heap object, the id remains the same - * across all snapshots. 
+ * across all snapshots. Not applicable to aggregated heap snapshots + * as they only contain aggregated instances. */ uint64_t GetId() const; + /** + * Returns the number of instances. Only applicable to aggregated + * heap snapshots. + */ + int GetInstancesCount() const; + /** Returns node's own size, in bytes. */ int GetSelfSize() const; @@ -313,6 +320,15 @@ class V8EXPORT HeapSnapshotsDiff { */ class V8EXPORT HeapSnapshot { public: + enum Type { + kFull = 0, // Heap snapshot with all instances and references. + kAggregated = 1 // Snapshot doesn't contain individual heap entries, + //instead they are grouped by constructor name. + }; + + /** Returns heap snapshot type. */ + Type GetType() const; + /** Returns heap snapshot UID (assigned by the profiler.) */ unsigned GetUid() const; @@ -322,7 +338,10 @@ class V8EXPORT HeapSnapshot { /** Returns the root node of the heap graph. */ const HeapGraphNode* GetRoot() const; - /** Returns a diff between this snapshot and another one. */ + /** + * Returns a diff between this snapshot and another one. Only snapshots + * of the same type can be compared. + */ const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const; }; @@ -341,8 +360,13 @@ class V8EXPORT HeapProfiler { /** Returns a profile by uid. */ static const HeapSnapshot* FindSnapshot(unsigned uid); - /** Takes a heap snapshot and returns it. Title may be an empty string. */ - static const HeapSnapshot* TakeSnapshot(Handle title); + /** + * Takes a heap snapshot and returns it. Title may be an empty string. + * See HeapSnapshot::Type for types description. + */ + static const HeapSnapshot* TakeSnapshot( + Handle title, + HeapSnapshot::Type type = HeapSnapshot::kFull); }; diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index ff73226925..b89c244ca2 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -1763,8 +1763,6 @@ class V8EXPORT AccessorInfo { typedef Handle (*InvocationCallback)(const Arguments& args); -typedef int (*LookupCallback)(Local self, Local name); - /** * NamedProperty[Getter|Setter] are used as interceptors on object. * See ObjectTemplate::SetNamedPropertyHandler. @@ -2361,6 +2359,30 @@ typedef void* (*CreateHistogramCallback)(const char* name, typedef void (*AddHistogramSampleCallback)(void* histogram, int sample); +// --- M e m o r y A l l o c a t i o n C a l l b a c k --- + enum ObjectSpace { + kObjectSpaceNewSpace = 1 << 0, + kObjectSpaceOldPointerSpace = 1 << 1, + kObjectSpaceOldDataSpace = 1 << 2, + kObjectSpaceCodeSpace = 1 << 3, + kObjectSpaceMapSpace = 1 << 4, + kObjectSpaceLoSpace = 1 << 5, + + kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace | + kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace | kObjectSpaceMapSpace | + kObjectSpaceLoSpace + }; + + enum AllocationAction { + kAllocationActionAllocate = 1 << 0, + kAllocationActionFree = 1 << 1, + kAllocationActionAll = kAllocationActionAllocate | kAllocationActionFree + }; + +typedef void (*MemoryAllocationCallback)(ObjectSpace space, + AllocationAction action, + int size); + // --- F a i l e d A c c e s s C h e c k C a l l b a c k --- typedef void (*FailedAccessCheckCallback)(Local target, AccessType type, @@ -2580,6 +2602,20 @@ class V8EXPORT V8 { */ static void SetGlobalGCEpilogueCallback(GCCallback); + /** + * Enables the host application to provide a mechanism to be notified + * and perform custom logging when V8 Allocates Executable Memory. 
+ */ + static void AddMemoryAllocationCallback(MemoryAllocationCallback callback, + ObjectSpace space, + AllocationAction action); + + /** + * This function removes callback which was installed by + * AddMemoryAllocationCallback function. + */ + static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); + /** * Allows the host application to group objects together. If one * object in the group is alive, all objects in the group are alive. diff --git a/deps/v8/node_cygwin_patch.diff b/deps/v8/node_cygwin_patch.diff deleted file mode 100644 index eeedc6d6ea..0000000000 --- a/deps/v8/node_cygwin_patch.diff +++ /dev/null @@ -1,918 +0,0 @@ -diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct -index 7219e9d..b8de1b8 100644 ---- a/deps/v8/SConstruct -+++ b/deps/v8/SConstruct -@@ -670,7 +670,7 @@ SIMPLE_OPTIONS = { - 'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')' - }, - 'os': { -- 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris'], -+ 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'], - 'default': OS_GUESS, - 'help': 'the os to build for (' + OS_GUESS + ')' - }, -diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript -index 8466a0c..9ff3414 100755 ---- a/deps/v8/src/SConscript -+++ b/deps/v8/src/SConscript -@@ -206,6 +206,7 @@ SOURCES = { - 'os:android': ['platform-linux.cc', 'platform-posix.cc'], - 'os:macos': ['platform-macos.cc', 'platform-posix.cc'], - 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'], -+ 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'], - 'os:nullos': ['platform-nullos.cc'], - 'os:win32': ['platform-win32.cc'], - 'mode:release': [], -diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc -new file mode 100644 -index 0000000..34410e8 ---- /dev/null -+++ b/deps/v8/src/platform-cygwin.cc -@@ -0,0 +1,858 @@ -+// Copyright 2006-2008 the V8 project authors. All rights reserved. -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following -+// disclaimer in the documentation and/or other materials provided -+// with the distribution. -+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived -+// from this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+// Platform specific code for Cygwin goes here. 
For the POSIX comaptible parts -+// the implementation is in platform-posix.cc. -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+// Ubuntu Dapper requires memory pages to be marked as -+// executable. Otherwise, OS raises an exception when executing code -+// in that page. -+#include // mmap & munmap -+#include // mmap & munmap -+#include // open -+#include // open -+#include // sysconf -+#ifdef __GLIBC__ -+#include // backtrace, backtrace_symbols -+#endif // def __GLIBC__ -+#include // index -+#include -+#include -+ -+#undef MAP_TYPE -+ -+#include "v8.h" -+ -+#include "platform.h" -+#include "top.h" -+#include "v8threads.h" -+ -+ -+namespace v8 { -+namespace internal { -+ -+// 0 is never a valid thread id on Linux since tids and pids share a -+// name space and pid 0 is reserved (see man 2 kill). -+static const pthread_t kNoThread = (pthread_t) 0; -+ -+ -+double ceiling(double x) { -+ return ceil(x); -+} -+ -+ -+void OS::Setup() { -+ // Seed the random number generator. -+ // Convert the current time to a 64-bit integer first, before converting it -+ // to an unsigned. Going directly can cause an overflow and the seed to be -+ // set to all ones. The seed will be identical for different instances that -+ // call this setup code within the same millisecond. -+ uint64_t seed = static_cast(TimeCurrentMillis()); -+ srandom(static_cast(seed)); -+} -+ -+ -+uint64_t OS::CpuFeaturesImpliedByPlatform() { -+#if (defined(__VFP_FP__) && !defined(__SOFTFP__)) -+ // Here gcc is telling us that we are on an ARM and gcc is assuming that we -+ // have VFP3 instructions. If gcc can assume it then so can we. -+ return 1u << VFP3; -+#elif CAN_USE_ARMV7_INSTRUCTIONS -+ return 1u << ARMv7; -+#else -+ return 0; // Linux runs on anything. -+#endif -+} -+ -+ -+#ifdef __arm__ -+bool OS::ArmCpuHasFeature(CpuFeature feature) { -+ const char* search_string = NULL; -+ const char* file_name = "/proc/cpuinfo"; -+ // Simple detection of VFP at runtime for Linux. -+ // It is based on /proc/cpuinfo, which reveals hardware configuration -+ // to user-space applications. According to ARM (mid 2009), no similar -+ // facility is universally available on the ARM architectures, -+ // so it's up to individual OSes to provide such. -+ // -+ // This is written as a straight shot one pass parser -+ // and not using STL string and ifstream because, -+ // on Linux, it's reading from a (non-mmap-able) -+ // character special device. -+ switch (feature) { -+ case VFP3: -+ search_string = "vfp"; -+ break; -+ case ARMv7: -+ search_string = "ARMv7"; -+ break; -+ default: -+ UNREACHABLE(); -+ } -+ -+ FILE* f = NULL; -+ const char* what = search_string; -+ -+ if (NULL == (f = fopen(file_name, "r"))) -+ return false; -+ -+ int k; -+ while (EOF != (k = fgetc(f))) { -+ if (k == *what) { -+ ++what; -+ while ((*what != '\0') && (*what == fgetc(f))) { -+ ++what; -+ } -+ if (*what == '\0') { -+ fclose(f); -+ return true; -+ } else { -+ what = search_string; -+ } -+ } -+ } -+ fclose(f); -+ -+ // Did not find string in the proc file. -+ return false; -+} -+#endif // def __arm__ -+ -+ -+int OS::ActivationFrameAlignment() { -+#ifdef V8_TARGET_ARCH_ARM -+ // On EABI ARM targets this is required for fp correctness in the -+ // runtime system. -+ return 8; -+#elif V8_TARGET_ARCH_MIPS -+ return 8; -+#endif -+ // With gcc 4.4 the tree vectorization optimizer can generate code -+ // that requires 16 byte alignment such as movdqa on x86. 
-+ return 16; -+} -+ -+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { -+ __asm__ __volatile__("" : : : "memory"); -+ // An x86 store acts as a release barrier. -+ *ptr = value; -+} -+ -+const char* OS::LocalTimezone(double time) { -+ if (isnan(time)) return ""; -+ time_t tv = static_cast(floor(time/msPerSecond)); -+ struct tm* t = localtime(&tv); -+ if (NULL == t) return ""; -+ return tzname[0]; // The location of the timezone string on Cywin. -+} -+ -+ -+double OS::LocalTimeOffset() { -+ // -+ // On Cygwin, struct tm does not contain a tm_gmtoff field. -+ time_t utc = time(NULL); -+ ASSERT(utc != -1); -+ struct tm* loc = localtime(&utc); -+ ASSERT(loc != NULL); -+ return static_cast((mktime(loc) - utc) * msPerSecond); -+} -+ -+ -+// We keep the lowest and highest addresses mapped as a quick way of -+// determining that pointers are outside the heap (used mostly in assertions -+// and verification). The estimate is conservative, ie, not all addresses in -+// 'allocated' space are actually allocated to our heap. The range is -+// [lowest, highest), inclusive on the low and and exclusive on the high end. -+static void* lowest_ever_allocated = reinterpret_cast(-1); -+static void* highest_ever_allocated = reinterpret_cast(0); -+ -+ -+static void UpdateAllocatedSpaceLimits(void* address, int size) { -+ lowest_ever_allocated = Min(lowest_ever_allocated, address); -+ highest_ever_allocated = -+ Max(highest_ever_allocated, -+ reinterpret_cast(reinterpret_cast(address) + size)); -+} -+ -+ -+bool OS::IsOutsideAllocatedSpace(void* address) { -+ return address < lowest_ever_allocated || address >= highest_ever_allocated; -+} -+ -+ -+size_t OS::AllocateAlignment() { -+ return sysconf(_SC_PAGESIZE); -+} -+ -+ -+void* OS::Allocate(const size_t requested, -+ size_t* allocated, -+ bool is_executable) { -+ const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE)); -+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); -+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); -+ if (mbase == MAP_FAILED) { -+ LOG(StringEvent("OS::Allocate", "mmap failed")); -+ return NULL; -+ } -+ *allocated = msize; -+ UpdateAllocatedSpaceLimits(mbase, msize); -+ return mbase; -+} -+ -+ -+void OS::Free(void* address, const size_t size) { -+ // TODO(1240712): munmap has a return value which is ignored here. -+ int result = munmap(address, size); -+ USE(result); -+ ASSERT(result == 0); -+} -+ -+ -+#ifdef ENABLE_HEAP_PROTECTION -+ -+void OS::Protect(void* address, size_t size) { -+ // TODO(1240712): mprotect has a return value which is ignored here. -+ mprotect(address, size, PROT_READ); -+} -+ -+ -+void OS::Unprotect(void* address, size_t size, bool is_executable) { -+ // TODO(1240712): mprotect has a return value which is ignored here. -+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); -+ mprotect(address, size, prot); -+} -+ -+#endif -+ -+ -+void OS::Sleep(int milliseconds) { -+ unsigned int ms = static_cast(milliseconds); -+ usleep(1000 * ms); -+} -+ -+ -+void OS::Abort() { -+ // Redirect to std abort to signal abnormal program termination. -+ abort(); -+} -+ -+ -+void OS::DebugBreak() { -+// TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x, -+// which is the architecture of generated code). 
-+#if (defined(__arm__) || defined(__thumb__)) && \ -+ defined(CAN_USE_ARMV5_INSTRUCTIONS) -+ asm("bkpt 0"); -+#elif defined(__mips__) -+ asm("break"); -+#else -+ asm("int $3"); -+#endif -+} -+ -+ -+class PosixMemoryMappedFile : public OS::MemoryMappedFile { -+ public: -+ PosixMemoryMappedFile(FILE* file, void* memory, int size) -+ : file_(file), memory_(memory), size_(size) { } -+ virtual ~PosixMemoryMappedFile(); -+ virtual void* memory() { return memory_; } -+ private: -+ FILE* file_; -+ void* memory_; -+ int size_; -+}; -+ -+ -+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, -+ void* initial) { -+ FILE* file = fopen(name, "w+"); -+ if (file == NULL) return NULL; -+ int result = fwrite(initial, size, 1, file); -+ if (result < 1) { -+ fclose(file); -+ return NULL; -+ } -+ void* memory = -+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); -+ return new PosixMemoryMappedFile(file, memory, size); -+} -+ -+ -+PosixMemoryMappedFile::~PosixMemoryMappedFile() { -+ if (memory_) munmap(memory_, size_); -+ fclose(file_); -+} -+ -+ -+void OS::LogSharedLibraryAddresses() { -+#ifdef ENABLE_LOGGING_AND_PROFILING -+ // This function assumes that the layout of the file is as follows: -+ // hex_start_addr-hex_end_addr rwxp [binary_file_name] -+ // If we encounter an unexpected situation we abort scanning further entries. -+ FILE* fp = fopen("/proc/self/maps", "r"); -+ if (fp == NULL) return; -+ -+ // Allocate enough room to be able to store a full file name. -+ const int kLibNameLen = FILENAME_MAX + 1; -+ char* lib_name = reinterpret_cast(malloc(kLibNameLen)); -+ -+ // This loop will terminate once the scanning hits an EOF. -+ while (true) { -+ uintptr_t start, end; -+ char attr_r, attr_w, attr_x, attr_p; -+ // Parse the addresses and permission bits at the beginning of the line. -+ if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; -+ if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; -+ -+ int c; -+ if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') { -+ // Found a read-only executable entry. Skip characters until we reach -+ // the beginning of the filename or the end of the line. -+ do { -+ c = getc(fp); -+ } while ((c != EOF) && (c != '\n') && (c != '/')); -+ if (c == EOF) break; // EOF: Was unexpected, just exit. -+ -+ // Process the filename if found. -+ if (c == '/') { -+ ungetc(c, fp); // Push the '/' back into the stream to be read below. -+ -+ // Read to the end of the line. Exit if the read fails. -+ if (fgets(lib_name, kLibNameLen, fp) == NULL) break; -+ -+ // Drop the newline character read by fgets. We do not need to check -+ // for a zero-length string because we know that we at least read the -+ // '/' character. -+ lib_name[strlen(lib_name) - 1] = '\0'; -+ } else { -+ // No library name found, just record the raw address range. -+ snprintf(lib_name, kLibNameLen, -+ "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); -+ } -+ LOG(SharedLibraryEvent(lib_name, start, end)); -+ } else { -+ // Entry not describing executable data. Skip to end of line to setup -+ // reading the next entry. -+ do { -+ c = getc(fp); -+ } while ((c != EOF) && (c != '\n')); -+ if (c == EOF) break; -+ } -+ } -+ free(lib_name); -+ fclose(fp); -+#endif -+} -+ -+ -+int OS::StackWalk(Vector frames) { -+ // backtrace is a glibc extension. 
-+#ifdef __GLIBC__ -+ int frames_size = frames.length(); -+ ScopedVector addresses(frames_size); -+ -+ int frames_count = backtrace(addresses.start(), frames_size); -+ -+ char** symbols = backtrace_symbols(addresses.start(), frames_count); -+ if (symbols == NULL) { -+ return kStackWalkError; -+ } -+ -+ for (int i = 0; i < frames_count; i++) { -+ frames[i].address = addresses[i]; -+ // Format a text representation of the frame based on the information -+ // available. -+ SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen), -+ "%s", -+ symbols[i]); -+ // Make sure line termination is in place. -+ frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; -+ } -+ -+ free(symbols); -+ -+ return frames_count; -+#else // ndef __GLIBC__ -+ return 0; -+#endif // ndef __GLIBC__ -+} -+ -+ -+// Constants used for mmap. -+static const int kMmapFd = -1; -+static const int kMmapFdOffset = 0; -+ -+ -+VirtualMemory::VirtualMemory(size_t size) { -+ address_ = mmap(NULL, size, PROT_NONE, -+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -+ kMmapFd, kMmapFdOffset); -+ size_ = size; -+} -+ -+ -+VirtualMemory::~VirtualMemory() { -+ if (IsReserved()) { -+ if (0 == munmap(address(), size())) address_ = MAP_FAILED; -+ } -+} -+ -+ -+bool VirtualMemory::IsReserved() { -+ return address_ != MAP_FAILED; -+} -+ -+ -+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { -+ int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); -+ -+#ifdef HAS_MAP_FIXED -+ if (MAP_FAILED == mmap(address, size, prot, -+ MAP_PRIVATE | MAP_ANONYMOUS, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED -+ kMmapFd, kMmapFdOffset)) { -+ return false; -+ } -+#else -+ if (mprotect(address, size, prot) != 0) { -+ return false; -+ } -+#endif -+ -+ UpdateAllocatedSpaceLimits(address, size); -+ return true; -+} -+ -+ -+bool VirtualMemory::Uncommit(void* address, size_t size) { -+ return mmap(address, size, PROT_NONE, -+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, // | MAP_FIXED, - Cygwin doesn't have MAP_FIXED -+ kMmapFd, kMmapFdOffset) != MAP_FAILED; -+} -+ -+ -+class ThreadHandle::PlatformData : public Malloced { -+ public: -+ explicit PlatformData(ThreadHandle::Kind kind) { -+ Initialize(kind); -+ } -+ -+ void Initialize(ThreadHandle::Kind kind) { -+ switch (kind) { -+ case ThreadHandle::SELF: thread_ = pthread_self(); break; -+ case ThreadHandle::INVALID: thread_ = kNoThread; break; -+ } -+ } -+ -+ pthread_t thread_; // Thread handle for pthread. -+}; -+ -+ -+ThreadHandle::ThreadHandle(Kind kind) { -+ data_ = new PlatformData(kind); -+} -+ -+ -+void ThreadHandle::Initialize(ThreadHandle::Kind kind) { -+ data_->Initialize(kind); -+} -+ -+ -+ThreadHandle::~ThreadHandle() { -+ delete data_; -+} -+ -+ -+bool ThreadHandle::IsSelf() const { -+ return pthread_equal(data_->thread_, pthread_self()); -+} -+ -+ -+bool ThreadHandle::IsValid() const { -+ return data_->thread_ != kNoThread; -+} -+ -+ -+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) { -+} -+ -+ -+Thread::~Thread() { -+} -+ -+ -+static void* ThreadEntry(void* arg) { -+ Thread* thread = reinterpret_cast(arg); -+ // This is also initialized by the first argument to pthread_create() but we -+ // don't know which thread will run first (the original thread or the new -+ // one) so we initialize it here too. 
-+ thread->thread_handle_data()->thread_ = pthread_self(); -+ ASSERT(thread->IsValid()); -+ thread->Run(); -+ return NULL; -+} -+ -+ -+void Thread::Start() { -+ pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this); -+ ASSERT(IsValid()); -+} -+ -+ -+void Thread::Join() { -+ pthread_join(thread_handle_data()->thread_, NULL); -+} -+ -+ -+Thread::LocalStorageKey Thread::CreateThreadLocalKey() { -+ pthread_key_t key; -+ int result = pthread_key_create(&key, NULL); -+ USE(result); -+ ASSERT(result == 0); -+ return static_cast(key); -+} -+ -+ -+void Thread::DeleteThreadLocalKey(LocalStorageKey key) { -+ pthread_key_t pthread_key = static_cast(key); -+ int result = pthread_key_delete(pthread_key); -+ USE(result); -+ ASSERT(result == 0); -+} -+ -+ -+void* Thread::GetThreadLocal(LocalStorageKey key) { -+ pthread_key_t pthread_key = static_cast(key); -+ return pthread_getspecific(pthread_key); -+} -+ -+ -+void Thread::SetThreadLocal(LocalStorageKey key, void* value) { -+ pthread_key_t pthread_key = static_cast(key); -+ pthread_setspecific(pthread_key, value); -+} -+ -+ -+void Thread::YieldCPU() { -+ sched_yield(); -+} -+ -+ -+class CygwinMutex : public Mutex { -+ public: -+ -+ CygwinMutex() { -+ pthread_mutexattr_t attrs; -+ memset(&attrs, 0, sizeof(attrs)); -+ -+ int result = pthread_mutexattr_init(&attrs); -+ ASSERT(result == 0); -+ result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE); -+ ASSERT(result == 0); -+ result = pthread_mutex_init(&mutex_, &attrs); -+ ASSERT(result == 0); -+ } -+ -+ virtual ~CygwinMutex() { pthread_mutex_destroy(&mutex_); } -+ -+ virtual int Lock() { -+ int result = pthread_mutex_lock(&mutex_); -+ return result; -+ } -+ -+ virtual int Unlock() { -+ int result = pthread_mutex_unlock(&mutex_); -+ return result; -+ } -+ -+ private: -+ pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms. -+}; -+ -+ -+Mutex* OS::CreateMutex() { -+ return new CygwinMutex(); -+} -+ -+ -+class CygwinSemaphore : public Semaphore { -+ public: -+ explicit CygwinSemaphore(int count) { sem_init(&sem_, 0, count); } -+ virtual ~CygwinSemaphore() { sem_destroy(&sem_); } -+ -+ virtual void Wait(); -+ virtual bool Wait(int timeout); -+ virtual void Signal() { sem_post(&sem_); } -+ private: -+ sem_t sem_; -+}; -+ -+ -+void CygwinSemaphore::Wait() { -+ while (true) { -+ int result = sem_wait(&sem_); -+ if (result == 0) return; // Successfully got semaphore. -+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. -+ } -+} -+ -+ -+#ifndef TIMEVAL_TO_TIMESPEC -+#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \ -+ (ts)->tv_sec = (tv)->tv_sec; \ -+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \ -+} while (false) -+#endif -+ -+ -+bool CygwinSemaphore::Wait(int timeout) { -+ const long kOneSecondMicros = 1000000; // NOLINT -+ -+ // Split timeout into second and nanosecond parts. -+ struct timeval delta; -+ delta.tv_usec = timeout % kOneSecondMicros; -+ delta.tv_sec = timeout / kOneSecondMicros; -+ -+ struct timeval current_time; -+ // Get the current time. -+ if (gettimeofday(¤t_time, NULL) == -1) { -+ return false; -+ } -+ -+ // Calculate time for end of timeout. -+ struct timeval end_time; -+ timeradd(¤t_time, &delta, &end_time); -+ -+ struct timespec ts; -+ TIMEVAL_TO_TIMESPEC(&end_time, &ts); -+ // Wait for semaphore signalled or timeout. -+ while (true) { -+ int result = sem_timedwait(&sem_, &ts); -+ if (result == 0) return true; // Successfully got semaphore. -+ if (result > 0) { -+ // For glibc prior to 2.3.4 sem_timedwait returns the error instead of -1. 
-+ errno = result; -+ result = -1; -+ } -+ if (result == -1 && errno == ETIMEDOUT) return false; // Timeout. -+ CHECK(result == -1 && errno == EINTR); // Signal caused spurious wakeup. -+ } -+} -+ -+ -+Semaphore* OS::CreateSemaphore(int count) { -+ return new CygwinSemaphore(count); -+} -+ -+ -+#ifdef ENABLE_LOGGING_AND_PROFILING -+ -+static Sampler* active_sampler_ = NULL; -+static pthread_t vm_thread_ = 0; -+ -+ -+#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__)) -+// Android runs a fairly new Linux kernel, so signal info is there, -+// but the C library doesn't have the structs defined. -+ -+struct sigcontext { -+ uint32_t trap_no; -+ uint32_t error_code; -+ uint32_t oldmask; -+ uint32_t gregs[16]; -+ uint32_t arm_cpsr; -+ uint32_t fault_address; -+}; -+typedef uint32_t __sigset_t; -+typedef struct sigcontext mcontext_t; -+typedef struct ucontext { -+ uint32_t uc_flags; -+ struct ucontext* uc_link; -+ stack_t uc_stack; -+ mcontext_t uc_mcontext; -+ __sigset_t uc_sigmask; -+} ucontext_t; -+enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11}; -+ -+#endif -+ -+ -+// A function that determines if a signal handler is called in the context -+// of a VM thread. -+// -+// The problem is that SIGPROF signal can be delivered to an arbitrary thread -+// (see http://code.google.com/p/google-perftools/issues/detail?id=106#c2) -+// So, if the signal is being handled in the context of a non-VM thread, -+// it means that the VM thread is running, and trying to sample its stack can -+// cause a crash. -+static inline bool IsVmThread() { -+ // In the case of a single VM thread, this check is enough. -+ if (pthread_equal(pthread_self(), vm_thread_)) return true; -+ // If there are multiple threads that use VM, they must have a thread id -+ // stored in TLS. To verify that the thread is really executing VM, -+ // we check Top's data. Having that ThreadManager::RestoreThread first -+ // restores ThreadLocalTop from TLS, and only then erases the TLS value, -+ // reading Top::thread_id() should not be affected by races. -+ if (ThreadManager::HasId() && !ThreadManager::IsArchived() && -+ ThreadManager::CurrentId() == Top::thread_id()) { -+ return true; -+ } -+ return false; -+} -+ -+ -+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { -+#ifndef V8_HOST_ARCH_MIPS -+ USE(info); -+ if (signal != SIGPROF) return; -+ if (active_sampler_ == NULL) return; -+ -+ TickSample sample_obj; -+ TickSample* sample = CpuProfiler::TickSampleEvent(); -+ if (sample == NULL) sample = &sample_obj; -+ -+ // We always sample the VM state. -+ sample->state = VMState::current_state(); -+ -+#if 0 -+ // If profiling, we extract the current pc and sp. -+ if (active_sampler_->IsProfiling()) { -+ // Extracting the sample from the context is extremely machine dependent. -+ ucontext_t* ucontext = reinterpret_cast(context); -+ mcontext_t& mcontext = ucontext->uc_mcontext; -+#if V8_HOST_ARCH_IA32 -+ sample->pc = reinterpret_cast
<Address>(mcontext.gregs[REG_EIP]); -+ sample->sp = reinterpret_cast
<Address>(mcontext.gregs[REG_ESP]); -+ sample->fp = reinterpret_cast
<Address>(mcontext.gregs[REG_EBP]); -+#elif V8_HOST_ARCH_X64 -+ sample->pc = reinterpret_cast
<Address>(mcontext.gregs[REG_RIP]); -+ sample->sp = reinterpret_cast
<Address>(mcontext.gregs[REG_RSP]); -+ sample->fp = reinterpret_cast
<Address>(mcontext.gregs[REG_RBP]); -+#elif V8_HOST_ARCH_ARM -+// An undefined macro evaluates to 0, so this applies to Android's Bionic also. -+#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3)) -+ sample->pc = reinterpret_cast
<Address>(mcontext.gregs[R15]); -+ sample->sp = reinterpret_cast
<Address>(mcontext.gregs[R13]); -+ sample->fp = reinterpret_cast
<Address>(mcontext.gregs[R11]); -+#else -+ sample->pc = reinterpret_cast
<Address>(mcontext.arm_pc); -+ sample->sp = reinterpret_cast
<Address>(mcontext.arm_sp); -+ sample->fp = reinterpret_cast<Address>
(mcontext.arm_fp); -+#endif -+#elif V8_HOST_ARCH_MIPS -+ // Implement this on MIPS. -+ UNIMPLEMENTED(); -+#endif -+ if (IsVmThread()) { -+ active_sampler_->SampleStack(sample); -+ } -+ } -+#endif -+ -+ active_sampler_->Tick(sample); -+#endif -+} -+ -+ -+class Sampler::PlatformData : public Malloced { -+ public: -+ PlatformData() { -+ signal_handler_installed_ = false; -+ } -+ -+ bool signal_handler_installed_; -+ struct sigaction old_signal_handler_; -+ struct itimerval old_timer_value_; -+}; -+ -+ -+Sampler::Sampler(int interval, bool profiling) -+ : interval_(interval), profiling_(profiling), active_(false) { -+ data_ = new PlatformData(); -+} -+ -+ -+Sampler::~Sampler() { -+ delete data_; -+} -+ -+ -+void Sampler::Start() { -+ // There can only be one active sampler at the time on POSIX -+ // platforms. -+ if (active_sampler_ != NULL) return; -+ -+ vm_thread_ = pthread_self(); -+ -+ // Request profiling signals. -+ struct sigaction sa; -+ sa.sa_sigaction = ProfilerSignalHandler; -+ sigemptyset(&sa.sa_mask); -+ sa.sa_flags = SA_SIGINFO; -+ if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return; -+ data_->signal_handler_installed_ = true; -+ -+ // Set the itimer to generate a tick for each interval. -+ itimerval itimer; -+ itimer.it_interval.tv_sec = interval_ / 1000; -+ itimer.it_interval.tv_usec = (interval_ % 1000) * 1000; -+ itimer.it_value.tv_sec = itimer.it_interval.tv_sec; -+ itimer.it_value.tv_usec = itimer.it_interval.tv_usec; -+ setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_); -+ -+ // Set this sampler as the active sampler. -+ active_sampler_ = this; -+ active_ = true; -+} -+ -+ -+void Sampler::Stop() { -+ // Restore old signal handler -+ if (data_->signal_handler_installed_) { -+ setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL); -+ sigaction(SIGPROF, &data_->old_signal_handler_, 0); -+ data_->signal_handler_installed_ = false; -+ } -+ -+ // This sampler is no longer the active sampler. -+ active_sampler_ = NULL; -+ active_ = false; -+} -+ -+ -+#endif // ENABLE_LOGGING_AND_PROFILING -+ -+} } // namespace v8::internal -diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h -index d63ca5e..1091ba6 100644 ---- a/deps/v8/src/platform.h -+++ b/deps/v8/src/platform.h -@@ -360,7 +360,11 @@ class ThreadHandle { - class Thread: public ThreadHandle { - public: - // Opaque data type for thread-local storage keys. -+#ifndef __CYGWIN__ - enum LocalStorageKey {}; -+#else -+ typedef void *LocalStorageKey; -+#endif - - // Create new thread. 
- Thread(); -diff --git a/deps/v8/tools/utils.py b/deps/v8/tools/utils.py -index 3a55722..505c398 100644 ---- a/deps/v8/tools/utils.py -+++ b/deps/v8/tools/utils.py -@@ -59,6 +59,8 @@ def GuessOS(): - return 'openbsd' - elif id == 'SunOS': - return 'solaris' -+ elif id.find('CYGWIN') >= 0: -+ return 'cygwin' - else: - return None - diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 29b8e1f377..7fae8d4b6f 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -62,7 +62,6 @@ SOURCES = { execution.cc factory.cc flags.cc - flow-graph.cc frame-element.cc frames.cc full-codegen.cc @@ -121,6 +120,7 @@ SOURCES = { jump-target-light.cc virtual-frame-light.cc arm/builtins-arm.cc + arm/code-stubs-arm.cc arm/codegen-arm.cc arm/constants-arm.cc arm/cpu-arm.cc @@ -159,6 +159,7 @@ SOURCES = { virtual-frame-heavy.cc ia32/assembler-ia32.cc ia32/builtins-ia32.cc + ia32/code-stubs-ia32.cc ia32/codegen-ia32.cc ia32/cpu-ia32.cc ia32/debug-ia32.cc @@ -178,6 +179,7 @@ SOURCES = { virtual-frame-heavy.cc x64/assembler-x64.cc x64/builtins-x64.cc + x64/code-stubs-x64.cc x64/codegen-x64.cc x64/cpu-x64.cc x64/debug-x64.cc @@ -200,7 +202,6 @@ SOURCES = { 'os:android': ['platform-linux.cc', 'platform-posix.cc'], 'os:macos': ['platform-macos.cc', 'platform-posix.cc'], 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'], - 'os:cygwin': ['platform-cygwin.cc', 'platform-posix.cc'], 'os:nullos': ['platform-nullos.cc'], 'os:win32': ['platform-win32.cc'], 'mode:release': [], diff --git a/deps/v8/src/SConscript.orig b/deps/v8/src/SConscript.orig deleted file mode 100755 index e6b4e3820c..0000000000 --- a/deps/v8/src/SConscript.orig +++ /dev/null @@ -1,324 +0,0 @@ -# Copyright 2008 the V8 project authors. All rights reserved. -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import sys -from os.path import join, dirname, abspath -root_dir = dirname(File('SConstruct').rfile().abspath) -sys.path.append(join(root_dir, 'tools')) -import js2c -Import('context') - - -SOURCES = { - 'all': Split(""" - accessors.cc - allocation.cc - api.cc - assembler.cc - ast.cc - bootstrapper.cc - builtins.cc - checks.cc - circular-queue.cc - code-stubs.cc - codegen.cc - compilation-cache.cc - compiler.cc - contexts.cc - conversions.cc - counters.cc - cpu-profiler.cc - data-flow.cc - dateparser.cc - debug-agent.cc - debug.cc - disassembler.cc - diy-fp.cc - dtoa.cc - execution.cc - factory.cc - flags.cc - flow-graph.cc - frame-element.cc - frames.cc - full-codegen.cc - func-name-inferrer.cc - global-handles.cc - fast-dtoa.cc - fixed-dtoa.cc - handles.cc - hashmap.cc - heap-profiler.cc - heap.cc - ic.cc - interpreter-irregexp.cc - jsregexp.cc - jump-target.cc - liveedit.cc - log-utils.cc - log.cc - mark-compact.cc - messages.cc - objects.cc - objects-visiting.cc - oprofile-agent.cc - parser.cc - profile-generator.cc - property.cc - regexp-macro-assembler-irregexp.cc - regexp-macro-assembler.cc - regexp-stack.cc - register-allocator.cc - rewriter.cc - runtime.cc - scanner.cc - scopeinfo.cc - scopes.cc - serialize.cc - snapshot-common.cc - spaces.cc - string-stream.cc - stub-cache.cc - token.cc - top.cc - type-info.cc - unicode.cc - utils.cc - v8-counters.cc - v8.cc - v8threads.cc - variables.cc - version.cc - virtual-frame.cc - vm-state.cc - zone.cc - """), - 'arch:arm': Split(""" - jump-target-light.cc - virtual-frame-light.cc - arm/builtins-arm.cc - arm/codegen-arm.cc - arm/constants-arm.cc - arm/cpu-arm.cc - arm/debug-arm.cc - arm/disasm-arm.cc - arm/frames-arm.cc - arm/full-codegen-arm.cc - arm/ic-arm.cc - arm/jump-target-arm.cc - arm/macro-assembler-arm.cc - arm/regexp-macro-assembler-arm.cc - arm/register-allocator-arm.cc - arm/stub-cache-arm.cc - arm/virtual-frame-arm.cc - arm/assembler-arm.cc - """), - 'arch:mips': Split(""" - mips/assembler-mips.cc - mips/builtins-mips.cc - mips/codegen-mips.cc - mips/constants-mips.cc - mips/cpu-mips.cc - mips/debug-mips.cc - mips/disasm-mips.cc - mips/full-codegen-mips.cc - mips/frames-mips.cc - mips/ic-mips.cc - mips/jump-target-mips.cc - mips/macro-assembler-mips.cc - mips/register-allocator-mips.cc - mips/stub-cache-mips.cc - mips/virtual-frame-mips.cc - """), - 'arch:ia32': Split(""" - jump-target-heavy.cc - virtual-frame-heavy.cc - ia32/assembler-ia32.cc - ia32/builtins-ia32.cc - ia32/codegen-ia32.cc - ia32/cpu-ia32.cc - ia32/debug-ia32.cc - ia32/disasm-ia32.cc - ia32/frames-ia32.cc - ia32/full-codegen-ia32.cc - ia32/ic-ia32.cc - ia32/jump-target-ia32.cc - ia32/macro-assembler-ia32.cc - ia32/regexp-macro-assembler-ia32.cc - ia32/register-allocator-ia32.cc - ia32/stub-cache-ia32.cc - ia32/virtual-frame-ia32.cc - """), - 'arch:x64': Split(""" - jump-target-heavy.cc - virtual-frame-heavy.cc - x64/assembler-x64.cc - x64/builtins-x64.cc - x64/codegen-x64.cc - x64/cpu-x64.cc - x64/debug-x64.cc - x64/disasm-x64.cc - x64/frames-x64.cc - x64/full-codegen-x64.cc - x64/ic-x64.cc - x64/jump-target-x64.cc - x64/macro-assembler-x64.cc - x64/regexp-macro-assembler-x64.cc - x64/register-allocator-x64.cc - x64/stub-cache-x64.cc - x64/virtual-frame-x64.cc - """), - 'simulator:arm': ['arm/simulator-arm.cc'], - 'simulator:mips': ['mips/simulator-mips.cc'], - 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'], - 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'], - 'os:linux': ['platform-linux.cc', 'platform-posix.cc'], - 'os:android': 
['platform-linux.cc', 'platform-posix.cc'], - 'os:macos': ['platform-macos.cc', 'platform-posix.cc'], - 'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'], - 'os:nullos': ['platform-nullos.cc'], - 'os:win32': ['platform-win32.cc'], - 'mode:release': [], - 'mode:debug': [ - 'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc' - ] -} - - -D8_FILES = { - 'all': [ - 'd8.cc', 'd8-debug.cc' - ], - 'os:linux': [ - 'd8-posix.cc' - ], - 'os:macos': [ - 'd8-posix.cc' - ], - 'os:android': [ - 'd8-posix.cc' - ], - 'os:freebsd': [ - 'd8-posix.cc' - ], - 'os:openbsd': [ - 'd8-posix.cc' - ], - 'os:solaris': [ - 'd8-posix.cc' - ], - 'os:win32': [ - 'd8-windows.cc' - ], - 'os:nullos': [ - 'd8-windows.cc' # Empty implementation at the moment. - ], - 'console:readline': [ - 'd8-readline.cc' - ] -} - - -LIBRARY_FILES = ''' -runtime.js -v8natives.js -array.js -string.js -uri.js -math.js -messages.js -apinatives.js -date.js -regexp.js -json.js -liveedit-debugger.js -mirror-debugger.js -debug-debugger.js -'''.split() - - -def Abort(message): - print message - sys.exit(1) - - -def ConfigureObjectFiles(): - env = Environment() - env.Replace(**context.flags['v8']) - context.ApplyEnvOverrides(env) - env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C) - env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions') - - # Build the standard platform-independent source files. - source_files = context.GetRelevantSources(SOURCES) - - d8_files = context.GetRelevantSources(D8_FILES) - d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8') - d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.']) - d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj] - - # Combine the JavaScript library files into a single C++ file and - # compile it. - library_files = [s for s in LIBRARY_FILES] - library_files.append('macros.py') - libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE') - libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.']) - - # Build dtoa. - dtoa_env = env.Copy() - dtoa_env.Replace(**context.flags['dtoa']) - dtoa_files = ['dtoa-config.c'] - dtoa_obj = context.ConfigureObject(dtoa_env, dtoa_files) - - source_objs = context.ConfigureObject(env, source_files) - non_snapshot_files = [dtoa_obj, source_objs] - - # Create snapshot if necessary. For cross compilation you should either - # do without snapshots and take the performance hit or you should build a - # host VM with the simulator=arm and snapshot=on options and then take the - # resulting snapshot.cc file from obj/release and put it in the src - # directory. Then rebuild the VM with the cross compiler and specify - # snapshot=nobuild on the scons command line. 
- empty_snapshot_obj = context.ConfigureObject(env, 'snapshot-empty.cc') - mksnapshot_env = env.Copy() - mksnapshot_env.Replace(**context.flags['mksnapshot']) - mksnapshot_src = 'mksnapshot.cc' - mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb') - if context.use_snapshot: - if context.build_snapshot: - snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath) - else: - snapshot_cc = 'snapshot.cc' - snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.']) - else: - snapshot_obj = empty_snapshot_obj - library_objs = [non_snapshot_files, libraries_obj, snapshot_obj] - return (library_objs, d8_objs, [mksnapshot]) - - -(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles() -Return('library_objs d8_objs mksnapshot') diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h index 7a840a191e..eeab2acfe4 100644 --- a/deps/v8/src/accessors.h +++ b/deps/v8/src/accessors.h @@ -75,8 +75,10 @@ class Accessors : public AllStatic { }; // Accessor functions called directly from the runtime system. - static Object* FunctionGetPrototype(Object* object, void*); - static Object* FunctionSetPrototype(JSObject* object, Object* value, void*); + MUST_USE_RESULT static Object* FunctionGetPrototype(Object* object, void*); + MUST_USE_RESULT static Object* FunctionSetPrototype(JSObject* object, + Object* value, + void*); private: // Accessor functions only used through the descriptor. static Object* FunctionGetLength(Object* object, void*); diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 7a967dbffd..0d01fcc75e 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -1136,13 +1136,18 @@ ScriptData* ScriptData::PreCompile(v8::Handle source) { ScriptData* ScriptData::New(const char* data, int length) { // Return an empty ScriptData if the length is obviously invalid. if (length % sizeof(unsigned) != 0) { - return new i::ScriptDataImpl(i::Vector()); + return new i::ScriptDataImpl(); } // Copy the data to ensure it is properly aligned. int deserialized_data_length = length / sizeof(unsigned); + // If aligned, don't create a copy of the data. + if (reinterpret_cast(data) % sizeof(unsigned) == 0) { + return new i::ScriptDataImpl(data, length); + } + // Copy the data to align it. 
unsigned* deserialized_data = i::NewArray(deserialized_data_length); - memcpy(deserialized_data, data, length); + i::MemCopy(deserialized_data, data, length); return new i::ScriptDataImpl( i::Vector(deserialized_data, deserialized_data_length)); @@ -3905,6 +3910,22 @@ void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) { } +void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback, + ObjectSpace space, + AllocationAction action) { + if (IsDeadCheck("v8::V8::AddMemoryAllocationCallback()")) return; + i::MemoryAllocator::AddMemoryAllocationCallback(callback, + space, + action); +} + + +void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) { + if (IsDeadCheck("v8::V8::RemoveMemoryAllocationCallback()")) return; + i::MemoryAllocator::RemoveMemoryAllocationCallback(callback); +} + + void V8::PauseProfiler() { #ifdef ENABLE_LOGGING_AND_PROFILING PauseProfilerEx(PROFILER_MODULE_CPU); @@ -4592,10 +4613,18 @@ Handle HeapGraphNode::GetName() const { uint64_t HeapGraphNode::GetId() const { IsDeadCheck("v8::HeapGraphNode::GetId"); + ASSERT(ToInternal(this)->snapshot()->type() != i::HeapSnapshot::kAggregated); return ToInternal(this)->id(); } +int HeapGraphNode::GetInstancesCount() const { + IsDeadCheck("v8::HeapGraphNode::GetInstancesCount"); + ASSERT(ToInternal(this)->snapshot()->type() == i::HeapSnapshot::kAggregated); + return static_cast(ToInternal(this)->id()); +} + + int HeapGraphNode::GetSelfSize() const { IsDeadCheck("v8::HeapGraphNode::GetSelfSize"); return ToInternal(this)->self_size(); @@ -4677,6 +4706,12 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) { } +HeapSnapshot::Type HeapSnapshot::GetType() const { + IsDeadCheck("v8::HeapSnapshot::GetType"); + return static_cast(ToInternal(this)->type()); +} + + unsigned HeapSnapshot::GetUid() const { IsDeadCheck("v8::HeapSnapshot::GetUid"); return ToInternal(this)->uid(); @@ -4724,10 +4759,22 @@ const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) { } -const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle title) { +const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle title, + HeapSnapshot::Type type) { IsDeadCheck("v8::HeapProfiler::TakeSnapshot"); + i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull; + switch (type) { + case HeapSnapshot::kFull: + internal_type = i::HeapSnapshot::kFull; + break; + case HeapSnapshot::kAggregated: + internal_type = i::HeapSnapshot::kAggregated; + break; + default: + UNREACHABLE(); + } return reinterpret_cast( - i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title))); + i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title), internal_type)); } #endif // ENABLE_LOGGING_AND_PROFILING diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 136c82e7ef..7d368bf415 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -1809,6 +1809,7 @@ void Assembler::stc2(Coprocessor coproc, // Support for VFP. 
+ void Assembler::vldr(const DwVfpRegister dst, const Register base, int offset, @@ -1820,6 +1821,7 @@ void Assembler::vldr(const DwVfpRegister dst, ASSERT(CpuFeatures::IsEnabled(VFP3)); ASSERT(offset % 4 == 0); ASSERT((offset / 4) < 256); + ASSERT(offset >= 0); emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 | 0xB*B8 | ((offset / 4) & 255)); } @@ -1836,7 +1838,10 @@ void Assembler::vldr(const SwVfpRegister dst, ASSERT(CpuFeatures::IsEnabled(VFP3)); ASSERT(offset % 4 == 0); ASSERT((offset / 4) < 256); - emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 | + ASSERT(offset >= 0); + int sd, d; + dst.split_code(&sd, &d); + emit(cond | d*B22 | 0xD9*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); } @@ -1852,11 +1857,31 @@ void Assembler::vstr(const DwVfpRegister src, ASSERT(CpuFeatures::IsEnabled(VFP3)); ASSERT(offset % 4 == 0); ASSERT((offset / 4) < 256); + ASSERT(offset >= 0); emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 | 0xB*B8 | ((offset / 4) & 255)); } +void Assembler::vstr(const SwVfpRegister src, + const Register base, + int offset, + const Condition cond) { + // MEM(Rbase + offset) = SSrc. + // Instruction details available in ARM DDI 0406A, A8-786. + // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) | + // Vdst(15-12) | 1010(11-8) | (offset/4) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + ASSERT(offset % 4 == 0); + ASSERT((offset / 4) < 256); + ASSERT(offset >= 0); + int sd, d; + src.split_code(&sd, &d); + emit(cond | d*B22 | 0xD8*B20 | base.code()*B16 | sd*B12 | + 0xA*B8 | ((offset / 4) & 255)); +} + + static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { uint64_t i; memcpy(&i, &d, 8); @@ -1959,8 +1984,10 @@ void Assembler::vmov(const SwVfpRegister dst, // Sd = Sm // Instruction details available in ARM DDI 0406B, A8-642. ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | 0xB*B20 | - dst.code()*B12 | 0x5*B9 | B6 | src.code()); + int sd, d, sm, m; + dst.split_code(&sd, &d); + src.split_code(&sm, &m); + emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm); } @@ -2014,8 +2041,9 @@ void Assembler::vmov(const SwVfpRegister dst, // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) ASSERT(CpuFeatures::IsEnabled(VFP3)); ASSERT(!src.is(pc)); - emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 | - src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4); + int sn, n; + dst.split_code(&sn, &n); + emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4); } @@ -2028,8 +2056,9 @@ void Assembler::vmov(const Register dst, // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) ASSERT(CpuFeatures::IsEnabled(VFP3)); ASSERT(!dst.is(pc)); - emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 | - dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4); + int sn, n; + src.split_code(&sn, &n); + emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4); } @@ -2079,16 +2108,21 @@ static bool IsDoubleVFPType(VFPType type) { } -// Depending on split_last_bit split binary representation of reg_code into Vm:M -// or M:Vm form (where M is single bit). -static void SplitRegCode(bool split_last_bit, +// Split five bit reg_code based on size of reg_type. +// 32-bit register codes are Vm:M +// 64-bit register codes are M:Vm +// where Vm is four bits, and M is a single bit. 
+static void SplitRegCode(VFPType reg_type, int reg_code, int* vm, int* m) { - if (split_last_bit) { + ASSERT((reg_code >= 0) && (reg_code <= 31)); + if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) { + // 32 bit type. *m = reg_code & 0x1; *vm = reg_code >> 1; } else { + // 64 bit type. *m = (reg_code & 0x10) >> 4; *vm = reg_code & 0x0F; } @@ -2101,6 +2135,11 @@ static Instr EncodeVCVT(const VFPType dst_type, const VFPType src_type, const int src_code, const Condition cond) { + ASSERT(src_type != dst_type); + int D, Vd, M, Vm; + SplitRegCode(src_type, src_code, &Vm, &M); + SplitRegCode(dst_type, dst_code, &Vd, &D); + if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) { // Conversion between IEEE floating point and 32-bit integer. // Instruction details available in ARM DDI 0406B, A8.6.295. @@ -2108,22 +2147,17 @@ static Instr EncodeVCVT(const VFPType dst_type, // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0) ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type)); - int sz, opc2, D, Vd, M, Vm, op; + int sz, opc2, op; if (IsIntegerVFPType(dst_type)) { opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4; sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; op = 1; // round towards zero - SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M); - SplitRegCode(true, dst_code, &Vd, &D); } else { ASSERT(IsIntegerVFPType(src_type)); - opc2 = 0x0; sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0; op = IsSignedVFPType(src_type) ? 0x1 : 0x0; - SplitRegCode(true, src_code, &Vm, &M); - SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D); } return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 | @@ -2133,13 +2167,7 @@ static Instr EncodeVCVT(const VFPType dst_type, // Instruction details available in ARM DDI 0406B, A8.6.298. // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) | // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0) - int sz, D, Vd, M, Vm; - - ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type)); - sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; - SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D); - SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M); - + int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 | Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm); } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 218eb97f3c..be9aa92f1a 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -120,6 +120,11 @@ struct SwVfpRegister { ASSERT(is_valid()); return 1 << code_; } + void split_code(int* vm, int* m) const { + ASSERT(is_valid()); + *m = code_ & 0x1; + *vm = code_ >> 1; + } int code_; }; @@ -152,6 +157,11 @@ struct DwVfpRegister { ASSERT(is_valid()); return 1 << code_; } + void split_code(int* vm, int* m) const { + ASSERT(is_valid()); + *m = (code_ & 0x10) >> 4; + *vm = code_ & 0x0F; + } int code_; }; @@ -966,6 +976,11 @@ class Assembler : public Malloced { int offset, // Offset must be a multiple of 4. const Condition cond = al); + void vstr(const SwVfpRegister src, + const Register base, + int offset, // Offset must be a multiple of 4. 
+ const Condition cond = al); + void vmov(const DwVfpRegister dst, double imm, const Condition cond = al); diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 7e7e358c10..8b21558165 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -125,7 +125,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset)); // Field JSArray::kElementsOffset is initialized later. - __ mov(scratch3, Operand(0)); + __ mov(scratch3, Operand(0, RelocInfo::NONE)); __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset)); // Calculate the location of the elements array and set elements array member @@ -311,7 +311,7 @@ static void ArrayNativeCode(MacroAssembler* masm, Label argc_one_or_more, argc_two_or_more; // Check for array construction with zero arguments or one. - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); __ b(ne, &argc_one_or_more); // Handle construction of an empty array. @@ -481,6 +481,13 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { } +void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { + // TODO(849): implement custom construct stub. + // Generate a copy of the generic stub for now. + Generate_JSConstructStubGeneric(masm); +} + + void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : number of arguments @@ -505,12 +512,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // r0: number of arguments // r1: called object __ bind(&non_function_call); - // CALL_NON_FUNCTION expects the non-function constructor as receiver - // (instead of the original receiver from the call site). The receiver is - // stack element argc. - __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); // Set expected number of arguments to zero (not changing r0). - __ mov(r2, Operand(0)); + __ mov(r2, Operand(0, RelocInfo::NONE)); __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ Jump(Handle(builtin(ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET); @@ -840,7 +843,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r5-r7, cp may be clobbered // Clear the context before we push it when entering the JS frame. - __ mov(cp, Operand(0)); + __ mov(cp, Operand(0, RelocInfo::NONE)); // Enter an internal frame. __ EnterInternalFrame(); @@ -1027,7 +1030,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2)); __ str(r1, MemOperand(r2, -kPointerSize)); // Clear r1 to indicate a non-function being called. - __ mov(r1, Operand(0)); + __ mov(r1, Operand(0, RelocInfo::NONE)); // 4. Shift arguments and return address one slot down on the stack // (overwriting the original receiver). Adjust argument count to make @@ -1057,7 +1060,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { { Label function; __ tst(r1, r1); __ b(ne, &function); - __ mov(r2, Operand(0)); // expected arguments is 0 for CALL_NON_FUNCTION + // Expected number of arguments is 0 for CALL_NON_FUNCTION. 
+ __ mov(r2, Operand(0, RelocInfo::NONE)); __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); __ Jump(Handle(builtin(ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET); @@ -1073,8 +1077,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ ldr(r2, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset)); __ mov(r2, Operand(r2, ASR, kSmiTagSize)); - __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset)); - __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); __ cmp(r2, r0); // Check formal and actual parameter counts. __ Jump(Handle(builtin(ArgumentsAdaptorTrampoline)), RelocInfo::CODE_TARGET, ne); @@ -1121,7 +1124,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // Push current limit and index. __ bind(&okay); __ push(r0); // limit - __ mov(r1, Operand(0)); // initial index + __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index __ push(r1); // Change context eagerly to get the right global object if necessary. diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc new file mode 100644 index 0000000000..fa93030ff4 --- /dev/null +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -0,0 +1,4688 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include "v8.h" + +#if defined(V8_TARGET_ARCH_ARM) + +#include "bootstrapper.h" +#include "code-stubs.h" +#include "regexp-macro-assembler.h" + +namespace v8 { +namespace internal { + + +#define __ ACCESS_MASM(masm) + +static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cc, + bool never_nan_nan); +static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* lhs_not_nan, + Label* slow, + bool strict); +static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); +static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, + Register lhs, + Register rhs); + + +void FastNewClosureStub::Generate(MacroAssembler* masm) { + // Create a new closure from the given function info in new + // space. Set the context to the current context in cp. + Label gc; + + // Pop the function info from the stack. + __ pop(r3); + + // Attempt to allocate new JSFunction in new space. + __ AllocateInNewSpace(JSFunction::kSize, + r0, + r1, + r2, + &gc, + TAG_OBJECT); + + // Compute the function map in the current global context and set that + // as the map of the allocated object. + __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); + __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); + __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + + // Initialize the rest of the function. We don't have to update the + // write barrier because the allocated object is in new space. + __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); + __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); + __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); + __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); + __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); + __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); + __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); + __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); + + // Initialize the code pointer in the function to be the one + // found in the shared function info object. + __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); + __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); + + // Return result. The argument function info has been popped already. + __ Ret(); + + // Create a new closure through the slower runtime call. + __ bind(&gc); + __ Push(cp, r3); + __ TailCallRuntime(Runtime::kNewClosure, 2, 1); +} + + +void FastNewContextStub::Generate(MacroAssembler* masm) { + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + + // Attempt to allocate the context in new space. + __ AllocateInNewSpace(FixedArray::SizeFor(length), + r0, + r1, + r2, + &gc, + TAG_OBJECT); + + // Load the function from the stack. + __ ldr(r3, MemOperand(sp, 0)); + + // Setup the object header. + __ LoadRoot(r2, Heap::kContextMapRootIndex); + __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ mov(r2, Operand(Smi::FromInt(length))); + __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); + + // Setup the fixed slots. 
+ __ mov(r1, Operand(Smi::FromInt(0))); + __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX))); + __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); + __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); + + // Copy the global object from the surrounding context. + __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); + + // Initialize the rest of the slots to undefined. + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { + __ str(r1, MemOperand(r0, Context::SlotOffset(i))); + } + + // Remove the on-stack argument and return. + __ mov(cp, r0); + __ pop(); + __ Ret(); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kNewContext, 1, 1); +} + + +void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [sp]: constant elements. + // [sp + kPointerSize]: literal index. + // [sp + (2 * kPointerSize)]: literals array. + + // All sizes here are multiples of kPointerSize. + int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; + int size = JSArray::kSize + elements_size; + + // Load boilerplate object into r3 and check if we need to create a + // boilerplate. + Label slow_case; + __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); + __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); + __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(r3, ip); + __ b(eq, &slow_case); + + if (FLAG_debug_code) { + const char* message; + Heap::RootListIndex expected_map_index; + if (mode_ == CLONE_ELEMENTS) { + message = "Expected (writable) fixed array"; + expected_map_index = Heap::kFixedArrayMapRootIndex; + } else { + ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); + message = "Expected copy-on-write fixed array"; + expected_map_index = Heap::kFixedCOWArrayMapRootIndex; + } + __ push(r3); + __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); + __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); + __ LoadRoot(ip, expected_map_index); + __ cmp(r3, ip); + __ Assert(eq, message); + __ pop(r3); + } + + // Allocate both the JS array and the elements array in one big + // allocation. This avoids multiple limit checks. + __ AllocateInNewSpace(size, + r0, + r1, + r2, + &slow_case, + TAG_OBJECT); + + // Copy the JS array part. + for (int i = 0; i < JSArray::kSize; i += kPointerSize) { + if ((i != JSArray::kElementsOffset) || (length_ == 0)) { + __ ldr(r1, FieldMemOperand(r3, i)); + __ str(r1, FieldMemOperand(r0, i)); + } + } + + if (length_ > 0) { + // Get hold of the elements array of the boilerplate and setup the + // elements pointer in the resulting object. + __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); + __ add(r2, r0, Operand(JSArray::kSize)); + __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset)); + + // Copy the elements array. + __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize); + } + + // Return and remove the on-stack parameters. 
+ __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&slow_case); + __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); +} + + +// Takes a Smi and converts to an IEEE 64 bit floating point value in two +// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and +// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a +// scratch register. Destroys the source register. No GC occurs during this +// stub so you don't have to set up the frame. +class ConvertToDoubleStub : public CodeStub { + public: + ConvertToDoubleStub(Register result_reg_1, + Register result_reg_2, + Register source_reg, + Register scratch_reg) + : result1_(result_reg_1), + result2_(result_reg_2), + source_(source_reg), + zeros_(scratch_reg) { } + + private: + Register result1_; + Register result2_; + Register source_; + Register zeros_; + + // Minor key encoding in 16 bits. + class ModeBits: public BitField {}; + class OpBits: public BitField {}; + + Major MajorKey() { return ConvertToDouble; } + int MinorKey() { + // Encode the parameters in a unique 16 bit value. + return result1_.code() + + (result2_.code() << 4) + + (source_.code() << 8) + + (zeros_.code() << 12); + } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "ConvertToDoubleStub"; } + +#ifdef DEBUG + void Print() { PrintF("ConvertToDoubleStub\n"); } +#endif +}; + + +void ConvertToDoubleStub::Generate(MacroAssembler* masm) { +#ifndef BIG_ENDIAN_FLOATING_POINT + Register exponent = result1_; + Register mantissa = result2_; +#else + Register exponent = result2_; + Register mantissa = result1_; +#endif + Label not_special; + // Convert from Smi to integer. + __ mov(source_, Operand(source_, ASR, kSmiTagSize)); + // Move sign bit from source to destination. This works because the sign bit + // in the exponent word of the double has the same position and polarity as + // the 2's complement sign bit in a Smi. + STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); + __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); + // Subtract from 0 if source was negative. + __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne); + + // We have -1, 0 or 1, which we treat specially. Register source_ contains + // absolute value: it is either equal to 1 (special case of -1 and 1), + // greater than 1 (not a special case) or less than 1 (special case of 0). + __ cmp(source_, Operand(1)); + __ b(gt, &not_special); + + // For 1 or -1 we need to or in the 0 exponent (biased to 1023). + static const uint32_t exponent_word_for_1 = + HeapNumber::kExponentBias << HeapNumber::kExponentShift; + __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); + // 1, 0 and -1 all have 0 for the second word. + __ mov(mantissa, Operand(0, RelocInfo::NONE)); + __ Ret(); + + __ bind(&not_special); + // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. + // Gets the wrong answer for 0, but we already checked for that case above. + __ CountLeadingZeros(zeros_, source_, mantissa); + // Compute exponent and or it into the exponent register. + // We use mantissa as a scratch register here. Use a fudge factor to + // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts + // that fit in the ARM's constant field.
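The ConvertToDoubleStub above packs an integer into the two 32-bit words of an IEEE-754 double: one sign bit, an 11-bit exponent biased by 1023, and 52 fraction bits split 20/32 across the two words. As a reference point only (this is not part of the patch), here is a self-contained C++ sketch of the same count-leading-zeros encoding, checked against the compiler's own representation; the helper name and the use of the GCC/Clang __builtin_clz intrinsic are assumptions.

#include <cassert>
#include <cstdint>
#include <cstring>

// Hypothetical illustration: encode a positive integer the way the stub does,
// using a leading-zero count to find the exponent.
static uint64_t EncodeAsDoubleBits(uint32_t value) {            // value > 0
  int zeros = __builtin_clz(value);                             // GCC/Clang intrinsic
  int exponent = 31 - zeros;                                    // value == 1.f * 2^exponent
  uint64_t fraction = ((uint64_t)value << (zeros + 32)) << 1;   // drop the implicit leading 1
  return ((uint64_t)(exponent + 1023) << 52) | (fraction >> 12);
}

int main() {
  for (uint32_t v = 1; v < 100000; v++) {
    double d = v;
    uint64_t expected;
    std::memcpy(&expected, &d, sizeof expected);
    assert(EncodeAsDoubleBits(v) == expected);
  }
  return 0;
}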
+ int fudge = 0x400; + __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); + __ add(mantissa, mantissa, Operand(fudge)); + __ orr(exponent, + exponent, + Operand(mantissa, LSL, HeapNumber::kExponentShift)); + // Shift up the source chopping the top bit off. + __ add(zeros_, zeros_, Operand(1)); + // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. + __ mov(source_, Operand(source_, LSL, zeros_)); + // Compute lower part of fraction (last 12 bits). + __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); + // And the top (top 20 bits). + __ orr(exponent, + exponent, + Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); + __ Ret(); +} + + +// See comment for class. +void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { + Label max_negative_int; + // the_int_ has the answer which is a signed int32 but not a Smi. + // We test for the special value that has a different exponent. This test + // has the neat side effect of setting the flags according to the sign. + STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); + __ cmp(the_int_, Operand(0x80000000u)); + __ b(eq, &max_negative_int); + // Set up the correct exponent in scratch_. All non-Smi int32s have the same. + // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). + uint32_t non_smi_exponent = + (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; + __ mov(scratch_, Operand(non_smi_exponent)); + // Set the sign bit in scratch_ if the value was negative. + __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); + // Subtract from 0 if the value was negative. + __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs); + // We should be masking the implict first digit of the mantissa away here, + // but it just ends up combining harmlessly with the last digit of the + // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get + // the most significant 1 to hit the last bit of the 12 bit sign and exponent. + ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); + const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; + __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); + __ str(scratch_, FieldMemOperand(the_heap_number_, + HeapNumber::kExponentOffset)); + __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); + __ str(scratch_, FieldMemOperand(the_heap_number_, + HeapNumber::kMantissaOffset)); + __ Ret(); + + __ bind(&max_negative_int); + // The max negative int32 is stored as a positive number in the mantissa of + // a double because it uses a sign bit instead of using two's complement. + // The actual mantissa bits stored are all 0 because the implicit most + // significant 1 bit is not stored. + non_smi_exponent += 1 << HeapNumber::kExponentShift; + __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); + __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); + __ mov(ip, Operand(0, RelocInfo::NONE)); + __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); + __ Ret(); +} + + +// Handle the case where the lhs and rhs are the same object. +// Equality is almost reflexive (everything but NaN), so this is a test +// for "identity and not NaN". 
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cc, + bool never_nan_nan) { + Label not_identical; + Label heap_number, return_equal; + __ cmp(r0, r1); + __ b(ne, &not_identical); + + // The two objects are identical. If we know that one of them isn't NaN then + // we now know they test equal. + if (cc != eq || !never_nan_nan) { + // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), + // so we do the second best thing - test it ourselves. + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. + if (cc == lt || cc == gt) { + __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); + __ b(ge, slow); + } else { + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + __ b(eq, &heap_number); + // Comparing JS objects with <=, >= is complicated. + if (cc != eq) { + __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); + __ b(ge, slow); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but + // (undefined <= undefined) == false! See ECMAScript 11.8.5. + if (cc == le || cc == ge) { + __ cmp(r4, Operand(ODDBALL_TYPE)); + __ b(ne, &return_equal); + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r2); + __ b(ne, &return_equal); + if (cc == le) { + // undefined <= undefined should fail. + __ mov(r0, Operand(GREATER)); + } else { + // undefined >= undefined should fail. + __ mov(r0, Operand(LESS)); + } + __ Ret(); + } + } + } + } + + __ bind(&return_equal); + if (cc == lt) { + __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. + } else if (cc == gt) { + __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. + } else { + __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. + } + __ Ret(); + + if (cc != eq || !never_nan_nan) { + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cc != lt && cc != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. + __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. + __ cmp(r3, Operand(-1)); + __ b(ne, &return_equal); + + // Shift out flag and all exponent bits, retaining only mantissa. + __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + // Or with all low-bits of mantissa. + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ orr(r0, r3, Operand(r2), SetCC); + // For equal we already have the right value in r0: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if + // not (it's a NaN). For <= and >= we need to load r0 with the failing + // value if it's a NaN. + if (cc != eq) { + // All-zero means Infinity means equal. + __ Ret(eq); + if (cc == le) { + __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. + } + } + __ Ret(); + } + // No fall through here. + } + + __ bind(&not_identical); +} + + +// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* lhs_not_nan, + Label* slow, + bool strict) { + ASSERT((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0))); + + Label rhs_is_smi; + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &rhs_is_smi); + + // Lhs is a Smi. Check whether the rhs is a heap number. + __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); + if (strict) { + // If rhs is not a number and lhs is a Smi then strict equality cannot + // succeed. Return non-equal + // If rhs is r0 then there is already a non zero value in it. + if (!rhs.is(r0)) { + __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); + } + __ Ret(ne); + } else { + // Smi compared non-strictly with a non-Smi non-heap-number. Call + // the runtime. + __ b(ne, slow); + } + + // Lhs is a smi, rhs is a number. + if (CpuFeatures::IsSupported(VFP3)) { + // Convert lhs to a double in d7. + CpuFeatures::Scope scope(VFP3); + __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); + // Load the double from rhs, tagged HeapNumber r0, to d6. + __ sub(r7, rhs, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); + } else { + __ push(lr); + // Convert lhs to a double in r2, r3. + __ mov(r7, Operand(lhs)); + ConvertToDoubleStub stub1(r3, r2, r7, r6); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + // Load rhs to a double in r0, r1. + __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); + __ pop(lr); + } + + // We now have both loaded as doubles but we can skip the lhs nan check + // since it's a smi. + __ jmp(lhs_not_nan); + + __ bind(&rhs_is_smi); + // Rhs is a smi. Check whether the non-smi lhs is a heap number. + __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); + if (strict) { + // If lhs is not a number and rhs is a smi then strict equality cannot + // succeed. Return non-equal. + // If lhs is r0 then there is already a non zero value in it. + if (!lhs.is(r0)) { + __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); + } + __ Ret(ne); + } else { + // Smi compared non-strictly with a non-smi non-heap-number. Call + // the runtime. + __ b(ne, slow); + } + + // Rhs is a smi, lhs is a heap number. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Load the double from lhs, tagged HeapNumber r1, to d7. + __ sub(r7, lhs, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); + // Convert rhs to a double in d6 . + __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); + } else { + __ push(lr); + // Load lhs to a double in r2, r3. + __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); + // Convert rhs to a double in r0, r1. + __ mov(r7, Operand(rhs)); + ConvertToDoubleStub stub2(r1, r0, r7, r6); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + // Fall through to both_loaded_as_doubles. +} + + +void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) { + bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); + Register rhs_exponent = exp_first ? r0 : r1; + Register lhs_exponent = exp_first ? r2 : r3; + Register rhs_mantissa = exp_first ? r1 : r0; + Register lhs_mantissa = exp_first ? r3 : r2; + Label one_is_nan, neither_is_nan; + + __ Sbfx(r4, + lhs_exponent, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. 
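Both the heap-number path in EmitIdenticalObjectComparison above and the EmitNanCheck code here rely on the same encoding fact: a double is NaN exactly when its 11 exponent bits are all ones and its 52 mantissa bits are not all zero. A standalone illustration follows (not part of the patch; the helper name is made up).

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// Illustrative check of the NaN encoding described above.
static bool IsNaNByBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  bool exponent_all_ones = ((bits >> 52) & 0x7FF) == 0x7FF;         // bits 52..62
  bool mantissa_nonzero = (bits & ((UINT64_C(1) << 52) - 1)) != 0;  // bits 0..51
  return exponent_all_ones && mantissa_nonzero;
}

int main() {
  assert(IsNaNByBits(std::numeric_limits<double>::quiet_NaN()));
  assert(!IsNaNByBits(std::numeric_limits<double>::infinity()));  // all-ones exponent, zero mantissa
  assert(!IsNaNByBits(1.5));
  return 0;
}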
+ __ cmp(r4, Operand(-1)); + __ b(ne, lhs_not_nan); + __ mov(r4, + Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), + SetCC); + __ b(ne, &one_is_nan); + __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE)); + __ b(ne, &one_is_nan); + + __ bind(lhs_not_nan); + __ Sbfx(r4, + rhs_exponent, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. + __ cmp(r4, Operand(-1)); + __ b(ne, &neither_is_nan); + __ mov(r4, + Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), + SetCC); + __ b(ne, &one_is_nan); + __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE)); + __ b(eq, &neither_is_nan); + + __ bind(&one_is_nan); + // NaN comparisons always fail. + // Load whatever we need in r0 to make the comparison fail. + if (cc == lt || cc == le) { + __ mov(r0, Operand(GREATER)); + } else { + __ mov(r0, Operand(LESS)); + } + __ Ret(); + + __ bind(&neither_is_nan); +} + + +// See comment at call site. +static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { + bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); + Register rhs_exponent = exp_first ? r0 : r1; + Register lhs_exponent = exp_first ? r2 : r3; + Register rhs_mantissa = exp_first ? r1 : r0; + Register lhs_mantissa = exp_first ? r3 : r2; + + // r0, r1, r2, r3 have the two doubles. Neither is a NaN. + if (cc == eq) { + // Doubles are not equal unless they have the same bit pattern. + // Exception: 0 and -0. + __ cmp(rhs_mantissa, Operand(lhs_mantissa)); + __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); + // Return non-zero if the numbers are unequal. + __ Ret(ne); + + __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); + // If exponents are equal then return 0. + __ Ret(eq); + + // Exponents are unequal. The only way we can return that the numbers + // are equal is if one is -0 and the other is 0. We already dealt + // with the case where both are -0 or both are 0. + // We start by seeing if the mantissas (that are equal) or the bottom + // 31 bits of the rhs exponent are non-zero. If so we return not + // equal. + __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC); + __ mov(r0, Operand(r4), LeaveCC, ne); + __ Ret(ne); + // Now they are equal if and only if the lhs exponent is zero in its + // low 31 bits. + __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); + __ Ret(); + } else { + // Call a native function to do a comparison between two non-NaNs. + // Call C routine that may not cause GC or other trouble. + __ push(lr); + __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments. + __ CallCFunction(ExternalReference::compare_doubles(), 4); + __ pop(pc); // Return. + } +} + + +// See comment at call site. +static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, + Register lhs, + Register rhs) { + ASSERT((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0))); + + // If either operand is a JSObject or an oddball value, then they are + // not equal since their pointers are different. + // There is no test for undetectability in strict equality. + STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + Label first_non_object; + // Get the type of the first operand into r2 and compare it with + // FIRST_JS_OBJECT_TYPE. 
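The equality path of EmitTwoNonNanDoubleComparison above can almost compare doubles by their raw words, except that +0 and -0 compare equal while differing in the sign bit, which is why the extra exponent and mantissa checks are needed there. A minimal standalone demonstration of that exception (illustrative only, not patch content):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double plus_zero = 0.0, minus_zero = -0.0;
  uint64_t a, b;
  std::memcpy(&a, &plus_zero, sizeof a);
  std::memcpy(&b, &minus_zero, sizeof b);
  assert(plus_zero == minus_zero);  // numerically equal...
  assert(a != b);                   // ...but the bit patterns differ in the sign bit
  return 0;
}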
+ __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE); + __ b(lt, &first_non_object); + + // Return non-zero (r0 is not zero) + Label return_not_equal; + __ bind(&return_not_equal); + __ Ret(); + + __ bind(&first_non_object); + // Check for oddballs: true, false, null, undefined. + __ cmp(r2, Operand(ODDBALL_TYPE)); + __ b(eq, &return_not_equal); + + __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE); + __ b(ge, &return_not_equal); + + // Check for oddballs: true, false, null, undefined. + __ cmp(r3, Operand(ODDBALL_TYPE)); + __ b(eq, &return_not_equal); + + // Now that we have the types we might as well check for symbol-symbol. + // Ensure that no non-strings have the symbol bit set. + STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); + STATIC_ASSERT(kSymbolTag != 0); + __ and_(r2, r2, Operand(r3)); + __ tst(r2, Operand(kIsSymbolMask)); + __ b(ne, &return_not_equal); +} + + +// See comment at call site. +static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* both_loaded_as_doubles, + Label* not_heap_numbers, + Label* slow) { + ASSERT((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0))); + + __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); + __ b(ne, not_heap_numbers); + __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); + __ cmp(r2, r3); + __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. + + // Both are heap numbers. Load them up then jump to the code we have + // for that. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ sub(r7, rhs, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); + __ sub(r7, lhs, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); + } else { + __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); + __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); + } + __ jmp(both_loaded_as_doubles); +} + + +// Fast negative check for symbol-to-symbol equality. +static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* possible_strings, + Label* not_both_strings) { + ASSERT((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0))); + + // r2 is object type of rhs. + // Ensure that no non-strings have the symbol bit set. + Label object_test; + STATIC_ASSERT(kSymbolTag != 0); + __ tst(r2, Operand(kIsNotStringMask)); + __ b(ne, &object_test); + __ tst(r2, Operand(kIsSymbolMask)); + __ b(eq, possible_strings); + __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); + __ b(ge, not_both_strings); + __ tst(r3, Operand(kIsSymbolMask)); + __ b(eq, possible_strings); + + // Both are symbols. We already checked they weren't the same pointer + // so they are not equal. + __ mov(r0, Operand(NOT_EQUAL)); + __ Ret(); + + __ bind(&object_test); + __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); + __ b(lt, not_both_strings); + __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE); + __ b(lt, not_both_strings); + // If both objects are undetectable, they are equal. Otherwise, they + // are not equal, since they are different objects and an object is not + // equal to undefined. 
+ __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset)); + __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset)); + __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset)); + __ and_(r0, r2, Operand(r3)); + __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); + __ eor(r0, r0, Operand(1 << Map::kIsUndetectable)); + __ Ret(); +} + + +void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + bool object_is_smi, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch3; + + // Load the number string cache. + __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); + // Divide length by two (length is a smi). + __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); + __ sub(mask, mask, Operand(1)); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. + Label is_smi; + Label load_result_from_cache; + if (!object_is_smi) { + __ BranchOnSmi(object, &is_smi); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ CheckMap(object, + scratch1, + Heap::kHeapNumberMapRootIndex, + not_found, + true); + + STATIC_ASSERT(8 == kDoubleSize); + __ add(scratch1, + object, + Operand(HeapNumber::kValueOffset - kHeapObjectTag)); + __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); + __ eor(scratch1, scratch1, Operand(scratch2)); + __ and_(scratch1, scratch1, Operand(mask)); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + __ add(scratch1, + number_string_cache, + Operand(scratch1, LSL, kPointerSizeLog2 + 1)); + + Register probe = mask; + __ ldr(probe, + FieldMemOperand(scratch1, FixedArray::kHeaderSize)); + __ BranchOnSmi(probe, not_found); + __ sub(scratch2, object, Operand(kHeapObjectTag)); + __ vldr(d0, scratch2, HeapNumber::kValueOffset); + __ sub(probe, probe, Operand(kHeapObjectTag)); + __ vldr(d1, probe, HeapNumber::kValueOffset); + __ vcmp(d0, d1); + __ vmrs(pc); + __ b(ne, not_found); // The cache did not contain this value. + __ b(&load_result_from_cache); + } else { + __ b(not_found); + } + } + + __ bind(&is_smi); + Register scratch = scratch1; + __ and_(scratch, mask, Operand(object, ASR, 1)); + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + __ add(scratch, + number_string_cache, + Operand(scratch, LSL, kPointerSizeLog2 + 1)); + + // Check if the entry is the smi we are looking for. + Register probe = mask; + __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + __ cmp(object, probe); + __ b(ne, not_found); + + // Get the result from the cache. 
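To recap the lookup above before the result is read out: the cache stores (number, string) pairs in adjacent slots, the mask is half the backing-array length minus one, smis hash to their own value, and doubles hash to the xor of the two 32-bit halves of their bit pattern. A hypothetical C++ sketch of that probe, with invented names and a plain array standing in for the FixedArray:

#include <cstdint>
#include <cstring>

// Hypothetical sketch, not V8 code: Entry, Probe and the sample cache below
// exist only to illustrate the hashing scheme.
struct Entry { double number; const char* string; };

const char* Probe(const Entry* cache, uint32_t mask, double value) {
  uint32_t halves[2];
  std::memcpy(halves, &value, sizeof halves);      // lower and upper 32-bit words
  const Entry& e = cache[(halves[0] ^ halves[1]) & mask];
  return e.number == value ? e.string : nullptr;   // a miss goes to the runtime in the stub
}

int main() {
  Entry cache[4] = {};
  cache[0] = {5.0, "5"};   // 5.0 is 0x40140000:00000000, so it hashes to slot 0 with mask 3
  return Probe(cache, 3, 5.0) ? 0 : 1;
}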
+ __ bind(&load_result_from_cache); + __ ldr(result, + FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); + __ IncrementCounter(&Counters::number_to_string_native, + 1, + scratch1, + scratch2); +} + + +void NumberToStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + __ ldr(r1, MemOperand(sp, 0)); + + // Generate code to lookup number in the number string cache. + GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); + __ add(sp, sp, Operand(1 * kPointerSize)); + __ Ret(); + + __ bind(&runtime); + // Handle number to string in the runtime system if not found in the cache. + __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); +} + + +void RecordWriteStub::Generate(MacroAssembler* masm) { + __ add(offset_, object_, Operand(offset_)); + __ RecordWriteHelper(object_, offset_, scratch_); + __ Ret(); +} + + +// On entry lhs_ and rhs_ are the values to be compared. +// On exit r0 is 0, positive or negative to indicate the result of +// the comparison. +void CompareStub::Generate(MacroAssembler* masm) { + ASSERT((lhs_.is(r0) && rhs_.is(r1)) || + (lhs_.is(r1) && rhs_.is(r0))); + + Label slow; // Call builtin. + Label not_smis, both_loaded_as_doubles, lhs_not_nan; + + // NOTICE! This code is only reached after a smi-fast-case check, so + // it is certain that at least one operand isn't a smi. + + // Handle the case where the objects are identical. Either returns the answer + // or goes to slow. Only falls through if the objects were not identical. + EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); + + // If either is a Smi (we know that not both are), then they can only + // be strictly equal if the other is a HeapNumber. + STATIC_ASSERT(kSmiTag == 0); + ASSERT_EQ(0, Smi::FromInt(0)); + __ and_(r2, lhs_, Operand(rhs_)); + __ tst(r2, Operand(kSmiTagMask)); + __ b(ne, &not_smis); + // One operand is a smi. EmitSmiNonsmiComparison generates code that can: + // 1) Return the answer. + // 2) Go to slow. + // 3) Fall through to both_loaded_as_doubles. + // 4) Jump to lhs_not_nan. + // In cases 3 and 4 we have found out we were dealing with a number-number + // comparison. If VFP3 is supported the double values of the numbers have + // been loaded into d7 and d6. Otherwise, the double values have been loaded + // into r0, r1, r2, and r3. + EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); + + __ bind(&both_loaded_as_doubles); + // The arguments have been converted to doubles and stored in d6 and d7, if + // VFP3 is supported, or in r0, r1, r2, and r3. + if (CpuFeatures::IsSupported(VFP3)) { + __ bind(&lhs_not_nan); + CpuFeatures::Scope scope(VFP3); + Label no_nan; + // ARMv7 VFP3 instructions to implement double precision comparison. + __ vcmp(d7, d6); + __ vmrs(pc); // Move vector status bits to normal status bits. + Label nan; + __ b(vs, &nan); + __ mov(r0, Operand(EQUAL), LeaveCC, eq); + __ mov(r0, Operand(LESS), LeaveCC, lt); + __ mov(r0, Operand(GREATER), LeaveCC, gt); + __ Ret(); + + __ bind(&nan); + // If one of the sides was a NaN then the v flag is set. Load r0 with + // whatever it takes to make the comparison fail, since comparisons with NaN + // always fail. + if (cc_ == lt || cc_ == le) { + __ mov(r0, Operand(GREATER)); + } else { + __ mov(r0, Operand(LESS)); + } + __ Ret(); + } else { + // Checks for NaN in the doubles we have loaded. Can return the answer or + // fall through if neither is a NaN. Also binds lhs_not_nan.
+ EmitNanCheck(masm, &lhs_not_nan, cc_); + // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the + // answer. Never falls through. + EmitTwoNonNanDoubleComparison(masm, cc_); + } + + __ bind(&not_smis); + // At this point we know we are dealing with two different objects, + // and neither of them is a Smi. The objects are in rhs_ and lhs_. + if (strict_) { + // This returns non-equal for some object types, or falls through if it + // was not lucky. + EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); + } + + Label check_for_symbols; + Label flat_string_check; + // Check for heap-number-heap-number comparison. Can jump to slow case, + // or load both doubles into r0, r1, r2, r3 and jump to the code that handles + // that case. If the inputs are not doubles then jumps to check_for_symbols. + // In this case r2 will contain the type of rhs_. Never falls through. + EmitCheckForTwoHeapNumbers(masm, + lhs_, + rhs_, + &both_loaded_as_doubles, + &check_for_symbols, + &flat_string_check); + + __ bind(&check_for_symbols); + // In the strict case the EmitStrictTwoHeapObjectCompare already took care of + // symbols. + if (cc_ == eq && !strict_) { + // Returns an answer for two symbols or two detectable objects. + // Otherwise jumps to string case or not both strings case. + // Assumes that r2 is the type of rhs_ on entry. + EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); + } + + // Check for both being sequential ASCII strings, and inline if that is the + // case. + __ bind(&flat_string_check); + + __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); + + __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); + StringCompareStub::GenerateCompareFlatAsciiStrings(masm, + lhs_, + rhs_, + r2, + r3, + r4, + r5); + // Never falls through to here. + + __ bind(&slow); + + __ Push(lhs_, rhs_); + // Figure out which native to call and setup the arguments. + Builtins::JavaScript native; + if (cc_ == eq) { + native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + } else { + native = Builtins::COMPARE; + int ncr; // NaN compare result + if (cc_ == lt || cc_ == le) { + ncr = GREATER; + } else { + ASSERT(cc_ == gt || cc_ == ge); // remaining cases + ncr = LESS; + } + __ mov(r0, Operand(Smi::FromInt(ncr))); + __ push(r0); + } + + // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) + // tagged as a small integer. + __ InvokeBuiltin(native, JUMP_JS); +} + + +// This stub does not handle the inlined cases (Smis, Booleans, undefined). +// The stub returns zero for false, and a non-zero value for true. +void ToBooleanStub::Generate(MacroAssembler* masm) { + Label false_result; + Label not_heap_number; + Register scratch = r7; + + // HeapNumber => false iff +0, -0, or NaN. + __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, ip); + __ b(&not_heap_number, ne); + + __ sub(ip, tos_, Operand(kHeapObjectTag)); + __ vldr(d1, ip, HeapNumber::kValueOffset); + __ vcmp(d1, 0.0); + __ vmrs(pc); + // "tos_" is a register, and contains a non zero value by default. + // Hence we only need to overwrite "tos_" with zero to return false for + // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. + __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO + __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN + __ Ret(); + + __ bind(&not_heap_number); + + // Check if the value is 'null'. + // 'null' => false.
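For the heap-number path just above, the rule being implemented is that a number converts to false exactly when it is +0, -0, or NaN. A tiny standalone restatement in portable C++ (illustrative, not part of the patch; the helper name is invented):

#include <cassert>
#include <limits>

// Sketch of the rule used above: a number is falsy only for +0, -0 and NaN.
static bool ToBooleanOfDouble(double d) {
  return d == d && d != 0.0;   // d == d filters NaN; -0.0 == 0.0 filters both zeros
}

int main() {
  assert(!ToBooleanOfDouble(0.0) && !ToBooleanOfDouble(-0.0));
  assert(!ToBooleanOfDouble(std::numeric_limits<double>::quiet_NaN()));
  assert(ToBooleanOfDouble(-1.5) && ToBooleanOfDouble(1e-300));
  return 0;
}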
+ __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(tos_, ip); + __ b(&false_result, eq); + + // It can be an undetectable object. + // Undetectable => false. + __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset)); + __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); + __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); + __ b(&false_result, eq); + + // JavaScript object => true. + __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE)); + // "tos_" is a register and contains a non-zero value. + // Hence we implicitly return true if the greater than + // condition is satisfied. + __ Ret(gt); + + // Check for string + __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE)); + // "tos_" is a register and contains a non-zero value. + // Hence we implicitly return true if the greater than + // condition is satisfied. + __ Ret(gt); + + // String value => false iff empty, i.e., length is zero + __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset)); + // If length is zero, "tos_" contains zero ==> false. + // If length is not zero, "tos_" contains a non-zero value ==> true. + __ Ret(); + + // Return 0 in "tos_" for false . + __ bind(&false_result); + __ mov(tos_, Operand(0, RelocInfo::NONE)); + __ Ret(); +} + + +// We fall into this code if the operands were Smis, but the result was +// not (eg. overflow). We branch into this code (to the not_smi label) if +// the operands were not both Smi. The operands are in r0 and r1. In order +// to call the C-implemented binary fp operation routines we need to end up +// with the double precision floating point operands in r0 and r1 (for the +// value in r1) and r2 and r3 (for the value in r0). +void GenericBinaryOpStub::HandleBinaryOpSlowCases( + MacroAssembler* masm, + Label* not_smi, + Register lhs, + Register rhs, + const Builtins::JavaScript& builtin) { + Label slow, slow_reverse, do_the_call; + bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; + + ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); + Register heap_number_map = r6; + + if (ShouldGenerateSmiCode()) { + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + // Smi-smi case (overflow). + // Since both are Smis there is no heap number to overwrite, so allocate. + // The new heap number is in r5. r3 and r7 are scratch. + __ AllocateHeapNumber( + r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); + + // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, + // using registers d7 and d6 for the double values. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); + __ vmov(s15, r7); + __ vcvt_f64_s32(d7, s15); + __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); + __ vmov(s13, r7); + __ vcvt_f64_s32(d6, s13); + if (!use_fp_registers) { + __ vmov(r2, r3, d7); + __ vmov(r0, r1, d6); + } + } else { + // Write Smi from rhs to r3 and r2 in double format. r9 is scratch. + __ mov(r7, Operand(rhs)); + ConvertToDoubleStub stub1(r3, r2, r7, r9); + __ push(lr); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + // Write Smi from lhs to r1 and r0 in double format. r9 is scratch. 
+ __ mov(r7, Operand(lhs)); + ConvertToDoubleStub stub2(r1, r0, r7, r9); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + __ jmp(&do_the_call); // Tail call. No return. + } + + // We branch here if at least one of r0 and r1 is not a Smi. + __ bind(not_smi); + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + // After this point we have the left hand side in r1 and the right hand side + // in r0. + if (lhs.is(r0)) { + __ Swap(r0, r1, ip); + } + + // The type transition also calculates the answer. + bool generate_code_to_calculate_answer = true; + + if (ShouldGenerateFPCode()) { + if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + GenerateTypeTransition(masm); // Tail call. + generate_code_to_calculate_answer = false; + break; + + default: + break; + } + } + + if (generate_code_to_calculate_answer) { + Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; + if (mode_ == NO_OVERWRITE) { + // In the case where there is no chance of an overwritable float we may + // as well do the allocation immediately while r0 and r1 are untouched. + __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); + } + + // Move r0 to a double in r2-r3. + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + if (mode_ == OVERWRITE_RIGHT) { + __ mov(r5, Operand(r0)); // Overwrite this heap number. + } + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Load the double from tagged HeapNumber r0 to d7. + __ sub(r7, r0, Operand(kHeapObjectTag)); + __ vldr(d7, r7, HeapNumber::kValueOffset); + } else { + // Calling convention says that second double is in r2 and r3. + __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); + } + __ jmp(&finished_loading_r0); + __ bind(&r0_is_smi); + if (mode_ == OVERWRITE_RIGHT) { + // We can't overwrite a Smi so get address of new heap number into r5. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + } + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Convert smi in r0 to double in d7. + __ mov(r7, Operand(r0, ASR, kSmiTagSize)); + __ vmov(s15, r7); + __ vcvt_f64_s32(d7, s15); + if (!use_fp_registers) { + __ vmov(r2, r3, d7); + } + } else { + // Write Smi from r0 to r3 and r2 in double format. + __ mov(r7, Operand(r0)); + ConvertToDoubleStub stub3(r3, r2, r7, r4); + __ push(lr); + __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + + // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. + // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. + Label r1_is_not_smi; + if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) { + __ tst(r1, Operand(kSmiTagMask)); + __ b(ne, &r1_is_not_smi); + GenerateTypeTransition(masm); // Tail call. + } + + __ bind(&finished_loading_r0); + + // Move r1 to a double in r0-r1. + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. 
+ __ bind(&r1_is_not_smi); + __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + if (mode_ == OVERWRITE_LEFT) { + __ mov(r5, Operand(r1)); // Overwrite this heap number. + } + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // Load the double from tagged HeapNumber r1 to d6. + __ sub(r7, r1, Operand(kHeapObjectTag)); + __ vldr(d6, r7, HeapNumber::kValueOffset); + } else { + // Calling convention says that first double is in r0 and r1. + __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); + } + __ jmp(&finished_loading_r1); + __ bind(&r1_is_smi); + if (mode_ == OVERWRITE_LEFT) { + // We can't overwrite a Smi so get address of new heap number into r5. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + } + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // Convert smi in r1 to double in d6. + __ mov(r7, Operand(r1, ASR, kSmiTagSize)); + __ vmov(s13, r7); + __ vcvt_f64_s32(d6, s13); + if (!use_fp_registers) { + __ vmov(r0, r1, d6); + } + } else { + // Write Smi from r1 to r1 and r0 in double format. + __ mov(r7, Operand(r1)); + ConvertToDoubleStub stub4(r1, r0, r7, r9); + __ push(lr); + __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + + __ bind(&finished_loading_r1); + } + + if (generate_code_to_calculate_answer || do_the_call.is_linked()) { + __ bind(&do_the_call); + // If we are inlining the operation using VFP3 instructions for + // add, subtract, multiply, or divide, the arguments are in d6 and d7. + if (use_fp_registers) { + CpuFeatures::Scope scope(VFP3); + // ARMv7 VFP3 instructions to implement + // double precision, add, subtract, multiply, divide. + + if (Token::MUL == op_) { + __ vmul(d5, d6, d7); + } else if (Token::DIV == op_) { + __ vdiv(d5, d6, d7); + } else if (Token::ADD == op_) { + __ vadd(d5, d6, d7); + } else if (Token::SUB == op_) { + __ vsub(d5, d6, d7); + } else { + UNREACHABLE(); + } + __ sub(r0, r5, Operand(kHeapObjectTag)); + __ vstr(d5, r0, HeapNumber::kValueOffset); + __ add(r0, r0, Operand(kHeapObjectTag)); + __ mov(pc, lr); + } else { + // If we did not inline the operation, then the arguments are in: + // r0: Left value (least significant part of mantissa). + // r1: Left value (sign, exponent, top of mantissa). + // r2: Right value (least significant part of mantissa). + // r3: Right value (sign, exponent, top of mantissa). + // r5: Address of heap number for result. + + __ push(lr); // For later. + __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. + // Call C routine that may not cause GC or other trouble. r5 is callee + // save. + __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); + // Store answer in the overwritable heap number. + #if !defined(USE_ARM_EABI) + // Double returned in fp coprocessor register 0 and 1, encoded as + // register cr8. Offsets must be divisible by 4 for coprocessor so we + // need to substract the tag from r5. + __ sub(r4, r5, Operand(kHeapObjectTag)); + __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); + #else + // Double returned in registers 0 and 1. + __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); + #endif + __ mov(r0, Operand(r5)); + // And we are done. 
+ __ pop(pc); + } + } + } + + if (!generate_code_to_calculate_answer && + !slow_reverse.is_linked() && + !slow.is_linked()) { + return; + } + + if (lhs.is(r0)) { + __ b(&slow); + __ bind(&slow_reverse); + __ Swap(r0, r1, ip); + } + + heap_number_map = no_reg; // Don't use this any more from here on. + + // We jump to here if something goes wrong (one param is not a number of any + // sort or new-space allocation fails). + __ bind(&slow); + + // Push arguments to the stack + __ Push(r1, r0); + + if (Token::ADD == op_) { + // Test for string arguments before calling runtime. + // r1 : first argument + // r0 : second argument + // sp[0] : second argument + // sp[4] : first argument + + Label not_strings, not_string1, string1, string1_smi2; + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &not_string1); + __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, &not_string1); + + // First argument is a string, test second. + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &string1_smi2); + __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, &string1); + + // First and second argument are strings. + StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + __ TailCallStub(&string_add_stub); + + __ bind(&string1_smi2); + // First argument is a string, second is a smi. Try to lookup the number + // string for the smi in the number string cache. + NumberToStringStub::GenerateLookupNumberStringCache( + masm, r0, r2, r4, r5, r6, true, &string1); + + // Replace second argument on stack and tailcall string add stub to make + // the result. + __ str(r2, MemOperand(sp, 0)); + __ TailCallStub(&string_add_stub); + + // Only first argument is a string. + __ bind(&string1); + __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS); + + // First argument was not a string, test second. + __ bind(&not_string1); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &not_strings); + __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, &not_strings); + + // Only second argument is a string. + __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); + + __ bind(&not_strings); + } + + __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. +} + + +// For bitwise ops where the inputs are not both Smis we here try to determine +// whether both inputs are either Smis or at least heap numbers that can be +// represented by a 32 bit signed value. We truncate towards zero as required +// by the ES spec. If this is the case we do the bitwise op and see if the +// result is a Smi. If so, great, otherwise we try to find a heap number to +// write the answer into (either by allocating or by overwriting). +// On entry the operands are in lhs and rhs. On exit the answer is in r0. +void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, + Register lhs, + Register rhs) { + Label slow, result_not_a_smi; + Label rhs_is_smi, lhs_is_smi; + Label done_checking_rhs, done_checking_lhs; + + Register heap_number_map = r6; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + + __ tst(lhs, Operand(kSmiTagMask)); + __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. + __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + __ ConvertToInt32(lhs, r3, r5, r4, &slow); + __ jmp(&done_checking_lhs); + __ bind(&lhs_is_smi); + __ mov(r3, Operand(lhs, ASR, 1)); + __ bind(&done_checking_lhs); + + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
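As the header comment above notes, this path only handles operands that already fit a signed 32-bit value and truncates toward zero; anything else falls back to the runtime. A minimal sketch of the truncation the stub relies on (the helper name is assumed, not patch content):

#include <cassert>
#include <cmath>
#include <cstdint>

// Truncate toward zero, as for bitwise operands that already fit int32 range;
// the cast is only defined when the truncated value actually fits.
static int32_t TruncateToInt32(double d) {
  return static_cast<int32_t>(std::trunc(d));
}

int main() {
  assert(TruncateToInt32(3.7) == 3);
  assert(TruncateToInt32(-3.7) == -3);   // toward zero, not toward negative infinity
  return 0;
}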
+ __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); + __ cmp(r4, heap_number_map); + __ b(ne, &slow); + __ ConvertToInt32(rhs, r2, r5, r4, &slow); + __ jmp(&done_checking_rhs); + __ bind(&rhs_is_smi); + __ mov(r2, Operand(rhs, ASR, 1)); + __ bind(&done_checking_rhs); + + ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); + + // r0 and r1: Original operands (Smi or heap numbers). + // r2 and r3: Signed int32 operands. + switch (op_) { + case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; + case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; + case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; + case Token::SAR: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, ASR, r2)); + break; + case Token::SHR: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSR, r2), SetCC); + // SHR is special because it is required to produce a positive answer. + // The code below for writing into heap numbers isn't capable of writing + // the register as an unsigned int so we go to slow case if we hit this + // case. + if (CpuFeatures::IsSupported(VFP3)) { + __ b(mi, &result_not_a_smi); + } else { + __ b(mi, &slow); + } + break; + case Token::SHL: + // Use only the 5 least significant bits of the shift count. + __ and_(r2, r2, Operand(0x1f)); + __ mov(r2, Operand(r3, LSL, r2)); + break; + default: UNREACHABLE(); + } + // check that the *signed* result fits in a smi + __ add(r3, r2, Operand(0x40000000), SetCC); + __ b(mi, &result_not_a_smi); + __ mov(r0, Operand(r2, LSL, kSmiTagSize)); + __ Ret(); + + Label have_to_allocate, got_a_heap_number; + __ bind(&result_not_a_smi); + switch (mode_) { + case OVERWRITE_RIGHT: { + __ tst(rhs, Operand(kSmiTagMask)); + __ b(eq, &have_to_allocate); + __ mov(r5, Operand(rhs)); + break; + } + case OVERWRITE_LEFT: { + __ tst(lhs, Operand(kSmiTagMask)); + __ b(eq, &have_to_allocate); + __ mov(r5, Operand(lhs)); + break; + } + case NO_OVERWRITE: { + // Get a new heap number in r5. r4 and r7 are scratch. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + } + default: break; + } + __ bind(&got_a_heap_number); + // r2: Answer as signed int32. + // r5: Heap number to write answer into. + + // Nothing can go wrong now, so move the heap number to r0, which is the + // result. + __ mov(r0, Operand(r5)); + + if (CpuFeatures::IsSupported(VFP3)) { + // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, r2); + if (op_ == Token::SHR) { + __ vcvt_f64_u32(d0, s0); + } else { + __ vcvt_f64_s32(d0, s0); + } + __ sub(r3, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r3, HeapNumber::kValueOffset); + __ Ret(); + } else { + // Tail call that writes the int32 in r2 to the heap number in r0, using + // r3 as scratch. r0 is preserved and returned. + WriteInt32ToHeapNumberStub stub(r2, r0, r3); + __ TailCallStub(&stub); + } + + if (mode_ != NO_OVERWRITE) { + __ bind(&have_to_allocate); + // Get a new heap number in r5. r4 and r7 are scratch. + __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); + __ jmp(&got_a_heap_number); + } + + // If all else failed then we go to the runtime system. + __ bind(&slow); + __ Push(lhs, rhs); // Restore stack. 
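The smi-range test used earlier in this function (add 0x40000000 and branch on the sign flag) works because a 32-bit signed value fits the 31-bit smi range exactly when that addition leaves the sign bit clear. A standalone check of the same trick (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// Sketch of the range check: equivalent to -2^30 <= v <= 2^30 - 1.
static bool FitsInSmi(int32_t v) {
  return (static_cast<uint32_t>(v) + 0x40000000u) < 0x80000000u;
}

int main() {
  assert(FitsInSmi(0x3FFFFFFF) && FitsInSmi(-0x40000000));
  assert(!FitsInSmi(0x40000000) && !FitsInSmi(-0x40000001));
  return 0;
}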
+ switch (op_) { + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_JS); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_JS); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_JS); + break; + default: + UNREACHABLE(); + } +} + + + + +// This function takes the known int in a register for the cases +// where it doesn't know a good trick, and may deliver +// a result that needs shifting. +static void MultiplyByKnownIntInStub( + MacroAssembler* masm, + Register result, + Register source, + Register known_int_register, // Smi tagged. + int known_int, + int* required_shift) { // Including Smi tag shift + switch (known_int) { + case 3: + __ add(result, source, Operand(source, LSL, 1)); + *required_shift = 1; + break; + case 5: + __ add(result, source, Operand(source, LSL, 2)); + *required_shift = 1; + break; + case 6: + __ add(result, source, Operand(source, LSL, 1)); + *required_shift = 2; + break; + case 7: + __ rsb(result, source, Operand(source, LSL, 3)); + *required_shift = 1; + break; + case 9: + __ add(result, source, Operand(source, LSL, 3)); + *required_shift = 1; + break; + case 10: + __ add(result, source, Operand(source, LSL, 2)); + *required_shift = 2; + break; + default: + ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. + __ mul(result, source, known_int_register); + *required_shift = 0; + } +} + + +// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3 +// trick. See http://en.wikipedia.org/wiki/Divisibility_rule +// Takes the sum of the digits base (mask + 1) repeatedly until we have a +// number from 0 to mask. On exit the 'eq' condition flags are set if the +// answer is exactly the mask. +void IntegerModStub::DigitSum(MacroAssembler* masm, + Register lhs, + int mask, + int shift, + Label* entry) { + ASSERT(mask > 0); + ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. + Label loop; + __ bind(&loop); + __ and_(ip, lhs, Operand(mask)); + __ add(lhs, ip, Operand(lhs, LSR, shift)); + __ bind(entry); + __ cmp(lhs, Operand(mask)); + __ b(gt, &loop); +} + + +void IntegerModStub::DigitSum(MacroAssembler* masm, + Register lhs, + Register scratch, + int mask, + int shift1, + int shift2, + Label* entry) { + ASSERT(mask > 0); + ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. + Label loop; + __ bind(&loop); + __ bic(scratch, lhs, Operand(mask)); + __ and_(ip, lhs, Operand(mask)); + __ add(lhs, ip, Operand(lhs, LSR, shift1)); + __ add(lhs, lhs, Operand(scratch, LSR, shift2)); + __ bind(entry); + __ cmp(lhs, Operand(mask)); + __ b(gt, &loop); +} + + +// Splits the number into two halves (bottom half has shift bits). The top +// half is subtracted from the bottom half. If the result is negative then +// rhs is added. 
+void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm, + Register lhs, + int shift, + int rhs) { + int mask = (1 << shift) - 1; + __ and_(ip, lhs, Operand(mask)); + __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC); + __ add(lhs, lhs, Operand(rhs), LeaveCC, mi); +} + + +void IntegerModStub::ModReduce(MacroAssembler* masm, + Register lhs, + int max, + int denominator) { + int limit = denominator; + while (limit * 2 <= max) limit *= 2; + while (limit >= denominator) { + __ cmp(lhs, Operand(limit)); + __ sub(lhs, lhs, Operand(limit), LeaveCC, ge); + limit >>= 1; + } +} + + +void IntegerModStub::ModAnswer(MacroAssembler* masm, + Register result, + Register shift_distance, + Register mask_bits, + Register sum_of_digits) { + __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance)); + __ Ret(); +} + + +// See comment for class. +void IntegerModStub::Generate(MacroAssembler* masm) { + __ mov(lhs_, Operand(lhs_, LSR, shift_distance_)); + __ bic(odd_number_, odd_number_, Operand(1)); + __ mov(odd_number_, Operand(odd_number_, LSL, 1)); + // We now have (odd_number_ - 1) * 2 in the register. + // Build a switch out of branches instead of data because it avoids + // having to teach the assembler about intra-code-object pointers + // that are not in relative branch instructions. + Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19; + Label mod21, mod23, mod25; + { Assembler::BlockConstPoolScope block_const_pool(masm); + __ add(pc, pc, Operand(odd_number_)); + // When you read pc it is always 8 ahead, but when you write it you always + // write the actual value. So we put in two nops to take up the slack. + __ nop(); + __ nop(); + __ b(&mod3); + __ b(&mod5); + __ b(&mod7); + __ b(&mod9); + __ b(&mod11); + __ b(&mod13); + __ b(&mod15); + __ b(&mod17); + __ b(&mod19); + __ b(&mod21); + __ b(&mod23); + __ b(&mod25); + } + + // For each denominator we find a multiple that is almost only ones + // when expressed in binary. Then we do the sum-of-digits trick for + // that number. If the multiple is not 1 then we have to do a little + // more work afterwards to get the answer into the 0-denominator-1 + // range. + DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11. + __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111. + ModGetInRangeBySubtraction(masm, lhs_, 2, 5); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111. + __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111. + ModGetInRangeBySubtraction(masm, lhs_, 3, 9); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111. + ModReduce(masm, lhs_, 0x3f, 11); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111. + ModReduce(masm, lhs_, 0xff, 13); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111. + __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111. 
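+ // lhs is now in [0, 255], and 255 is a multiple of 17, so lhs modulo 17
+ // is unchanged; the subtraction below folds it into the range 0-16.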
+ ModGetInRangeBySubtraction(masm, lhs_, 4, 17); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111. + ModReduce(masm, lhs_, 0xff, 19); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111. + ModReduce(masm, lhs_, 0x3f, 21); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101. + ModReduce(masm, lhs_, 0xff, 23); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); + + DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101. + ModReduce(masm, lhs_, 0x7f, 25); + ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); +} + + +void GenericBinaryOpStub::Generate(MacroAssembler* masm) { + // lhs_ : x + // rhs_ : y + // r0 : result + + Register result = r0; + Register lhs = lhs_; + Register rhs = rhs_; + + // This code can't cope with other register allocations yet. + ASSERT(result.is(r0) && + ((lhs.is(r0) && rhs.is(r1)) || + (lhs.is(r1) && rhs.is(r0)))); + + Register smi_test_reg = r7; + Register scratch = r9; + + // All ops need to know whether we are dealing with two Smis. Set up + // smi_test_reg to tell us that. + if (ShouldGenerateSmiCode()) { + __ orr(smi_test_reg, lhs, Operand(rhs)); + } + + switch (op_) { + case Token::ADD: { + Label not_smi; + // Fast path. + if (ShouldGenerateSmiCode()) { + STATIC_ASSERT(kSmiTag == 0); // Adjust code below. + __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, ¬_smi); + __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. + // Return if no overflow. + __ Ret(vc); + __ sub(r0, r0, Operand(r1)); // Revert optimistic add. + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::ADD); + break; + } + + case Token::SUB: { + Label not_smi; + // Fast path. + if (ShouldGenerateSmiCode()) { + STATIC_ASSERT(kSmiTag == 0); // Adjust code below. + __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, ¬_smi); + if (lhs.is(r1)) { + __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. + // Return if no overflow. + __ Ret(vc); + __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. + } else { + __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically. + // Return if no overflow. + __ Ret(vc); + __ add(r0, r0, Operand(r1)); // Revert optimistic subtract. + } + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::SUB); + break; + } + + case Token::MUL: { + Label not_smi, slow; + if (ShouldGenerateSmiCode()) { + STATIC_ASSERT(kSmiTag == 0); // adjust code below + __ tst(smi_test_reg, Operand(kSmiTagMask)); + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + __ b(ne, ¬_smi); + // Remove tag from one operand (but keep sign), so that result is Smi. + __ mov(ip, Operand(rhs, ASR, kSmiTagSize)); + // Do multiplication + // scratch = lower 32 bits of ip * lhs. + __ smull(scratch, scratch2, lhs, ip); + // Go slow on overflows (overflow bit is not set). + __ mov(ip, Operand(scratch, ASR, 31)); + // No overflow if higher 33 bits are identical. + __ cmp(ip, Operand(scratch2)); + __ b(ne, &slow); + // Go slow on zero result to handle -0. + __ tst(scratch, Operand(scratch)); + __ mov(result, Operand(scratch), LeaveCC, ne); + __ Ret(ne); + // We need -0 if we were multiplying a negative number with 0 to get 0. + // We know one of them was zero. 
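+ // One operand is the smi 0, so lhs + rhs is just the other operand; if
+ // that is non-negative (pl) the correct product is +0 and smi 0 can be
+ // returned, otherwise fall through to the slow case to produce -0.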
+ __ add(scratch2, rhs, Operand(lhs), SetCC); + __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl); + __ Ret(pl); // Return Smi 0 if the non-zero one was positive. + // Slow case. We fall through here if we multiplied a negative number + // with 0, because that would mean we should produce -0. + __ bind(&slow); + } + HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::MUL); + break; + } + + case Token::DIV: + case Token::MOD: { + Label not_smi; + if (ShouldGenerateSmiCode() && specialized_on_rhs_) { + Label lhs_is_unsuitable; + __ BranchOnNotSmi(lhs, ¬_smi); + if (IsPowerOf2(constant_rhs_)) { + if (op_ == Token::MOD) { + __ and_(rhs, + lhs, + Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), + SetCC); + // We now have the answer, but if the input was negative we also + // have the sign bit. Our work is done if the result is + // positive or zero: + if (!rhs.is(r0)) { + __ mov(r0, rhs, LeaveCC, pl); + } + __ Ret(pl); + // A mod of a negative left hand side must return a negative number. + // Unfortunately if the answer is 0 then we must return -0. And we + // already optimistically trashed rhs so we may need to restore it. + __ eor(rhs, rhs, Operand(0x80000000u), SetCC); + // Next two instructions are conditional on the answer being -0. + __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); + __ b(eq, &lhs_is_unsuitable); + // We need to subtract the dividend. Eg. -3 % 4 == -3. + __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); + } else { + ASSERT(op_ == Token::DIV); + __ tst(lhs, + Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); + __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder. + int shift = 0; + int d = constant_rhs_; + while ((d & 1) == 0) { + d >>= 1; + shift++; + } + __ mov(r0, Operand(lhs, LSR, shift)); + __ bic(r0, r0, Operand(kSmiTagMask)); + } + } else { + // Not a power of 2. + __ tst(lhs, Operand(0x80000000u)); + __ b(ne, &lhs_is_unsuitable); + // Find a fixed point reciprocal of the divisor so we can divide by + // multiplying. + double divisor = 1.0 / constant_rhs_; + int shift = 32; + double scale = 4294967296.0; // 1 << 32. + uint32_t mul; + // Maximise the precision of the fixed point reciprocal. + while (true) { + mul = static_cast(scale * divisor); + if (mul >= 0x7fffffff) break; + scale *= 2.0; + shift++; + } + mul++; + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + __ mov(scratch2, Operand(mul)); + __ umull(scratch, scratch2, scratch2, lhs); + __ mov(scratch2, Operand(scratch2, LSR, shift - 31)); + // scratch2 is lhs / rhs. scratch2 is not Smi tagged. + // rhs is still the known rhs. rhs is Smi tagged. + // lhs is still the unkown lhs. lhs is Smi tagged. + int required_scratch_shift = 0; // Including the Smi tag shift of 1. + // scratch = scratch2 * rhs. + MultiplyByKnownIntInStub(masm, + scratch, + scratch2, + rhs, + constant_rhs_, + &required_scratch_shift); + // scratch << required_scratch_shift is now the Smi tagged rhs * + // (lhs / rhs) where / indicates integer division. + if (op_ == Token::DIV) { + __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); + __ b(ne, &lhs_is_unsuitable); // There was a remainder. 
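+ // No remainder, so scratch2 holds the exact untagged quotient. For
+ // example, dividing smi 35 (raw 70) by constant_rhs_ 7 uses mul =
+ // 0x92492493 and shift = 34: high32(mul * 70) >> 3 = 5, and 5 * 7
+ // re-tagged matches the input, so smi 5 is returned below.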
+ __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); + } else { + ASSERT(op_ == Token::MOD); + __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); + } + } + __ Ret(); + __ bind(&lhs_is_unsuitable); + } else if (op_ == Token::MOD && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS) { + // Do generate a bit of smi code for modulus even though the default for + // modulus is not to do it, but as the ARM processor has no coprocessor + // support for modulus checking for smis makes sense. We can handle + // 1 to 25 times any power of 2. This covers over half the numbers from + // 1 to 100 including all of the first 25. (Actually the constants < 10 + // are handled above by reciprocal multiplication. We only get here for + // those cases if the right hand side is not a constant or for cases + // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod + // stub.) + Label slow; + Label not_power_of_2; + ASSERT(!ShouldGenerateSmiCode()); + STATIC_ASSERT(kSmiTag == 0); // Adjust code below. + // Check for two positive smis. + __ orr(smi_test_reg, lhs, Operand(rhs)); + __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); + __ b(ne, &slow); + // Check that rhs is a power of two and not zero. + Register mask_bits = r3; + __ sub(scratch, rhs, Operand(1), SetCC); + __ b(mi, &slow); + __ and_(mask_bits, rhs, Operand(scratch), SetCC); + __ b(ne, ¬_power_of_2); + // Calculate power of two modulus. + __ and_(result, lhs, Operand(scratch)); + __ Ret(); + + __ bind(¬_power_of_2); + __ eor(scratch, scratch, Operand(mask_bits)); + // At least two bits are set in the modulus. The high one(s) are in + // mask_bits and the low one is scratch + 1. + __ and_(mask_bits, scratch, Operand(lhs)); + Register shift_distance = scratch; + scratch = no_reg; + + // The rhs consists of a power of 2 multiplied by some odd number. + // The power-of-2 part we handle by putting the corresponding bits + // from the lhs in the mask_bits register, and the power in the + // shift_distance register. Shift distance is never 0 due to Smi + // tagging. + __ CountLeadingZeros(r4, shift_distance, shift_distance); + __ rsb(shift_distance, r4, Operand(32)); + + // Now we need to find out what the odd number is. The last bit is + // always 1. + Register odd_number = r4; + __ mov(odd_number, Operand(rhs, LSR, shift_distance)); + __ cmp(odd_number, Operand(25)); + __ b(gt, &slow); + + IntegerModStub stub( + result, shift_distance, odd_number, mask_bits, lhs, r5); + __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call. + + __ bind(&slow); + } + HandleBinaryOpSlowCases( + masm, + ¬_smi, + lhs, + rhs, + op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); + break; + } + + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SAR: + case Token::SHR: + case Token::SHL: { + Label slow; + STATIC_ASSERT(kSmiTag == 0); // adjust code below + __ tst(smi_test_reg, Operand(kSmiTagMask)); + __ b(ne, &slow); + Register scratch2 = smi_test_reg; + smi_test_reg = no_reg; + switch (op_) { + case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break; + case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break; + case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break; + case Token::SAR: + // Remove tags from right operand. + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(result, Operand(lhs, ASR, scratch2)); + // Smi tag result. 
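+ // lhs is shifted while still smi-tagged, so the result can be off only in
+ // its lowest bit; clearing the tag bit below yields the tagged answer.
+ // E.g. smi -5 (0xfffffff6) ASR 1 = 0xfffffffb, bic 1 -> 0xfffffffa = smi -3.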
+ __ bic(result, result, Operand(kSmiTagMask)); + break; + case Token::SHR: + // Remove tags from operands. We can't do this on a 31 bit number + // because then the 0s get shifted into bit 30 instead of bit 31. + __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(scratch, Operand(scratch, LSR, scratch2)); + // Unsigned shift is not allowed to produce a negative number, so + // check the sign bit and the sign bit after Smi tagging. + __ tst(scratch, Operand(0xc0000000)); + __ b(ne, &slow); + // Smi tag result. + __ mov(result, Operand(scratch, LSL, kSmiTagSize)); + break; + case Token::SHL: + // Remove tags from operands. + __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x + __ GetLeastBitsFromSmi(scratch2, rhs, 5); + __ mov(scratch, Operand(scratch, LSL, scratch2)); + // Check that the signed result fits in a Smi. + __ add(scratch2, scratch, Operand(0x40000000), SetCC); + __ b(mi, &slow); + __ mov(result, Operand(scratch, LSL, kSmiTagSize)); + break; + default: UNREACHABLE(); + } + __ Ret(); + __ bind(&slow); + HandleNonSmiBitwiseOp(masm, lhs, rhs); + break; + } + + default: UNREACHABLE(); + } + // This code should be unreachable. + __ stop("Unreachable"); + + // Generate an unreachable reference to the DEFAULT stub so that it can be + // found at the end of this stub when clearing ICs at GC. + // TODO(kaznacheev): Check performance impact and get rid of this. + if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { + GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); + __ CallStub(&uninit); + } +} + + +void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { + Label get_result; + + __ Push(r1, r0); + + __ mov(r2, Operand(Smi::FromInt(MinorKey()))); + __ mov(r1, Operand(Smi::FromInt(op_))); + __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); + __ Push(r2, r1, r0); + + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), + 5, + 1); +} + + +Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { + GenericBinaryOpStub stub(key, type_info); + return stub.GetCode(); +} + + +void TranscendentalCacheStub::Generate(MacroAssembler* masm) { + // Argument is a number and is on stack and in r0. + Label runtime_call; + Label input_not_smi; + Label loaded; + + if (CpuFeatures::IsSupported(VFP3)) { + // Load argument and check if it is a smi. + __ BranchOnNotSmi(r0, &input_not_smi); + + CpuFeatures::Scope scope(VFP3); + // Input is a smi. Convert to double and load the low and high words + // of the double into r2, r3. + __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); + __ b(&loaded); + + __ bind(&input_not_smi); + // Check if input is a HeapNumber. + __ CheckMap(r0, + r1, + Heap::kHeapNumberMapRootIndex, + &runtime_call, + true); + // Input is a HeapNumber. Load it to a double register and store the + // low and high words into r2, r3. + __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); + + __ bind(&loaded); + // r2 = low 32 bits of double value + // r3 = high 32 bits of double value + // Compute hash (the shifts are arithmetic): + // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); + __ eor(r1, r2, Operand(r3)); + __ eor(r1, r1, Operand(r1, ASR, 16)); + __ eor(r1, r1, Operand(r1, ASR, 8)); + ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); + __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1)); + + // r2 = low 32 bits of double value. + // r3 = high 32 bits of double value. 
+ // r1 = TranscendentalCache::hash(double value).
+ __ mov(r0,
+ Operand(ExternalReference::transcendental_cache_array_address()));
+ // r0 points to cache array.
+ __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
+ // r0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ b(eq, &runtime_call);
+
+#ifdef DEBUG
+ // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+ CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
+#endif
+
+ // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
+ __ add(r1, r1, Operand(r1, LSL, 1));
+ __ add(r0, r0, Operand(r1, LSL, 2));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ ldm(ia, r0, r4.bit() | r5.bit() | r6.bit());
+ __ cmp(r2, r4);
+ __ b(ne, &runtime_call);
+ __ cmp(r3, r5);
+ __ b(ne, &runtime_call);
+ // Cache hit. Load result, pop argument and return.
+ __ mov(r0, Operand(r6));
+ __ pop();
+ __ Ret();
+ }
+
+ __ bind(&runtime_call);
+ __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ switch (type_) {
+ // Add more cases when necessary.
+ case TranscendentalCache::SIN: return Runtime::kMath_sin;
+ case TranscendentalCache::COS: return Runtime::kMath_cos;
+ default:
+ UNIMPLEMENTED();
+ return Runtime::kAbort;
+ }
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Do tail-call to runtime routine. Runtime routines expect at least one
+ // argument, so give it a Smi.
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ push(r0);
+ __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+
+ __ StubReturn(1);
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
+
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &try_float);
+
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ if (negative_zero_ == kStrictNegativeZero) {
+ // If we have to check for zero, then we can check for the max negative
+ // smi while we are at it.
+ __ bic(ip, r0, Operand(0x80000000), SetCC);
+ __ b(eq, &slow);
+ __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+ __ StubReturn(1);
+ } else {
+ // The value of the expression is a smi and 0 is OK for -0. Try
+ // optimistic subtraction '0 - value'.
+ __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
+ __ StubReturn(1, vc);
+ // We don't have to reverse the optimistic neg since the only case
+ // where we fall through is the minimum negative Smi, which is the case
+ // where the neg leaves the register unchanged.
+ __ jmp(&slow); // Go slow on max negative Smi.
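+ // The smallest smi, -(1 << 30), has raw bit pattern 0x80000000, and
+ // 0 - 0x80000000 overflows back to 0x80000000, so r0 still holds the
+ // original operand when the slow case pushes it for the builtin.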
+ } + + __ bind(&try_float); + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r1, heap_number_map); + __ b(ne, &slow); + // r0 is a heap number. Get a new heap number in r1. + if (overwrite_ == UNARY_OVERWRITE) { + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. + __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + } else { + __ AllocateHeapNumber(r1, r2, r3, r6, &slow); + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); + __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. + __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); + __ mov(r0, Operand(r1)); + } + } else if (op_ == Token::BIT_NOT) { + // Check if the operand is a heap number. + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmp(r1, heap_number_map); + __ b(ne, &slow); + + // Convert the heap number is r0 to an untagged integer in r1. + __ ConvertToInt32(r0, r1, r2, r3, &slow); + + // Do the bitwise operation (move negated) and check if the result + // fits in a smi. + Label try_float; + __ mvn(r1, Operand(r1)); + __ add(r2, r1, Operand(0x40000000), SetCC); + __ b(mi, &try_float); + __ mov(r0, Operand(r1, LSL, kSmiTagSize)); + __ b(&done); + + __ bind(&try_float); + if (!overwrite_ == UNARY_OVERWRITE) { + // Allocate a fresh heap number, but don't overwrite r0 until + // we're sure we can do it without going through the slow case + // that needs the value in r0. + __ AllocateHeapNumber(r2, r3, r4, r6, &slow); + __ mov(r0, Operand(r2)); + } + + if (CpuFeatures::IsSupported(VFP3)) { + // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, r1); + __ vcvt_f64_s32(d0, s0); + __ sub(r2, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r2, HeapNumber::kValueOffset); + } else { + // WriteInt32ToHeapNumberStub does not trigger GC, so we do not + // have to set up a frame. + WriteInt32ToHeapNumberStub stub(r1, r0, r2); + __ push(lr); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + } else { + UNIMPLEMENTED(); + } + + __ bind(&done); + __ StubReturn(1); + + // Handle the slow case by jumping to the JavaScript builtin. + __ bind(&slow); + __ push(r0); + switch (op_) { + case Token::SUB: + __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); + break; + case Token::BIT_NOT: + __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); + break; + default: + UNREACHABLE(); + } +} + + +void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { + // r0 holds the exception. + + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // Drop the sp to the top of the handler. + __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); + __ ldr(sp, MemOperand(r3)); + + // Restore the next handler and frame pointer, discard handler state. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + __ pop(r2); + __ str(r2, MemOperand(r3)); + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. + + // Before returning we restore the context from the frame pointer if + // not NULL. 
The frame pointer is NULL in the exception handler of a + // JS entry frame. + __ cmp(fp, Operand(0, RelocInfo::NONE)); + // Set cp to NULL if fp is NULL. + __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); + // Restore cp otherwise. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + __ mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + __ pop(pc); +} + + +void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, + UncatchableExceptionType type) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // Drop sp to the top stack handler. + __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); + __ ldr(sp, MemOperand(r3)); + + // Unwind the handlers until the ENTRY handler is found. + Label loop, done; + __ bind(&loop); + // Load the type of the current stack handler. + const int kStateOffset = StackHandlerConstants::kStateOffset; + __ ldr(r2, MemOperand(sp, kStateOffset)); + __ cmp(r2, Operand(StackHandler::ENTRY)); + __ b(eq, &done); + // Fetch the next handler in the list. + const int kNextOffset = StackHandlerConstants::kNextOffset; + __ ldr(sp, MemOperand(sp, kNextOffset)); + __ jmp(&loop); + __ bind(&done); + + // Set the top handler address to next handler past the current ENTRY handler. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + __ pop(r2); + __ str(r2, MemOperand(r3)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. + ExternalReference external_caught(Top::k_external_caught_exception_address); + __ mov(r0, Operand(false)); + __ mov(r2, Operand(external_caught)); + __ str(r0, MemOperand(r2)); + + // Set pending exception and r0 to out of memory exception. + Failure* out_of_memory = Failure::OutOfMemoryException(); + __ mov(r0, Operand(reinterpret_cast(out_of_memory))); + __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); + __ str(r0, MemOperand(r2)); + } + + // Stack layout at this point. See also StackHandlerConstants. + // sp -> state (ENTRY) + // fp + // lr + + // Discard handler state (r2 is not used) and restore frame pointer. + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); + __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. + // Before returning we restore the context from the frame pointer if + // not NULL. The frame pointer is NULL in the exception handler of a + // JS entry frame. + __ cmp(fp, Operand(0, RelocInfo::NONE)); + // Set cp to NULL if fp is NULL. + __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); + // Restore cp otherwise. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); +#ifdef DEBUG + if (FLAG_debug_code) { + __ mov(lr, Operand(pc)); + } +#endif + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + __ pop(pc); +} + + +void CEntryStub::GenerateCore(MacroAssembler* masm, + Label* throw_normal_exception, + Label* throw_termination_exception, + Label* throw_out_of_memory_exception, + bool do_gc, + bool always_allocate, + int frame_alignment_skew) { + // r0: result parameter for PerformGC, if any + // r4: number of arguments including receiver (C callee-saved) + // r5: pointer to builtin function (C callee-saved) + // r6: pointer to the first argument (C callee-saved) + + if (do_gc) { + // Passing r0. 
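+ // r0 still holds the failure returned by the previous attempt and is the
+ // single argument passed here; this is how the space-specific retries in
+ // CEntryStub::Generate tell the GC what failed to allocate.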
+ __ PrepareCallCFunction(1, r1); + __ CallCFunction(ExternalReference::perform_gc_function(), 1); + } + + ExternalReference scope_depth = + ExternalReference::heap_always_allocate_scope_depth(); + if (always_allocate) { + __ mov(r0, Operand(scope_depth)); + __ ldr(r1, MemOperand(r0)); + __ add(r1, r1, Operand(1)); + __ str(r1, MemOperand(r0)); + } + + // Call C built-in. + // r0 = argc, r1 = argv + __ mov(r0, Operand(r4)); + __ mov(r1, Operand(r6)); + + int frame_alignment = MacroAssembler::ActivationFrameAlignment(); + int frame_alignment_mask = frame_alignment - 1; +#if defined(V8_HOST_ARCH_ARM) + if (FLAG_debug_code) { + if (frame_alignment > kPointerSize) { + Label alignment_as_expected; + ASSERT(IsPowerOf2(frame_alignment)); + __ sub(r2, sp, Operand(frame_alignment_skew)); + __ tst(r2, Operand(frame_alignment_mask)); + __ b(eq, &alignment_as_expected); + // Don't use Check here, as it will call Runtime_Abort re-entering here. + __ stop("Unexpected alignment"); + __ bind(&alignment_as_expected); + } + } +#endif + + // Just before the call (jump) below lr is pushed, so the actual alignment is + // adding one to the current skew. + int alignment_before_call = + (frame_alignment_skew + kPointerSize) & frame_alignment_mask; + if (alignment_before_call > 0) { + // Push until the alignment before the call is met. + __ mov(r2, Operand(0, RelocInfo::NONE)); + for (int i = alignment_before_call; + (i & frame_alignment_mask) != 0; + i += kPointerSize) { + __ push(r2); + } + } + + // TODO(1242173): To let the GC traverse the return address of the exit + // frames, we need to know where the return address is. Right now, + // we push it on the stack to be able to find it again, but we never + // restore from it in case of changes, which makes it impossible to + // support moving the C entry code stub. This should be fixed, but currently + // this is OK because the CEntryStub gets generated so early in the V8 boot + // sequence that it is not moving ever. + masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4 + masm->push(lr); + masm->Jump(r5); + + // Restore sp back to before aligning the stack. + if (alignment_before_call > 0) { + __ add(sp, sp, Operand(alignment_before_call)); + } + + if (always_allocate) { + // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 + // though (contain the result). + __ mov(r2, Operand(scope_depth)); + __ ldr(r3, MemOperand(r2)); + __ sub(r3, r3, Operand(1)); + __ str(r3, MemOperand(r2)); + } + + // check for failure result + Label failure_returned; + STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); + // Lower 2 bits of r2 are 0 iff r0 has failure tag. + __ add(r2, r0, Operand(1)); + __ tst(r2, Operand(kFailureTagMask)); + __ b(eq, &failure_returned); + + // Exit C frame and return. + // r0:r1: result + // sp: stack pointer + // fp: frame pointer + __ LeaveExitFrame(); + + // check if we should retry or throw exception + Label retry; + __ bind(&failure_returned); + STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); + __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); + __ b(eq, &retry); + + // Special handling of out of memory exceptions. + Failure* out_of_memory = Failure::OutOfMemoryException(); + __ cmp(r0, Operand(reinterpret_cast(out_of_memory))); + __ b(eq, throw_out_of_memory_exception); + + // Retrieve the pending exception and clear the variable. 
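+ // "Clearing" means writing the hole value back into the slot: r3 is
+ // loaded with the hole, r0 with the pending exception, and r3 is then
+ // stored over the pending exception.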
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location())); + __ ldr(r3, MemOperand(ip)); + __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); + __ ldr(r0, MemOperand(ip)); + __ str(r3, MemOperand(ip)); + + // Special handling of termination exceptions which are uncatchable + // by javascript code. + __ cmp(r0, Operand(Factory::termination_exception())); + __ b(eq, throw_termination_exception); + + // Handle normal exception. + __ jmp(throw_normal_exception); + + __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying +} + + +void CEntryStub::Generate(MacroAssembler* masm) { + // Called from JavaScript; parameters are on stack as if calling JS function + // r0: number of arguments including receiver + // r1: pointer to builtin function + // fp: frame pointer (restored after C call) + // sp: stack pointer (restored as callee's sp after C call) + // cp: current context (C callee-saved) + + // Result returned in r0 or r0+r1 by default. + + // NOTE: Invocations of builtins may return failure objects + // instead of a proper result. The builtin entry handles + // this by performing a garbage collection and retrying the + // builtin once. + + // Enter the exit frame that transitions from JavaScript to C++. + __ EnterExitFrame(); + + // r4: number of arguments (C callee-saved) + // r5: pointer to builtin function (C callee-saved) + // r6: pointer to first argument (C callee-saved) + + Label throw_normal_exception; + Label throw_termination_exception; + Label throw_out_of_memory_exception; + + // Call into the runtime system. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + false, + false, + -kPointerSize); + + // Do space-specific GC and retry runtime call. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + false, + 0); + + // Do full GC and retry runtime call one final time. + Failure* failure = Failure::InternalError(); + __ mov(r0, Operand(reinterpret_cast(failure))); + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + true, + kPointerSize); + + __ bind(&throw_out_of_memory_exception); + GenerateThrowUncatchable(masm, OUT_OF_MEMORY); + + __ bind(&throw_termination_exception); + GenerateThrowUncatchable(masm, TERMINATION); + + __ bind(&throw_normal_exception); + GenerateThrowTOS(masm); +} + + +void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { + // r0: code entry + // r1: function + // r2: receiver + // r3: argc + // [sp+0]: argv + + Label invoke, exit; + + // Called from C, so do not pop argc and args on exit (preserve sp) + // No need to save register-passed args + // Save callee-saved registers (incl. cp and fp), sp, and lr + __ stm(db_w, sp, kCalleeSaved | lr.bit()); + + // Get address of argv, see stm above. + // r0: code entry + // r1: function + // r2: receiver + // r3: argc + __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv + + // Push a frame with special values setup to mark it as an entry frame. + // r0: code entry + // r1: function + // r2: receiver + // r3: argc + // r4: argv + __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. + int marker = is_construct ? 
StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; + __ mov(r7, Operand(Smi::FromInt(marker))); + __ mov(r6, Operand(Smi::FromInt(marker))); + __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address))); + __ ldr(r5, MemOperand(r5)); + __ Push(r8, r7, r6, r5); + + // Setup frame pointer for the frame to be pushed. + __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); + + // Call a faked try-block that does the invoke. + __ bl(&invoke); + + // Caught exception: Store result (exception) in the pending + // exception field in the JSEnv and return a failure sentinel. + // Coming in here the fp will be invalid because the PushTryHandler below + // sets it to 0 to signal the existence of the JSEntry frame. + __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); + __ str(r0, MemOperand(ip)); + __ mov(r0, Operand(reinterpret_cast(Failure::Exception()))); + __ b(&exit); + + // Invoke: Link this frame into the handler chain. + __ bind(&invoke); + // Must preserve r0-r4, r5-r7 are available. + __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); + // If an exception not caught by another handler occurs, this handler + // returns control to the code after the bl(&invoke) above, which + // restores all kCalleeSaved registers (including cp and fp) to their + // saved values before returning a failure to C. + + // Clear any pending exceptions. + __ mov(ip, Operand(ExternalReference::the_hole_value_location())); + __ ldr(r5, MemOperand(ip)); + __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); + __ str(r5, MemOperand(ip)); + + // Invoke the function by calling through JS entry trampoline builtin. + // Notice that we cannot store a reference to the trampoline code directly in + // this stub, because runtime stubs are not traversed when doing GC. + + // Expected registers by Builtins::JSEntryTrampoline + // r0: code entry + // r1: function + // r2: receiver + // r3: argc + // r4: argv + if (is_construct) { + ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); + __ mov(ip, Operand(construct_entry)); + } else { + ExternalReference entry(Builtins::JSEntryTrampoline); + __ mov(ip, Operand(entry)); + } + __ ldr(ip, MemOperand(ip)); // deref address + + // Branch and link to JSEntryTrampoline. We don't use the double underscore + // macro for the add instruction because we don't want the coverage tool + // inserting instructions here after we read the pc. + __ mov(lr, Operand(pc)); + masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // Unlink this frame from the handler chain. When reading the + // address of the next handler, there is no need to use the address + // displacement since the current stack pointer (sp) points directly + // to the stack handler. + __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); + __ mov(ip, Operand(ExternalReference(Top::k_handler_address))); + __ str(r3, MemOperand(ip)); + // No need to restore registers + __ add(sp, sp, Operand(StackHandlerConstants::kSize)); + + + __ bind(&exit); // r0 holds result + // Restore the top frame descriptors from the stack. + __ pop(r3); + __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); + __ str(r3, MemOperand(ip)); + + // Reset the stack to the callee saved registers. + __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); + + // Restore callee-saved registers and return. 
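+ // The ldm below restores the callee-saved registers pushed in the
+ // prologue and loads pc in the same instruction, which is what returns
+ // control to the C caller.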
+#ifdef DEBUG + if (FLAG_debug_code) { + __ mov(lr, Operand(pc)); + } +#endif + __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); +} + + +// This stub performs an instanceof, calling the builtin function if +// necessary. Uses r1 for the object, r0 for the function that it may +// be an instance of (these are fetched from the stack). +void InstanceofStub::Generate(MacroAssembler* masm) { + // Get the object - slow case for smis (we may need to throw an exception + // depending on the rhs). + Label slow, loop, is_instance, is_not_instance; + __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); + __ BranchOnSmi(r0, &slow); + + // Check that the left hand is a JS object and put map in r3. + __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE); + __ b(lt, &slow); + __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE)); + __ b(gt, &slow); + + // Get the prototype of the function (r4 is result, r2 is scratch). + __ ldr(r1, MemOperand(sp, 0)); + // r1 is function, r3 is map. + + // Look up the function and the map in the instanceof cache. + Label miss; + __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); + __ cmp(r1, ip); + __ b(ne, &miss); + __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); + __ cmp(r3, ip); + __ b(ne, &miss); + __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + __ pop(); + __ pop(); + __ mov(pc, Operand(lr)); + + __ bind(&miss); + __ TryGetFunctionPrototype(r1, r4, r2, &slow); + + // Check that the function prototype is a JS object. + __ BranchOnSmi(r4, &slow); + __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE); + __ b(lt, &slow); + __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE)); + __ b(gt, &slow); + + __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex); + __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex); + + // Register mapping: r3 is object map and r4 is function prototype. + // Get prototype of object into r2. + __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset)); + + // Loop through the prototype chain looking for the function prototype. + __ bind(&loop); + __ cmp(r2, Operand(r4)); + __ b(eq, &is_instance); + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(r2, ip); + __ b(eq, &is_not_instance); + __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); + __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset)); + __ jmp(&loop); + + __ bind(&is_instance); + __ mov(r0, Operand(Smi::FromInt(0))); + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + __ pop(); + __ pop(); + __ mov(pc, Operand(lr)); // Return. + + __ bind(&is_not_instance); + __ mov(r0, Operand(Smi::FromInt(1))); + __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); + __ pop(); + __ pop(); + __ mov(pc, Operand(lr)); // Return. + + // Slow-case. Tail call builtin. + __ bind(&slow); + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); +} + + +void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { + // The displacement is the offset of the last parameter (if any) + // relative to the frame pointer. + static const int kDisplacement = + StandardFrameConstants::kCallerSPOffset - kPointerSize; + + // Check that the key is a smi. + Label slow; + __ BranchOnNotSmi(r1, &slow); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor; + __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); + __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ b(eq, &adaptor); + + // Check index against formal parameters count limit passed in + // through register r0. 
Use unsigned comparison to get negative + // check for free. + __ cmp(r1, r0); + __ b(cs, &slow); + + // Read the argument from the stack and return it. + __ sub(r3, r0, r1); + __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(r0, MemOperand(r3, kDisplacement)); + __ Jump(lr); + + // Arguments adaptor case: Check index against actual arguments + // limit found in the arguments adaptor frame. Use unsigned + // comparison to get negative check for free. + __ bind(&adaptor); + __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ cmp(r1, r0); + __ b(cs, &slow); + + // Read the argument from the adaptor frame and return it. + __ sub(r3, r0, r1); + __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(r0, MemOperand(r3, kDisplacement)); + __ Jump(lr); + + // Slow-case: Handle non-smi or out-of-bounds access to arguments + // by calling the runtime system. + __ bind(&slow); + __ push(r1); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); +} + + +void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { + // sp[0] : number of parameters + // sp[4] : receiver displacement + // sp[8] : function + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor_frame, try_allocate, runtime; + __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); + __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ b(eq, &adaptor_frame); + + // Get the length from the frame. + __ ldr(r1, MemOperand(sp, 0)); + __ b(&try_allocate); + + // Patch the arguments.length and the parameters pointer. + __ bind(&adaptor_frame); + __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ str(r1, MemOperand(sp, 0)); + __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); + __ str(r3, MemOperand(sp, 1 * kPointerSize)); + + // Try the new space allocation. Start out with computing the size + // of the arguments object and the elements array in words. + Label add_arguments_object; + __ bind(&try_allocate); + __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ b(eq, &add_arguments_object); + __ mov(r1, Operand(r1, LSR, kSmiTagSize)); + __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); + __ bind(&add_arguments_object); + __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize)); + + // Do the allocation of both objects in one go. + __ AllocateInNewSpace( + r1, + r0, + r2, + r3, + &runtime, + static_cast(TAG_OBJECT | SIZE_IN_WORDS)); + + // Get the arguments boilerplate from the current (global) context. + int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); + __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset)); + __ ldr(r4, MemOperand(r4, offset)); + + // Copy the JS object part. + __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); + + // Setup the callee in-object property. + STATIC_ASSERT(Heap::arguments_callee_index == 0); + __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); + __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize)); + + // Get the length (smi tagged) and set that as an in-object property too. 
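+ // The length, already patched above when the caller went through an
+ // arguments adaptor frame, is still at sp[0].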
+ STATIC_ASSERT(Heap::arguments_length_index == 1); + __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); + __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize)); + + // If there are no actual arguments, we're done. + Label done; + __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ b(eq, &done); + + // Get the parameters pointer from the stack. + __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); + + // Setup the elements pointer in the allocated arguments object and + // initialize the header in the elements fixed array. + __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); + __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); + __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); + __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); + __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); + __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop. + + // Copy the fixed array slots. + Label loop; + // Setup r4 to point to the first array slot. + __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); + __ bind(&loop); + // Pre-decrement r2 with kPointerSize on each iteration. + // Pre-decrement in order to skip receiver. + __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); + // Post-increment r4 with kPointerSize on each iteration. + __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); + __ sub(r1, r1, Operand(1)); + __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ b(ne, &loop); + + // Return and remove the on-stack parameters. + __ bind(&done); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); +} + + +void RegExpExecStub::Generate(MacroAssembler* masm) { + // Just jump directly to runtime if native RegExp is not selected at compile + // time or if regexp entry in generated code is turned off runtime switch or + // at compilation. +#ifdef V8_INTERPRETED_REGEXP + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#else // V8_INTERPRETED_REGEXP + if (!FLAG_regexp_entry_native) { + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + return; + } + + // Stack frame on entry. + // sp[0]: last_match_info (expected JSArray) + // sp[4]: previous index + // sp[8]: subject string + // sp[12]: JSRegExp object + + static const int kLastMatchInfoOffset = 0 * kPointerSize; + static const int kPreviousIndexOffset = 1 * kPointerSize; + static const int kSubjectOffset = 2 * kPointerSize; + static const int kJSRegExpOffset = 3 * kPointerSize; + + Label runtime, invoke_regexp; + + // Allocation of registers for this function. These are in callee save + // registers and will be preserved by the call to the native RegExp code, as + // this code is called using the normal C calling convention. When calling + // directly from generated code the native RegExp code will not do a GC and + // therefore the content of these registers are safe to use after the call. + Register subject = r4; + Register regexp_data = r5; + Register last_match_info_elements = r6; + + // Ensure that a RegExp stack is allocated. 
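+ // A recorded size of zero means the backtracking stack has not been
+ // allocated yet, so take the runtime path instead of the native code.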
+ ExternalReference address_of_regexp_stack_memory_address = + ExternalReference::address_of_regexp_stack_memory_address(); + ExternalReference address_of_regexp_stack_memory_size = + ExternalReference::address_of_regexp_stack_memory_size(); + __ mov(r0, Operand(address_of_regexp_stack_memory_size)); + __ ldr(r0, MemOperand(r0, 0)); + __ tst(r0, Operand(r0)); + __ b(eq, &runtime); + + // Check that the first argument is a JSRegExp object. + __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &runtime); + __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); + __ b(ne, &runtime); + + // Check that the RegExp has been compiled (data contains a fixed array). + __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); + if (FLAG_debug_code) { + __ tst(regexp_data, Operand(kSmiTagMask)); + __ Check(nz, "Unexpected type for RegExp data, FixedArray expected"); + __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); + __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); + } + + // regexp_data: RegExp data (FixedArray) + // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. + __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); + __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); + __ b(ne, &runtime); + + // regexp_data: RegExp data (FixedArray) + // Check that the number of captures fit in the static offsets vector buffer. + __ ldr(r2, + FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. This + // uses the asumption that smis are 2 * their untagged value. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(r2, r2, Operand(2)); // r2 was a smi. + // Check that the static offsets vector buffer is large enough. + __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize)); + __ b(hi, &runtime); + + // r2: Number of capture registers + // regexp_data: RegExp data (FixedArray) + // Check that the second argument is a string. + __ ldr(subject, MemOperand(sp, kSubjectOffset)); + __ tst(subject, Operand(kSmiTagMask)); + __ b(eq, &runtime); + Condition is_string = masm->IsObjectStringType(subject, r0); + __ b(NegateCondition(is_string), &runtime); + // Get the length of the string to r3. + __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); + + // r2: Number of capture registers + // r3: Length of subject string as a smi + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check that the third argument is a positive smi less than the subject + // string length. A negative value will be greater (unsigned comparison). + __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); + __ tst(r0, Operand(kSmiTagMask)); + __ b(ne, &runtime); + __ cmp(r3, Operand(r0)); + __ b(ls, &runtime); + + // r2: Number of capture registers + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check that the fourth object is a JSArray object. + __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &runtime); + __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); + __ b(ne, &runtime); + // Check that the JSArray is in fast case. 
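+ // "Fast case" means the elements backing store is a plain FixedArray
+ // rather than a dictionary, which is exactly what the map check below
+ // verifies.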
+ __ ldr(last_match_info_elements, + FieldMemOperand(r0, JSArray::kElementsOffset)); + __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(r0, ip); + __ b(ne, &runtime); + // Check that the last match info has space for the capture registers and the + // additional information. + __ ldr(r0, + FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); + __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); + __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); + __ b(gt, &runtime); + + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check the representation and encoding of the subject string. + Label seq_string; + __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); + // First check for flat string. + __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask)); + STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); + __ b(eq, &seq_string); + + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Check for flat cons string. + // A flat cons string is a cons string where the second part is the empty + // string. In that case the subject string is just the first part of the cons + // string. Also in this case the first part of the cons string is known to be + // a sequential string or an external string. + STATIC_ASSERT(kExternalStringTag !=0); + STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); + __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag)); + __ b(ne, &runtime); + __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); + __ LoadRoot(r1, Heap::kEmptyStringRootIndex); + __ cmp(r0, r1); + __ b(ne, &runtime); + __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); + __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); + __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); + // Is first part a flat string? + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(r0, Operand(kStringRepresentationMask)); + __ b(nz, &runtime); + + __ bind(&seq_string); + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // r0: Instance type of subject string + STATIC_ASSERT(4 == kAsciiStringTag); + STATIC_ASSERT(kTwoByteStringTag == 0); + // Find the code object based on the assumptions above. + __ and_(r0, r0, Operand(kStringEncodingMask)); + __ mov(r3, Operand(r0, ASR, 2), SetCC); + __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); + __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); + + // Check that the irregexp code has been generated for the actual string + // encoding. If it has, the field contains a code object otherwise it contains + // the hole. + __ CompareObjectType(r7, r0, r0, CODE_TYPE); + __ b(ne, &runtime); + + // r3: encoding of subject string (1 if ascii, 0 if two_byte); + // r7: code + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // Load used arguments before starting to push arguments for call to native + // RegExp code to avoid handling changing stack height. + __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); + __ mov(r1, Operand(r1, ASR, kSmiTagSize)); + + // r1: previous index + // r3: encoding of subject string (1 if ascii, 0 if two_byte); + // r7: code + // subject: Subject string + // regexp_data: RegExp data (FixedArray) + // All checks done. Now push arguments for native regexp code. 
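+ // The native code is called as a C function with seven arguments: subject
+ // string, previous index, start and end of the input characters, the
+ // static offsets vector, the top of the backtracking stack and a
+ // direct-call flag; the first four go in r0-r3, the rest on the stack.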
+ __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); + + static const int kRegExpExecuteArguments = 7; + __ push(lr); + __ PrepareCallCFunction(kRegExpExecuteArguments, r0); + + // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. + __ mov(r0, Operand(1)); + __ str(r0, MemOperand(sp, 2 * kPointerSize)); + + // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. + __ mov(r0, Operand(address_of_regexp_stack_memory_address)); + __ ldr(r0, MemOperand(r0, 0)); + __ mov(r2, Operand(address_of_regexp_stack_memory_size)); + __ ldr(r2, MemOperand(r2, 0)); + __ add(r0, r0, Operand(r2)); + __ str(r0, MemOperand(sp, 1 * kPointerSize)); + + // Argument 5 (sp[0]): static offsets vector buffer. + __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); + __ str(r0, MemOperand(sp, 0 * kPointerSize)); + + // For arguments 4 and 3 get string length, calculate start of string data and + // calculate the shift of the index (0 for ASCII and 1 for two byte). + __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); + STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ eor(r3, r3, Operand(1)); + // Argument 4 (r3): End of string data + // Argument 3 (r2): Start of string data + __ add(r2, r9, Operand(r1, LSL, r3)); + __ add(r3, r9, Operand(r0, LSL, r3)); + + // Argument 2 (r1): Previous index. + // Already there + + // Argument 1 (r0): Subject string. + __ mov(r0, subject); + + // Locate the code entry and call it. + __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ CallCFunction(r7, kRegExpExecuteArguments); + __ pop(lr); + + // r0: result + // subject: subject string (callee saved) + // regexp_data: RegExp data (callee saved) + // last_match_info_elements: Last match info elements (callee saved) + + // Check the result. + Label success; + __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); + __ b(eq, &success); + Label failure; + __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); + __ b(eq, &failure); + __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); + // If not exception it can only be retry. Handle that in the runtime system. + __ b(ne, &runtime); + // Result must now be exception. If there is no pending exception already a + // stack overflow (on the backtrack stack) was detected in RegExp code but + // haven't created the exception yet. Handle that in the runtime system. + // TODO(592): Rerunning the RegExp to get the stack overflow exception. + __ mov(r0, Operand(ExternalReference::the_hole_value_location())); + __ ldr(r0, MemOperand(r0, 0)); + __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); + __ ldr(r1, MemOperand(r1, 0)); + __ cmp(r0, r1); + __ b(eq, &runtime); + __ bind(&failure); + // For failure and exception return null. + __ mov(r0, Operand(Factory::null_value())); + __ add(sp, sp, Operand(4 * kPointerSize)); + __ Ret(); + + // Process the result from the native regexp code. + __ bind(&success); + __ ldr(r1, + FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(r1, r1, Operand(2)); // r1 was a smi. + + // r1: number of capture registers + // r4: subject string + // Store the capture count. 
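+ // For example, a pattern with two capture groups needs six registers: a
+ // start/end offset pair for the whole match and one pair per group.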
+ __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi. + __ str(r2, FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastCaptureCountOffset)); + // Store last subject and last input. + __ mov(r3, last_match_info_elements); // Moved up to reduce latency. + __ str(subject, + FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastSubjectOffset)); + __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); + __ str(subject, + FieldMemOperand(last_match_info_elements, + RegExpImpl::kLastInputOffset)); + __ mov(r3, last_match_info_elements); + __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); + + // Get the static offsets vector filled by the native regexp code. + ExternalReference address_of_static_offsets_vector = + ExternalReference::address_of_static_offsets_vector(); + __ mov(r2, Operand(address_of_static_offsets_vector)); + + // r1: number of capture registers + // r2: offsets vector + Label next_capture, done; + // Capture register counter starts from number of capture registers and + // counts down until wraping after zero. + __ add(r0, + last_match_info_elements, + Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); + __ bind(&next_capture); + __ sub(r1, r1, Operand(1), SetCC); + __ b(mi, &done); + // Read the value from the static offsets vector buffer. + __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); + // Store the smi value in the last match info. + __ mov(r3, Operand(r3, LSL, kSmiTagSize)); + __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); + __ jmp(&next_capture); + __ bind(&done); + + // Return last match info. + __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); + __ add(sp, sp, Operand(4 * kPointerSize)); + __ Ret(); + + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#endif // V8_INTERPRETED_REGEXP +} + + +void CallFunctionStub::Generate(MacroAssembler* masm) { + Label slow; + + // If the receiver might be a value (string, number or boolean) check for this + // and box it if it is. + if (ReceiverMightBeValue()) { + // Get the receiver from the stack. + // function, receiver [, arguments] + Label receiver_is_value, receiver_is_js_object; + __ ldr(r1, MemOperand(sp, argc_ * kPointerSize)); + + // Check if receiver is a smi (which is a number value). + __ BranchOnSmi(r1, &receiver_is_value); + + // Check if the receiver is a valid JS object. + __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE); + __ b(ge, &receiver_is_js_object); + + // Call the runtime to box the value. + __ bind(&receiver_is_value); + __ EnterInternalFrame(); + __ push(r1); + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); + __ LeaveInternalFrame(); + __ str(r0, MemOperand(sp, argc_ * kPointerSize)); + + __ bind(&receiver_is_js_object); + } + + // Get the function to call from the stack. + // function, receiver [, arguments] + __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize)); + + // Check that the function is really a JavaScript function. + // r1: pushed function (to be verified) + __ BranchOnSmi(r1, &slow); + // Get the map of the function object. + __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); + __ b(ne, &slow); + + // Fast-case: Invoke the function now. + // r1: pushed function + ParameterCount actual(argc_); + __ InvokeFunction(r1, actual, JUMP_FUNCTION); + + // Slow-case: Non-function called. + __ bind(&slow); + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead + // of the original receiver from the call site). 
+  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
+  __ mov(r0, Operand(argc_));  // Set up the number of arguments.
+  __ mov(r2, Operand(0, RelocInfo::NONE));
+  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
+          RelocInfo::CODE_TARGET);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+         (lhs_.is(r1) && rhs_.is(r0)));
+
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+
+  const char* cc_name;
+  switch (cc_) {
+    case lt: cc_name = "LT"; break;
+    case gt: cc_name = "GT"; break;
+    case le: cc_name = "LE"; break;
+    case ge: cc_name = "GE"; break;
+    case eq: cc_name = "EQ"; break;
+    case ne: cc_name = "NE"; break;
+    default: cc_name = "UnknownCondition"; break;
+  }
+
+  const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
+  const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
+
+  const char* strict_name = "";
+  if (strict_ && (cc_ == eq || cc_ == ne)) {
+    strict_name = "_STRICT";
+  }
+
+  const char* never_nan_nan_name = "";
+  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
+    never_nan_nan_name = "_NO_NAN";
+  }
+
+  const char* include_number_compare_name = "";
+  if (!include_number_compare_) {
+    include_number_compare_name = "_NO_NUMBER";
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "CompareStub_%s%s%s%s%s%s",
+               cc_name,
+               lhs_name,
+               rhs_name,
+               strict_name,
+               never_nan_nan_name,
+               include_number_compare_name);
+  return name_;
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+  // stubs the never NaN NaN condition is only taken into account if the
+  // condition is equals.
+  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
+  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+         (lhs_.is(r1) && rhs_.is(r0)));
+  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+         | RegisterField::encode(lhs_.is(r0))
+         | StrictField::encode(strict_)
+         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+         | IncludeNumberCompareField::encode(include_number_compare_);
+}
+
+
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+  Label flat_string;
+  Label ascii_string;
+  Label got_char_code;
+
+  // If the receiver is a smi trigger the non-string case.
+  __ BranchOnSmi(object_, receiver_not_string_);
+
+  // Fetch the instance type of the receiver into result register.
+  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the non-string case.
+  __ tst(result_, Operand(kIsNotStringMask));
+  __ b(ne, receiver_not_string_);
+
+  // If the index is non-smi trigger the non-smi case.
+  __ BranchOnNotSmi(index_, &index_not_smi_);
+
+  // Put smi-tagged index into scratch register.
+  __ mov(scratch_, index_);
+  __ bind(&got_smi_index_);
+
+  // Check for index out of range.
+  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
+  __ cmp(ip, Operand(scratch_));
+  __ b(ls, index_out_of_range_);
+
+  // We need special handling for non-flat strings.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(result_, Operand(kStringRepresentationMask));
+  __ b(eq, &flat_string);
+
+  // Handle non-flat strings.
+ __ tst(result_, Operand(kIsConsStringMask)); + __ b(eq, &call_runtime_); + + // ConsString. + // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset)); + __ LoadRoot(ip, Heap::kEmptyStringRootIndex); + __ cmp(result_, Operand(ip)); + __ b(ne, &call_runtime_); + // Get the first of the two strings and load its instance type. + __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); + __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); + // If the first cons component is also non-flat, then go to runtime. + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(result_, Operand(kStringRepresentationMask)); + __ b(nz, &call_runtime_); + + // Check for 1-byte or 2-byte string. + __ bind(&flat_string); + STATIC_ASSERT(kAsciiStringTag != 0); + __ tst(result_, Operand(kStringEncodingMask)); + __ b(nz, &ascii_string); + + // 2-byte string. + // Load the 2-byte character code into the result register. We can + // add without shifting since the smi tag size is the log2 of the + // number of bytes in a two-byte character. + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); + __ add(scratch_, object_, Operand(scratch_)); + __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); + __ jmp(&got_char_code); + + // ASCII string. + // Load the byte into the result register. + __ bind(&ascii_string); + __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize)); + __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize)); + + __ bind(&got_char_code); + __ mov(result_, Operand(result_, LSL, kSmiTagSize)); + __ bind(&exit_); +} + + +void StringCharCodeAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharCodeAt slow case"); + + // Index is not a smi. + __ bind(&index_not_smi_); + // If index is a heap number, try converting it to an integer. + __ CheckMap(index_, + scratch_, + Heap::kHeapNumberMapRootIndex, + index_not_number_, + true); + call_helper.BeforeCall(masm); + __ Push(object_, index_); + __ push(index_); // Consumed by runtime conversion function. + if (index_flags_ == STRING_INDEX_IS_NUMBER) { + __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); + } else { + ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + // NumberToSmi discards numbers that are not exact integers. + __ CallRuntime(Runtime::kNumberToSmi, 1); + } + // Save the conversion result before the pop instructions below + // have a chance to overwrite it. + __ Move(scratch_, r0); + __ pop(index_); + __ pop(object_); + // Reload the instance type. + __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); + __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); + call_helper.AfterCall(masm); + // If index is still not a smi, it must be out of range. + __ BranchOnNotSmi(scratch_, index_out_of_range_); + // Otherwise, return to the fast path. + __ jmp(&got_smi_index_); + + // Call runtime. We get here when the receiver is a string and the + // index is a number, but the code of getting the actual character + // is too complex (e.g., when the string needs to be flattened). 
+ __ bind(&call_runtime_); + call_helper.BeforeCall(masm); + __ Push(object_, index_); + __ CallRuntime(Runtime::kStringCharCodeAt, 2); + __ Move(result_, r0); + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharCodeAt slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharFromCodeGenerator + +void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { + // Fast case of Heap::LookupSingleCharacterStringFromCode. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiShiftSize == 0); + ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + __ tst(code_, + Operand(kSmiTagMask | + ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + __ b(nz, &slow_case_); + + __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); + // At this point code register contains smi tagged ascii char code. + STATIC_ASSERT(kSmiTag == 0); + __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); + __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(result_, Operand(ip)); + __ b(eq, &slow_case_); + __ bind(&exit_); +} + + +void StringCharFromCodeGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharFromCode slow case"); + + __ bind(&slow_case_); + call_helper.BeforeCall(masm); + __ push(code_); + __ CallRuntime(Runtime::kCharFromCode, 1); + __ Move(result_, r0); + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharFromCode slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharAtGenerator + +void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { + char_code_at_generator_.GenerateFast(masm); + char_from_code_generator_.GenerateFast(masm); +} + + +void StringCharAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + char_code_at_generator_.GenerateSlow(masm, call_helper); + char_from_code_generator_.GenerateSlow(masm, call_helper); +} + + +class StringHelper : public AllStatic { + public: + // Generate code for copying characters using a simple loop. This should only + // be used in places where the number of characters is small and the + // additional setup and checking in GenerateCopyCharactersLong adds too much + // overhead. Copying of overlapping regions is not supported. + // Dest register ends at the position after the last character written. + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii); + + // Generate code for copying a large number of characters. This function + // is allowed to spend extra time setting up conditions to make copying + // faster. Copying of overlapping regions is not supported. + // Dest register ends at the position after the last character written. + static void GenerateCopyCharactersLong(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + int flags); + + + // Probe the symbol table for a two character string. If the string is + // not found by probing a jump to the label not_found is performed. This jump + // does not guarantee that the string is not in the symbol table. 
If the + // string is found the code falls through with the string in register r0. + // Contents of both c1 and c2 registers are modified. At the exit c1 is + // guaranteed to contain halfword with low and high bytes equal to + // initial contents of c1 and c2 respectively. + static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + Label* not_found); + + // Generate string hash. + static void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character); + + static void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character); + + static void GenerateHashGetHash(MacroAssembler* masm, + Register hash); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); +}; + + +void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii) { + Label loop; + Label done; + // This loop just copies one character at a time, as it is only used for very + // short strings. + if (!ascii) { + __ add(count, count, Operand(count), SetCC); + } else { + __ cmp(count, Operand(0, RelocInfo::NONE)); + } + __ b(eq, &done); + + __ bind(&loop); + __ ldrb(scratch, MemOperand(src, 1, PostIndex)); + // Perform sub between load and dependent store to get the load time to + // complete. + __ sub(count, count, Operand(1), SetCC); + __ strb(scratch, MemOperand(dest, 1, PostIndex)); + // last iteration. + __ b(gt, &loop); + + __ bind(&done); +} + + +enum CopyCharactersFlags { + COPY_ASCII = 1, + DEST_ALWAYS_ALIGNED = 2 +}; + + +void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Register scratch5, + int flags) { + bool ascii = (flags & COPY_ASCII) != 0; + bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; + + if (dest_always_aligned && FLAG_debug_code) { + // Check that destination is actually word aligned if the flag says + // that it is. + __ tst(dest, Operand(kPointerAlignmentMask)); + __ Check(eq, "Destination of copy not aligned."); + } + + const int kReadAlignment = 4; + const int kReadAlignmentMask = kReadAlignment - 1; + // Ensure that reading an entire aligned word containing the last character + // of a string will not read outside the allocated area (because we pad up + // to kObjectAlignment). + STATIC_ASSERT(kObjectAlignment >= kReadAlignment); + // Assumes word reads and writes are little endian. + // Nothing to do for zero characters. + Label done; + if (!ascii) { + __ add(count, count, Operand(count), SetCC); + } else { + __ cmp(count, Operand(0, RelocInfo::NONE)); + } + __ b(eq, &done); + + // Assume that you cannot read (or write) unaligned. + Label byte_loop; + // Must copy at least eight bytes, otherwise just do it one byte at a time. + __ cmp(count, Operand(8)); + __ add(count, dest, Operand(count)); + Register limit = count; // Read until src equals this. + __ b(lt, &byte_loop); + + if (!dest_always_aligned) { + // Align dest by byte copying. Copies between zero and three bytes. 
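// Editor's sketch (not part of the patch): GenerateCopyCharacters above is
// simply this byte-copy loop. For two-byte strings the caller's character
// count is doubled first (the "add count, count, count" in the !ascii case),
// so the loop always counts bytes.
static inline void CopyCharsBytewise(char* dest, const char* src, int byte_count) {
  while (byte_count-- > 0) {
    *dest++ = *src++;  // one byte per iteration, like ldrb/strb with post-index
  }
}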
+ __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC); + Label dest_aligned; + __ b(eq, &dest_aligned); + __ cmp(scratch4, Operand(2)); + __ ldrb(scratch1, MemOperand(src, 1, PostIndex)); + __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le); + __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt); + __ strb(scratch1, MemOperand(dest, 1, PostIndex)); + __ strb(scratch2, MemOperand(dest, 1, PostIndex), le); + __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt); + __ bind(&dest_aligned); + } + + Label simple_loop; + + __ sub(scratch4, dest, Operand(src)); + __ and_(scratch4, scratch4, Operand(0x03), SetCC); + __ b(eq, &simple_loop); + // Shift register is number of bits in a source word that + // must be combined with bits in the next source word in order + // to create a destination word. + + // Complex loop for src/dst that are not aligned the same way. + { + Label loop; + __ mov(scratch4, Operand(scratch4, LSL, 3)); + Register left_shift = scratch4; + __ and_(src, src, Operand(~3)); // Round down to load previous word. + __ ldr(scratch1, MemOperand(src, 4, PostIndex)); + // Store the "shift" most significant bits of scratch in the least + // signficant bits (i.e., shift down by (32-shift)). + __ rsb(scratch2, left_shift, Operand(32)); + Register right_shift = scratch2; + __ mov(scratch1, Operand(scratch1, LSR, right_shift)); + + __ bind(&loop); + __ ldr(scratch3, MemOperand(src, 4, PostIndex)); + __ sub(scratch5, limit, Operand(dest)); + __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); + __ str(scratch1, MemOperand(dest, 4, PostIndex)); + __ mov(scratch1, Operand(scratch3, LSR, right_shift)); + // Loop if four or more bytes left to copy. + // Compare to eight, because we did the subtract before increasing dst. + __ sub(scratch5, scratch5, Operand(8), SetCC); + __ b(ge, &loop); + } + // There is now between zero and three bytes left to copy (negative that + // number is in scratch5), and between one and three bytes already read into + // scratch1 (eight times that number in scratch4). We may have read past + // the end of the string, but because objects are aligned, we have not read + // past the end of the object. + // Find the minimum of remaining characters to move and preloaded characters + // and write those as bytes. + __ add(scratch5, scratch5, Operand(4), SetCC); + __ b(eq, &done); + __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); + // Move minimum of bytes read and bytes left to copy to scratch4. + __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); + // Between one and three (value in scratch5) characters already read into + // scratch ready to write. + __ cmp(scratch5, Operand(2)); + __ strb(scratch1, MemOperand(dest, 1, PostIndex)); + __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); + __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); + __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); + __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); + // Copy any remaining bytes. + __ b(&byte_loop); + + // Simple loop. + // Copy words from src to dst, until less than four bytes left. + // Both src and dest are word aligned. + __ bind(&simple_loop); + { + Label loop; + __ bind(&loop); + __ ldr(scratch1, MemOperand(src, 4, PostIndex)); + __ sub(scratch3, limit, Operand(dest)); + __ str(scratch1, MemOperand(dest, 4, PostIndex)); + // Compare to 8, not 4, because we do the substraction before increasing + // dest. + __ cmp(scratch3, Operand(8)); + __ b(ge, &loop); + } + + // Copy bytes from src to dst until dst hits limit. 
+  __ bind(&byte_loop);
+  __ cmp(dest, Operand(limit));
+  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
+  __ b(ge, &done);
+  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
+  __ b(&byte_loop);
+
+  __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                        Register c1,
+                                                        Register c2,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4,
+                                                        Register scratch5,
+                                                        Label* not_found) {
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits as such strings have a
+  // different hash algorithm. Don't try to look for these in the symbol table.
+  Label not_array_index;
+  __ sub(scratch, c1, Operand(static_cast<int>('0')));
+  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+  __ b(hi, &not_array_index);
+  __ sub(scratch, c2, Operand(static_cast<int>('0')));
+  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
+
+  // If either check failed, combine both characters into a single halfword.
+  // This is required by the contract of the method: code at the
+  // not_found branch expects this combination in the c1 register.
+  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
+  __ b(ls, not_found);
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  StringHelper::GenerateHashInit(masm, hash, c1);
+  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
+  StringHelper::GenerateHashGetHash(masm, hash);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string.
+
+  // Load symbol table
+  // Load address of first element of the symbol table.
+  Register symbol_table = c2;
+  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+
+  // Load undefined value
+  Register undefined = scratch4;
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+  __ mov(mask, Operand(mask, ASR, 1));
+  __ sub(mask, mask, Operand(1));
+
+  // Calculate untagged address of the first element of the symbol table.
+  Register first_symbol_table_element = symbol_table;
+  __ add(first_symbol_table_element, symbol_table,
+         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+
+  // Registers
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string
+  // mask:  capacity mask
+  // first_symbol_table_element: address of the first element of
+  //                             the symbol table
+  // scratch: -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes];
+  for (int i = 0; i < kProbes; i++) {
+    Register candidate = scratch5;  // Scratch register contains candidate.
+
+    // Calculate entry in symbol table.
+    if (i > 0) {
+      __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+    } else {
+      __ mov(candidate, hash);
+    }
+
+    __ and_(candidate, candidate, Operand(mask));
+
+    // Load the entry from the symbol table.
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+    __ ldr(candidate,
+           MemOperand(first_symbol_table_element,
+                      candidate,
+                      LSL,
+                      kPointerSizeLog2));
+
+    // If entry is undefined no string with this hash can be found.
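// Editor's sketch (not part of the patch): the four-probe lookup emitted above,
// written over a simplified table model. The Entry struct, the quadratic probe
// offset and the "two ASCII chars packed into a halfword" key are assumptions
// for illustration only.
#include <cstdint>
#include <vector>

struct Entry { bool used; uint16_t two_chars; };  // stand-in for a symbol table slot

static int ProbeTwoCharString(const std::vector<Entry>& table,
                              uint32_t hash, uint16_t chars) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // capacity is a power of two
  for (int i = 0; i < 4; i++) {                  // kProbes == 4
    uint32_t probe = hash + i * (i + 1) / 2;     // quadratic probing, like GetProbeOffset(i)
    uint32_t index = probe & mask;
    if (!table[index].used) return -1;           // undefined entry: hash chain exhausted
    if (table[index].two_chars == chars) return static_cast<int>(index);  // both chars match
  }
  return -1;                                     // give up after kProbes attempts
}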
+ __ cmp(candidate, undefined); + __ b(eq, not_found); + + // If length is not 2 the string is not a candidate. + __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); + __ cmp(scratch, Operand(Smi::FromInt(2))); + __ b(ne, &next_probe[i]); + + // Check that the candidate is a non-external ascii string. + __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, + &next_probe[i]); + + // Check if the two characters match. + // Assumes that word load is little endian. + __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); + __ cmp(chars, scratch); + __ b(eq, &found_in_symbol_table); + __ bind(&next_probe[i]); + } + + // No matching 2 character string found by probing. + __ jmp(not_found); + + // Scratch register contains result when we fall through to here. + Register result = scratch; + __ bind(&found_in_symbol_table); + __ Move(r0, result); +} + + +void StringHelper::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character) { + // hash = character + (character << 10); + __ add(hash, character, Operand(character, LSL, 10)); + // hash ^= hash >> 6; + __ eor(hash, hash, Operand(hash, ASR, 6)); +} + + +void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character) { + // hash += character; + __ add(hash, hash, Operand(character)); + // hash += hash << 10; + __ add(hash, hash, Operand(hash, LSL, 10)); + // hash ^= hash >> 6; + __ eor(hash, hash, Operand(hash, ASR, 6)); +} + + +void StringHelper::GenerateHashGetHash(MacroAssembler* masm, + Register hash) { + // hash += hash << 3; + __ add(hash, hash, Operand(hash, LSL, 3)); + // hash ^= hash >> 11; + __ eor(hash, hash, Operand(hash, ASR, 11)); + // hash += hash << 15; + __ add(hash, hash, Operand(hash, LSL, 15), SetCC); + + // if (hash == 0) hash = 27; + __ mov(hash, Operand(27), LeaveCC, nz); +} + + +void SubStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + // Stack frame on entry. + // lr: return address + // sp[0]: to + // sp[4]: from + // sp[8]: string + + // This stub is called from the native-call %_SubString(...), so + // nothing can be assumed about the arguments. It is tested that: + // "string" is a sequential string, + // both "from" and "to" are smis, and + // 0 <= from <= to <= string.length. + // If any of these assumptions fail, we call the runtime system. + + static const int kToOffset = 0 * kPointerSize; + static const int kFromOffset = 1 * kPointerSize; + static const int kStringOffset = 2 * kPointerSize; + + + // Check bounds and smi-ness. + __ ldr(r7, MemOperand(sp, kToOffset)); + __ ldr(r6, MemOperand(sp, kFromOffset)); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + // I.e., arithmetic shift right by one un-smi-tags. + __ mov(r2, Operand(r7, ASR, 1), SetCC); + __ mov(r3, Operand(r6, ASR, 1), SetCC, cc); + // If either r2 or r6 had the smi tag bit set, then carry is set now. + __ b(cs, &runtime); // Either "from" or "to" is not a smi. + __ b(mi, &runtime); // From is negative. + + __ sub(r2, r2, Operand(r3), SetCC); + __ b(mi, &runtime); // Fail if from > to. + // Special handling of sub-strings of length 1 and 2. One character strings + // are handled in the runtime system (looked up in the single character + // cache). Two character strings are looked for in the symbol cache. 
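// Editor's sketch (not part of the patch): the three StringHelper hash routines
// above perform, step by step, the add/shift/xor sequence spelled out in their
// comments. This plain C++ version uses unsigned 32-bit arithmetic; the stub
// itself works on 32-bit registers with arithmetic shifts.
#include <cstdint>

static uint32_t HashTwoCharacterString(uint32_t c1, uint32_t c2) {
  uint32_t hash = c1 + (c1 << 10);   // GenerateHashInit
  hash ^= hash >> 6;
  hash += c2;                        // GenerateHashAddCharacter
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += hash << 3;                 // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hash == 0) hash = 27;          // never return a zero hash
  return hash;
}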
+ __ cmp(r2, Operand(2)); + __ b(lt, &runtime); + + // r2: length + // r3: from index (untaged smi) + // r6: from (smi) + // r7: to (smi) + + // Make sure first argument is a sequential (or flat) string. + __ ldr(r5, MemOperand(sp, kStringOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ tst(r5, Operand(kSmiTagMask)); + __ b(eq, &runtime); + Condition is_string = masm->IsObjectStringType(r5, r1); + __ b(NegateCondition(is_string), &runtime); + + // r1: instance type + // r2: length + // r3: from index (untaged smi) + // r5: string + // r6: from (smi) + // r7: to (smi) + Label seq_string; + __ and_(r4, r1, Operand(kStringRepresentationMask)); + STATIC_ASSERT(kSeqStringTag < kConsStringTag); + STATIC_ASSERT(kConsStringTag < kExternalStringTag); + __ cmp(r4, Operand(kConsStringTag)); + __ b(gt, &runtime); // External strings go to runtime. + __ b(lt, &seq_string); // Sequential strings are handled directly. + + // Cons string. Try to recurse (once) on the first substring. + // (This adds a little more generality than necessary to handle flattened + // cons strings, but not much). + __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset)); + __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); + __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ tst(r1, Operand(kStringRepresentationMask)); + STATIC_ASSERT(kSeqStringTag == 0); + __ b(ne, &runtime); // Cons and External strings go to runtime. + + // Definitly a sequential string. + __ bind(&seq_string); + + // r1: instance type. + // r2: length + // r3: from index (untaged smi) + // r5: string + // r6: from (smi) + // r7: to (smi) + __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset)); + __ cmp(r4, Operand(r7)); + __ b(lt, &runtime); // Fail if to > length. + + // r1: instance type. + // r2: result string length. + // r3: from index (untaged smi) + // r5: string. + // r6: from offset (smi) + // Check for flat ascii string. + Label non_ascii_flat; + __ tst(r1, Operand(kStringEncodingMask)); + STATIC_ASSERT(kTwoByteStringTag == 0); + __ b(eq, &non_ascii_flat); + + Label result_longer_than_two; + __ cmp(r2, Operand(2)); + __ b(gt, &result_longer_than_two); + + // Sub string of length 2 requested. + // Get the two characters forming the sub string. + __ add(r5, r5, Operand(r3)); + __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize)); + __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1)); + + // Try to lookup two character string in symbol table. + Label make_two_character_string; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + // r2: result string length. + // r3: two characters combined into halfword in little endian byte order. + __ bind(&make_two_character_string); + __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); + __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&result_longer_than_two); + + // Allocate the result. + __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime); + + // r0: result string. + // r2: result string length. + // r5: string. + // r6: from offset (smi) + // Locate first character of result. + __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // Locate 'from' character of string. 
+ __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(r5, r5, Operand(r6, ASR, 1)); + + // r0: result string. + // r1: first character of result string. + // r2: result string length. + // r5: first character of sub string to copy. + STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, + COPY_ASCII | DEST_ALWAYS_ALIGNED); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + __ bind(&non_ascii_flat); + // r2: result string length. + // r5: string. + // r6: from offset (smi) + // Check for flat two byte string. + + // Allocate the result. + __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime); + + // r0: result string. + // r2: result string length. + // r5: string. + // Locate first character of result. + __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // Locate 'from' character of string. + __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // As "from" is a smi it is 2 times the value which matches the size of a two + // byte character. + __ add(r5, r5, Operand(r6)); + + // r0: result string. + // r1: first character of result. + // r2: result length. + // r5: first character of string to copy. + STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); + StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, + DEST_ALWAYS_ALIGNED); + __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); + __ add(sp, sp, Operand(3 * kPointerSize)); + __ Ret(); + + // Just jump to runtime to create the sub string. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kSubString, 3, 1); +} + + +void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4) { + Label compare_lengths; + // Find minimum length and length difference. + __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); + __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); + __ sub(scratch3, scratch1, Operand(scratch2), SetCC); + Register length_delta = scratch3; + __ mov(scratch1, scratch2, LeaveCC, gt); + Register min_length = scratch1; + STATIC_ASSERT(kSmiTag == 0); + __ tst(min_length, Operand(min_length)); + __ b(eq, &compare_lengths); + + // Untag smi. + __ mov(min_length, Operand(min_length, ASR, kSmiTagSize)); + + // Setup registers so that we only need to increment one register + // in the loop. + __ add(scratch2, min_length, + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(left, left, Operand(scratch2)); + __ add(right, right, Operand(scratch2)); + // Registers left and right points to the min_length character of strings. + __ rsb(min_length, min_length, Operand(-1)); + Register index = min_length; + // Index starts at -min_length. + + { + // Compare loop. + Label loop; + __ bind(&loop); + // Compare characters. + __ add(index, index, Operand(1), SetCC); + __ ldrb(scratch2, MemOperand(left, index), ne); + __ ldrb(scratch4, MemOperand(right, index), ne); + // Skip to compare lengths with eq condition true. + __ b(eq, &compare_lengths); + __ cmp(scratch2, scratch4); + __ b(eq, &loop); + // Fallthrough with eq condition false. + } + // Compare lengths - strings up to min-length are equal. 
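// Editor's sketch (not part of the patch): the flat-ASCII compare generated
// here is equivalent to this C++ logic, minus the smi tagging of the result.
// The -1/0/1 return values stand in for LESS/EQUAL/GREATER.
static int CompareFlatAsciiSketch(const unsigned char* left, int left_len,
                                  const unsigned char* right, int right_len) {
  int min_length = left_len < right_len ? left_len : right_len;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) {
      return left[i] < right[i] ? -1 : 1;  // first mismatching character decides
    }
  }
  // All characters up to min_length are equal; the shorter string sorts first.
  int length_delta = left_len - right_len;
  if (length_delta == 0) return 0;
  return length_delta < 0 ? -1 : 1;
}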
+  __ bind(&compare_lengths);
+  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use zero length_delta as result.
+  __ mov(r0, Operand(length_delta), SetCC, eq);
+  // Fall through to here if characters compare not-equal.
+  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
+  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
+  __ Ret();
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  //  sp[0]: right string
+  //  sp[4]: left string
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right
+
+  Label not_same;
+  __ cmp(r0, r1);
+  __ b(ne, &not_same);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential ascii strings.
+  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+
+  // Compare flat ascii strings natively. Remove arguments from stack first.
+  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+
+  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+  Label string_add_runtime;
+  // Stack on entry:
+  // sp[0]: second argument.
+  // sp[4]: first argument.
+
+  // Load the two arguments.
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
+
+  // Make sure that both arguments are strings if not known in advance.
+  if (string_check_) {
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
+    // Load instance types.
+    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
+    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+    STATIC_ASSERT(kStringTag == 0);
+    // If either is not a string, go to runtime.
+    __ tst(r4, Operand(kIsNotStringMask));
+    __ tst(r5, Operand(kIsNotStringMask), eq);
+    __ b(ne, &string_add_runtime);
+  }
+
+  // Both arguments are strings.
+  // r0: first string
+  // r1: second string
+  // r4: first string instance type (if string_check_)
+  // r5: second string instance type (if string_check_)
+  {
+    Label strings_not_empty;
+    // Check if either of the strings are empty. In that case return the other.
+    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
+    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
+    STATIC_ASSERT(kSmiTag == 0);
+    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
+    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
+    STATIC_ASSERT(kSmiTag == 0);
+    // Else test if second string is empty.
+    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
+    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
+
+    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
+    __ add(sp, sp, Operand(2 * kPointerSize));
+    __ Ret();
+
+    __ bind(&strings_not_empty);
+  }
+
+  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
+  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
+  // Both strings are non-empty.
+ // r0: first string + // r1: second string + // r2: length of first string + // r3: length of second string + // r4: first string instance type (if string_check_) + // r5: second string instance type (if string_check_) + // Look at the length of the result of adding the two strings. + Label string_add_flat_result, longer_than_two; + // Adding two lengths can't overflow. + STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); + __ add(r6, r2, Operand(r3)); + // Use the runtime system when adding two one character strings, as it + // contains optimizations for this specific case using the symbol table. + __ cmp(r6, Operand(2)); + __ b(ne, &longer_than_two); + + // Check that both strings are non-external ascii strings. + if (!string_check_) { + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); + } + __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, + &string_add_runtime); + + // Get the two characters forming the sub string. + __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); + + // Try to lookup two character string in symbol table. If it is not found + // just allocate a new one. + Label make_two_character_string; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&make_two_character_string); + // Resulting string has length 2 and first chars of two strings + // are combined into single halfword in r2 register. + // So we can fill resulting string without two loops by a single + // halfword store instruction (which assumes that processor is + // in a little endian mode) + __ mov(r6, Operand(2)); + __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); + __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&longer_than_two); + // Check if resulting string will be flat. + __ cmp(r6, Operand(String::kMinNonFlatLength)); + __ b(lt, &string_add_flat_result); + // Handle exceptionally long strings in the runtime system. + STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); + ASSERT(IsPowerOf2(String::kMaxLength + 1)); + // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. + __ cmp(r6, Operand(String::kMaxLength + 1)); + __ b(hs, &string_add_runtime); + + // If result is not supposed to be flat, allocate a cons string object. + // If both strings are ascii the result is an ascii cons string. + if (!string_check_) { + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); + } + Label non_ascii, allocated, ascii_data; + STATIC_ASSERT(kTwoByteStringTag == 0); + __ tst(r4, Operand(kStringEncodingMask)); + __ tst(r5, Operand(kStringEncodingMask), ne); + __ b(eq, &non_ascii); + + // Allocate an ASCII cons string. + __ bind(&ascii_data); + __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); + __ bind(&allocated); + // Fill the fields of the cons string. 
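// Editor's sketch (not part of the patch): a cons string is just a pair of
// sub-strings recording a concatenation without copying any characters; the
// two stores below fill in exactly these fields (the map and length were set
// up by AllocateAsciiConsString/AllocateTwoByteConsString above). Field names
// are illustrative, not the real object layout.
struct ConsStringSketch {
  void* first;   // left-hand string of the concatenation
  void* second;  // right-hand string of the concatenation
};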
+ __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); + __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); + __ mov(r0, Operand(r7)); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&non_ascii); + // At least one of the strings is two-byte. Check whether it happens + // to contain only ascii characters. + // r4: first instance type. + // r5: second instance type. + __ tst(r4, Operand(kAsciiDataHintMask)); + __ tst(r5, Operand(kAsciiDataHintMask), ne); + __ b(ne, &ascii_data); + __ eor(r4, r4, Operand(r5)); + STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ b(eq, &ascii_data); + + // Allocate a two byte cons string. + __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime); + __ jmp(&allocated); + + // Handle creating a flat result. First check that both strings are + // sequential and that they have the same encoding. + // r0: first string + // r1: second string + // r2: length of first string + // r3: length of second string + // r4: first string instance type (if string_check_) + // r5: second string instance type (if string_check_) + // r6: sum of lengths. + __ bind(&string_add_flat_result); + if (!string_check_) { + __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); + __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); + } + // Check that both strings are sequential. + STATIC_ASSERT(kSeqStringTag == 0); + __ tst(r4, Operand(kStringRepresentationMask)); + __ tst(r5, Operand(kStringRepresentationMask), eq); + __ b(ne, &string_add_runtime); + // Now check if both strings have the same encoding (ASCII/Two-byte). + // r0: first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r6: sum of lengths.. + Label non_ascii_string_add_flat_result; + ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test. + __ eor(r7, r4, Operand(r5)); + __ tst(r7, Operand(kStringEncodingMask)); + __ b(ne, &string_add_runtime); + // And see if it's ASCII or two-byte. + __ tst(r4, Operand(kStringEncodingMask)); + __ b(eq, &non_ascii_string_add_flat_result); + + // Both strings are sequential ASCII strings. We also know that they are + // short (since the sum of the lengths is less than kMinNonFlatLength). + // r6: length of resulting flat string + __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime); + // Locate first character of result. + __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // Locate first character of first argument. + __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // r0: first character of first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r6: first character of result. + // r7: result string. + StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); + + // Load second argument and locate first character. + __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // r1: first character of second string. + // r3: length of second string. + // r6: next character of result. + // r7: result string. 
+ StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); + __ mov(r0, Operand(r7)); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + __ bind(&non_ascii_string_add_flat_result); + // Both strings are sequential two byte strings. + // r0: first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r6: sum of length of strings. + __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime); + // r0: first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r7: result string. + + // Locate first character of result. + __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // Locate first character of first argument. + __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + + // r0: first character of first string. + // r1: second string. + // r2: length of first string. + // r3: length of second string. + // r6: first character of result. + // r7: result string. + StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false); + + // Locate first character of second argument. + __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + + // r1: first character of second string. + // r3: length of second string. + // r6: next character of result (after copy of first string). + // r7: result string. + StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); + + __ mov(r0, Operand(r7)); + __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); + __ add(sp, sp, Operand(2 * kPointerSize)); + __ Ret(); + + // Just jump to runtime to add the two strings. + __ bind(&string_add_runtime); + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); +} + + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h new file mode 100644 index 0000000000..2e07e3b5c7 --- /dev/null +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -0,0 +1,491 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_CODE_STUBS_ARM_H_
+#define V8_ARM_CODE_STUBS_ARM_H_
+
+#include "ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+      : type_(type) {}
+  void Generate(MacroAssembler* masm);
+ private:
+  TranscendentalCache::Type type_;
+  Major MajorKey() { return TranscendentalCache; }
+  int MinorKey() { return type_; }
+  Runtime::FunctionId RuntimeFunction();
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+  explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register tos_;
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return tos_.code(); }
+};
+
+
+class GenericBinaryOpStub : public CodeStub {
+ public:
+  static const int kUnknownIntValue = -1;
+
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      Register lhs,
+                      Register rhs,
+                      int constant_rhs = kUnknownIntValue)
+      : op_(op),
+        mode_(mode),
+        lhs_(lhs),
+        rhs_(rhs),
+        constant_rhs_(constant_rhs),
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+        runtime_operands_type_(BinaryOpIC::DEFAULT),
+        name_(NULL) { }
+
+  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        lhs_(LhsRegister(RegisterBits::decode(key))),
+        rhs_(RhsRegister(RegisterBits::decode(key))),
+        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
+        runtime_operands_type_(type_info),
+        name_(NULL) { }
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  Register lhs_;
+  Register rhs_;
+  int constant_rhs_;
+  bool specialized_on_rhs_;
+  BinaryOpIC::TypeInfo runtime_operands_type_;
+  char* name_;
+
+  static const int kMaxKnownRhs = 0x40000000;
+  static const int kKnownRhsKeyBits = 6;
+
+  // Minor key encoding in 17 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 6> {};
+  class TypeInfoBits: public BitField<int, 8, 2> {};
+  class RegisterBits: public BitField<bool, 10, 1> {};
+  class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+           (lhs_.is(r1) && rhs_.is(r0)));
+    // Encode the parameters in a unique 18 bit value.
+ return OpBits::encode(op_) + | ModeBits::encode(mode_) + | KnownIntBits::encode(MinorKeyForKnownInt()) + | TypeInfoBits::encode(runtime_operands_type_) + | RegisterBits::encode(lhs_.is(r0)); + } + + void Generate(MacroAssembler* masm); + void HandleNonSmiBitwiseOp(MacroAssembler* masm, + Register lhs, + Register rhs); + void HandleBinaryOpSlowCases(MacroAssembler* masm, + Label* not_smi, + Register lhs, + Register rhs, + const Builtins::JavaScript& builtin); + void GenerateTypeTransition(MacroAssembler* masm); + + static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) { + if (constant_rhs == kUnknownIntValue) return false; + if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3; + if (op == Token::MOD) { + if (constant_rhs <= 1) return false; + if (constant_rhs <= 10) return true; + if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true; + return false; + } + return false; + } + + int MinorKeyForKnownInt() { + if (!specialized_on_rhs_) return 0; + if (constant_rhs_ <= 10) return constant_rhs_ + 1; + ASSERT(IsPowerOf2(constant_rhs_)); + int key = 12; + int d = constant_rhs_; + while ((d & 1) == 0) { + key++; + d >>= 1; + } + ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits)); + return key; + } + + int KnownBitsForMinorKey(int key) { + if (!key) return 0; + if (key <= 11) return key - 1; + int d = 1; + while (key != 12) { + key--; + d <<= 1; + } + return d; + } + + Register LhsRegister(bool lhs_is_r0) { + return lhs_is_r0 ? r0 : r1; + } + + Register RhsRegister(bool lhs_is_r0) { + return lhs_is_r0 ? r1 : r0; + } + + bool ShouldGenerateSmiCode() { + return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + bool ShouldGenerateFPCode() { + return runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(runtime_operands_type_); + } + + const char* GetName(); + +#ifdef DEBUG + void Print() { + if (!specialized_on_rhs_) { + PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); + } else { + PrintF("GenericBinaryOpStub (%s by %d)\n", + Token::String(op_), + constant_rhs_); + } + } +#endif +}; + + +// Flag that indicates how to generate code for the stub StringAddStub. +enum StringAddFlags { + NO_STRING_ADD_FLAGS = 0, + NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. +}; + + +class StringAddStub: public CodeStub { + public: + explicit StringAddStub(StringAddFlags flags) { + string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); + } + + private: + Major MajorKey() { return StringAdd; } + int MinorKey() { return string_check_ ? 0 : 1; } + + void Generate(MacroAssembler* masm); + + // Should the stub check whether arguments are strings? + bool string_check_; +}; + + +class SubStringStub: public CodeStub { + public: + SubStringStub() {} + + private: + Major MajorKey() { return SubString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + + +class StringCompareStub: public CodeStub { + public: + StringCompareStub() { } + + // Compare two flat ASCII strings and returns result in r0. + // Does not use the stack. 
+  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                              Register left,
+                                              Register right,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              Register scratch4);
+
+ private:
+  Major MajorKey() { return StringCompare; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
+// This stub can do a fast mod operation without using fp.
+// It is tail called from the GenericBinaryOpStub and it always
+// returns an answer. It never causes GC so it doesn't need a real frame.
+//
+// The inputs are always positive Smis. This is never called
+// where the denominator is a power of 2. We handle that separately.
+//
+// If we consider the denominator as an odd number multiplied by a power of 2,
+// then:
+// * The exponent (power of 2) is in the shift_distance register.
+// * The odd number is in the odd_number register. It is always in the range
+//   of 3 to 25.
+// * The bits from the numerator that are to be copied to the answer (there are
+//   shift_distance of them) are in the mask_bits register.
+// * The other bits of the numerator have been shifted down and are in the lhs
+//   register.
+class IntegerModStub : public CodeStub {
+ public:
+  IntegerModStub(Register result,
+                 Register shift_distance,
+                 Register odd_number,
+                 Register mask_bits,
+                 Register lhs,
+                 Register scratch)
+      : result_(result),
+        shift_distance_(shift_distance),
+        odd_number_(odd_number),
+        mask_bits_(mask_bits),
+        lhs_(lhs),
+        scratch_(scratch) {
+    // We don't code these in the minor key, so they should always be the same.
+    // We don't really want to fix that since this stub is rather large and we
+    // don't want many copies of it.
+    ASSERT(shift_distance_.is(r9));
+    ASSERT(odd_number_.is(r4));
+    ASSERT(mask_bits_.is(r3));
+    ASSERT(scratch_.is(r5));
+  }
+
+ private:
+  Register result_;
+  Register shift_distance_;
+  Register odd_number_;
+  Register mask_bits_;
+  Register lhs_;
+  Register scratch_;
+
+  // Minor key encoding in 16 bits.
+  class ResultRegisterBits: public BitField<int, 0, 4> {};
+  class LhsRegisterBits: public BitField<int, 4, 4> {};
+
+  Major MajorKey() { return IntegerMod; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return ResultRegisterBits::encode(result_.code())
+           | LhsRegisterBits::encode(lhs_.code());
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "IntegerModStub"; }
+
+  // Utility functions.
+  void DigitSum(MacroAssembler* masm,
+                Register lhs,
+                int mask,
+                int shift,
+                Label* entry);
+  void DigitSum(MacroAssembler* masm,
+                Register lhs,
+                Register scratch,
+                int mask,
+                int shift1,
+                int shift2,
+                Label* entry);
+  void ModGetInRangeBySubtraction(MacroAssembler* masm,
+                                  Register lhs,
+                                  int shift,
+                                  int rhs);
+  void ModReduce(MacroAssembler* masm,
+                 Register lhs,
+                 int max,
+                 int denominator);
+  void ModAnswer(MacroAssembler* masm,
+                 Register result,
+                 Register shift_distance,
+                 Register mask_bits,
+                 Register sum_of_digits);
+
+
+#ifdef DEBUG
+  void Print() { PrintF("IntegerModStub\n"); }
+#endif
+};
+
+
+// This stub can convert a signed int32 to a heap number (double). It does
+// not work for int32s that are in Smi range! No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub { + public: + WriteInt32ToHeapNumberStub(Register the_int, + Register the_heap_number, + Register scratch) + : the_int_(the_int), + the_heap_number_(the_heap_number), + scratch_(scratch) { } + + private: + Register the_int_; + Register the_heap_number_; + Register scratch_; + + // Minor key encoding in 16 bits. + class IntRegisterBits: public BitField {}; + class HeapNumberRegisterBits: public BitField {}; + class ScratchRegisterBits: public BitField {}; + + Major MajorKey() { return WriteInt32ToHeapNumber; } + int MinorKey() { + // Encode the parameters in a unique 16 bit value. + return IntRegisterBits::encode(the_int_.code()) + | HeapNumberRegisterBits::encode(the_heap_number_.code()) + | ScratchRegisterBits::encode(scratch_.code()); + } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "WriteInt32ToHeapNumberStub"; } + +#ifdef DEBUG + void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } +#endif +}; + + +class NumberToStringStub: public CodeStub { + public: + NumberToStringStub() { } + + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. + static void GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + bool object_is_smi, + Label* not_found); + + private: + Major MajorKey() { return NumberToString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "NumberToStringStub"; } +}; + + +class RecordWriteStub : public CodeStub { + public: + RecordWriteStub(Register object, Register offset, Register scratch) + : object_(object), offset_(offset), scratch_(scratch) { } + + void Generate(MacroAssembler* masm); + + private: + Register object_; + Register offset_; + Register scratch_; + + // Minor key encoding in 12 bits. 4 bits for each of the three + // registers (object, offset and scratch) OOOOAAAASSSS. + class ScratchBits: public BitField {}; + class OffsetBits: public BitField {}; + class ObjectBits: public BitField {}; + + Major MajorKey() { return RecordWrite; } + + int MinorKey() { + // Encode the registers. + return ObjectBits::encode(object_.code()) | + OffsetBits::encode(offset_.code()) | + ScratchBits::encode(scratch_.code()); + } + +#ifdef DEBUG + void Print() { + PrintF("RecordWriteStub (object reg %d), (offset reg %d)," + " (scratch reg %d)\n", + object_.code(), offset_.code(), scratch_.code()); + } +#endif +}; + + +// Enter C code from generated RegExp code in a way that allows +// the C code to fix the return address in case of a GC. +// Currently only needed on ARM. 
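+// (The return address in question points into the generated RegExp code
+// object, which can be moved if the C code triggers a GC; hence the C code
+// needs a way to rewrite that address before returning.)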
+class RegExpCEntryStub: public CodeStub { + public: + RegExpCEntryStub() {} + virtual ~RegExpCEntryStub() {} + void Generate(MacroAssembler* masm); + + private: + Major MajorKey() { return RegExpCEntry; } + int MinorKey() { return 0; } + const char* GetName() { return "RegExpCEntryStub"; } +}; + + +} } // namespace v8::internal + +#endif // V8_ARM_CODE_STUBS_ARM_H_ diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index df17b6f864..f985fb4ba1 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -30,6 +30,7 @@ #if defined(V8_TARGET_ARCH_ARM) #include "bootstrapper.h" +#include "code-stubs.h" #include "codegen-inl.h" #include "compiler.h" #include "debug.h" @@ -49,27 +50,6 @@ namespace v8 { namespace internal { -static void EmitIdenticalObjectComparison(MacroAssembler* masm, - Label* slow, - Condition cc, - bool never_nan_nan); -static void EmitSmiNonsmiComparison(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* lhs_not_nan, - Label* slow, - bool strict); -static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); -static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, - Register lhs, - Register rhs); -static void MultiplyByKnownInt(MacroAssembler* masm, - Register source, - Register destination, - int known_int); -static bool IsEasyToMultiplyBy(int x); - - #define __ ACCESS_MASM(masm_) // ------------------------------------------------------------------------- @@ -790,7 +770,7 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target, ToBooleanStub stub(tos); frame_->CallStub(&stub, 0); // Convert the result in "tos" to a condition code. - __ cmp(tos, Operand(0)); + __ cmp(tos, Operand(0, RelocInfo::NONE)); } else { // Implements slow case by calling the runtime. frame_->EmitPush(tos); @@ -937,16 +917,55 @@ class DeferredInlineSmiOperation: public DeferredCode { } virtual void Generate(); + // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and + // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty + // methods, it is the responsibility of the deferred code to save and restore + // registers. + virtual bool AutoSaveAndRestore() { return false; } + + void JumpToNonSmiInput(Condition cond); + void JumpToAnswerOutOfRange(Condition cond); private: + void GenerateNonSmiInput(); + void GenerateAnswerOutOfRange(); + void WriteNonSmiAnswer(Register answer, + Register heap_number, + Register scratch); + Token::Value op_; int value_; bool reversed_; OverwriteMode overwrite_mode_; Register tos_register_; + Label non_smi_input_; + Label answer_out_of_range_; }; +// For bit operations we try harder and handle the case where the input is not +// a Smi but a 32bits integer without calling the generic stub. +void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) { + ASSERT(Token::IsBitOp(op_)); + + __ b(cond, &non_smi_input_); +} + + +// For bit operations the result is always 32bits so we handle the case where +// the result does not fit in a Smi without calling the generic stub. +void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) { + ASSERT(Token::IsBitOp(op_)); + + if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) { + // >>> requires an unsigned to double conversion and the non VFP code + // does not support this conversion. 
+ __ b(cond, entry_label()); + } else { + __ b(cond, &answer_out_of_range_); + } +} + // On entry the non-constant side of the binary operation is in tos_register_ // and the constant smi side is nowhere. The tos_register_ is not used by the @@ -1025,6 +1044,172 @@ void DeferredInlineSmiOperation::Generate() { // came into this function with, so we can merge back to that frame // without trashing it. copied_frame.MergeTo(frame_state()->frame()); + + Exit(); + + if (non_smi_input_.is_linked()) { + GenerateNonSmiInput(); + } + + if (answer_out_of_range_.is_linked()) { + GenerateAnswerOutOfRange(); + } +} + + +// Convert and write the integer answer into heap_number. +void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer, + Register heap_number, + Register scratch) { + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, answer); + if (op_ == Token::SHR) { + __ vcvt_f64_u32(d0, s0); + } else { + __ vcvt_f64_s32(d0, s0); + } + __ sub(scratch, heap_number, Operand(kHeapObjectTag)); + __ vstr(d0, scratch, HeapNumber::kValueOffset); + } else { + WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch); + __ CallStub(&stub); + } +} + + +void DeferredInlineSmiOperation::GenerateNonSmiInput() { + // We know the left hand side is not a Smi and the right hand side is an + // immediate value (value_) which can be represented as a Smi. We only + // handle bit operations. + ASSERT(Token::IsBitOp(op_)); + + if (FLAG_debug_code) { + __ Abort("Should not fall through!"); + } + + __ bind(&non_smi_input_); + if (FLAG_debug_code) { + __ AbortIfSmi(tos_register_); + } + + // This routine uses the registers from r2 to r6. At the moment they are + // not used by the register allocator, but when they are it should use + // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above. + + Register heap_number_map = r7; + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset)); + __ cmp(r3, heap_number_map); + // Not a number, fall back to the GenericBinaryOpStub. + __ b(ne, entry_label()); + + Register int32 = r2; + // Not a 32bits signed int, fall back to the GenericBinaryOpStub. + __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label()); + + // tos_register_ (r0 or r1): Original heap number. + // int32: signed 32bits int. + + Label result_not_a_smi; + int shift_value = value_ & 0x1f; + switch (op_) { + case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break; + case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break; + case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break; + case Token::SAR: + ASSERT(!reversed_); + if (shift_value != 0) { + __ mov(int32, Operand(int32, ASR, shift_value)); + } + break; + case Token::SHR: + ASSERT(!reversed_); + if (shift_value != 0) { + __ mov(int32, Operand(int32, LSR, shift_value), SetCC); + } else { + // SHR is special because it is required to produce a positive answer. + __ cmp(int32, Operand(0, RelocInfo::NONE)); + } + if (CpuFeatures::IsSupported(VFP3)) { + __ b(mi, &result_not_a_smi); + } else { + // Non VFP code cannot convert from unsigned to double, so fall back + // to GenericBinaryOpStub. + __ b(mi, entry_label()); + } + break; + case Token::SHL: + ASSERT(!reversed_); + if (shift_value != 0) { + __ mov(int32, Operand(int32, LSL, shift_value)); + } + break; + default: UNREACHABLE(); + } + // Check that the *signed* result fits in a smi. 
Not necessary for AND, SAR
+  // if the shift is more than 0 or SHR if the shift is more than 1.
+  if (!( (op_ == Token::AND) ||
+         ((op_ == Token::SAR) && (shift_value > 0)) ||
+         ((op_ == Token::SHR) && (shift_value > 1)))) {
+    __ add(r3, int32, Operand(0x40000000), SetCC);
+    __ b(mi, &result_not_a_smi);
+  }
+  __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
+  Exit();
+
+  if (result_not_a_smi.is_linked()) {
+    __ bind(&result_not_a_smi);
+    if (overwrite_mode_ != OVERWRITE_LEFT) {
+      ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
+             (overwrite_mode_ == OVERWRITE_RIGHT));
+      // If the allocation fails, fall back to the GenericBinaryOpStub.
+      __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
+      // Nothing can go wrong now, so overwrite tos.
+      __ mov(tos_register_, Operand(r4));
+    }
+
+    // int32: answer as signed 32bits integer.
+    // tos_register_: Heap number to write the answer into.
+    WriteNonSmiAnswer(int32, tos_register_, r3);
+
+    Exit();
+  }
+}
+
+
+void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
+  // The inputs from a bitwise operation were Smis but the result cannot fit
+  // into a Smi, so we store it into a heap number. tos_register_ holds the
+  // result to be converted.
+  ASSERT(Token::IsBitOp(op_));
+  ASSERT(!reversed_);
+
+  if (FLAG_debug_code) {
+    __ Abort("Should not fall through!");
+  }
+
+  __ bind(&answer_out_of_range_);
+  if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
+    // >>> 0 is a special case where the result is already tagged but wrong
+    // because the Smi is negative. We untag it.
+    __ mov(tos_register_, Operand(tos_register_, ASR, kSmiTagSize));
+  }
+
+  // This routine uses the registers from r2 to r6. At the moment they are
+  // not used by the register allocator, but when they are it should use
+  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
+
+  // Allocate the result heap number.
+  Register heap_number_map = r7;
+  Register heap_number = r4;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  // If the allocation fails, fall back to the GenericBinaryOpStub.
+  __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
+  WriteNonSmiAnswer(tos_register_, heap_number, r3);
+  __ mov(tos_register_, Operand(heap_number));
+
+  Exit();
 }


@@ -1049,6 +1234,43 @@ static int BitPosition(unsigned x) {
 }


+// Can we multiply by x with max two shifts and an add.
+// This answers yes to all integers from 2 to 10.
+static bool IsEasyToMultiplyBy(int x) {
+  if (x < 2) return false;  // Avoid special cases.
+  if (x > (Smi::kMaxValue + 1) >> 2) return false;  // Almost always overflows.
+  if (IsPowerOf2(x)) return true;  // Simple shift.
+  if (PopCountLessThanEqual2(x)) return true;  // Shift and add and shift.
+  if (IsPowerOf2(x + 1)) return true;  // Patterns like 11111.
+  return false;
+}
+
+
+// Can multiply by anything that IsEasyToMultiplyBy returns true for.
+// Source and destination may be the same register.  This routine does
+// not set carry and overflow the way a mul instruction would.
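+// For illustration: multiplying by 10 (binary 1010, two bits set) becomes
+// add(dst, src, Operand(src, LSL, 2)) for src * 5 followed by a left shift
+// by one, and multiplying by 7 (a 2^n - 1 pattern) becomes
+// rsb(dst, src, Operand(src, LSL, 3)), i.e. src * 8 - src.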
+static void InlineMultiplyByKnownInt(MacroAssembler* masm, + Register source, + Register destination, + int known_int) { + if (IsPowerOf2(known_int)) { + masm->mov(destination, Operand(source, LSL, BitPosition(known_int))); + } else if (PopCountLessThanEqual2(known_int)) { + int first_bit = BitPosition(known_int); + int second_bit = BitPosition(known_int ^ (1 << first_bit)); + masm->add(destination, source, + Operand(source, LSL, second_bit - first_bit)); + if (first_bit != 0) { + masm->mov(destination, Operand(destination, LSL, first_bit)); + } + } else { + ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111. + int the_bit = BitPosition(known_int + 1); + masm->rsb(destination, source, Operand(source, LSL, the_bit)); + } +} + + void CodeGenerator::SmiOperation(Token::Value op, Handle value, bool reversed, @@ -1118,7 +1340,8 @@ void CodeGenerator::SmiOperation(Token::Value op, frame_->EmitPush(lhs, TypeInfo::Smi()); TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown(); frame_->EmitPush(rhs, t); - GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, kUnknownIntValue); + GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, + GenericBinaryOpStub::kUnknownIntValue); } return; } @@ -1173,10 +1396,10 @@ void CodeGenerator::SmiOperation(Token::Value op, } frame_->EmitPush(tos, TypeInfo::Smi()); } else { - DeferredCode* deferred = + DeferredInlineSmiOperation* deferred = new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); __ tst(tos, Operand(kSmiTagMask)); - deferred->Branch(ne); + deferred->JumpToNonSmiInput(ne); switch (op) { case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; @@ -1222,92 +1445,87 @@ void CodeGenerator::SmiOperation(Token::Value op, case Token::SHR: case Token::SAR: { ASSERT(!reversed); - TypeInfo result = - (op == Token::SAR) ? TypeInfo::Integer32() : TypeInfo::Number(); - if (!reversed) { - if (op == Token::SHR) { - if (int_value >= 2) { - result = TypeInfo::Smi(); - } else if (int_value >= 1) { - result = TypeInfo::Integer32(); - } + int shift_value = int_value & 0x1f; + TypeInfo result = TypeInfo::Number(); + + if (op == Token::SHR) { + if (shift_value > 1) { + result = TypeInfo::Smi(); + } else if (shift_value > 0) { + result = TypeInfo::Integer32(); + } + } else if (op == Token::SAR) { + if (shift_value > 0) { + result = TypeInfo::Smi(); } else { - if (int_value >= 1) { - result = TypeInfo::Smi(); - } + result = TypeInfo::Integer32(); } + } else { + ASSERT(op == Token::SHL); + result = TypeInfo::Integer32(); } - Register scratch = VirtualFrame::scratch0(); - Register scratch2 = VirtualFrame::scratch1(); - int shift_value = int_value & 0x1f; // least significant 5 bits - DeferredCode* deferred = + + DeferredInlineSmiOperation* deferred = new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); - uint32_t problematic_mask = kSmiTagMask; - // For unsigned shift by zero all negative smis are problematic. - bool skip_smi_test = both_sides_are_smi; - if (shift_value == 0 && op == Token::SHR) { - problematic_mask |= 0x80000000; - skip_smi_test = false; - } - if (!skip_smi_test) { - __ tst(tos, Operand(problematic_mask)); - deferred->Branch(ne); // Go slow for problematic input. 
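+        // Note: a non-Smi input no longer forces the generic stub here; the
+        // deferred code converts heap number inputs to int32 itself (see
+        // GenerateNonSmiInput above).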
+ if (!both_sides_are_smi) { + __ tst(tos, Operand(kSmiTagMask)); + deferred->JumpToNonSmiInput(ne); } switch (op) { case Token::SHL: { if (shift_value != 0) { + Register scratch = VirtualFrame::scratch0(); int adjusted_shift = shift_value - kSmiTagSize; ASSERT(adjusted_shift >= 0); + if (adjusted_shift != 0) { - __ mov(scratch, Operand(tos, LSL, adjusted_shift)); - // Check that the *signed* result fits in a smi. - __ add(scratch2, scratch, Operand(0x40000000), SetCC); - deferred->Branch(mi); - __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); - } else { - // Check that the *signed* result fits in a smi. - __ add(scratch2, tos, Operand(0x40000000), SetCC); - deferred->Branch(mi); - __ mov(tos, Operand(tos, LSL, kSmiTagSize)); + __ mov(tos, Operand(tos, LSL, adjusted_shift)); } + // Check that the *signed* result fits in a smi. + __ add(scratch, tos, Operand(0x40000000), SetCC); + deferred->JumpToAnswerOutOfRange(mi); + __ mov(tos, Operand(tos, LSL, kSmiTagSize)); } break; } case Token::SHR: { if (shift_value != 0) { + Register scratch = VirtualFrame::scratch0(); __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Remove tag. - // LSR by immediate 0 means shifting 32 bits. - __ mov(scratch, Operand(scratch, LSR, shift_value)); + __ mov(tos, Operand(scratch, LSR, shift_value)); if (shift_value == 1) { - // check that the *unsigned* result fits in a smi - // neither of the two high-order bits can be set: + // Check that the *unsigned* result fits in a smi. + // Neither of the two high-order bits can be set: // - 0x80000000: high bit would be lost when smi tagging - // - 0x40000000: this number would convert to negative when - // smi tagging these two cases can only happen with shifts - // by 0 or 1 when handed a valid smi - __ tst(scratch, Operand(0xc0000000)); - deferred->Branch(ne); - } else { - ASSERT(shift_value >= 2); - result = TypeInfo::Smi(); // SHR by at least 2 gives a Smi. + // - 0x40000000: this number would convert to negative when Smi + // tagging. + // These two cases can only happen with shifts by 0 or 1 when + // handed a valid smi. + __ tst(tos, Operand(0xc0000000)); + if (!CpuFeatures::IsSupported(VFP3)) { + // If the unsigned result does not fit in a Smi, we require an + // unsigned to double conversion. Without VFP V8 has to fall + // back to the runtime. The deferred code will expect tos + // to hold the original Smi to be shifted. + __ mov(tos, Operand(scratch, LSL, kSmiTagSize), LeaveCC, ne); + } + deferred->JumpToAnswerOutOfRange(ne); } - __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); + __ mov(tos, Operand(tos, LSL, kSmiTagSize)); + } else { + __ cmp(tos, Operand(0, RelocInfo::NONE)); + deferred->JumpToAnswerOutOfRange(mi); } break; } case Token::SAR: { - // In the ARM instructions set, ASR by immediate 0 means shifting 32 - // bits. if (shift_value != 0) { - // Do the shift and the tag removal in one operation. If the shift + // Do the shift and the tag removal in one operation. If the shift // is 31 bits (the highest possible value) then we emit the - // instruction as a shift by 0 which means shift arithmetically by - // 32. + // instruction as a shift by 0 which in the ARM ISA means shift + // arithmetically by 32. __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f)); - // Put tag back. __ mov(tos, Operand(tos, LSL, kSmiTagSize)); - // SAR by at least 1 gives a Smi. - result = TypeInfo::Smi(); } break; } @@ -1354,7 +1572,7 @@ void CodeGenerator::SmiOperation(Token::Value op, // brevity to comprehensiveness. 
__ tst(tos, Operand(mask)); deferred->Branch(ne); - MultiplyByKnownInt(masm_, tos, tos, int_value); + InlineMultiplyByKnownInt(masm_, tos, tos, int_value); deferred->BindExit(); frame_->EmitPush(tos); break; @@ -1435,7 +1653,7 @@ void CodeGenerator::Comparison(Condition cc, // We call with 0 args because there are 0 on the stack. CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs); frame_->CallStub(&stub, 0); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); exit.Jump(); smi.Bind(); @@ -1556,7 +1774,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand, __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE); __ b(ne, &build_args); Handle apply_code(Builtins::builtin(Builtins::FunctionApply)); - __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeOffset)); + __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); + __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); __ cmp(r1, Operand(apply_code)); __ b(ne, &build_args); @@ -1598,7 +1817,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand, // frame. Label loop; // r3 is a small non-negative integer, due to the test above. - __ cmp(r3, Operand(0)); + __ cmp(r3, Operand(0, RelocInfo::NONE)); __ b(eq, &invoke); // Compute the address of the first argument. __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2)); @@ -1756,7 +1975,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) { } else if (node->fun() != NULL) { Load(node->fun()); } else { - frame_->EmitPush(Operand(0)); + frame_->EmitPush(Operand(0, RelocInfo::NONE)); } frame_->CallRuntime(Runtime::kDeclareContextSlot, 4); @@ -3445,12 +3664,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { frame_->EmitPush(Operand(Smi::FromInt(node->literal_index()))); frame_->EmitPush(Operand(node->constant_elements())); int length = node->values()->length(); - if (node->depth() > 1) { + if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) { + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); + frame_->CallStub(&stub, 3); + __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2); + } else if (node->depth() > 1) { frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3); - } else if (length > FastCloneShallowArrayStub::kMaximumLength) { + } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - FastCloneShallowArrayStub stub(length); + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::CLONE_ELEMENTS, length); frame_->CallStub(&stub, 3); } frame_->EmitPush(r0); // save the result @@ -3521,9 +3746,7 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) { // Perform the binary operation. Literal* literal = node->value()->AsLiteral(); - bool overwrite_value = - (node->value()->AsBinaryOperation() != NULL && - node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool overwrite_value = node->value()->ResultOverwriteAllowed(); if (literal != NULL && literal->handle()->IsSmi()) { SmiOperation(node->binary_op(), literal->handle(), @@ -3621,9 +3844,7 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) { // Perform the binary operation. 
Literal* literal = node->value()->AsLiteral(); - bool overwrite_value = - (node->value()->AsBinaryOperation() != NULL && - node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool overwrite_value = node->value()->ResultOverwriteAllowed(); if (literal != NULL && literal->handle()->IsSmi()) { SmiOperation(node->binary_op(), literal->handle(), @@ -3737,9 +3958,7 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) { // Perform the binary operation. Literal* literal = node->value()->AsLiteral(); - bool overwrite_value = - (node->value()->AsBinaryOperation() != NULL && - node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool overwrite_value = node->value()->ResultOverwriteAllowed(); if (literal != NULL && literal->handle()->IsSmi()) { SmiOperation(node->binary_op(), literal->handle(), @@ -4167,11 +4386,10 @@ void CodeGenerator::VisitCallNew(CallNew* node) { // actual function to call is resolved after the arguments have been // evaluated. - // Compute function to call and use the global object as the - // receiver. There is no need to use the global proxy here because - // it will always be replaced with a newly allocated object. + // Push constructor on the stack. If it's not a function it's used as + // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is + // ignored. Load(node->expression()); - LoadGlobal(); // Push the arguments ("left-to-right") on the stack. ZoneList* args = node->arguments(); @@ -4180,21 +4398,21 @@ void CodeGenerator::VisitCallNew(CallNew* node) { Load(args->at(i)); } + // Spill everything from here to simplify the implementation. VirtualFrame::SpilledScope spilled_scope(frame_); - // r0: the number of arguments. + // Load the argument count into r0 and the function into r1 as per + // calling convention. __ mov(r0, Operand(arg_count)); - // Load the function into r1 as per calling convention. - __ ldr(r1, frame_->ElementAt(arg_count + 1)); + __ ldr(r1, frame_->ElementAt(arg_count)); // Call the construct call builtin that handles allocation and // constructor invocation. CodeForSourcePosition(node->position()); Handle ic(Builtins::builtin(Builtins::JSConstructCall)); frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1); + frame_->EmitPush(r0); - // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)). - __ str(r0, frame_->Top()); ASSERT_EQ(original_height + 1, frame_->height()); } @@ -4394,7 +4612,8 @@ void CodeGenerator::GenerateMathPow(ZoneList* args) { // Get the absolute untagged value of the exponent and use that for the // calculation. __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC); - __ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi); // Negate if negative. + // Negate if negative. + __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi); __ vmov(d2, d0, mi); // 1.0 needed in d2 later if exponent is negative. // Run through all the bits in the exponent. The result is calculated in d0 @@ -4407,14 +4626,14 @@ void CodeGenerator::GenerateMathPow(ZoneList* args) { __ b(ne, &more_bits); // If exponent is positive we are done. - __ cmp(exponent, Operand(0)); + __ cmp(exponent, Operand(0, RelocInfo::NONE)); __ b(ge, &allocate_return); // If exponent is negative result is 1/result (d2 already holds 1.0 in that // case). However if d0 has reached infinity this will not provide the // correct result, so call runtime if that is the case. 
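   // (0x7FF00000 in the high word, together with a zero low word, is the
   // IEEE 754 bit pattern of +Infinity.)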
__ mov(scratch2, Operand(0x7FF00000)); - __ mov(scratch1, Operand(0)); + __ mov(scratch1, Operand(0, RelocInfo::NONE)); __ vmov(d1, scratch1, scratch2); // Load infinity into d1. __ vcmp(d0, d1); __ vmrs(pc); @@ -4917,7 +5136,7 @@ class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode { __ jmp(exit_label()); __ bind(&false_result); // Set false result. - __ mov(map_result_, Operand(0)); + __ mov(map_result_, Operand(0, RelocInfo::NONE)); } private: @@ -5091,7 +5310,7 @@ void CodeGenerator::GenerateRandomHeapNumber( // Move 0x41300000xxxxxxxx (x = random bits) to VFP. __ vmov(d7, r0, r1); // Move 0x4130000000000000 to VFP. - __ mov(r0, Operand(0)); + __ mov(r0, Operand(0, RelocInfo::NONE)); __ vmov(d8, r0, r1); // Subtract and store the result in the heap number. __ vsub(d7, d7, d8); @@ -5258,6 +5477,73 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList* args) { } +void CodeGenerator::GenerateRegExpCloneResult(ZoneList* args) { + ASSERT_EQ(1, args->length()); + + Load(args->at(0)); + frame_->PopToR0(); + { + VirtualFrame::SpilledScope spilled_scope(frame_); + + Label done; + Label call_runtime; + __ BranchOnSmi(r0, &done); + + // Load JSRegExp map into r1. Check that argument object has this map. + // Arguments to this function should be results of calling RegExp exec, + // which is either an unmodified JSRegExpResult or null. Anything not having + // the unmodified JSRegExpResult map is returned unmodified. + // This also ensures that elements are fast. + + __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX)); + __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset)); + __ ldr(r1, ContextOperand(r1, Context::REGEXP_RESULT_MAP_INDEX)); + __ ldr(ip, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ cmp(r1, Operand(ip)); + __ b(ne, &done); + + if (FLAG_debug_code) { + __ LoadRoot(r2, Heap::kEmptyFixedArrayRootIndex); + __ ldr(ip, FieldMemOperand(r0, JSObject::kPropertiesOffset)); + __ cmp(ip, r2); + __ Check(eq, "JSRegExpResult: default map but non-empty properties."); + } + + // All set, copy the contents to a new object. + __ AllocateInNewSpace(JSRegExpResult::kSize, + r2, + r3, + r4, + &call_runtime, + NO_ALLOCATION_FLAGS); + // Store RegExpResult map as map of allocated object. + ASSERT(JSRegExpResult::kSize == 6 * kPointerSize); + // Copy all fields (map is already in r1) from (untagged) r0 to r2. + // Change map of elements array (ends up in r4) to be a FixedCOWArray. + __ bic(r0, r0, Operand(kHeapObjectTagMask)); + __ ldm(ib, r0, r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit()); + __ stm(ia, r2, + r1.bit() | r3.bit() | r4.bit() | r5.bit() | r6.bit() | r7.bit()); + ASSERT(JSRegExp::kElementsOffset == 2 * kPointerSize); + // Check whether elements array is empty fixed array, and otherwise make + // it copy-on-write (it never should be empty unless someone is messing + // with the arguments to the runtime function). + __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex); + __ add(r0, r2, Operand(kHeapObjectTag)); // Tag result and move it to r0. 
+ __ cmp(r4, ip); + __ b(eq, &done); + __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); + __ str(ip, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ b(&done); + __ bind(&call_runtime); + __ push(r0); + __ CallRuntime(Runtime::kRegExpCloneResult, 1); + __ bind(&done); + } + frame_->EmitPush(r0); +} + + class DeferredSearchCache: public DeferredCode { public: DeferredSearchCache(Register dst, Register cache, Register key) @@ -5402,7 +5688,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList* args) { __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask)); deferred->Branch(nz); - // Check the object's elements are in fast case. + // Check the object's elements are in fast case and writable. __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset)); __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); @@ -5547,6 +5833,27 @@ void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList* args) { } +void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Register value = frame_->PopToRegister(); + Register tmp = frame_->scratch0(); + __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset)); + __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask)); + cc_reg_ = eq; +} + + +void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Register value = frame_->PopToRegister(); + + __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset)); + __ IndexFromHash(value, value); + frame_->EmitPush(value); +} + void CodeGenerator::VisitCallRuntime(CallRuntime* node) { #ifdef DEBUG @@ -5660,9 +5967,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { frame_->EmitPush(r0); // r0 has result } else { - bool can_overwrite = - (node->expression()->AsBinaryOperation() != NULL && - node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool can_overwrite = node->expression()->ResultOverwriteAllowed(); UnaryOverwriteMode overwrite = can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; @@ -5986,12 +6291,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { Literal* rliteral = node->right()->AsLiteral(); // NOTE: The code below assumes that the slow cases (calls to runtime) // never return a constant/immutable object. - bool overwrite_left = - (node->left()->AsBinaryOperation() != NULL && - node->left()->AsBinaryOperation()->ResultOverwriteAllowed()); - bool overwrite_right = - (node->right()->AsBinaryOperation() != NULL && - node->right()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool overwrite_left = node->left()->ResultOverwriteAllowed(); + bool overwrite_right = node->right()->ResultOverwriteAllowed(); if (rliteral != NULL && rliteral->handle()->IsSmi()) { VirtualFrame::RegisterAllocationScope scope(this); @@ -6060,47 +6361,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { Expression* right = node->right(); Token::Value op = node->op(); - // To make null checks efficient, we check if either left or right is the - // literal 'null'. If so, we optimize the code by inlining a null check - // instead of calling the (very) general runtime routine for checking - // equality. 
- if (op == Token::EQ || op == Token::EQ_STRICT) { - bool left_is_null = - left->AsLiteral() != NULL && left->AsLiteral()->IsNull(); - bool right_is_null = - right->AsLiteral() != NULL && right->AsLiteral()->IsNull(); - // The 'null' value can only be equal to 'null' or 'undefined'. - if (left_is_null || right_is_null) { - Load(left_is_null ? right : left); - Register tos = frame_->PopToRegister(); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(tos, ip); - - // The 'null' value is only equal to 'undefined' if using non-strict - // comparisons. - if (op != Token::EQ_STRICT) { - true_target()->Branch(eq); - - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(tos, Operand(ip)); - true_target()->Branch(eq); - - __ tst(tos, Operand(kSmiTagMask)); - false_target()->Branch(eq); - - // It can be an undetectable object. - __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); - __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset)); - __ and_(tos, tos, Operand(1 << Map::kIsUndetectable)); - __ cmp(tos, Operand(1 << Map::kIsUndetectable)); - } - - cc_reg_ = eq; - ASSERT(has_cc() && frame_->height() == original_height); - return; - } - } - // To make typeof testing for natives implemented in JavaScript really // efficient, we generate special code for expressions of the form: // 'typeof == '. @@ -6261,6 +6521,40 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { } +void CodeGenerator::VisitCompareToNull(CompareToNull* node) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + Comment cmnt(masm_, "[ CompareToNull"); + + Load(node->expression()); + Register tos = frame_->PopToRegister(); + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(tos, ip); + + // The 'null' value is only equal to 'undefined' if using non-strict + // comparisons. + if (!node->is_strict()) { + true_target()->Branch(eq); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(tos, Operand(ip)); + true_target()->Branch(eq); + + __ tst(tos, Operand(kSmiTagMask)); + false_target()->Branch(eq); + + // It can be an undetectable object. + __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset)); + __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset)); + __ and_(tos, tos, Operand(1 << Map::kIsUndetectable)); + __ cmp(tos, Operand(1 << Map::kIsUndetectable)); + } + + cc_reg_ = eq; + ASSERT(has_cc() && frame_->height() == original_height); +} + + class DeferredReferenceGetNamedValue: public DeferredCode { public: explicit DeferredReferenceGetNamedValue(Register receiver, @@ -6694,15 +6988,9 @@ void CodeGenerator::EmitKeyedLoad() { __ cmp(scratch1, scratch2); deferred->Branch(ne); - // Get the elements array from the receiver and check that it - // is not a dictionary. + // Get the elements array from the receiver. __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); - if (FLAG_debug_code) { - __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(scratch2, ip); - __ Assert(eq, "JSObject with fast elements map has slow elements"); - } + __ AssertFastElements(scratch1); // Check that key is within bounds. Use unsigned comparison to handle // negative keys. @@ -6991,4695 +7279,30 @@ void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) { } -void FastNewClosureStub::Generate(MacroAssembler* masm) { - // Create a new closure from the given function info in new - // space. Set the context to the current context in cp. 
- Label gc; - - // Pop the function info from the stack. - __ pop(r3); - - // Attempt to allocate new JSFunction in new space. - __ AllocateInNewSpace(JSFunction::kSize, - r0, - r1, - r2, - &gc, - TAG_OBJECT); - - // Compute the function map in the current global context and set that - // as the map of the allocated object. - __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); - __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); - __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); - - // Initialize the rest of the function. We don't have to update the - // write barrier because the allocated object is in new space. - __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); - __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); - __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); - __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); - __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); - __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); - __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); - __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); - - // Initialize the code pointer in the function to be the one - // found in the shared function info object. - __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); - __ str(r3, FieldMemOperand(r0, JSFunction::kCodeOffset)); - - // Return result. The argument function info has been popped already. - __ Ret(); - - // Create a new closure through the slower runtime call. - __ bind(&gc); - __ Push(cp, r3); - __ TailCallRuntime(Runtime::kNewClosure, 2, 1); -} - - -void FastNewContextStub::Generate(MacroAssembler* masm) { - // Try to allocate the context in new space. - Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - - // Attempt to allocate the context in new space. - __ AllocateInNewSpace(FixedArray::SizeFor(length), - r0, - r1, - r2, - &gc, - TAG_OBJECT); - - // Load the function from the stack. - __ ldr(r3, MemOperand(sp, 0)); - - // Setup the object header. - __ LoadRoot(r2, Heap::kContextMapRootIndex); - __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ mov(r2, Operand(Smi::FromInt(length))); - __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); - - // Setup the fixed slots. - __ mov(r1, Operand(Smi::FromInt(0))); - __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); - __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX))); - __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); - __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); - - // Copy the global object from the surrounding context. - __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); - - // Initialize the rest of the slots to undefined. - __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); - for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { - __ str(r1, MemOperand(r0, Context::SlotOffset(i))); - } - - // Remove the on-stack argument and return. - __ mov(cp, r0); - __ pop(); - __ Ret(); - - // Need to collect. Call into runtime system. - __ bind(&gc); - __ TailCallRuntime(Runtime::kNewContext, 1, 1); -} - - -void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [sp]: constant elements. 
- // [sp + kPointerSize]: literal index. - // [sp + (2 * kPointerSize)]: literals array. - - // All sizes here are multiples of kPointerSize. - int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; - int size = JSArray::kSize + elements_size; - - // Load boilerplate object into r3 and check if we need to create a - // boilerplate. - Label slow_case; - __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); - __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); - __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(r3, ip); - __ b(eq, &slow_case); - - // Allocate both the JS array and the elements array in one big - // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, - r0, - r1, - r2, - &slow_case, - TAG_OBJECT); - - // Copy the JS array part. - for (int i = 0; i < JSArray::kSize; i += kPointerSize) { - if ((i != JSArray::kElementsOffset) || (length_ == 0)) { - __ ldr(r1, FieldMemOperand(r3, i)); - __ str(r1, FieldMemOperand(r0, i)); - } - } - - if (length_ > 0) { - // Get hold of the elements array of the boilerplate and setup the - // elements pointer in the resulting object. - __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); - __ add(r2, r0, Operand(JSArray::kSize)); - __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset)); - - // Copy the elements array. - __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize); - } - - // Return and remove the on-stack parameters. - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); -} - - -// Takes a Smi and converts to an IEEE 64 bit floating point value in two -// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and -// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a -// scratch register. Destroys the source register. No GC occurs during this -// stub so you don't have to set up the frame. -class ConvertToDoubleStub : public CodeStub { - public: - ConvertToDoubleStub(Register result_reg_1, - Register result_reg_2, - Register source_reg, - Register scratch_reg) - : result1_(result_reg_1), - result2_(result_reg_2), - source_(source_reg), - zeros_(scratch_reg) { } - - private: - Register result1_; - Register result2_; - Register source_; - Register zeros_; - - // Minor key encoding in 16 bits. - class ModeBits: public BitField {}; - class OpBits: public BitField {}; - - Major MajorKey() { return ConvertToDouble; } - int MinorKey() { - // Encode the parameters in a unique 16 bit value. - return result1_.code() + - (result2_.code() << 4) + - (source_.code() << 8) + - (zeros_.code() << 12); - } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "ConvertToDoubleStub"; } - -#ifdef DEBUG - void Print() { PrintF("ConvertToDoubleStub\n"); } -#endif -}; - - -void ConvertToDoubleStub::Generate(MacroAssembler* masm) { -#ifndef BIG_ENDIAN_FLOATING_POINT - Register exponent = result1_; - Register mantissa = result2_; -#else - Register exponent = result2_; - Register mantissa = result1_; -#endif - Label not_special; - // Convert from Smi to integer. - __ mov(source_, Operand(source_, ASR, kSmiTagSize)); - // Move sign bit from source to destination. This works because the sign bit - // in the exponent word of the double has the same position and polarity as - // the 2's complement sign bit in a Smi. 
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); - __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); - // Subtract from 0 if source was negative. - __ rsb(source_, source_, Operand(0), LeaveCC, ne); - - // We have -1, 0 or 1, which we treat specially. Register source_ contains - // absolute value: it is either equal to 1 (special case of -1 and 1), - // greater than 1 (not a special case) or less than 1 (special case of 0). - __ cmp(source_, Operand(1)); - __ b(gt, ¬_special); - - // For 1 or -1 we need to or in the 0 exponent (biased to 1023). - static const uint32_t exponent_word_for_1 = - HeapNumber::kExponentBias << HeapNumber::kExponentShift; - __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); - // 1, 0 and -1 all have 0 for the second word. - __ mov(mantissa, Operand(0)); - __ Ret(); - - __ bind(¬_special); - // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. - // Gets the wrong answer for 0, but we already checked for that case above. - __ CountLeadingZeros(zeros_, source_, mantissa); - // Compute exponent and or it into the exponent register. - // We use mantissa as a scratch register here. Use a fudge factor to - // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts - // that fit in the ARM's constant field. - int fudge = 0x400; - __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); - __ add(mantissa, mantissa, Operand(fudge)); - __ orr(exponent, - exponent, - Operand(mantissa, LSL, HeapNumber::kExponentShift)); - // Shift up the source chopping the top bit off. - __ add(zeros_, zeros_, Operand(1)); - // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. - __ mov(source_, Operand(source_, LSL, zeros_)); - // Compute lower part of fraction (last 12 bits). - __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); - // And the top (top 20 bits). - __ orr(exponent, - exponent, - Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); - __ Ret(); -} - - -// See comment for class. -void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { - Label max_negative_int; - // the_int_ has the answer which is a signed int32 but not a Smi. - // We test for the special value that has a different exponent. This test - // has the neat side effect of setting the flags according to the sign. - STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); - __ cmp(the_int_, Operand(0x80000000u)); - __ b(eq, &max_negative_int); - // Set up the correct exponent in scratch_. All non-Smi int32s have the same. - // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). - uint32_t non_smi_exponent = - (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; - __ mov(scratch_, Operand(non_smi_exponent)); - // Set the sign bit in scratch_ if the value was negative. - __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); - // Subtract from 0 if the value was negative. - __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs); - // We should be masking the implict first digit of the mantissa away here, - // but it just ends up combining harmlessly with the last digit of the - // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get - // the most significant 1 to hit the last bit of the 12 bit sign and exponent. 
- ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); - const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; - __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); - __ str(scratch_, FieldMemOperand(the_heap_number_, - HeapNumber::kExponentOffset)); - __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); - __ str(scratch_, FieldMemOperand(the_heap_number_, - HeapNumber::kMantissaOffset)); - __ Ret(); - - __ bind(&max_negative_int); - // The max negative int32 is stored as a positive number in the mantissa of - // a double because it uses a sign bit instead of using two's complement. - // The actual mantissa bits stored are all 0 because the implicit most - // significant 1 bit is not stored. - non_smi_exponent += 1 << HeapNumber::kExponentShift; - __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); - __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); - __ mov(ip, Operand(0)); - __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); - __ Ret(); -} - - -// Handle the case where the lhs and rhs are the same object. -// Equality is almost reflexive (everything but NaN), so this is a test -// for "identity and not NaN". -static void EmitIdenticalObjectComparison(MacroAssembler* masm, - Label* slow, - Condition cc, - bool never_nan_nan) { - Label not_identical; - Label heap_number, return_equal; - __ cmp(r0, r1); - __ b(ne, ¬_identical); - - // The two objects are identical. If we know that one of them isn't NaN then - // we now know they test equal. - if (cc != eq || !never_nan_nan) { - // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), - // so we do the second best thing - test it ourselves. - // They are both equal and they are not both Smis so both of them are not - // Smis. If it's not a heap number, then return equal. - if (cc == lt || cc == gt) { - __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); - __ b(ge, slow); - } else { - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); - __ b(eq, &heap_number); - // Comparing JS objects with <=, >= is complicated. - if (cc != eq) { - __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); - __ b(ge, slow); - // Normally here we fall through to return_equal, but undefined is - // special: (undefined == undefined) == true, but - // (undefined <= undefined) == false! See ECMAScript 11.8.5. - if (cc == le || cc == ge) { - __ cmp(r4, Operand(ODDBALL_TYPE)); - __ b(ne, &return_equal); - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r2); - __ b(ne, &return_equal); - if (cc == le) { - // undefined <= undefined should fail. - __ mov(r0, Operand(GREATER)); - } else { - // undefined >= undefined should fail. - __ mov(r0, Operand(LESS)); - } - __ Ret(); - } - } - } +const char* GenericBinaryOpStub::GetName() { + if (name_ != NULL) return name_; + const int len = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(len); + if (name_ == NULL) return "OOM"; + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; } - __ bind(&return_equal); - if (cc == lt) { - __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. - } else if (cc == gt) { - __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. 
- } else { - __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. - } - __ Ret(); - - if (cc != eq || !never_nan_nan) { - // For less and greater we don't have to check for NaN since the result of - // x < x is false regardless. For the others here is some code to check - // for NaN. - if (cc != lt && cc != gt) { - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if it's - // not NaN. - - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // Read top bits of double representation (second word of value). - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - // Test that exponent bits are all set. - __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r3, Operand(-1)); - __ b(ne, &return_equal); - - // Shift out flag and all exponent bits, retaining only mantissa. - __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); - // Or with all low-bits of mantissa. - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ orr(r0, r3, Operand(r2), SetCC); - // For equal we already have the right value in r0: Return zero (equal) - // if all bits in mantissa are zero (it's an Infinity) and non-zero if - // not (it's a NaN). For <= and >= we need to load r0 with the failing - // value if it's a NaN. - if (cc != eq) { - // All-zero means Infinity means equal. - __ Ret(eq); - if (cc == le) { - __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. - } else { - __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. - } - } - __ Ret(); - } - // No fall through here. - } - - __ bind(¬_identical); -} - - -// See comment at call site. -static void EmitSmiNonsmiComparison(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* lhs_not_nan, - Label* slow, - bool strict) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0))); - - Label rhs_is_smi; - __ tst(rhs, Operand(kSmiTagMask)); - __ b(eq, &rhs_is_smi); - - // Lhs is a Smi. Check whether the rhs is a heap number. - __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); - if (strict) { - // If rhs is not a number and lhs is a Smi then strict equality cannot - // succeed. Return non-equal - // If rhs is r0 then there is already a non zero value in it. - if (!rhs.is(r0)) { - __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); - } - __ Ret(ne); - } else { - // Smi compared non-strictly with a non-Smi non-heap-number. Call - // the runtime. - __ b(ne, slow); - } - - // Lhs is a smi, rhs is a number. - if (CpuFeatures::IsSupported(VFP3)) { - // Convert lhs to a double in d7. - CpuFeatures::Scope scope(VFP3); - __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); - // Load the double from rhs, tagged HeapNumber r0, to d6. - __ sub(r7, rhs, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - } else { - __ push(lr); - // Convert lhs to a double in r2, r3. - __ mov(r7, Operand(lhs)); - ConvertToDoubleStub stub1(r3, r2, r7, r6); - __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); - // Load rhs to a double in r0, r1. - __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); - __ pop(lr); - } - - // We now have both loaded as doubles but we can skip the lhs nan check - // since it's a smi. - __ jmp(lhs_not_nan); - - __ bind(&rhs_is_smi); - // Rhs is a smi. Check whether the non-smi lhs is a heap number. 
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); - if (strict) { - // If lhs is not a number and rhs is a smi then strict equality cannot - // succeed. Return non-equal. - // If lhs is r0 then there is already a non zero value in it. - if (!lhs.is(r0)) { - __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); - } - __ Ret(ne); - } else { - // Smi compared non-strictly with a non-smi non-heap-number. Call - // the runtime. - __ b(ne, slow); - } - - // Rhs is a smi, lhs is a heap number. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Load the double from lhs, tagged HeapNumber r1, to d7. - __ sub(r7, lhs, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); - // Convert rhs to a double in d6 . - __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); - } else { - __ push(lr); - // Load lhs to a double in r2, r3. - __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); - // Convert rhs to a double in r0, r1. - __ mov(r7, Operand(rhs)); - ConvertToDoubleStub stub2(r1, r0, r7, r6); - __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - // Fall through to both_loaded_as_doubles. -} - - -void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) { - bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); - Register rhs_exponent = exp_first ? r0 : r1; - Register lhs_exponent = exp_first ? r2 : r3; - Register rhs_mantissa = exp_first ? r1 : r0; - Register lhs_mantissa = exp_first ? r3 : r2; - Label one_is_nan, neither_is_nan; - - __ Sbfx(r4, - lhs_exponent, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r4, Operand(-1)); - __ b(ne, lhs_not_nan); - __ mov(r4, - Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), - SetCC); - __ b(ne, &one_is_nan); - __ cmp(lhs_mantissa, Operand(0)); - __ b(ne, &one_is_nan); - - __ bind(lhs_not_nan); - __ Sbfx(r4, - rhs_exponent, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r4, Operand(-1)); - __ b(ne, &neither_is_nan); - __ mov(r4, - Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), - SetCC); - __ b(ne, &one_is_nan); - __ cmp(rhs_mantissa, Operand(0)); - __ b(eq, &neither_is_nan); - - __ bind(&one_is_nan); - // NaN comparisons always fail. - // Load whatever we need in r0 to make the comparison fail. - if (cc == lt || cc == le) { - __ mov(r0, Operand(GREATER)); - } else { - __ mov(r0, Operand(LESS)); - } - __ Ret(); - - __ bind(&neither_is_nan); -} - - -// See comment at call site. -static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { - bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); - Register rhs_exponent = exp_first ? r0 : r1; - Register lhs_exponent = exp_first ? r2 : r3; - Register rhs_mantissa = exp_first ? r1 : r0; - Register lhs_mantissa = exp_first ? r3 : r2; - - // r0, r1, r2, r3 have the two doubles. Neither is a NaN. - if (cc == eq) { - // Doubles are not equal unless they have the same bit pattern. - // Exception: 0 and -0. - __ cmp(rhs_mantissa, Operand(lhs_mantissa)); - __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); - // Return non-zero if the numbers are unequal. - __ Ret(ne); - - __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); - // If exponents are equal then return 0. - __ Ret(eq); - - // Exponents are unequal. 
The only way we can return that the numbers - // are equal is if one is -0 and the other is 0. We already dealt - // with the case where both are -0 or both are 0. - // We start by seeing if the mantissas (that are equal) or the bottom - // 31 bits of the rhs exponent are non-zero. If so we return not - // equal. - __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC); - __ mov(r0, Operand(r4), LeaveCC, ne); - __ Ret(ne); - // Now they are equal if and only if the lhs exponent is zero in its - // low 31 bits. - __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); - __ Ret(); - } else { - // Call a native function to do a comparison between two non-NaNs. - // Call C routine that may not cause GC or other trouble. - __ push(lr); - __ PrepareCallCFunction(4, r5); // Two doubles count as 4 arguments. - __ CallCFunction(ExternalReference::compare_doubles(), 4); - __ pop(pc); // Return. - } -} - - -// See comment at call site. -static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, - Register lhs, - Register rhs) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0))); - - // If either operand is a JSObject or an oddball value, then they are - // not equal since their pointers are different. - // There is no test for undetectability in strict equality. - STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); - Label first_non_object; - // Get the type of the first operand into r2 and compare it with - // FIRST_JS_OBJECT_TYPE. - __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE); - __ b(lt, &first_non_object); - - // Return non-zero (r0 is not zero) - Label return_not_equal; - __ bind(&return_not_equal); - __ Ret(); - - __ bind(&first_non_object); - // Check for oddballs: true, false, null, undefined. - __ cmp(r2, Operand(ODDBALL_TYPE)); - __ b(eq, &return_not_equal); - - __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE); - __ b(ge, &return_not_equal); - - // Check for oddballs: true, false, null, undefined. - __ cmp(r3, Operand(ODDBALL_TYPE)); - __ b(eq, &return_not_equal); - - // Now that we have the types we might as well check for symbol-symbol. - // Ensure that no non-strings have the symbol bit set. - STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); - STATIC_ASSERT(kSymbolTag != 0); - __ and_(r2, r2, Operand(r3)); - __ tst(r2, Operand(kIsSymbolMask)); - __ b(ne, &return_not_equal); -} - - -// See comment at call site. -static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* both_loaded_as_doubles, - Label* not_heap_numbers, - Label* slow) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0))); - - __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); - __ b(ne, not_heap_numbers); - __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); - __ cmp(r2, r3); - __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. - - // Both are heap numbers. Load them up then jump to the code we have - // for that. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ sub(r7, rhs, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - __ sub(r7, lhs, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); - } else { - __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); - __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); - } - __ jmp(both_loaded_as_doubles); -} - - -// Fast negative check for symbol-to-symbol equality. 
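When the comparison above is for equality (cc == eq), two non-NaN doubles are treated as equal exactly when their bit patterns match, with +0 and -0 as the single exception. A hedged standalone sketch of that rule; the function name is illustrative:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Non-NaN doubles are equal iff their bit patterns match, except that
    // +0.0 and -0.0 compare equal despite differing in the sign bit.
    static bool NonNanDoublesEqual(double a, double b) {
      uint64_t ab, bb;
      std::memcpy(&ab, &a, sizeof(ab));
      std::memcpy(&bb, &b, sizeof(bb));
      if (ab == bb) return true;                 // identical bit patterns
      const uint64_t kSignMask = uint64_t{1} << 63;
      return ((ab | bb) & ~kSignMask) == 0;      // both operands are some zero
    }

    int main() {
      assert(NonNanDoublesEqual(0.0, -0.0));
      assert(NonNanDoublesEqual(1.5, 1.5));
      assert(!NonNanDoublesEqual(1.0, 2.0));
      return 0;
    }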
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* possible_strings, - Label* not_both_strings) { - ASSERT((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0))); - - // r2 is object type of rhs. - // Ensure that no non-strings have the symbol bit set. - Label object_test; - STATIC_ASSERT(kSymbolTag != 0); - __ tst(r2, Operand(kIsNotStringMask)); - __ b(ne, &object_test); - __ tst(r2, Operand(kIsSymbolMask)); - __ b(eq, possible_strings); - __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); - __ b(ge, not_both_strings); - __ tst(r3, Operand(kIsSymbolMask)); - __ b(eq, possible_strings); - - // Both are symbols. We already checked they weren't the same pointer - // so they are not equal. - __ mov(r0, Operand(NOT_EQUAL)); - __ Ret(); - - __ bind(&object_test); - __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); - __ b(lt, not_both_strings); - __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE); - __ b(lt, not_both_strings); - // If both objects are undetectable, they are equal. Otherwise, they - // are not equal, since they are different objects and an object is not - // equal to undefined. - __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset)); - __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset)); - __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset)); - __ and_(r0, r2, Operand(r3)); - __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); - __ eor(r0, r0, Operand(1 << Map::kIsUndetectable)); - __ Ret(); -} - - -void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Register scratch3, - bool object_is_smi, - Label* not_found) { - // Use of registers. Register result is used as a temporary. - Register number_string_cache = result; - Register mask = scratch3; - - // Load the number string cache. - __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); - - // Make the hash mask from the length of the number string cache. It - // contains two elements (number and string) for each cache entry. - __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); - // Divide length by two (length is a smi). - __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); - __ sub(mask, mask, Operand(1)); // Make mask. - - // Calculate the entry in the number string cache. The hash value in the - // number string cache for smis is just the smi value, and the hash for - // doubles is the xor of the upper and lower words. See - // Heap::GetNumberStringCache. - Label is_smi; - Label load_result_from_cache; - if (!object_is_smi) { - __ BranchOnSmi(object, &is_smi); - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ CheckMap(object, - scratch1, - Heap::kHeapNumberMapRootIndex, - not_found, - true); - - STATIC_ASSERT(8 == kDoubleSize); - __ add(scratch1, - object, - Operand(HeapNumber::kValueOffset - kHeapObjectTag)); - __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); - __ eor(scratch1, scratch1, Operand(scratch2)); - __ and_(scratch1, scratch1, Operand(mask)); - - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields. 
- __ add(scratch1, - number_string_cache, - Operand(scratch1, LSL, kPointerSizeLog2 + 1)); - - Register probe = mask; - __ ldr(probe, - FieldMemOperand(scratch1, FixedArray::kHeaderSize)); - __ BranchOnSmi(probe, not_found); - __ sub(scratch2, object, Operand(kHeapObjectTag)); - __ vldr(d0, scratch2, HeapNumber::kValueOffset); - __ sub(probe, probe, Operand(kHeapObjectTag)); - __ vldr(d1, probe, HeapNumber::kValueOffset); - __ vcmp(d0, d1); - __ vmrs(pc); - __ b(ne, not_found); // The cache did not contain this value. - __ b(&load_result_from_cache); - } else { - __ b(not_found); - } - } - - __ bind(&is_smi); - Register scratch = scratch1; - __ and_(scratch, mask, Operand(object, ASR, 1)); - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields. - __ add(scratch, - number_string_cache, - Operand(scratch, LSL, kPointerSizeLog2 + 1)); - - // Check if the entry is the smi we are looking for. - Register probe = mask; - __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); - __ cmp(object, probe); - __ b(ne, not_found); - - // Get the result from the cache. - __ bind(&load_result_from_cache); - __ ldr(result, - FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); - __ IncrementCounter(&Counters::number_to_string_native, - 1, - scratch1, - scratch2); -} - - -void NumberToStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - __ ldr(r1, MemOperand(sp, 0)); - - // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); - __ add(sp, sp, Operand(1 * kPointerSize)); - __ Ret(); - - __ bind(&runtime); - // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); -} - - -void RecordWriteStub::Generate(MacroAssembler* masm) { - __ add(offset_, object_, Operand(offset_)); - __ RecordWriteHelper(object_, offset_, scratch_); - __ Ret(); -} - - -// On entry lhs_ and rhs_ are the values to be compared. -// On exit r0 is 0, positive or negative to indicate the result of -// the comparison. -void CompareStub::Generate(MacroAssembler* masm) { - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); - - Label slow; // Call builtin. - Label not_smis, both_loaded_as_doubles, lhs_not_nan; - - // NOTICE! This code is only reached after a smi-fast-case check, so - // it is certain that at least one operand isn't a smi. - - // Handle the case where the objects are identical. Either returns the answer - // or goes to slow. Only falls through if the objects were not identical. - EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); - - // If either is a Smi (we know that not both are), then they can only - // be strictly equal if the other is a HeapNumber. - STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(0, Smi::FromInt(0)); - __ and_(r2, lhs_, Operand(rhs_)); - __ tst(r2, Operand(kSmiTagMask)); - __ b(ne, ¬_smis); - // One operand is a smi. EmitSmiNonsmiComparison generates code that can: - // 1) Return the answer. - // 2) Go to slow. - // 3) Fall through to both_loaded_as_doubles. - // 4) Jump to lhs_not_nan. - // In cases 3 and 4 we have found out we were dealing with a number-number - // comparison. If VFP3 is supported the double values of the numbers have - // been loaded into d7 and d6. Otherwise, the double values have been loaded - // into r0, r1, r2, and r3. 
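The number-to-string cache lookup above indexes the cache with a very cheap hash: a smi hashes to its own value, a heap number to the xor of the two 32-bit halves of its double, and the result is masked by the (power-of-two) number of cache entries. A rough standalone sketch of that indexing; the function name and the entries parameter are illustrative, not V8 API:

    #include <cstdint>
    #include <cstring>

    // Smis hash to their own value, doubles to the xor of their two 32-bit
    // halves; the result is masked by (entries - 1), entries being a power
    // of two.
    static uint32_t NumberCacheIndex(double value, bool is_smi, uint32_t entries) {
      uint32_t hash;
      if (is_smi) {
        hash = static_cast<uint32_t>(static_cast<int32_t>(value));
      } else {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
      }
      return hash & (entries - 1);
    }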
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); - - __ bind(&both_loaded_as_doubles); - // The arguments have been converted to doubles and stored in d6 and d7, if - // VFP3 is supported, or in r0, r1, r2, and r3. - if (CpuFeatures::IsSupported(VFP3)) { - __ bind(&lhs_not_nan); - CpuFeatures::Scope scope(VFP3); - Label no_nan; - // ARMv7 VFP3 instructions to implement double precision comparison. - __ vcmp(d7, d6); - __ vmrs(pc); // Move vector status bits to normal status bits. - Label nan; - __ b(vs, &nan); - __ mov(r0, Operand(EQUAL), LeaveCC, eq); - __ mov(r0, Operand(LESS), LeaveCC, lt); - __ mov(r0, Operand(GREATER), LeaveCC, gt); - __ Ret(); - - __ bind(&nan); - // If one of the sides was a NaN then the v flag is set. Load r0 with - // whatever it takes to make the comparison fail, since comparisons with NaN - // always fail. - if (cc_ == lt || cc_ == le) { - __ mov(r0, Operand(GREATER)); - } else { - __ mov(r0, Operand(LESS)); - } - __ Ret(); - } else { - // Checks for NaN in the doubles we have loaded. Can return the answer or - // fall through if neither is a NaN. Also binds lhs_not_nan. - EmitNanCheck(masm, &lhs_not_nan, cc_); - // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the - // answer. Never falls through. - EmitTwoNonNanDoubleComparison(masm, cc_); - } - - __ bind(¬_smis); - // At this point we know we are dealing with two different objects, - // and neither of them is a Smi. The objects are in rhs_ and lhs_. - if (strict_) { - // This returns non-equal for some object types, or falls through if it - // was not lucky. - EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); - } - - Label check_for_symbols; - Label flat_string_check; - // Check for heap-number-heap-number comparison. Can jump to slow case, - // or load both doubles into r0, r1, r2, r3 and jump to the code that handles - // that case. If the inputs are not doubles then jumps to check_for_symbols. - // In this case r2 will contain the type of rhs_. Never falls through. - EmitCheckForTwoHeapNumbers(masm, - lhs_, - rhs_, - &both_loaded_as_doubles, - &check_for_symbols, - &flat_string_check); - - __ bind(&check_for_symbols); - // In the strict case the EmitStrictTwoHeapObjectCompare already took care of - // symbols. - if (cc_ == eq && !strict_) { - // Returns an answer for two symbols or two detectable objects. - // Otherwise jumps to string case or not both strings case. - // Assumes that r2 is the type of rhs_ on entry. - EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); - } - - // Check for both being sequential ASCII strings, and inline if that is the - // case. - __ bind(&flat_string_check); - - __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); - - __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); - StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - lhs_, - rhs_, - r2, - r3, - r4, - r5); - // Never falls through to here. - - __ bind(&slow); - - __ Push(lhs_, rhs_); - // Figure out which native to call and setup the arguments. - Builtins::JavaScript native; - if (cc_ == eq) { - native = strict_ ? 
Builtins::STRICT_EQUALS : Builtins::EQUALS; - } else { - native = Builtins::COMPARE; - int ncr; // NaN compare result - if (cc_ == lt || cc_ == le) { - ncr = GREATER; - } else { - ASSERT(cc_ == gt || cc_ == ge); // remaining cases - ncr = LESS; - } - __ mov(r0, Operand(Smi::FromInt(ncr))); - __ push(r0); - } - - // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - __ InvokeBuiltin(native, JUMP_JS); -} - - -// This stub does not handle the inlined cases (Smis, Booleans, undefined). -// The stub returns zero for false, and a non-zero value for true. -void ToBooleanStub::Generate(MacroAssembler* masm) { - Label false_result; - Label not_heap_number; - Register scratch0 = VirtualFrame::scratch0(); - - // HeapNumber => false iff +0, -0, or NaN. - __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch0, ip); - __ b(¬_heap_number, ne); - - __ sub(ip, tos_, Operand(kHeapObjectTag)); - __ vldr(d1, ip, HeapNumber::kValueOffset); - __ vcmp(d1, 0.0); - __ vmrs(pc); - // "tos_" is a register, and contains a non zero value by default. - // Hence we only need to overwrite "tos_" with zero to return false for - // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. - __ mov(tos_, Operand(0), LeaveCC, eq); // for FP_ZERO - __ mov(tos_, Operand(0), LeaveCC, vs); // for FP_NAN - __ Ret(); - - __ bind(¬_heap_number); - - // Check if the value is 'null'. - // 'null' => false. - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(tos_, ip); - __ b(&false_result, eq); - - // It can be an undetectable object. - // Undetectable => false. - __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ ldrb(scratch0, FieldMemOperand(ip, Map::kBitFieldOffset)); - __ and_(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); - __ cmp(scratch0, Operand(1 << Map::kIsUndetectable)); - __ b(&false_result, eq); - - // JavaScript object => true. - __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); - __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE)); - // "tos_" is a register and contains a non-zero value. - // Hence we implicitly return true if the greater than - // condition is satisfied. - __ Ret(gt); - - // Check for string - __ ldr(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); - __ ldrb(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); - __ cmp(scratch0, Operand(FIRST_NONSTRING_TYPE)); - // "tos_" is a register and contains a non-zero value. - // Hence we implicitly return true if the greater than - // condition is satisfied. - __ Ret(gt); - - // String value => false iff empty, i.e., length is zero - __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset)); - // If length is zero, "tos_" contains zero ==> false. - // If length is not zero, "tos_" contains a non-zero value ==> true. - __ Ret(); - - // Return 0 in "tos_" for false . - __ bind(&false_result); - __ mov(tos_, Operand(0)); - __ Ret(); -} - - -// We fall into this code if the operands were Smis, but the result was -// not (eg. overflow). We branch into this code (to the not_smi label) if -// the operands were not both Smi. The operands are in r0 and r1. In order -// to call the C-implemented binary fp operation routines we need to end up -// with the double precision floating point operands in r0 and r1 (for the -// value in r1) and r2 and r3 (for the value in r0). 
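The ToBooleanStub above encodes the usual JavaScript truthiness rules for the cases it handles (smis, booleans and undefined are dealt with before the stub is reached): a number is false only for +0, -0 and NaN; null and undetectable objects are false; other objects are true; and a string is false only when empty. A small illustrative sketch of those rules in plain C++, not the stub itself:

    #include <cmath>
    #include <string>

    // Numbers are truthy unless they are +0, -0 or NaN.
    static bool ToBooleanNumber(double v) { return v != 0.0 && !std::isnan(v); }

    // Strings are truthy unless they are empty.
    static bool ToBooleanString(const std::string& s) { return !s.empty(); }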
-void GenericBinaryOpStub::HandleBinaryOpSlowCases( - MacroAssembler* masm, - Label* not_smi, - Register lhs, - Register rhs, - const Builtins::JavaScript& builtin) { - Label slow, slow_reverse, do_the_call; - bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; - - ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); - Register heap_number_map = r6; - - if (ShouldGenerateSmiCode()) { - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - // Smi-smi case (overflow). - // Since both are Smis there is no heap number to overwrite, so allocate. - // The new heap number is in r5. r3 and r7 are scratch. - __ AllocateHeapNumber( - r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); - - // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, - // using registers d7 and d6 for the double values. - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); - __ vmov(s15, r7); - __ vcvt_f64_s32(d7, s15); - __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); - __ vmov(s13, r7); - __ vcvt_f64_s32(d6, s13); - if (!use_fp_registers) { - __ vmov(r2, r3, d7); - __ vmov(r0, r1, d6); - } - } else { - // Write Smi from rhs to r3 and r2 in double format. r9 is scratch. - __ mov(r7, Operand(rhs)); - ConvertToDoubleStub stub1(r3, r2, r7, r9); - __ push(lr); - __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); - // Write Smi from lhs to r1 and r0 in double format. r9 is scratch. - __ mov(r7, Operand(lhs)); - ConvertToDoubleStub stub2(r1, r0, r7, r9); - __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - __ jmp(&do_the_call); // Tail call. No return. - } - - // We branch here if at least one of r0 and r1 is not a Smi. - __ bind(not_smi); - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - // After this point we have the left hand side in r1 and the right hand side - // in r0. - if (lhs.is(r0)) { - __ Swap(r0, r1, ip); - } - - // The type transition also calculates the answer. - bool generate_code_to_calculate_answer = true; - - if (ShouldGenerateFPCode()) { - if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - GenerateTypeTransition(masm); // Tail call. - generate_code_to_calculate_answer = false; - break; - - default: - break; - } - } - - if (generate_code_to_calculate_answer) { - Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; - if (mode_ == NO_OVERWRITE) { - // In the case where there is no chance of an overwritable float we may - // as well do the allocation immediately while r0 and r1 are untouched. - __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); - } - - // Move r0 to a double in r2-r3. - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. - __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ cmp(r4, heap_number_map); - __ b(ne, &slow); - if (mode_ == OVERWRITE_RIGHT) { - __ mov(r5, Operand(r0)); // Overwrite this heap number. - } - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // Load the double from tagged HeapNumber r0 to d7. - __ sub(r7, r0, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); - } else { - // Calling convention says that second double is in r2 and r3. 
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); - } - __ jmp(&finished_loading_r0); - __ bind(&r0_is_smi); - if (mode_ == OVERWRITE_RIGHT) { - // We can't overwrite a Smi so get address of new heap number into r5. - __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); - } - - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Convert smi in r0 to double in d7. - __ mov(r7, Operand(r0, ASR, kSmiTagSize)); - __ vmov(s15, r7); - __ vcvt_f64_s32(d7, s15); - if (!use_fp_registers) { - __ vmov(r2, r3, d7); - } - } else { - // Write Smi from r0 to r3 and r2 in double format. - __ mov(r7, Operand(r0)); - ConvertToDoubleStub stub3(r3, r2, r7, r4); - __ push(lr); - __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - - // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. - // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. - Label r1_is_not_smi; - if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) { - __ tst(r1, Operand(kSmiTagMask)); - __ b(ne, &r1_is_not_smi); - GenerateTypeTransition(masm); // Tail call. - } - - __ bind(&finished_loading_r0); - - // Move r1 to a double in r0-r1. - __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. - __ bind(&r1_is_not_smi); - __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); - __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ cmp(r4, heap_number_map); - __ b(ne, &slow); - if (mode_ == OVERWRITE_LEFT) { - __ mov(r5, Operand(r1)); // Overwrite this heap number. - } - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // Load the double from tagged HeapNumber r1 to d6. - __ sub(r7, r1, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - } else { - // Calling convention says that first double is in r0 and r1. - __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); - } - __ jmp(&finished_loading_r1); - __ bind(&r1_is_smi); - if (mode_ == OVERWRITE_LEFT) { - // We can't overwrite a Smi so get address of new heap number into r5. - __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); - } - - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // Convert smi in r1 to double in d6. - __ mov(r7, Operand(r1, ASR, kSmiTagSize)); - __ vmov(s13, r7); - __ vcvt_f64_s32(d6, s13); - if (!use_fp_registers) { - __ vmov(r0, r1, d6); - } - } else { - // Write Smi from r1 to r1 and r0 in double format. - __ mov(r7, Operand(r1)); - ConvertToDoubleStub stub4(r1, r0, r7, r9); - __ push(lr); - __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - - __ bind(&finished_loading_r1); - } - - if (generate_code_to_calculate_answer || do_the_call.is_linked()) { - __ bind(&do_the_call); - // If we are inlining the operation using VFP3 instructions for - // add, subtract, multiply, or divide, the arguments are in d6 and d7. - if (use_fp_registers) { - CpuFeatures::Scope scope(VFP3); - // ARMv7 VFP3 instructions to implement - // double precision, add, subtract, multiply, divide. 
- - if (Token::MUL == op_) { - __ vmul(d5, d6, d7); - } else if (Token::DIV == op_) { - __ vdiv(d5, d6, d7); - } else if (Token::ADD == op_) { - __ vadd(d5, d6, d7); - } else if (Token::SUB == op_) { - __ vsub(d5, d6, d7); - } else { - UNREACHABLE(); - } - __ sub(r0, r5, Operand(kHeapObjectTag)); - __ vstr(d5, r0, HeapNumber::kValueOffset); - __ add(r0, r0, Operand(kHeapObjectTag)); - __ mov(pc, lr); - } else { - // If we did not inline the operation, then the arguments are in: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). - // r5: Address of heap number for result. - - __ push(lr); // For later. - __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. - // Call C routine that may not cause GC or other trouble. r5 is callee - // save. - __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); - // Store answer in the overwritable heap number. - #if !defined(USE_ARM_EABI) - // Double returned in fp coprocessor register 0 and 1, encoded as - // register cr8. Offsets must be divisible by 4 for coprocessor so we - // need to substract the tag from r5. - __ sub(r4, r5, Operand(kHeapObjectTag)); - __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); - #else - // Double returned in registers 0 and 1. - __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); - #endif - __ mov(r0, Operand(r5)); - // And we are done. - __ pop(pc); - } - } - } - - if (!generate_code_to_calculate_answer && - !slow_reverse.is_linked() && - !slow.is_linked()) { - return; - } - - if (lhs.is(r0)) { - __ b(&slow); - __ bind(&slow_reverse); - __ Swap(r0, r1, ip); - } - - heap_number_map = no_reg; // Don't use this any more from here on. - - // We jump to here if something goes wrong (one param is not a number of any - // sort or new-space allocation fails). - __ bind(&slow); - - // Push arguments to the stack - __ Push(r1, r0); - - if (Token::ADD == op_) { - // Test for string arguments before calling runtime. - // r1 : first argument - // r0 : second argument - // sp[0] : second argument - // sp[4] : first argument - - Label not_strings, not_string1, string1, string1_smi2; - __ tst(r1, Operand(kSmiTagMask)); - __ b(eq, ¬_string1); - __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, ¬_string1); - - // First argument is a a string, test second. - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &string1_smi2); - __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &string1); - - // First and second argument are strings. - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); - __ TailCallStub(&string_add_stub); - - __ bind(&string1_smi2); - // First argument is a string, second is a smi. Try to lookup the number - // string for the smi in the number string cache. - NumberToStringStub::GenerateLookupNumberStringCache( - masm, r0, r2, r4, r5, r6, true, &string1); - - // Replace second argument on stack and tailcall string add stub to make - // the result. - __ str(r2, MemOperand(sp, 0)); - __ TailCallStub(&string_add_stub); - - // Only first argument is a string. - __ bind(&string1); - __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS); - - // First argument was not a string, test second. 
- __ bind(¬_string1); - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, ¬_strings); - __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, ¬_strings); - - // Only second argument is a string. - __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); - - __ bind(¬_strings); - } - - __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. -} - - -// Tries to get a signed int32 out of a double precision floating point heap -// number. Rounds towards 0. Fastest for doubles that are in the ranges -// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds -// almost to the range of signed int32 values that are not Smis. Jumps to the -// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0 -// (excluding the endpoints). -static void GetInt32(MacroAssembler* masm, - Register source, - Register dest, - Register scratch, - Register scratch2, - Label* slow) { - Label right_exponent, done; - // Get exponent word. - __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); - // Get exponent alone in scratch2. - __ Ubfx(scratch2, - scratch, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // Load dest with zero. We use this either for the final shift or - // for the answer. - __ mov(dest, Operand(0)); - // Check whether the exponent matches a 32 bit signed int that is not a Smi. - // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is - // the exponent that we are fastest at and also the highest exponent we can - // handle here. - const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; - // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we - // split it up to avoid a constant pool entry. You can't do that in general - // for cmp because of the overflow flag, but we know the exponent is in the - // range 0-2047 so there is no overflow. - int fudge_factor = 0x400; - __ sub(scratch2, scratch2, Operand(fudge_factor)); - __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); - // If we have a match of the int32-but-not-Smi exponent then skip some logic. - __ b(eq, &right_exponent); - // If the exponent is higher than that then go to slow case. This catches - // numbers that don't fit in a signed int32, infinities and NaNs. - __ b(gt, slow); - - // We know the exponent is smaller than 30 (biased). If it is less than - // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie - // it rounds to zero. - const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; - __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC); - // Dest already has a Smi zero. - __ b(lt, &done); - if (!CpuFeatures::IsSupported(VFP3)) { - // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to - // get how much to shift down. - __ rsb(dest, scratch2, Operand(30)); - } - __ bind(&right_exponent); - if (CpuFeatures::IsSupported(VFP3)) { - CpuFeatures::Scope scope(VFP3); - // ARMv7 VFP3 instructions implementing double precision to integer - // conversion using round to zero. - __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); - __ vmov(d7, scratch2, scratch); - __ vcvt_s32_f64(s15, d7); - __ vmov(dest, s15); - } else { - // Get the top bits of the mantissa. - __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); - // Put back the implicit 1. - __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); - // Shift up the mantissa bits to take up the space the exponent used to - // take. 
We just orred in the implicit bit so that took care of one and - // we want to leave the sign bit 0 so we subtract 2 bits from the shift - // distance. - const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; - __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); - // Put sign in zero flag. - __ tst(scratch, Operand(HeapNumber::kSignMask)); - // Get the second half of the double. For some exponents we don't - // actually need this because the bits get shifted out again, but - // it's probably slower to test than just to do it. - __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); - // Shift down 22 bits to get the last 10 bits. - __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); - // Move down according to the exponent. - __ mov(dest, Operand(scratch, LSR, dest)); - // Fix sign if sign bit was set. - __ rsb(dest, dest, Operand(0), LeaveCC, ne); - } - __ bind(&done); -} - -// For bitwise ops where the inputs are not both Smis we here try to determine -// whether both inputs are either Smis or at least heap numbers that can be -// represented by a 32 bit signed value. We truncate towards zero as required -// by the ES spec. If this is the case we do the bitwise op and see if the -// result is a Smi. If so, great, otherwise we try to find a heap number to -// write the answer into (either by allocating or by overwriting). -// On entry the operands are in lhs and rhs. On exit the answer is in r0. -void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, - Register lhs, - Register rhs) { - Label slow, result_not_a_smi; - Label rhs_is_smi, lhs_is_smi; - Label done_checking_rhs, done_checking_lhs; - - Register heap_number_map = r6; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - __ tst(lhs, Operand(kSmiTagMask)); - __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. - __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); - __ cmp(r4, heap_number_map); - __ b(ne, &slow); - GetInt32(masm, lhs, r3, r5, r4, &slow); - __ jmp(&done_checking_lhs); - __ bind(&lhs_is_smi); - __ mov(r3, Operand(lhs, ASR, 1)); - __ bind(&done_checking_lhs); - - __ tst(rhs, Operand(kSmiTagMask)); - __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. - __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); - __ cmp(r4, heap_number_map); - __ b(ne, &slow); - GetInt32(masm, rhs, r2, r5, r4, &slow); - __ jmp(&done_checking_rhs); - __ bind(&rhs_is_smi); - __ mov(r2, Operand(rhs, ASR, 1)); - __ bind(&done_checking_rhs); - - ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); - - // r0 and r1: Original operands (Smi or heap numbers). - // r2 and r3: Signed int32 operands. - switch (op_) { - case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break; - case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; - case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; - case Token::SAR: - // Use only the 5 least significant bits of the shift count. - __ and_(r2, r2, Operand(0x1f)); - __ mov(r2, Operand(r3, ASR, r2)); - break; - case Token::SHR: - // Use only the 5 least significant bits of the shift count. - __ and_(r2, r2, Operand(0x1f)); - __ mov(r2, Operand(r3, LSR, r2), SetCC); - // SHR is special because it is required to produce a positive answer. - // The code below for writing into heap numbers isn't capable of writing - // the register as an unsigned int so we go to slow case if we hit this - // case. 
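GetInt32 above truncates a heap number toward zero and bails out to the slow path for anything that cannot be represented as a signed 32-bit integer, including infinities and NaNs. A behavioural sketch of that contract in portable C++; it mirrors the range check and rounding mode, not the bit-level mantissa shifting, and the names are illustrative:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Values outside (-2^31, 2^31), infinities and NaNs take the slow path;
    // everything else is truncated toward zero.
    static bool TruncateDoubleToInt32(double value, int32_t* out) {
      if (!(value > -2147483648.0 && value < 2147483648.0)) return false;
      *out = static_cast<int32_t>(std::trunc(value));
      return true;
    }

    int main() {
      int32_t r;
      assert(TruncateDoubleToInt32(-1.9, &r) && r == -1);
      assert(TruncateDoubleToInt32(2147483647.5, &r) && r == 2147483647);
      assert(!TruncateDoubleToInt32(3e9, &r));
      return 0;
    }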
- if (CpuFeatures::IsSupported(VFP3)) { - __ b(mi, &result_not_a_smi); - } else { - __ b(mi, &slow); - } - break; - case Token::SHL: - // Use only the 5 least significant bits of the shift count. - __ and_(r2, r2, Operand(0x1f)); - __ mov(r2, Operand(r3, LSL, r2)); - break; - default: UNREACHABLE(); - } - // check that the *signed* result fits in a smi - __ add(r3, r2, Operand(0x40000000), SetCC); - __ b(mi, &result_not_a_smi); - __ mov(r0, Operand(r2, LSL, kSmiTagSize)); - __ Ret(); - - Label have_to_allocate, got_a_heap_number; - __ bind(&result_not_a_smi); - switch (mode_) { - case OVERWRITE_RIGHT: { - __ tst(rhs, Operand(kSmiTagMask)); - __ b(eq, &have_to_allocate); - __ mov(r5, Operand(rhs)); - break; - } - case OVERWRITE_LEFT: { - __ tst(lhs, Operand(kSmiTagMask)); - __ b(eq, &have_to_allocate); - __ mov(r5, Operand(lhs)); - break; - } - case NO_OVERWRITE: { - // Get a new heap number in r5. r4 and r7 are scratch. - __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); - } - default: break; - } - __ bind(&got_a_heap_number); - // r2: Answer as signed int32. - // r5: Heap number to write answer into. - - // Nothing can go wrong now, so move the heap number to r0, which is the - // result. - __ mov(r0, Operand(r5)); - - if (CpuFeatures::IsSupported(VFP3)) { - // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. - CpuFeatures::Scope scope(VFP3); - __ vmov(s0, r2); - if (op_ == Token::SHR) { - __ vcvt_f64_u32(d0, s0); - } else { - __ vcvt_f64_s32(d0, s0); - } - __ sub(r3, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r3, HeapNumber::kValueOffset); - __ Ret(); - } else { - // Tail call that writes the int32 in r2 to the heap number in r0, using - // r3 as scratch. r0 is preserved and returned. - WriteInt32ToHeapNumberStub stub(r2, r0, r3); - __ TailCallStub(&stub); - } - - if (mode_ != NO_OVERWRITE) { - __ bind(&have_to_allocate); - // Get a new heap number in r5. r4 and r7 are scratch. - __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); - __ jmp(&got_a_heap_number); - } - - // If all else failed then we go to the runtime system. - __ bind(&slow); - __ Push(lhs, rhs); // Restore stack. - switch (op_) { - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_JS); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_JS); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_JS); - break; - default: - UNREACHABLE(); - } -} - - -// Can we multiply by x with max two shifts and an add. -// This answers yes to all integers from 2 to 10. -static bool IsEasyToMultiplyBy(int x) { - if (x < 2) return false; // Avoid special cases. - if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows. - if (IsPowerOf2(x)) return true; // Simple shift. - if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift. - if (IsPowerOf2(x + 1)) return true; // Patterns like 11111. - return false; -} - - -// Can multiply by anything that IsEasyToMultiplyBy returns true for. -// Source and destination may be the same register. This routine does -// not set carry and overflow the way a mul instruction would. 
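IsEasyToMultiplyBy above accepts constants that can be multiplied by with at most two shifts and an add; the emitter that follows turns them into a single shift (powers of two), a shift plus an add (two set bits), or a shift and a reverse subtract (constants of the form 2^k - 1). A standalone sketch of those decompositions, assuming a GCC/Clang-style compiler for the __builtin_* bit helpers; the function name is illustrative:

    #include <cassert>
    #include <cstdint>

    // Multiply by an "easy" constant using only shifts, adds and subtracts.
    static int32_t MultiplyByEasyConstant(int32_t x, int k) {
      if ((k & (k - 1)) == 0) {                  // power of two: one shift
        return x << __builtin_ctz(k);
      }
      if (__builtin_popcount(k) == 2) {          // two bits set: shift and add
        int low = __builtin_ctz(k);
        int high = 31 - __builtin_clz(k);
        return (x << high) + (x << low);
      }
      assert(((k + 1) & k) == 0);                // pattern like 0b111: (x << n) - x
      return (x << __builtin_ctz(k + 1)) - x;
    }

    int main() {
      for (int k = 2; k <= 10; ++k) assert(MultiplyByEasyConstant(7, k) == 7 * k);
      return 0;
    }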
-static void MultiplyByKnownInt(MacroAssembler* masm, - Register source, - Register destination, - int known_int) { - if (IsPowerOf2(known_int)) { - __ mov(destination, Operand(source, LSL, BitPosition(known_int))); - } else if (PopCountLessThanEqual2(known_int)) { - int first_bit = BitPosition(known_int); - int second_bit = BitPosition(known_int ^ (1 << first_bit)); - __ add(destination, source, Operand(source, LSL, second_bit - first_bit)); - if (first_bit != 0) { - __ mov(destination, Operand(destination, LSL, first_bit)); - } - } else { - ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111. - int the_bit = BitPosition(known_int + 1); - __ rsb(destination, source, Operand(source, LSL, the_bit)); - } -} - - -// This function (as opposed to MultiplyByKnownInt) takes the known int in a -// a register for the cases where it doesn't know a good trick, and may deliver -// a result that needs shifting. -static void MultiplyByKnownInt2( - MacroAssembler* masm, - Register result, - Register source, - Register known_int_register, // Smi tagged. - int known_int, - int* required_shift) { // Including Smi tag shift - switch (known_int) { - case 3: - __ add(result, source, Operand(source, LSL, 1)); - *required_shift = 1; - break; - case 5: - __ add(result, source, Operand(source, LSL, 2)); - *required_shift = 1; - break; - case 6: - __ add(result, source, Operand(source, LSL, 1)); - *required_shift = 2; - break; - case 7: - __ rsb(result, source, Operand(source, LSL, 3)); - *required_shift = 1; - break; - case 9: - __ add(result, source, Operand(source, LSL, 3)); - *required_shift = 1; - break; - case 10: - __ add(result, source, Operand(source, LSL, 2)); - *required_shift = 2; - break; - default: - ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. - __ mul(result, source, known_int_register); - *required_shift = 0; - } -} - - -// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3 -// trick. See http://en.wikipedia.org/wiki/Divisibility_rule -// Takes the sum of the digits base (mask + 1) repeatedly until we have a -// number from 0 to mask. On exit the 'eq' condition flags are set if the -// answer is exactly the mask. -void IntegerModStub::DigitSum(MacroAssembler* masm, - Register lhs, - int mask, - int shift, - Label* entry) { - ASSERT(mask > 0); - ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. - Label loop; - __ bind(&loop); - __ and_(ip, lhs, Operand(mask)); - __ add(lhs, ip, Operand(lhs, LSR, shift)); - __ bind(entry); - __ cmp(lhs, Operand(mask)); - __ b(gt, &loop); -} - - -void IntegerModStub::DigitSum(MacroAssembler* masm, - Register lhs, - Register scratch, - int mask, - int shift1, - int shift2, - Label* entry) { - ASSERT(mask > 0); - ASSERT(mask <= 0xff); // This ensures we don't need ip to use it. - Label loop; - __ bind(&loop); - __ bic(scratch, lhs, Operand(mask)); - __ and_(ip, lhs, Operand(mask)); - __ add(lhs, ip, Operand(lhs, LSR, shift1)); - __ add(lhs, lhs, Operand(scratch, LSR, shift2)); - __ bind(entry); - __ cmp(lhs, Operand(mask)); - __ b(gt, &loop); -} - - -// Splits the number into two halves (bottom half has shift bits). The top -// half is subtracted from the bottom half. If the result is negative then -// rhs is added. 
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm, - Register lhs, - int shift, - int rhs) { - int mask = (1 << shift) - 1; - __ and_(ip, lhs, Operand(mask)); - __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC); - __ add(lhs, lhs, Operand(rhs), LeaveCC, mi); -} - - -void IntegerModStub::ModReduce(MacroAssembler* masm, - Register lhs, - int max, - int denominator) { - int limit = denominator; - while (limit * 2 <= max) limit *= 2; - while (limit >= denominator) { - __ cmp(lhs, Operand(limit)); - __ sub(lhs, lhs, Operand(limit), LeaveCC, ge); - limit >>= 1; - } -} - - -void IntegerModStub::ModAnswer(MacroAssembler* masm, - Register result, - Register shift_distance, - Register mask_bits, - Register sum_of_digits) { - __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance)); - __ Ret(); -} - - -// See comment for class. -void IntegerModStub::Generate(MacroAssembler* masm) { - __ mov(lhs_, Operand(lhs_, LSR, shift_distance_)); - __ bic(odd_number_, odd_number_, Operand(1)); - __ mov(odd_number_, Operand(odd_number_, LSL, 1)); - // We now have (odd_number_ - 1) * 2 in the register. - // Build a switch out of branches instead of data because it avoids - // having to teach the assembler about intra-code-object pointers - // that are not in relative branch instructions. - Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19; - Label mod21, mod23, mod25; - { Assembler::BlockConstPoolScope block_const_pool(masm); - __ add(pc, pc, Operand(odd_number_)); - // When you read pc it is always 8 ahead, but when you write it you always - // write the actual value. So we put in two nops to take up the slack. - __ nop(); - __ nop(); - __ b(&mod3); - __ b(&mod5); - __ b(&mod7); - __ b(&mod9); - __ b(&mod11); - __ b(&mod13); - __ b(&mod15); - __ b(&mod17); - __ b(&mod19); - __ b(&mod21); - __ b(&mod23); - __ b(&mod25); - } - - // For each denominator we find a multiple that is almost only ones - // when expressed in binary. Then we do the sum-of-digits trick for - // that number. If the multiple is not 1 then we have to do a little - // more work afterwards to get the answer into the 0-denominator-1 - // range. - DigitSum(masm, lhs_, 3, 2, &mod3); // 3 = b11. - __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0xf, 4, &mod5); // 5 * 3 = b1111. - ModGetInRangeBySubtraction(masm, lhs_, 2, 5); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 7, 3, &mod7); // 7 = b111. - __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0x3f, 6, &mod9); // 7 * 9 = b111111. - ModGetInRangeBySubtraction(masm, lhs_, 3, 9); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11); // 5 * 11 = b110111. - ModReduce(masm, lhs_, 0x3f, 11); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13); // 19 * 13 = b11110111. - ModReduce(masm, lhs_, 0xff, 13); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0xf, 4, &mod15); // 15 = b1111. - __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0xff, 8, &mod17); // 15 * 17 = b11111111. 
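The IntegerModStub above relies on the classic digit-sum divisibility trick: summing the digits of a number in base (mask + 1) preserves its value modulo any divisor of mask, so a few iterations reduce the operand into the 0..mask range before a small fix-up. A self-contained sketch for the mod-3 case (mask 3, i.e. base 4, shift 2); the names are illustrative only:

    #include <cassert>
    #include <cstdint>

    // Sum base-4 digits until the value fits in two bits; since 4 == 1
    // (mod 3) the sum stays congruent to n mod 3, and only the value 3
    // needs a final fix-up.
    static uint32_t Mod3ByDigitSum(uint32_t n) {
      while (n > 3) {
        n = (n & 3) + (n >> 2);
      }
      return (n == 3) ? 0 : n;
    }

    int main() {
      for (uint32_t i = 0; i < 1000; ++i) assert(Mod3ByDigitSum(i) == i % 3);
      return 0;
    }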
- ModGetInRangeBySubtraction(masm, lhs_, 4, 17); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19); // 13 * 19 = b11110111. - ModReduce(masm, lhs_, 0xff, 19); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, 0x3f, 6, &mod21); // 3 * 21 = b111111. - ModReduce(masm, lhs_, 0x3f, 21); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23); // 11 * 23 = b11111101. - ModReduce(masm, lhs_, 0xff, 23); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); - - DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25); // 5 * 25 = b1111101. - ModReduce(masm, lhs_, 0x7f, 25); - ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); -} - - -const char* GenericBinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int len = 100; - name_ = Bootstrapper::AllocateAutoDeletedArray(len); - if (name_ == NULL) return "OOM"; - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - - OS::SNPrintF(Vector(name_, len), - "GenericBinaryOpStub_%s_%s%s_%s", - op_name, - overwrite_name, - specialized_on_rhs_ ? "_ConstantRhs" : "", - BinaryOpIC::GetName(runtime_operands_type_)); - return name_; -} - - - -void GenericBinaryOpStub::Generate(MacroAssembler* masm) { - // lhs_ : x - // rhs_ : y - // r0 : result - - Register result = r0; - Register lhs = lhs_; - Register rhs = rhs_; - - // This code can't cope with other register allocations yet. - ASSERT(result.is(r0) && - ((lhs.is(r0) && rhs.is(r1)) || - (lhs.is(r1) && rhs.is(r0)))); - - Register smi_test_reg = VirtualFrame::scratch0(); - Register scratch = VirtualFrame::scratch1(); - - // All ops need to know whether we are dealing with two Smis. Set up - // smi_test_reg to tell us that. - if (ShouldGenerateSmiCode()) { - __ orr(smi_test_reg, lhs, Operand(rhs)); - } - - switch (op_) { - case Token::ADD: { - Label not_smi; - // Fast path. - if (ShouldGenerateSmiCode()) { - STATIC_ASSERT(kSmiTag == 0); // Adjust code below. - __ tst(smi_test_reg, Operand(kSmiTagMask)); - __ b(ne, ¬_smi); - __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically. - // Return if no overflow. - __ Ret(vc); - __ sub(r0, r0, Operand(r1)); // Revert optimistic add. - } - HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::ADD); - break; - } - - case Token::SUB: { - Label not_smi; - // Fast path. - if (ShouldGenerateSmiCode()) { - STATIC_ASSERT(kSmiTag == 0); // Adjust code below. - __ tst(smi_test_reg, Operand(kSmiTagMask)); - __ b(ne, ¬_smi); - if (lhs.is(r1)) { - __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically. - // Return if no overflow. - __ Ret(vc); - __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract. - } else { - __ sub(r0, r0, Operand(r1), SetCC); // Subtract y optimistically. - // Return if no overflow. - __ Ret(vc); - __ add(r0, r0, Operand(r1)); // Revert optimistic subtract. 
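The smi fast paths above add or subtract the tagged values directly and fall back to the slow case when the flags report signed overflow, which works because smis are 31-bit integers stored shifted left by the one-bit tag. A rough C++ equivalent of the optimistic add, assuming GCC/Clang's __builtin_add_overflow; the helper name is illustrative:

    #include <cassert>
    #include <cstdint>

    // Adding two tagged smis directly yields the correctly tagged sum;
    // a signed overflow of the tagged add means the result does not fit
    // in a smi and the slow case must be taken.
    static bool AddSmis(int32_t lhs_tagged, int32_t rhs_tagged, int32_t* result_tagged) {
      int32_t sum;
      if (__builtin_add_overflow(lhs_tagged, rhs_tagged, &sum)) {
        return false;              // overflow: fall through to the slow case
      }
      *result_tagged = sum;        // still a valid tagged smi
      return true;
    }

    int main() {
      int32_t a = 5 << 1, b = 7 << 1, r;
      assert(AddSmis(a, b, &r) && (r >> 1) == 12);
      return 0;
    }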
- } - } - HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::SUB); - break; - } - - case Token::MUL: { - Label not_smi, slow; - if (ShouldGenerateSmiCode()) { - STATIC_ASSERT(kSmiTag == 0); // adjust code below - __ tst(smi_test_reg, Operand(kSmiTagMask)); - Register scratch2 = smi_test_reg; - smi_test_reg = no_reg; - __ b(ne, ¬_smi); - // Remove tag from one operand (but keep sign), so that result is Smi. - __ mov(ip, Operand(rhs, ASR, kSmiTagSize)); - // Do multiplication - // scratch = lower 32 bits of ip * lhs. - __ smull(scratch, scratch2, lhs, ip); - // Go slow on overflows (overflow bit is not set). - __ mov(ip, Operand(scratch, ASR, 31)); - // No overflow if higher 33 bits are identical. - __ cmp(ip, Operand(scratch2)); - __ b(ne, &slow); - // Go slow on zero result to handle -0. - __ tst(scratch, Operand(scratch)); - __ mov(result, Operand(scratch), LeaveCC, ne); - __ Ret(ne); - // We need -0 if we were multiplying a negative number with 0 to get 0. - // We know one of them was zero. - __ add(scratch2, rhs, Operand(lhs), SetCC); - __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl); - __ Ret(pl); // Return Smi 0 if the non-zero one was positive. - // Slow case. We fall through here if we multiplied a negative number - // with 0, because that would mean we should produce -0. - __ bind(&slow); - } - HandleBinaryOpSlowCases(masm, ¬_smi, lhs, rhs, Builtins::MUL); - break; - } - - case Token::DIV: - case Token::MOD: { - Label not_smi; - if (ShouldGenerateSmiCode() && specialized_on_rhs_) { - Label lhs_is_unsuitable; - __ BranchOnNotSmi(lhs, ¬_smi); - if (IsPowerOf2(constant_rhs_)) { - if (op_ == Token::MOD) { - __ and_(rhs, - lhs, - Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), - SetCC); - // We now have the answer, but if the input was negative we also - // have the sign bit. Our work is done if the result is - // positive or zero: - if (!rhs.is(r0)) { - __ mov(r0, rhs, LeaveCC, pl); - } - __ Ret(pl); - // A mod of a negative left hand side must return a negative number. - // Unfortunately if the answer is 0 then we must return -0. And we - // already optimistically trashed rhs so we may need to restore it. - __ eor(rhs, rhs, Operand(0x80000000u), SetCC); - // Next two instructions are conditional on the answer being -0. - __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); - __ b(eq, &lhs_is_unsuitable); - // We need to subtract the dividend. Eg. -3 % 4 == -3. - __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); - } else { - ASSERT(op_ == Token::DIV); - __ tst(lhs, - Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); - __ b(ne, &lhs_is_unsuitable); // Go slow on negative or remainder. - int shift = 0; - int d = constant_rhs_; - while ((d & 1) == 0) { - d >>= 1; - shift++; - } - __ mov(r0, Operand(lhs, LSR, shift)); - __ bic(r0, r0, Operand(kSmiTagMask)); - } - } else { - // Not a power of 2. - __ tst(lhs, Operand(0x80000000u)); - __ b(ne, &lhs_is_unsuitable); - // Find a fixed point reciprocal of the divisor so we can divide by - // multiplying. - double divisor = 1.0 / constant_rhs_; - int shift = 32; - double scale = 4294967296.0; // 1 << 32. - uint32_t mul; - // Maximise the precision of the fixed point reciprocal. 
- while (true) { - mul = static_cast(scale * divisor); - if (mul >= 0x7fffffff) break; - scale *= 2.0; - shift++; - } - mul++; - Register scratch2 = smi_test_reg; - smi_test_reg = no_reg; - __ mov(scratch2, Operand(mul)); - __ umull(scratch, scratch2, scratch2, lhs); - __ mov(scratch2, Operand(scratch2, LSR, shift - 31)); - // scratch2 is lhs / rhs. scratch2 is not Smi tagged. - // rhs is still the known rhs. rhs is Smi tagged. - // lhs is still the unkown lhs. lhs is Smi tagged. - int required_scratch_shift = 0; // Including the Smi tag shift of 1. - // scratch = scratch2 * rhs. - MultiplyByKnownInt2(masm, - scratch, - scratch2, - rhs, - constant_rhs_, - &required_scratch_shift); - // scratch << required_scratch_shift is now the Smi tagged rhs * - // (lhs / rhs) where / indicates integer division. - if (op_ == Token::DIV) { - __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); - __ b(ne, &lhs_is_unsuitable); // There was a remainder. - __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); - } else { - ASSERT(op_ == Token::MOD); - __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); - } - } - __ Ret(); - __ bind(&lhs_is_unsuitable); - } else if (op_ == Token::MOD && - runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && - runtime_operands_type_ != BinaryOpIC::STRINGS) { - // Do generate a bit of smi code for modulus even though the default for - // modulus is not to do it, but as the ARM processor has no coprocessor - // support for modulus checking for smis makes sense. We can handle - // 1 to 25 times any power of 2. This covers over half the numbers from - // 1 to 100 including all of the first 25. (Actually the constants < 10 - // are handled above by reciprocal multiplication. We only get here for - // those cases if the right hand side is not a constant or for cases - // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod - // stub.) - Label slow; - Label not_power_of_2; - ASSERT(!ShouldGenerateSmiCode()); - STATIC_ASSERT(kSmiTag == 0); // Adjust code below. - // Check for two positive smis. - __ orr(smi_test_reg, lhs, Operand(rhs)); - __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); - __ b(ne, &slow); - // Check that rhs is a power of two and not zero. - Register mask_bits = r3; - __ sub(scratch, rhs, Operand(1), SetCC); - __ b(mi, &slow); - __ and_(mask_bits, rhs, Operand(scratch), SetCC); - __ b(ne, ¬_power_of_2); - // Calculate power of two modulus. - __ and_(result, lhs, Operand(scratch)); - __ Ret(); - - __ bind(¬_power_of_2); - __ eor(scratch, scratch, Operand(mask_bits)); - // At least two bits are set in the modulus. The high one(s) are in - // mask_bits and the low one is scratch + 1. - __ and_(mask_bits, scratch, Operand(lhs)); - Register shift_distance = scratch; - scratch = no_reg; - - // The rhs consists of a power of 2 multiplied by some odd number. - // The power-of-2 part we handle by putting the corresponding bits - // from the lhs in the mask_bits register, and the power in the - // shift_distance register. Shift distance is never 0 due to Smi - // tagging. - __ CountLeadingZeros(r4, shift_distance, shift_distance); - __ rsb(shift_distance, r4, Operand(32)); - - // Now we need to find out what the odd number is. The last bit is - // always 1. 
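The division-by-constant path above replaces the divide with a fixed-point reciprocal: mul is chosen as close to 2^shift / divisor as 32 bits allow, and a 32x32->64-bit multiply followed by a shift then yields the quotient. A standalone sketch of that construction; the loop mirrors the precision-maximising search above, and the names are illustrative:

    #include <cassert>
    #include <cstdint>

    struct Reciprocal { uint32_t mul; int shift; };

    // Pick mul ~= 2^shift / divisor with as much precision as fits in the
    // register, then round up by one so the truncated product never falls
    // short of the true quotient in the smi range.
    static Reciprocal MakeReciprocal(int divisor) {
      double inverse = 1.0 / divisor;
      double scale = 4294967296.0;   // 2^32
      int shift = 32;
      uint32_t mul;
      while (true) {
        mul = static_cast<uint32_t>(scale * inverse);
        if (mul >= 0x7fffffff) break;   // maximise the precision
        scale *= 2.0;
        shift++;
      }
      return Reciprocal{mul + 1, shift};
    }

    static uint32_t DivideByConstant(uint32_t value, Reciprocal r) {
      return static_cast<uint32_t>((static_cast<uint64_t>(value) * r.mul) >> r.shift);
    }

    int main() {
      Reciprocal r = MakeReciprocal(7);
      for (uint32_t v = 0; v < 100000; ++v) assert(DivideByConstant(v, r) == v / 7);
      return 0;
    }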
- Register odd_number = r4; - __ mov(odd_number, Operand(rhs, LSR, shift_distance)); - __ cmp(odd_number, Operand(25)); - __ b(gt, &slow); - - IntegerModStub stub( - result, shift_distance, odd_number, mask_bits, lhs, r5); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); // Tail call. - - __ bind(&slow); - } - HandleBinaryOpSlowCases( - masm, - ¬_smi, - lhs, - rhs, - op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); - break; - } - - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHR: - case Token::SHL: { - Label slow; - STATIC_ASSERT(kSmiTag == 0); // adjust code below - __ tst(smi_test_reg, Operand(kSmiTagMask)); - __ b(ne, &slow); - Register scratch2 = smi_test_reg; - smi_test_reg = no_reg; - switch (op_) { - case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break; - case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break; - case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break; - case Token::SAR: - // Remove tags from right operand. - __ GetLeastBitsFromSmi(scratch2, rhs, 5); - __ mov(result, Operand(lhs, ASR, scratch2)); - // Smi tag result. - __ bic(result, result, Operand(kSmiTagMask)); - break; - case Token::SHR: - // Remove tags from operands. We can't do this on a 31 bit number - // because then the 0s get shifted into bit 30 instead of bit 31. - __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x - __ GetLeastBitsFromSmi(scratch2, rhs, 5); - __ mov(scratch, Operand(scratch, LSR, scratch2)); - // Unsigned shift is not allowed to produce a negative number, so - // check the sign bit and the sign bit after Smi tagging. - __ tst(scratch, Operand(0xc0000000)); - __ b(ne, &slow); - // Smi tag result. - __ mov(result, Operand(scratch, LSL, kSmiTagSize)); - break; - case Token::SHL: - // Remove tags from operands. - __ mov(scratch, Operand(lhs, ASR, kSmiTagSize)); // x - __ GetLeastBitsFromSmi(scratch2, rhs, 5); - __ mov(scratch, Operand(scratch, LSL, scratch2)); - // Check that the signed result fits in a Smi. - __ add(scratch2, scratch, Operand(0x40000000), SetCC); - __ b(mi, &slow); - __ mov(result, Operand(scratch, LSL, kSmiTagSize)); - break; - default: UNREACHABLE(); - } - __ Ret(); - __ bind(&slow); - HandleNonSmiBitwiseOp(masm, lhs, rhs); - break; - } - - default: UNREACHABLE(); - } - // This code should be unreachable. - __ stop("Unreachable"); - - // Generate an unreachable reference to the DEFAULT stub so that it can be - // found at the end of this stub when clearing ICs at GC. - // TODO(kaznacheev): Check performance impact and get rid of this. - if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { - GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); - __ CallStub(&uninit); - } -} - - -void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - Label get_result; - - __ Push(r1, r0); - - __ mov(r2, Operand(Smi::FromInt(MinorKey()))); - __ mov(r1, Operand(Smi::FromInt(op_))); - __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); - __ Push(r2, r1, r0); - - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), - 5, - 1); -} - - -Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { - GenericBinaryOpStub stub(key, type_info); - return stub.GetCode(); -} - - -void TranscendentalCacheStub::Generate(MacroAssembler* masm) { - // Argument is a number and is on stack and in r0. - Label runtime_call; - Label input_not_smi; - Label loaded; - - if (CpuFeatures::IsSupported(VFP3)) { - // Load argument and check if it is a smi. 
- __ BranchOnNotSmi(r0, &input_not_smi);
-
- CpuFeatures::Scope scope(VFP3);
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &runtime_call,
- true);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
-
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- __ mov(r0,
- Operand(ExternalReference::transcendental_cache_array_address()));
- // r0 points to cache array.
- __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0])));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(r0, Operand(0));
- __ b(eq, &runtime_call);
-
-#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
-#endif
-
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(r0, r0, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ b(ne, &runtime_call);
- __ cmp(r3, r5);
- __ b(ne, &runtime_call);
- // Cache hit. Load result, pop argument and return.
- __ mov(r0, Operand(r6));
- __ pop();
- __ Ret();
- }
-
- __ bind(&runtime_call);
- __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
-}
-
-
-Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
- switch (type_) {
- // Add more cases when necessary.
- case TranscendentalCache::SIN: return Runtime::kMath_sin;
- case TranscendentalCache::COS: return Runtime::kMath_cos;
- default:
- UNIMPLEMENTED();
- return Runtime::kAbort;
- }
-}
-
-
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Do tail-call to runtime routine. Runtime routines expect at least one
- // argument, so give it a Smi.
- __ mov(r0, Operand(Smi::FromInt(0))); - __ push(r0); - __ TailCallRuntime(Runtime::kStackGuard, 1, 1); - - __ StubReturn(1); -} - - -void GenericUnaryOpStub::Generate(MacroAssembler* masm) { - Label slow, done; - - Register heap_number_map = r6; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - if (op_ == Token::SUB) { - // Check whether the value is a smi. - Label try_float; - __ tst(r0, Operand(kSmiTagMask)); - __ b(ne, &try_float); - - // Go slow case if the value of the expression is zero - // to make sure that we switch between 0 and -0. - if (negative_zero_ == kStrictNegativeZero) { - // If we have to check for zero, then we can check for the max negative - // smi while we are at it. - __ bic(ip, r0, Operand(0x80000000), SetCC); - __ b(eq, &slow); - __ rsb(r0, r0, Operand(0)); - __ StubReturn(1); - } else { - // The value of the expression is a smi and 0 is OK for -0. Try - // optimistic subtraction '0 - value'. - __ rsb(r0, r0, Operand(0), SetCC); - __ StubReturn(1, vc); - // We don't have to reverse the optimistic neg since the only case - // where we fall through is the minimum negative Smi, which is the case - // where the neg leaves the register unchanged. - __ jmp(&slow); // Go slow on max negative Smi. - } - - __ bind(&try_float); - __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ cmp(r1, heap_number_map); - __ b(ne, &slow); - // r0 is a heap number. Get a new heap number in r1. - if (overwrite_ == UNARY_OVERWRITE) { - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. - __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - } else { - __ AllocateHeapNumber(r1, r2, r3, r6, &slow); - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); - __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. - __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); - __ mov(r0, Operand(r1)); - } - } else if (op_ == Token::BIT_NOT) { - // Check if the operand is a heap number. - __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ cmp(r1, heap_number_map); - __ b(ne, &slow); - - // Convert the heap number is r0 to an untagged integer in r1. - GetInt32(masm, r0, r1, r2, r3, &slow); - - // Do the bitwise operation (move negated) and check if the result - // fits in a smi. - Label try_float; - __ mvn(r1, Operand(r1)); - __ add(r2, r1, Operand(0x40000000), SetCC); - __ b(mi, &try_float); - __ mov(r0, Operand(r1, LSL, kSmiTagSize)); - __ b(&done); - - __ bind(&try_float); - if (!overwrite_ == UNARY_OVERWRITE) { - // Allocate a fresh heap number, but don't overwrite r0 until - // we're sure we can do it without going through the slow case - // that needs the value in r0. - __ AllocateHeapNumber(r2, r3, r4, r6, &slow); - __ mov(r0, Operand(r2)); - } - - if (CpuFeatures::IsSupported(VFP3)) { - // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. - CpuFeatures::Scope scope(VFP3); - __ vmov(s0, r1); - __ vcvt_f64_s32(d0, s0); - __ sub(r2, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r2, HeapNumber::kValueOffset); - } else { - // WriteInt32ToHeapNumberStub does not trigger GC, so we do not - // have to set up a frame. 
- WriteInt32ToHeapNumberStub stub(r1, r0, r2); - __ push(lr); - __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); - } - } else { - UNIMPLEMENTED(); - } - - __ bind(&done); - __ StubReturn(1); - - // Handle the slow case by jumping to the JavaScript builtin. - __ bind(&slow); - __ push(r0); - switch (op_) { - case Token::SUB: - __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); - break; - case Token::BIT_NOT: - __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); - break; - default: - UNREACHABLE(); - } -} - - -void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - // r0 holds the exception. - - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop the sp to the top of the handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Restore the next handler and frame pointer, discard handler state. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. - - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); -} - - -void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type) { - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop sp to the top stack handler. - __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); - __ ldr(sp, MemOperand(r3)); - - // Unwind the handlers until the ENTRY handler is found. - Label loop, done; - __ bind(&loop); - // Load the type of the current stack handler. - const int kStateOffset = StackHandlerConstants::kStateOffset; - __ ldr(r2, MemOperand(sp, kStateOffset)); - __ cmp(r2, Operand(StackHandler::ENTRY)); - __ b(eq, &done); - // Fetch the next handler in the list. - const int kNextOffset = StackHandlerConstants::kNextOffset; - __ ldr(sp, MemOperand(sp, kNextOffset)); - __ jmp(&loop); - __ bind(&done); - - // Set the top handler address to next handler past the current ENTRY handler. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(r2); - __ str(r2, MemOperand(r3)); - - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. - ExternalReference external_caught(Top::k_external_caught_exception_address); - __ mov(r0, Operand(false)); - __ mov(r2, Operand(external_caught)); - __ str(r0, MemOperand(r2)); - - // Set pending exception and r0 to out of memory exception. - Failure* out_of_memory = Failure::OutOfMemoryException(); - __ mov(r0, Operand(reinterpret_cast(out_of_memory))); - __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); - __ str(r0, MemOperand(r2)); - } - - // Stack layout at this point. See also StackHandlerConstants. - // sp -> state (ENTRY) - // fp - // lr - - // Discard handler state (r2 is not used) and restore frame pointer. 
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); - __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of a - // JS entry frame. - __ cmp(fp, Operand(0)); - // Set cp to NULL if fp is NULL. - __ mov(cp, Operand(0), LeaveCC, eq); - // Restore cp otherwise. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ pop(pc); -} - - -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - Label* throw_out_of_memory_exception, - bool do_gc, - bool always_allocate, - int frame_alignment_skew) { - // r0: result parameter for PerformGC, if any - // r4: number of arguments including receiver (C callee-saved) - // r5: pointer to builtin function (C callee-saved) - // r6: pointer to the first argument (C callee-saved) - - if (do_gc) { - // Passing r0. - __ PrepareCallCFunction(1, r1); - __ CallCFunction(ExternalReference::perform_gc_function(), 1); - } - - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(); - if (always_allocate) { - __ mov(r0, Operand(scope_depth)); - __ ldr(r1, MemOperand(r0)); - __ add(r1, r1, Operand(1)); - __ str(r1, MemOperand(r0)); - } - - // Call C built-in. - // r0 = argc, r1 = argv - __ mov(r0, Operand(r4)); - __ mov(r1, Operand(r6)); - - int frame_alignment = MacroAssembler::ActivationFrameAlignment(); - int frame_alignment_mask = frame_alignment - 1; -#if defined(V8_HOST_ARCH_ARM) - if (FLAG_debug_code) { - if (frame_alignment > kPointerSize) { - Label alignment_as_expected; - ASSERT(IsPowerOf2(frame_alignment)); - __ sub(r2, sp, Operand(frame_alignment_skew)); - __ tst(r2, Operand(frame_alignment_mask)); - __ b(eq, &alignment_as_expected); - // Don't use Check here, as it will call Runtime_Abort re-entering here. - __ stop("Unexpected alignment"); - __ bind(&alignment_as_expected); - } - } -#endif - - // Just before the call (jump) below lr is pushed, so the actual alignment is - // adding one to the current skew. - int alignment_before_call = - (frame_alignment_skew + kPointerSize) & frame_alignment_mask; - if (alignment_before_call > 0) { - // Push until the alignment before the call is met. - __ mov(r2, Operand(0)); - for (int i = alignment_before_call; - (i & frame_alignment_mask) != 0; - i += kPointerSize) { - __ push(r2); - } - } - - // TODO(1242173): To let the GC traverse the return address of the exit - // frames, we need to know where the return address is. Right now, - // we push it on the stack to be able to find it again, but we never - // restore from it in case of changes, which makes it impossible to - // support moving the C entry code stub. This should be fixed, but currently - // this is OK because the CEntryStub gets generated so early in the V8 boot - // sequence that it is not moving ever. - masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4 - masm->push(lr); - masm->Jump(r5); - - // Restore sp back to before aligning the stack. - if (alignment_before_call > 0) { - __ add(sp, sp, Operand(alignment_before_call)); - } - - if (always_allocate) { - // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 - // though (contain the result). 
- __ mov(r2, Operand(scope_depth)); - __ ldr(r3, MemOperand(r2)); - __ sub(r3, r3, Operand(1)); - __ str(r3, MemOperand(r2)); - } - - // check for failure result - Label failure_returned; - STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); - // Lower 2 bits of r2 are 0 iff r0 has failure tag. - __ add(r2, r0, Operand(1)); - __ tst(r2, Operand(kFailureTagMask)); - __ b(eq, &failure_returned); - - // Exit C frame and return. - // r0:r1: result - // sp: stack pointer - // fp: frame pointer - __ LeaveExitFrame(mode_); - - // check if we should retry or throw exception - Label retry; - __ bind(&failure_returned); - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); - __ b(eq, &retry); - - // Special handling of out of memory exceptions. - Failure* out_of_memory = Failure::OutOfMemoryException(); - __ cmp(r0, Operand(reinterpret_cast(out_of_memory))); - __ b(eq, throw_out_of_memory_exception); - - // Retrieve the pending exception and clear the variable. - __ mov(ip, Operand(ExternalReference::the_hole_value_location())); - __ ldr(r3, MemOperand(ip)); - __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); - __ ldr(r0, MemOperand(ip)); - __ str(r3, MemOperand(ip)); - - // Special handling of termination exceptions which are uncatchable - // by javascript code. - __ cmp(r0, Operand(Factory::termination_exception())); - __ b(eq, throw_termination_exception); - - // Handle normal exception. - __ jmp(throw_normal_exception); - - __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // Called from JavaScript; parameters are on stack as if calling JS function - // r0: number of arguments including receiver - // r1: pointer to builtin function - // fp: frame pointer (restored after C call) - // sp: stack pointer (restored as callee's sp after C call) - // cp: current context (C callee-saved) - - // Result returned in r0 or r0+r1 by default. - - // NOTE: Invocations of builtins may return failure objects - // instead of a proper result. The builtin entry handles - // this by performing a garbage collection and retrying the - // builtin once. - - // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(mode_); - - // r4: number of arguments (C callee-saved) - // r5: pointer to builtin function (C callee-saved) - // r6: pointer to first argument (C callee-saved) - - Label throw_normal_exception; - Label throw_termination_exception; - Label throw_out_of_memory_exception; - - // Call into the runtime system. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - false, - false, - -kPointerSize); - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - true, - false, - 0); - - // Do full GC and retry runtime call one final time. 
- Failure* failure = Failure::InternalError();
- __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true,
- kPointerSize);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // [sp+0]: argv
-
- Label invoke, exit;
-
- // Called from C, so do not pop argc and args on exit (preserve sp)
- // No need to save register-passed args
- // Save callee-saved registers (incl. cp and fp), sp, and lr
- __ stm(db_w, sp, kCalleeSaved | lr.bit());
-
- // Get address of argv, see stm above.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize)); // argv
-
- // Push a frame with special values setup to mark it as an entry frame.
- // r0: code entry
- // r1: function
- // r2: receiver
- // r3: argc
- // r4: argv
- __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ mov(r7, Operand(Smi::FromInt(marker)));
- __ mov(r6, Operand(Smi::FromInt(marker)));
- __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
- __ ldr(r5, MemOperand(r5));
- __ Push(r8, r7, r6, r5);
-
- // Setup frame pointer for the frame to be pushed.
- __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-
- // Call a faked try-block that does the invoke.
- __ bl(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ str(r0, MemOperand(ip));
- __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available.
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bl(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
- __ ldr(r5, MemOperand(ip));
- __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
- __ str(r5, MemOperand(ip));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
- - // Expected registers by Builtins::JSEntryTrampoline - // r0: code entry - // r1: function - // r2: receiver - // r3: argc - // r4: argv - if (is_construct) { - ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); - __ mov(ip, Operand(construct_entry)); - } else { - ExternalReference entry(Builtins::JSEntryTrampoline); - __ mov(ip, Operand(entry)); - } - __ ldr(ip, MemOperand(ip)); // deref address - - // Branch and link to JSEntryTrampoline. We don't use the double underscore - // macro for the add instruction because we don't want the coverage tool - // inserting instructions here after we read the pc. - __ mov(lr, Operand(pc)); - masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); - - // Unlink this frame from the handler chain. When reading the - // address of the next handler, there is no need to use the address - // displacement since the current stack pointer (sp) points directly - // to the stack handler. - __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); - __ mov(ip, Operand(ExternalReference(Top::k_handler_address))); - __ str(r3, MemOperand(ip)); - // No need to restore registers - __ add(sp, sp, Operand(StackHandlerConstants::kSize)); - - - __ bind(&exit); // r0 holds result - // Restore the top frame descriptors from the stack. - __ pop(r3); - __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); - __ str(r3, MemOperand(ip)); - - // Reset the stack to the callee saved registers. - __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); - - // Restore callee-saved registers and return. -#ifdef DEBUG - if (FLAG_debug_code) { - __ mov(lr, Operand(pc)); - } -#endif - __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); -} - - -// This stub performs an instanceof, calling the builtin function if -// necessary. Uses r1 for the object, r0 for the function that it may -// be an instance of (these are fetched from the stack). -void InstanceofStub::Generate(MacroAssembler* masm) { - // Get the object - slow case for smis (we may need to throw an exception - // depending on the rhs). - Label slow, loop, is_instance, is_not_instance; - __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); - __ BranchOnSmi(r0, &slow); - - // Check that the left hand is a JS object and put map in r3. - __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE); - __ b(lt, &slow); - __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE)); - __ b(gt, &slow); - - // Get the prototype of the function (r4 is result, r2 is scratch). - __ ldr(r1, MemOperand(sp, 0)); - // r1 is function, r3 is map. - - // Look up the function and the map in the instanceof cache. - Label miss; - __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); - __ cmp(r1, ip); - __ b(ne, &miss); - __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); - __ cmp(r3, ip); - __ b(ne, &miss); - __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); - __ pop(); - __ pop(); - __ mov(pc, Operand(lr)); - - __ bind(&miss); - __ TryGetFunctionPrototype(r1, r4, r2, &slow); - - // Check that the function prototype is a JS object. - __ BranchOnSmi(r4, &slow); - __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE); - __ b(lt, &slow); - __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE)); - __ b(gt, &slow); - - __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex); - __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex); - - // Register mapping: r3 is object map and r4 is function prototype. - // Get prototype of object into r2. 
- __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset)); - - // Loop through the prototype chain looking for the function prototype. - __ bind(&loop); - __ cmp(r2, Operand(r4)); - __ b(eq, &is_instance); - __ LoadRoot(ip, Heap::kNullValueRootIndex); - __ cmp(r2, ip); - __ b(eq, &is_not_instance); - __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset)); - __ jmp(&loop); - - __ bind(&is_instance); - __ mov(r0, Operand(Smi::FromInt(0))); - __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); - __ pop(); - __ pop(); - __ mov(pc, Operand(lr)); // Return. - - __ bind(&is_not_instance); - __ mov(r0, Operand(Smi::FromInt(1))); - __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); - __ pop(); - __ pop(); - __ mov(pc, Operand(lr)); // Return. - - // Slow-case. Tail call builtin. - __ bind(&slow); - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); -} - - -void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { - // The displacement is the offset of the last parameter (if any) - // relative to the frame pointer. - static const int kDisplacement = - StandardFrameConstants::kCallerSPOffset - kPointerSize; - - // Check that the key is a smi. - Label slow; - __ BranchOnNotSmi(r1, &slow); - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor; - __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); - __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ b(eq, &adaptor); - - // Check index against formal parameters count limit passed in - // through register r0. Use unsigned comparison to get negative - // check for free. - __ cmp(r1, r0); - __ b(cs, &slow); - - // Read the argument from the stack and return it. - __ sub(r3, r0, r1); - __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ ldr(r0, MemOperand(r3, kDisplacement)); - __ Jump(lr); - - // Arguments adaptor case: Check index against actual arguments - // limit found in the arguments adaptor frame. Use unsigned - // comparison to get negative check for free. - __ bind(&adaptor); - __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ cmp(r1, r0); - __ b(cs, &slow); - - // Read the argument from the adaptor frame and return it. - __ sub(r3, r0, r1); - __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ ldr(r0, MemOperand(r3, kDisplacement)); - __ Jump(lr); - - // Slow-case: Handle non-smi or out-of-bounds access to arguments - // by calling the runtime system. - __ bind(&slow); - __ push(r1); - __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); -} - - -void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { - // sp[0] : number of parameters - // sp[4] : receiver displacement - // sp[8] : function - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor_frame, try_allocate, runtime; - __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); - __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ b(eq, &adaptor_frame); - - // Get the length from the frame. - __ ldr(r1, MemOperand(sp, 0)); - __ b(&try_allocate); - - // Patch the arguments.length and the parameters pointer. 
- __ bind(&adaptor_frame); - __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ str(r1, MemOperand(sp, 0)); - __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); - __ str(r3, MemOperand(sp, 1 * kPointerSize)); - - // Try the new space allocation. Start out with computing the size - // of the arguments object and the elements array in words. - Label add_arguments_object; - __ bind(&try_allocate); - __ cmp(r1, Operand(0)); - __ b(eq, &add_arguments_object); - __ mov(r1, Operand(r1, LSR, kSmiTagSize)); - __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); - __ bind(&add_arguments_object); - __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize)); - - // Do the allocation of both objects in one go. - __ AllocateInNewSpace( - r1, - r0, - r2, - r3, - &runtime, - static_cast(TAG_OBJECT | SIZE_IN_WORDS)); - - // Get the arguments boilerplate from the current (global) context. - int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); - __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset)); - __ ldr(r4, MemOperand(r4, offset)); - - // Copy the JS object part. - __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); - - // Setup the callee in-object property. - STATIC_ASSERT(Heap::arguments_callee_index == 0); - __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); - __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize)); - - // Get the length (smi tagged) and set that as an in-object property too. - STATIC_ASSERT(Heap::arguments_length_index == 1); - __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); - __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize)); - - // If there are no actual arguments, we're done. - Label done; - __ cmp(r1, Operand(0)); - __ b(eq, &done); - - // Get the parameters pointer from the stack. - __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); - - // Setup the elements pointer in the allocated arguments object and - // initialize the header in the elements fixed array. - __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); - __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); - __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); - __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); - __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); - __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop. - - // Copy the fixed array slots. - Label loop; - // Setup r4 to point to the first array slot. - __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ bind(&loop); - // Pre-decrement r2 with kPointerSize on each iteration. - // Pre-decrement in order to skip receiver. - __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); - // Post-increment r4 with kPointerSize on each iteration. - __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); - __ sub(r1, r1, Operand(1)); - __ cmp(r1, Operand(0)); - __ b(ne, &loop); - - // Return and remove the on-stack parameters. - __ bind(&done); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - // Do the runtime call to allocate the arguments object. 
- __ bind(&runtime); - __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); -} - - -void RegExpExecStub::Generate(MacroAssembler* masm) { - // Just jump directly to runtime if native RegExp is not selected at compile - // time or if regexp entry in generated code is turned off runtime switch or - // at compilation. -#ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); -#else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } - - // Stack frame on entry. - // sp[0]: last_match_info (expected JSArray) - // sp[4]: previous index - // sp[8]: subject string - // sp[12]: JSRegExp object - - static const int kLastMatchInfoOffset = 0 * kPointerSize; - static const int kPreviousIndexOffset = 1 * kPointerSize; - static const int kSubjectOffset = 2 * kPointerSize; - static const int kJSRegExpOffset = 3 * kPointerSize; - - Label runtime, invoke_regexp; - - // Allocation of registers for this function. These are in callee save - // registers and will be preserved by the call to the native RegExp code, as - // this code is called using the normal C calling convention. When calling - // directly from generated code the native RegExp code will not do a GC and - // therefore the content of these registers are safe to use after the call. - Register subject = r4; - Register regexp_data = r5; - Register last_match_info_elements = r6; - - // Ensure that a RegExp stack is allocated. - ExternalReference address_of_regexp_stack_memory_address = - ExternalReference::address_of_regexp_stack_memory_address(); - ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(); - __ mov(r0, Operand(address_of_regexp_stack_memory_size)); - __ ldr(r0, MemOperand(r0, 0)); - __ tst(r0, Operand(r0)); - __ b(eq, &runtime); - - // Check that the first argument is a JSRegExp object. - __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &runtime); - __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); - __ b(ne, &runtime); - - // Check that the RegExp has been compiled (data contains a fixed array). - __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); - if (FLAG_debug_code) { - __ tst(regexp_data, Operand(kSmiTagMask)); - __ Check(nz, "Unexpected type for RegExp data, FixedArray expected"); - __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); - __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); - } - - // regexp_data: RegExp data (FixedArray) - // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. - __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); - __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); - __ b(ne, &runtime); - - // regexp_data: RegExp data (FixedArray) - // Check that the number of captures fit in the static offsets vector buffer. - __ ldr(r2, - FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. This - // uses the asumption that smis are 2 * their untagged value. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(r2, r2, Operand(2)); // r2 was a smi. - // Check that the static offsets vector buffer is large enough. 
- __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize)); - __ b(hi, &runtime); - - // r2: Number of capture registers - // regexp_data: RegExp data (FixedArray) - // Check that the second argument is a string. - __ ldr(subject, MemOperand(sp, kSubjectOffset)); - __ tst(subject, Operand(kSmiTagMask)); - __ b(eq, &runtime); - Condition is_string = masm->IsObjectStringType(subject, r0); - __ b(NegateCondition(is_string), &runtime); - // Get the length of the string to r3. - __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); - - // r2: Number of capture registers - // r3: Length of subject string as a smi - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check that the third argument is a positive smi less than the subject - // string length. A negative value will be greater (unsigned comparison). - __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); - __ tst(r0, Operand(kSmiTagMask)); - __ b(ne, &runtime); - __ cmp(r3, Operand(r0)); - __ b(ls, &runtime); - - // r2: Number of capture registers - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check that the fourth object is a JSArray object. - __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); - __ tst(r0, Operand(kSmiTagMask)); - __ b(eq, &runtime); - __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); - __ b(ne, &runtime); - // Check that the JSArray is in fast case. - __ ldr(last_match_info_elements, - FieldMemOperand(r0, JSArray::kElementsOffset)); - __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(r0, ip); - __ b(ne, &runtime); - // Check that the last match info has space for the capture registers and the - // additional information. - __ ldr(r0, - FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); - __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); - __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); - __ b(gt, &runtime); - - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check the representation and encoding of the subject string. - Label seq_string; - __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); - __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); - // First check for flat string. - __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask)); - STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); - __ b(eq, &seq_string); - - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check for flat cons string. - // A flat cons string is a cons string where the second part is the empty - // string. In that case the subject string is just the first part of the cons - // string. Also in this case the first part of the cons string is known to be - // a sequential string or an external string. - STATIC_ASSERT(kExternalStringTag !=0); - STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); - __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag)); - __ b(ne, &runtime); - __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); - __ LoadRoot(r1, Heap::kEmptyStringRootIndex); - __ cmp(r0, r1); - __ b(ne, &runtime); - __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); - __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); - __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); - // Is first part a flat string? 
- STATIC_ASSERT(kSeqStringTag == 0); - __ tst(r0, Operand(kStringRepresentationMask)); - __ b(nz, &runtime); - - __ bind(&seq_string); - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // r0: Instance type of subject string - STATIC_ASSERT(4 == kAsciiStringTag); - STATIC_ASSERT(kTwoByteStringTag == 0); - // Find the code object based on the assumptions above. - __ and_(r0, r0, Operand(kStringEncodingMask)); - __ mov(r3, Operand(r0, ASR, 2), SetCC); - __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); - __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); - - // Check that the irregexp code has been generated for the actual string - // encoding. If it has, the field contains a code object otherwise it contains - // the hole. - __ CompareObjectType(r7, r0, r0, CODE_TYPE); - __ b(ne, &runtime); - - // r3: encoding of subject string (1 if ascii, 0 if two_byte); - // r7: code - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Load used arguments before starting to push arguments for call to native - // RegExp code to avoid handling changing stack height. - __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); - __ mov(r1, Operand(r1, ASR, kSmiTagSize)); - - // r1: previous index - // r3: encoding of subject string (1 if ascii, 0 if two_byte); - // r7: code - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // All checks done. Now push arguments for native regexp code. - __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); - - static const int kRegExpExecuteArguments = 7; - __ push(lr); - __ PrepareCallCFunction(kRegExpExecuteArguments, r0); - - // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. - __ mov(r0, Operand(1)); - __ str(r0, MemOperand(sp, 2 * kPointerSize)); - - // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. - __ mov(r0, Operand(address_of_regexp_stack_memory_address)); - __ ldr(r0, MemOperand(r0, 0)); - __ mov(r2, Operand(address_of_regexp_stack_memory_size)); - __ ldr(r2, MemOperand(r2, 0)); - __ add(r0, r0, Operand(r2)); - __ str(r0, MemOperand(sp, 1 * kPointerSize)); - - // Argument 5 (sp[0]): static offsets vector buffer. - __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); - __ str(r0, MemOperand(sp, 0 * kPointerSize)); - - // For arguments 4 and 3 get string length, calculate start of string data and - // calculate the shift of the index (0 for ASCII and 1 for two byte). - __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); - __ mov(r0, Operand(r0, ASR, kSmiTagSize)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); - __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ eor(r3, r3, Operand(1)); - // Argument 4 (r3): End of string data - // Argument 3 (r2): Start of string data - __ add(r2, r9, Operand(r1, LSL, r3)); - __ add(r3, r9, Operand(r0, LSL, r3)); - - // Argument 2 (r1): Previous index. - // Already there - - // Argument 1 (r0): Subject string. - __ mov(r0, subject); - - // Locate the code entry and call it. - __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ CallCFunction(r7, kRegExpExecuteArguments); - __ pop(lr); - - // r0: result - // subject: subject string (callee saved) - // regexp_data: RegExp data (callee saved) - // last_match_info_elements: Last match info elements (callee saved) - - // Check the result. 
- Label success; - __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); - __ b(eq, &success); - Label failure; - __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); - __ b(eq, &failure); - __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); - // If not exception it can only be retry. Handle that in the runtime system. - __ b(ne, &runtime); - // Result must now be exception. If there is no pending exception already a - // stack overflow (on the backtrack stack) was detected in RegExp code but - // haven't created the exception yet. Handle that in the runtime system. - // TODO(592): Rerunning the RegExp to get the stack overflow exception. - __ mov(r0, Operand(ExternalReference::the_hole_value_location())); - __ ldr(r0, MemOperand(r0, 0)); - __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); - __ ldr(r1, MemOperand(r1, 0)); - __ cmp(r0, r1); - __ b(eq, &runtime); - __ bind(&failure); - // For failure and exception return null. - __ mov(r0, Operand(Factory::null_value())); - __ add(sp, sp, Operand(4 * kPointerSize)); - __ Ret(); - - // Process the result from the native regexp code. - __ bind(&success); - __ ldr(r1, - FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(r1, r1, Operand(2)); // r1 was a smi. - - // r1: number of capture registers - // r4: subject string - // Store the capture count. - __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi. - __ str(r2, FieldMemOperand(last_match_info_elements, - RegExpImpl::kLastCaptureCountOffset)); - // Store last subject and last input. - __ mov(r3, last_match_info_elements); // Moved up to reduce latency. - __ str(subject, - FieldMemOperand(last_match_info_elements, - RegExpImpl::kLastSubjectOffset)); - __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); - __ str(subject, - FieldMemOperand(last_match_info_elements, - RegExpImpl::kLastInputOffset)); - __ mov(r3, last_match_info_elements); - __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); - - // Get the static offsets vector filled by the native regexp code. - ExternalReference address_of_static_offsets_vector = - ExternalReference::address_of_static_offsets_vector(); - __ mov(r2, Operand(address_of_static_offsets_vector)); - - // r1: number of capture registers - // r2: offsets vector - Label next_capture, done; - // Capture register counter starts from number of capture registers and - // counts down until wraping after zero. - __ add(r0, - last_match_info_elements, - Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); - __ bind(&next_capture); - __ sub(r1, r1, Operand(1), SetCC); - __ b(mi, &done); - // Read the value from the static offsets vector buffer. - __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); - // Store the smi value in the last match info. - __ mov(r3, Operand(r3, LSL, kSmiTagSize)); - __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); - __ jmp(&next_capture); - __ bind(&done); - - // Return last match info. - __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); - __ add(sp, sp, Operand(4 * kPointerSize)); - __ Ret(); - - // Do the runtime call to execute the regexp. 
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // If the receiver might be a value (string, number or boolean) check for this
- // and box it if it is.
- if (ReceiverMightBeValue()) {
- // Get the receiver from the stack.
- // function, receiver [, arguments]
- Label receiver_is_value, receiver_is_js_object;
- __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
-
- // Check if receiver is a smi (which is a number value).
- __ BranchOnSmi(r1, &receiver_is_value);
-
- // Check if the receiver is a valid JS object.
- __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
- __ b(ge, &receiver_is_js_object);
-
- // Call the runtime to box the value.
- __ bind(&receiver_is_value);
- __ EnterInternalFrame();
- __ push(r1);
- __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
- __ LeaveInternalFrame();
- __ str(r0, MemOperand(sp, argc_ * kPointerSize));
-
- __ bind(&receiver_is_js_object);
- }
-
- // Get the function to call from the stack.
- // function, receiver [, arguments]
- __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
-
- // Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ BranchOnSmi(r1, &slow);
- // Get the map of the function object.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Fast-case: Invoke the function now.
- // r1: pushed function
- ParameterCount actual(argc_);
- __ InvokeFunction(r1, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ str(r1, MemOperand(sp, argc_ * kPointerSize));
- __ mov(r0, Operand(argc_)); // Setup the number of arguments.
- __ mov(r2, Operand(0));
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
- __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
- RelocInfo::CODE_TARGET);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
-
- const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
- const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
"_r0" : "_r1"; - - const char* strict_name = ""; - if (strict_ && (cc_ == eq || cc_ == ne)) { - strict_name = "_STRICT"; - } - - const char* never_nan_nan_name = ""; - if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) { - never_nan_nan_name = "_NO_NAN"; - } - - const char* include_number_compare_name = ""; - if (!include_number_compare_) { - include_number_compare_name = "_NO_NUMBER"; - } - - OS::SNPrintF(Vector(name_, kMaxNameLength), - "CompareStub_%s%s%s%s%s%s", - cc_name, - lhs_name, - rhs_name, - strict_name, - never_nan_nan_name, - include_number_compare_name); + OS::SNPrintF(Vector(name_, len), + "GenericBinaryOpStub_%s_%s%s_%s", + op_name, + overwrite_name, + specialized_on_rhs_ ? "_ConstantRhs" : "", + BinaryOpIC::GetName(runtime_operands_type_)); return name_; } -int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. To avoid duplicate - // stubs the never NaN NaN condition is only taken into account if the - // condition is equals. - ASSERT((static_cast(cc_) >> 28) < (1 << 12)); - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); - return ConditionField::encode(static_cast(cc_) >> 28) - | RegisterField::encode(lhs_.is(r0)) - | StrictField::encode(strict_) - | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_); -} - - -// StringCharCodeAtGenerator - -void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { - Label flat_string; - Label ascii_string; - Label got_char_code; - - // If the receiver is a smi trigger the non-string case. - __ BranchOnSmi(object_, receiver_not_string_); - - // Fetch the instance type of the receiver into result register. - __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); - __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); - // If the receiver is not a string trigger the non-string case. - __ tst(result_, Operand(kIsNotStringMask)); - __ b(ne, receiver_not_string_); - - // If the index is non-smi trigger the non-smi case. - __ BranchOnNotSmi(index_, &index_not_smi_); - - // Put smi-tagged index into scratch register. - __ mov(scratch_, index_); - __ bind(&got_smi_index_); - - // Check for index out of range. - __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); - __ cmp(ip, Operand(scratch_)); - __ b(ls, index_out_of_range_); - - // We need special handling for non-flat strings. - STATIC_ASSERT(kSeqStringTag == 0); - __ tst(result_, Operand(kStringRepresentationMask)); - __ b(eq, &flat_string); - - // Handle non-flat strings. - __ tst(result_, Operand(kIsConsStringMask)); - __ b(eq, &call_runtime_); - - // ConsString. - // Check whether the right hand side is the empty string (i.e. if - // this is really a flat string in a cons string). If that is not - // the case we would rather go to the runtime system now to flatten - // the string. - __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset)); - __ LoadRoot(ip, Heap::kEmptyStringRootIndex); - __ cmp(result_, Operand(ip)); - __ b(ne, &call_runtime_); - // Get the first of the two strings and load its instance type. - __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); - __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); - __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); - // If the first cons component is also non-flat, then go to runtime. 
- STATIC_ASSERT(kSeqStringTag == 0); - __ tst(result_, Operand(kStringRepresentationMask)); - __ b(nz, &call_runtime_); - - // Check for 1-byte or 2-byte string. - __ bind(&flat_string); - STATIC_ASSERT(kAsciiStringTag != 0); - __ tst(result_, Operand(kStringEncodingMask)); - __ b(nz, &ascii_string); - - // 2-byte string. - // Load the 2-byte character code into the result register. We can - // add without shifting since the smi tag size is the log2 of the - // number of bytes in a two-byte character. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); - __ add(scratch_, object_, Operand(scratch_)); - __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); - __ jmp(&got_char_code); - - // ASCII string. - // Load the byte into the result register. - __ bind(&ascii_string); - __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize)); - __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize)); - - __ bind(&got_char_code); - __ mov(result_, Operand(result_, LSL, kSmiTagSize)); - __ bind(&exit_); -} - - -void StringCharCodeAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharCodeAt slow case"); - - // Index is not a smi. - __ bind(&index_not_smi_); - // If index is a heap number, try converting it to an integer. - __ CheckMap(index_, - scratch_, - Heap::kHeapNumberMapRootIndex, - index_not_number_, - true); - call_helper.BeforeCall(masm); - __ Push(object_, index_); - __ push(index_); // Consumed by runtime conversion function. - if (index_flags_ == STRING_INDEX_IS_NUMBER) { - __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); - } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); - // NumberToSmi discards numbers that are not exact integers. - __ CallRuntime(Runtime::kNumberToSmi, 1); - } - // Save the conversion result before the pop instructions below - // have a chance to overwrite it. - __ Move(scratch_, r0); - __ pop(index_); - __ pop(object_); - // Reload the instance type. - __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); - __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); - call_helper.AfterCall(masm); - // If index is still not a smi, it must be out of range. - __ BranchOnNotSmi(scratch_, index_out_of_range_); - // Otherwise, return to the fast path. - __ jmp(&got_smi_index_); - - // Call runtime. We get here when the receiver is a string and the - // index is a number, but the code of getting the actual character - // is too complex (e.g., when the string needs to be flattened). - __ bind(&call_runtime_); - call_helper.BeforeCall(masm); - __ Push(object_, index_); - __ CallRuntime(Runtime::kStringCharCodeAt, 2); - __ Move(result_, r0); - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharCodeAt slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharFromCodeGenerator - -void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { - // Fast case of Heap::LookupSingleCharacterStringFromCode. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); - __ tst(code_, - Operand(kSmiTagMask | - ((~String::kMaxAsciiCharCode) << kSmiTagSize))); - __ b(nz, &slow_case_); - - __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); - // At this point code register contains smi tagged ascii char code. 
- STATIC_ASSERT(kSmiTag == 0); - __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(result_, Operand(ip)); - __ b(eq, &slow_case_); - __ bind(&exit_); -} - - -void StringCharFromCodeGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharFromCode slow case"); - - __ bind(&slow_case_); - call_helper.BeforeCall(masm); - __ push(code_); - __ CallRuntime(Runtime::kCharFromCode, 1); - __ Move(result_, r0); - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharFromCode slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharAtGenerator - -void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { - char_code_at_generator_.GenerateFast(masm); - char_from_code_generator_.GenerateFast(masm); -} - - -void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - char_code_at_generator_.GenerateSlow(masm, call_helper); - char_from_code_generator_.GenerateSlow(masm, call_helper); -} - - -void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii) { - Label loop; - Label done; - // This loop just copies one character at a time, as it is only used for very - // short strings. - if (!ascii) { - __ add(count, count, Operand(count), SetCC); - } else { - __ cmp(count, Operand(0)); - } - __ b(eq, &done); - - __ bind(&loop); - __ ldrb(scratch, MemOperand(src, 1, PostIndex)); - // Perform sub between load and dependent store to get the load time to - // complete. - __ sub(count, count, Operand(1), SetCC); - __ strb(scratch, MemOperand(dest, 1, PostIndex)); - // last iteration. - __ b(gt, &loop); - - __ bind(&done); -} - - -enum CopyCharactersFlags { - COPY_ASCII = 1, - DEST_ALWAYS_ALIGNED = 2 -}; - - -void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - int flags) { - bool ascii = (flags & COPY_ASCII) != 0; - bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; - - if (dest_always_aligned && FLAG_debug_code) { - // Check that destination is actually word aligned if the flag says - // that it is. - __ tst(dest, Operand(kPointerAlignmentMask)); - __ Check(eq, "Destination of copy not aligned."); - } - - const int kReadAlignment = 4; - const int kReadAlignmentMask = kReadAlignment - 1; - // Ensure that reading an entire aligned word containing the last character - // of a string will not read outside the allocated area (because we pad up - // to kObjectAlignment). - STATIC_ASSERT(kObjectAlignment >= kReadAlignment); - // Assumes word reads and writes are little endian. - // Nothing to do for zero characters. - Label done; - if (!ascii) { - __ add(count, count, Operand(count), SetCC); - } else { - __ cmp(count, Operand(0)); - } - __ b(eq, &done); - - // Assume that you cannot read (or write) unaligned. - Label byte_loop; - // Must copy at least eight bytes, otherwise just do it one byte at a time. - __ cmp(count, Operand(8)); - __ add(count, dest, Operand(count)); - Register limit = count; // Read until src equals this. 
- __ b(lt, &byte_loop); - - if (!dest_always_aligned) { - // Align dest by byte copying. Copies between zero and three bytes. - __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC); - Label dest_aligned; - __ b(eq, &dest_aligned); - __ cmp(scratch4, Operand(2)); - __ ldrb(scratch1, MemOperand(src, 1, PostIndex)); - __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le); - __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt); - __ strb(scratch1, MemOperand(dest, 1, PostIndex)); - __ strb(scratch2, MemOperand(dest, 1, PostIndex), le); - __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt); - __ bind(&dest_aligned); - } - - Label simple_loop; - - __ sub(scratch4, dest, Operand(src)); - __ and_(scratch4, scratch4, Operand(0x03), SetCC); - __ b(eq, &simple_loop); - // Shift register is number of bits in a source word that - // must be combined with bits in the next source word in order - // to create a destination word. - - // Complex loop for src/dst that are not aligned the same way. - { - Label loop; - __ mov(scratch4, Operand(scratch4, LSL, 3)); - Register left_shift = scratch4; - __ and_(src, src, Operand(~3)); // Round down to load previous word. - __ ldr(scratch1, MemOperand(src, 4, PostIndex)); - // Store the "shift" most significant bits of scratch in the least - // signficant bits (i.e., shift down by (32-shift)). - __ rsb(scratch2, left_shift, Operand(32)); - Register right_shift = scratch2; - __ mov(scratch1, Operand(scratch1, LSR, right_shift)); - - __ bind(&loop); - __ ldr(scratch3, MemOperand(src, 4, PostIndex)); - __ sub(scratch5, limit, Operand(dest)); - __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); - __ str(scratch1, MemOperand(dest, 4, PostIndex)); - __ mov(scratch1, Operand(scratch3, LSR, right_shift)); - // Loop if four or more bytes left to copy. - // Compare to eight, because we did the subtract before increasing dst. - __ sub(scratch5, scratch5, Operand(8), SetCC); - __ b(ge, &loop); - } - // There is now between zero and three bytes left to copy (negative that - // number is in scratch5), and between one and three bytes already read into - // scratch1 (eight times that number in scratch4). We may have read past - // the end of the string, but because objects are aligned, we have not read - // past the end of the object. - // Find the minimum of remaining characters to move and preloaded characters - // and write those as bytes. - __ add(scratch5, scratch5, Operand(4), SetCC); - __ b(eq, &done); - __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); - // Move minimum of bytes read and bytes left to copy to scratch4. - __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); - // Between one and three (value in scratch5) characters already read into - // scratch ready to write. - __ cmp(scratch5, Operand(2)); - __ strb(scratch1, MemOperand(dest, 1, PostIndex)); - __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); - __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); - __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); - __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); - // Copy any remaining bytes. - __ b(&byte_loop); - - // Simple loop. - // Copy words from src to dst, until less than four bytes left. - // Both src and dest are word aligned. - __ bind(&simple_loop); - { - Label loop; - __ bind(&loop); - __ ldr(scratch1, MemOperand(src, 4, PostIndex)); - __ sub(scratch3, limit, Operand(dest)); - __ str(scratch1, MemOperand(dest, 4, PostIndex)); - // Compare to 8, not 4, because we do the substraction before increasing - // dest. 
- __ cmp(scratch3, Operand(8)); - __ b(ge, &loop); - } - - // Copy bytes from src to dst until dst hits limit. - __ bind(&byte_loop); - __ cmp(dest, Operand(limit)); - __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt); - __ b(ge, &done); - __ strb(scratch1, MemOperand(dest, 1, PostIndex)); - __ b(&byte_loop); - - __ bind(&done); -} - - -void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - Label* not_found) { - // Register scratch3 is the general scratch register in this function. - Register scratch = scratch3; - - // Make sure that both characters are not digits as such strings has a - // different hash algorithm. Don't try to look for these in the symbol table. - Label not_array_index; - __ sub(scratch, c1, Operand(static_cast('0'))); - __ cmp(scratch, Operand(static_cast('9' - '0'))); - __ b(hi, ¬_array_index); - __ sub(scratch, c2, Operand(static_cast('0'))); - __ cmp(scratch, Operand(static_cast('9' - '0'))); - - // If check failed combine both characters into single halfword. - // This is required by the contract of the method: code at the - // not_found branch expects this combination in c1 register - __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls); - __ b(ls, not_found); - - __ bind(¬_array_index); - // Calculate the two character string hash. - Register hash = scratch1; - StringHelper::GenerateHashInit(masm, hash, c1); - StringHelper::GenerateHashAddCharacter(masm, hash, c2); - StringHelper::GenerateHashGetHash(masm, hash); - - // Collect the two characters in a register. - Register chars = c1; - __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte)); - - // chars: two character string, char 1 in byte 0 and char 2 in byte 1. - // hash: hash of two character string. - - // Load symbol table - // Load address of first element of the symbol table. - Register symbol_table = c2; - __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); - - // Load undefined value - Register undefined = scratch4; - __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); - - // Calculate capacity mask from the symbol table capacity. - Register mask = scratch2; - __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); - __ mov(mask, Operand(mask, ASR, 1)); - __ sub(mask, mask, Operand(1)); - - // Calculate untagged address of the first element of the symbol table. - Register first_symbol_table_element = symbol_table; - __ add(first_symbol_table_element, symbol_table, - Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); - - // Registers - // chars: two character string, char 1 in byte 0 and char 2 in byte 1. - // hash: hash of two character string - // mask: capacity mask - // first_symbol_table_element: address of the first element of - // the symbol table - // scratch: - - - // Perform a number of probes in the symbol table. - static const int kProbes = 4; - Label found_in_symbol_table; - Label next_probe[kProbes]; - for (int i = 0; i < kProbes; i++) { - Register candidate = scratch5; // Scratch register contains candidate. - - // Calculate entry in symbol table. - if (i > 0) { - __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); - } else { - __ mov(candidate, hash); - } - - __ and_(candidate, candidate, Operand(mask)); - - // Load the entry from the symble table. 
- STATIC_ASSERT(SymbolTable::kEntrySize == 1); - __ ldr(candidate, - MemOperand(first_symbol_table_element, - candidate, - LSL, - kPointerSizeLog2)); - - // If entry is undefined no string with this hash can be found. - __ cmp(candidate, undefined); - __ b(eq, not_found); - - // If length is not 2 the string is not a candidate. - __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); - __ cmp(scratch, Operand(Smi::FromInt(2))); - __ b(ne, &next_probe[i]); - - // Check that the candidate is a non-external ascii string. - __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset)); - __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, - &next_probe[i]); - - // Check if the two characters match. - // Assumes that word load is little endian. - __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); - __ cmp(chars, scratch); - __ b(eq, &found_in_symbol_table); - __ bind(&next_probe[i]); - } - - // No matching 2 character string found by probing. - __ jmp(not_found); - - // Scratch register contains result when we fall through to here. - Register result = scratch; - __ bind(&found_in_symbol_table); - __ Move(r0, result); -} - - -void StringHelper::GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character) { - // hash = character + (character << 10); - __ add(hash, character, Operand(character, LSL, 10)); - // hash ^= hash >> 6; - __ eor(hash, hash, Operand(hash, ASR, 6)); -} - - -void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character) { - // hash += character; - __ add(hash, hash, Operand(character)); - // hash += hash << 10; - __ add(hash, hash, Operand(hash, LSL, 10)); - // hash ^= hash >> 6; - __ eor(hash, hash, Operand(hash, ASR, 6)); -} - - -void StringHelper::GenerateHashGetHash(MacroAssembler* masm, - Register hash) { - // hash += hash << 3; - __ add(hash, hash, Operand(hash, LSL, 3)); - // hash ^= hash >> 11; - __ eor(hash, hash, Operand(hash, ASR, 11)); - // hash += hash << 15; - __ add(hash, hash, Operand(hash, LSL, 15), SetCC); - - // if (hash == 0) hash = 27; - __ mov(hash, Operand(27), LeaveCC, nz); -} - - -void SubStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - // Stack frame on entry. - // lr: return address - // sp[0]: to - // sp[4]: from - // sp[8]: string - - // This stub is called from the native-call %_SubString(...), so - // nothing can be assumed about the arguments. It is tested that: - // "string" is a sequential string, - // both "from" and "to" are smis, and - // 0 <= from <= to <= string.length. - // If any of these assumptions fail, we call the runtime system. - - static const int kToOffset = 0 * kPointerSize; - static const int kFromOffset = 1 * kPointerSize; - static const int kStringOffset = 2 * kPointerSize; - - - // Check bounds and smi-ness. - __ ldr(r7, MemOperand(sp, kToOffset)); - __ ldr(r6, MemOperand(sp, kFromOffset)); - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - // I.e., arithmetic shift right by one un-smi-tags. - __ mov(r2, Operand(r7, ASR, 1), SetCC); - __ mov(r3, Operand(r6, ASR, 1), SetCC, cc); - // If either r2 or r6 had the smi tag bit set, then carry is set now. - __ b(cs, &runtime); // Either "from" or "to" is not a smi. - __ b(mi, &runtime); // From is negative. - - __ sub(r2, r2, Operand(r3), SetCC); - __ b(mi, &runtime); // Fail if from > to. - // Special handling of sub-strings of length 1 and 2. 
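// The GenerateHashInit/GenerateHashAddCharacter/GenerateHashGetHash helpers
// above emit the running string hash used by the two-character symbol-table
// probe. The same sequence of operations in plain C++, as a sketch (uint32_t
// with logical shifts here, 32-bit registers with ASR in the assembly, so it
// is illustrative rather than guaranteed bit-exact):
#include <stdint.h>

static uint32_t HashInit(uint32_t character) {
  uint32_t hash = character + (character << 10);
  hash ^= hash >> 6;
  return hash;
}

static uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

static uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;  // a zero hash is replaced by 27
}

// Two-character usage, mirroring the probe above:
//   uint32_t h = HashGetHash(HashAddCharacter(HashInit(c1), c2));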
One character strings - // are handled in the runtime system (looked up in the single character - // cache). Two character strings are looked for in the symbol cache. - __ cmp(r2, Operand(2)); - __ b(lt, &runtime); - - // r2: length - // r3: from index (untaged smi) - // r6: from (smi) - // r7: to (smi) - - // Make sure first argument is a sequential (or flat) string. - __ ldr(r5, MemOperand(sp, kStringOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ tst(r5, Operand(kSmiTagMask)); - __ b(eq, &runtime); - Condition is_string = masm->IsObjectStringType(r5, r1); - __ b(NegateCondition(is_string), &runtime); - - // r1: instance type - // r2: length - // r3: from index (untaged smi) - // r5: string - // r6: from (smi) - // r7: to (smi) - Label seq_string; - __ and_(r4, r1, Operand(kStringRepresentationMask)); - STATIC_ASSERT(kSeqStringTag < kConsStringTag); - STATIC_ASSERT(kConsStringTag < kExternalStringTag); - __ cmp(r4, Operand(kConsStringTag)); - __ b(gt, &runtime); // External strings go to runtime. - __ b(lt, &seq_string); // Sequential strings are handled directly. - - // Cons string. Try to recurse (once) on the first substring. - // (This adds a little more generality than necessary to handle flattened - // cons strings, but not much). - __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset)); - __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); - __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ tst(r1, Operand(kStringRepresentationMask)); - STATIC_ASSERT(kSeqStringTag == 0); - __ b(ne, &runtime); // Cons and External strings go to runtime. - - // Definitly a sequential string. - __ bind(&seq_string); - - // r1: instance type. - // r2: length - // r3: from index (untaged smi) - // r5: string - // r6: from (smi) - // r7: to (smi) - __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset)); - __ cmp(r4, Operand(r7)); - __ b(lt, &runtime); // Fail if to > length. - - // r1: instance type. - // r2: result string length. - // r3: from index (untaged smi) - // r5: string. - // r6: from offset (smi) - // Check for flat ascii string. - Label non_ascii_flat; - __ tst(r1, Operand(kStringEncodingMask)); - STATIC_ASSERT(kTwoByteStringTag == 0); - __ b(eq, &non_ascii_flat); - - Label result_longer_than_two; - __ cmp(r2, Operand(2)); - __ b(gt, &result_longer_than_two); - - // Sub string of length 2 requested. - // Get the two characters forming the sub string. - __ add(r5, r5, Operand(r3)); - __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize)); - __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1)); - - // Try to lookup two character string in symbol table. - Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); - __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - // r2: result string length. - // r3: two characters combined into halfword in little endian byte order. - __ bind(&make_two_character_string); - __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); - __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - __ bind(&result_longer_than_two); - - // Allocate the result. - __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime); - - // r0: result string. - // r2: result string length. - // r5: string. 
- // r6: from offset (smi) - // Locate first character of result. - __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Locate 'from' character of string. - __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ add(r5, r5, Operand(r6, ASR, 1)); - - // r0: result string. - // r1: first character of result string. - // r2: result string length. - // r5: first character of sub string to copy. - STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, - COPY_ASCII | DEST_ALWAYS_ALIGNED); - __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - __ bind(&non_ascii_flat); - // r2: result string length. - // r5: string. - // r6: from offset (smi) - // Check for flat two byte string. - - // Allocate the result. - __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime); - - // r0: result string. - // r2: result string length. - // r5: string. - // Locate first character of result. - __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Locate 'from' character of string. - __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // As "from" is a smi it is 2 times the value which matches the size of a two - // byte character. - __ add(r5, r5, Operand(r6)); - - // r0: result string. - // r1: first character of result. - // r2: result length. - // r5: first character of string to copy. - STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, - DEST_ALWAYS_ALIGNED); - __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); - __ add(sp, sp, Operand(3 * kPointerSize)); - __ Ret(); - - // Just jump to runtime to create the sub string. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kSubString, 3, 1); -} - - -void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4) { - Label compare_lengths; - // Find minimum length and length difference. - __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); - __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); - __ sub(scratch3, scratch1, Operand(scratch2), SetCC); - Register length_delta = scratch3; - __ mov(scratch1, scratch2, LeaveCC, gt); - Register min_length = scratch1; - STATIC_ASSERT(kSmiTag == 0); - __ tst(min_length, Operand(min_length)); - __ b(eq, &compare_lengths); - - // Untag smi. - __ mov(min_length, Operand(min_length, ASR, kSmiTagSize)); - - // Setup registers so that we only need to increment one register - // in the loop. - __ add(scratch2, min_length, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ add(left, left, Operand(scratch2)); - __ add(right, right, Operand(scratch2)); - // Registers left and right points to the min_length character of strings. - __ rsb(min_length, min_length, Operand(-1)); - Register index = min_length; - // Index starts at -min_length. - - { - // Compare loop. - Label loop; - __ bind(&loop); - // Compare characters. - __ add(index, index, Operand(1), SetCC); - __ ldrb(scratch2, MemOperand(left, index), ne); - __ ldrb(scratch4, MemOperand(right, index), ne); - // Skip to compare lengths with eq condition true. 
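// Taken together, the code in GenerateCompareFlatAsciiStrings amounts to the
// following plain C++ (a sketch: the stub returns the smis LESS/EQUAL/GREATER
// or the raw length difference, this version just returns -1/0/+1):
static int CompareFlatAsciiStringsSketch(const unsigned char* left, int left_len,
                                         const unsigned char* right, int right_len) {
  int min_length = left_len < right_len ? left_len : right_len;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  // Equal up to the shorter length: the length difference decides.
  if (left_len == right_len) return 0;
  return left_len < right_len ? -1 : 1;
}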
- __ b(eq, &compare_lengths); - __ cmp(scratch2, scratch4); - __ b(eq, &loop); - // Fallthrough with eq condition false. - } - // Compare lengths - strings up to min-length are equal. - __ bind(&compare_lengths); - ASSERT(Smi::FromInt(EQUAL) == static_cast(0)); - // Use zero length_delta as result. - __ mov(r0, Operand(length_delta), SetCC, eq); - // Fall through to here if characters compare not-equal. - __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt); - __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt); - __ Ret(); -} - - -void StringCompareStub::Generate(MacroAssembler* masm) { - Label runtime; - - // Stack frame on entry. - // sp[0]: right string - // sp[4]: left string - __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // left - __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // right - - Label not_same; - __ cmp(r0, r1); - __ b(ne, ¬_same); - STATIC_ASSERT(EQUAL == 0); - STATIC_ASSERT(kSmiTag == 0); - __ mov(r0, Operand(Smi::FromInt(EQUAL))); - __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(¬_same); - - // Check that both objects are sequential ascii strings. - __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime); - - // Compare flat ascii strings natively. Remove arguments from stack first. - __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5); - - // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kStringCompare, 2, 1); -} - - -void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; - // Stack on entry: - // sp[0]: second argument. - // sp[4]: first argument. - - // Load the two arguments. - __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument. - __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument. - - // Make sure that both arguments are strings if not known in advance. - if (string_check_) { - STATIC_ASSERT(kSmiTag == 0); - __ JumpIfEitherSmi(r0, r1, &string_add_runtime); - // Load instance types. - __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kStringTag == 0); - // If either is not a string, go to runtime. - __ tst(r4, Operand(kIsNotStringMask)); - __ tst(r5, Operand(kIsNotStringMask), eq); - __ b(ne, &string_add_runtime); - } - - // Both arguments are strings. - // r0: first string - // r1: second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) - { - Label strings_not_empty; - // Check if either of the strings are empty. In that case return the other. - __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); - __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. - __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. - STATIC_ASSERT(kSmiTag == 0); - // Else test if second string is empty. - __ cmp(r3, Operand(Smi::FromInt(0)), ne); - __ b(ne, &strings_not_empty); // If either string was empty, return r0. 
- - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&strings_not_empty); - } - - __ mov(r2, Operand(r2, ASR, kSmiTagSize)); - __ mov(r3, Operand(r3, ASR, kSmiTagSize)); - // Both strings are non-empty. - // r0: first string - // r1: second string - // r2: length of first string - // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) - // Look at the length of the result of adding the two strings. - Label string_add_flat_result, longer_than_two; - // Adding two lengths can't overflow. - STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); - __ add(r6, r2, Operand(r3)); - // Use the runtime system when adding two one character strings, as it - // contains optimizations for this specific case using the symbol table. - __ cmp(r6, Operand(2)); - __ b(ne, &longer_than_two); - - // Check that both strings are non-external ascii strings. - if (!string_check_) { - __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); - } - __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, - &string_add_runtime); - - // Get the two characters forming the sub string. - __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); - - // Try to lookup two character string in symbol table. If it is not found - // just allocate a new one. - Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&make_two_character_string); - // Resulting string has length 2 and first chars of two strings - // are combined into single halfword in r2 register. - // So we can fill resulting string without two loops by a single - // halfword store instruction (which assumes that processor is - // in a little endian mode) - __ mov(r6, Operand(2)); - __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); - __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&longer_than_two); - // Check if resulting string will be flat. - __ cmp(r6, Operand(String::kMinNonFlatLength)); - __ b(lt, &string_add_flat_result); - // Handle exceptionally long strings in the runtime system. - STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); - ASSERT(IsPowerOf2(String::kMaxLength + 1)); - // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. - __ cmp(r6, Operand(String::kMaxLength + 1)); - __ b(hs, &string_add_runtime); - - // If result is not supposed to be flat, allocate a cons string object. - // If both strings are ascii the result is an ascii cons string. 
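// The length checks above choose between four strategies for the result of
// the addition. A sketch of that decision (the enum and parameter names are
// illustrative; kMinNonFlatLength and kMaxLength are the real thresholds
// referenced above):
enum AddStrategySketch {
  kProbeSymbolTable,   // total length 2: reuse a cached two-character symbol
  kBuildFlatString,    // short result: allocate and copy the characters
  kBuildConsString,    // long result: allocate a cons cell pointing at both halves
  kCallRuntime         // over the maximum length: let the runtime handle it
};

static AddStrategySketch PickAddStrategy(int total_length,
                                         int min_non_flat_length,
                                         int max_length) {
  if (total_length == 2) return kProbeSymbolTable;
  if (total_length < min_non_flat_length) return kBuildFlatString;
  if (total_length > max_length) return kCallRuntime;
  return kBuildConsString;
}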
- if (!string_check_) { - __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); - } - Label non_ascii, allocated, ascii_data; - STATIC_ASSERT(kTwoByteStringTag == 0); - __ tst(r4, Operand(kStringEncodingMask)); - __ tst(r5, Operand(kStringEncodingMask), ne); - __ b(eq, &non_ascii); - - // Allocate an ASCII cons string. - __ bind(&ascii_data); - __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); - __ bind(&allocated); - // Fill the fields of the cons string. - __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); - __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); - __ mov(r0, Operand(r7)); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&non_ascii); - // At least one of the strings is two-byte. Check whether it happens - // to contain only ascii characters. - // r4: first instance type. - // r5: second instance type. - __ tst(r4, Operand(kAsciiDataHintMask)); - __ tst(r5, Operand(kAsciiDataHintMask), ne); - __ b(ne, &ascii_data); - __ eor(r4, r4, Operand(r5)); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); - __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); - __ b(eq, &ascii_data); - - // Allocate a two byte cons string. - __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime); - __ jmp(&allocated); - - // Handle creating a flat result. First check that both strings are - // sequential and that they have the same encoding. - // r0: first string - // r1: second string - // r2: length of first string - // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) - // r6: sum of lengths. - __ bind(&string_add_flat_result); - if (!string_check_) { - __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); - __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); - } - // Check that both strings are sequential. - STATIC_ASSERT(kSeqStringTag == 0); - __ tst(r4, Operand(kStringRepresentationMask)); - __ tst(r5, Operand(kStringRepresentationMask), eq); - __ b(ne, &string_add_runtime); - // Now check if both strings have the same encoding (ASCII/Two-byte). - // r0: first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r6: sum of lengths.. - Label non_ascii_string_add_flat_result; - ASSERT(IsPowerOf2(kStringEncodingMask)); // Just one bit to test. - __ eor(r7, r4, Operand(r5)); - __ tst(r7, Operand(kStringEncodingMask)); - __ b(ne, &string_add_runtime); - // And see if it's ASCII or two-byte. - __ tst(r4, Operand(kStringEncodingMask)); - __ b(eq, &non_ascii_string_add_flat_result); - - // Both strings are sequential ASCII strings. We also know that they are - // short (since the sum of the lengths is less than kMinNonFlatLength). - // r6: length of resulting flat string - __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime); - // Locate first character of result. - __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Locate first character of first argument. 
- __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // r0: first character of first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r6: first character of result. - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); - - // Load second argument and locate first character. - __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // r1: first character of second string. - // r3: length of second string. - // r6: next character of result. - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); - __ mov(r0, Operand(r7)); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - __ bind(&non_ascii_string_add_flat_result); - // Both strings are sequential two byte strings. - // r0: first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r6: sum of length of strings. - __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime); - // r0: first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r7: result string. - - // Locate first character of result. - __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Locate first character of first argument. - __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - - // r0: first character of first string. - // r1: second string. - // r2: length of first string. - // r3: length of second string. - // r6: first character of result. - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false); - - // Locate first character of second argument. - __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - - // r1: first character of second string. - // r3: length of second string. - // r6: next character of result (after copy of first string). - // r7: result string. - StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); - - __ mov(r0, Operand(r7)); - __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); - __ add(sp, sp, Operand(2 * kPointerSize)); - __ Ret(); - - // Just jump to runtime to add the two strings. - __ bind(&string_add_runtime); - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); -} - - #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 029d59900d..162d97fd98 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -28,8 +28,9 @@ #ifndef V8_ARM_CODEGEN_ARM_H_ #define V8_ARM_CODEGEN_ARM_H_ -#include "ic-inl.h" #include "ast.h" +#include "code-stubs-arm.h" +#include "ic-inl.h" namespace v8 { namespace internal { @@ -270,15 +271,13 @@ class CodeGenerator: public AstVisitor { void AddDeferred(DeferredCode* code) { deferred_.Add(code); } - static const int kUnknownIntValue = -1; - // If the name is an inline runtime function call return the number of // expected arguments. Otherwise return -1. static int InlineRuntimeCallArgumentsCount(Handle name); // Constants related to patching of inlined load/store. static int GetInlinedKeyedLoadInstructionsAfterPatch() { - return FLAG_debug_code ? 27 : 13; + return FLAG_debug_code ? 
32 : 13; } static const int kInlinedKeyedStoreInstructionsAfterPatch = 5; static int GetInlinedNamedStoreInstructionsAfterPatch() { @@ -420,7 +419,8 @@ class CodeGenerator: public AstVisitor { void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode, GenerateInlineSmi inline_smi, - int known_rhs = kUnknownIntValue); + int known_rhs = + GenericBinaryOpStub::kUnknownIntValue); void Comparison(Condition cc, Expression* left, Expression* right, @@ -455,9 +455,6 @@ class CodeGenerator: public AstVisitor { static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle name); bool CheckForInlineRuntimeCall(CallRuntime* node); - static bool PatchInlineRuntimeEntry(Handle name, - const InlineRuntimeLUT& new_entry, - InlineRuntimeLUT* old_entry); static Handle ComputeLazyCompile(int argc); void ProcessDeclarations(ZoneList* declarations); @@ -528,6 +525,8 @@ class CodeGenerator: public AstVisitor { void GenerateRegExpConstructResult(ZoneList* args); + void GenerateRegExpCloneResult(ZoneList* args); + // Support for fast native caches. void GenerateGetFromCache(ZoneList* args); @@ -548,6 +547,9 @@ class CodeGenerator: public AstVisitor { void GenerateIsRegExpEquivalent(ZoneList* args); + void GenerateHasCachedArrayIndex(ZoneList* args); + void GenerateGetCachedArrayIndex(ZoneList* args); + // Simple condition analysis. enum ConditionAnalysis { ALWAYS_TRUE, @@ -610,510 +612,6 @@ class CodeGenerator: public AstVisitor { }; -// Compute a transcendental math function natively, or call the -// TranscendentalCache runtime function. -class TranscendentalCacheStub: public CodeStub { - public: - explicit TranscendentalCacheStub(TranscendentalCache::Type type) - : type_(type) {} - void Generate(MacroAssembler* masm); - private: - TranscendentalCache::Type type_; - Major MajorKey() { return TranscendentalCache; } - int MinorKey() { return type_; } - Runtime::FunctionId RuntimeFunction(); -}; - - -class ToBooleanStub: public CodeStub { - public: - explicit ToBooleanStub(Register tos) : tos_(tos) { } - - void Generate(MacroAssembler* masm); - - private: - Register tos_; - Major MajorKey() { return ToBoolean; } - int MinorKey() { return tos_.code(); } -}; - - -class GenericBinaryOpStub : public CodeStub { - public: - GenericBinaryOpStub(Token::Value op, - OverwriteMode mode, - Register lhs, - Register rhs, - int constant_rhs = CodeGenerator::kUnknownIntValue) - : op_(op), - mode_(mode), - lhs_(lhs), - rhs_(rhs), - constant_rhs_(constant_rhs), - specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)), - runtime_operands_type_(BinaryOpIC::DEFAULT), - name_(NULL) { } - - GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - lhs_(LhsRegister(RegisterBits::decode(key))), - rhs_(RhsRegister(RegisterBits::decode(key))), - constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))), - specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)), - runtime_operands_type_(type_info), - name_(NULL) { } - - private: - Token::Value op_; - OverwriteMode mode_; - Register lhs_; - Register rhs_; - int constant_rhs_; - bool specialized_on_rhs_; - BinaryOpIC::TypeInfo runtime_operands_type_; - char* name_; - - static const int kMaxKnownRhs = 0x40000000; - static const int kKnownRhsKeyBits = 6; - - // Minor key encoding in 17 bits. 
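// The class ...Bits declarations that follow pack the stub's parameters into
// its minor key by giving each field a fixed shift and width. A minimal
// sketch of the mechanism (the shifts and widths here are made up for
// illustration and are not the stub's real layout):
#include <stdint.h>

template <class T, int kShift, int kSize>
struct BitFieldSketch {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed >> kShift) & ((1u << kSize) - 1));
  }
};

typedef BitFieldSketch<int, 0, 2> ModeFieldSketch;      // assumed: bits 0..1
typedef BitFieldSketch<int, 2, 6> KnownIntFieldSketch;  // assumed: bits 2..7

static uint32_t MakeMinorKeySketch(int mode, int known_int_key) {
  return ModeFieldSketch::encode(mode) | KnownIntFieldSketch::encode(known_int_key);
}

// For the known-int field itself (see MinorKeyForKnownInt/KnownBitsForMinorKey
// below): small constants up to 10 are stored as value + 1, larger powers of
// two as 12 + log2(value). For example MinorKeyForKnownInt(5) == 6 and
// KnownBitsForMinorKey(6) == 5; MinorKeyForKnownInt(16) == 16 and
// KnownBitsForMinorKey(16) == 16.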
- class ModeBits: public BitField {}; - class OpBits: public BitField {}; - class TypeInfoBits: public BitField {}; - class RegisterBits: public BitField {}; - class KnownIntBits: public BitField {}; - - Major MajorKey() { return GenericBinaryOp; } - int MinorKey() { - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); - // Encode the parameters in a unique 18 bit value. - return OpBits::encode(op_) - | ModeBits::encode(mode_) - | KnownIntBits::encode(MinorKeyForKnownInt()) - | TypeInfoBits::encode(runtime_operands_type_) - | RegisterBits::encode(lhs_.is(r0)); - } - - void Generate(MacroAssembler* masm); - void HandleNonSmiBitwiseOp(MacroAssembler* masm, - Register lhs, - Register rhs); - void HandleBinaryOpSlowCases(MacroAssembler* masm, - Label* not_smi, - Register lhs, - Register rhs, - const Builtins::JavaScript& builtin); - void GenerateTypeTransition(MacroAssembler* masm); - - static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) { - if (constant_rhs == CodeGenerator::kUnknownIntValue) return false; - if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3; - if (op == Token::MOD) { - if (constant_rhs <= 1) return false; - if (constant_rhs <= 10) return true; - if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true; - return false; - } - return false; - } - - int MinorKeyForKnownInt() { - if (!specialized_on_rhs_) return 0; - if (constant_rhs_ <= 10) return constant_rhs_ + 1; - ASSERT(IsPowerOf2(constant_rhs_)); - int key = 12; - int d = constant_rhs_; - while ((d & 1) == 0) { - key++; - d >>= 1; - } - ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits)); - return key; - } - - int KnownBitsForMinorKey(int key) { - if (!key) return 0; - if (key <= 11) return key - 1; - int d = 1; - while (key != 12) { - key--; - d <<= 1; - } - return d; - } - - Register LhsRegister(bool lhs_is_r0) { - return lhs_is_r0 ? r0 : r1; - } - - Register RhsRegister(bool lhs_is_r0) { - return lhs_is_r0 ? r1 : r0; - } - - bool ShouldGenerateSmiCode() { - return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) && - runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && - runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - bool ShouldGenerateFPCode() { - return runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(runtime_operands_type_); - } - - const char* GetName(); - -#ifdef DEBUG - void Print() { - if (!specialized_on_rhs_) { - PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); - } else { - PrintF("GenericBinaryOpStub (%s by %d)\n", - Token::String(op_), - constant_rhs_); - } - } -#endif -}; - - -class StringHelper : public AllStatic { - public: - // Generate code for copying characters using a simple loop. This should only - // be used in places where the number of characters is small and the - // additional setup and checking in GenerateCopyCharactersLong adds too much - // overhead. Copying of overlapping regions is not supported. - // Dest register ends at the position after the last character written. - static void GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii); - - // Generate code for copying a large number of characters. This function - // is allowed to spend extra time setting up conditions to make copying - // faster. Copying of overlapping regions is not supported. 
- // Dest register ends at the position after the last character written. - static void GenerateCopyCharactersLong(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - int flags); - - - // Probe the symbol table for a two character string. If the string is - // not found by probing a jump to the label not_found is performed. This jump - // does not guarantee that the string is not in the symbol table. If the - // string is found the code falls through with the string in register r0. - // Contents of both c1 and c2 registers are modified. At the exit c1 is - // guaranteed to contain halfword with low and high bytes equal to - // initial contents of c1 and c2 respectively. - static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Register scratch5, - Label* not_found); - - // Generate string hash. - static void GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character); - - static void GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character); - - static void GenerateHashGetHash(MacroAssembler* masm, - Register hash); - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); -}; - - -// Flag that indicates how to generate code for the stub StringAddStub. -enum StringAddFlags { - NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. -}; - - -class StringAddStub: public CodeStub { - public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } - - private: - Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } - - void Generate(MacroAssembler* masm); - - // Should the stub check whether arguments are strings? - bool string_check_; -}; - - -class SubStringStub: public CodeStub { - public: - SubStringStub() {} - - private: - Major MajorKey() { return SubString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - - -class StringCompareStub: public CodeStub { - public: - StringCompareStub() { } - - // Compare two flat ASCII strings and returns result in r0. - // Does not use the stack. - static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4); - - private: - Major MajorKey() { return StringCompare; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - -// This stub can do a fast mod operation without using fp. -// It is tail called from the GenericBinaryOpStub and it always -// returns an answer. It never causes GC so it doesn't need a real frame. -// -// The inputs are always positive Smis. This is never called -// where the denominator is a power of 2. We handle that separately. -// -// If we consider the denominator as an odd number multiplied by a power of 2, -// then: -// * The exponent (power of 2) is in the shift_distance register. -// * The odd number is in the odd_number register. It is always in the range -// of 3 to 25. -// * The bits from the numerator that are to be copied to the answer (there are -// shift_distance of them) are in the mask_bits register. -// * The other bits of the numerator have been shifted down and are in the lhs -// register. 
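// The register protocol described above rests on a simple identity: when the
// denominator is odd_number << shift_distance, the low shift_distance bits of
// the numerator pass straight through to the answer and only the remaining
// high bits need a modulus by the odd factor. A self-contained C++ check of
// that identity (plain integers, not the stub's register protocol):
#include <assert.h>
#include <stdint.h>

static uint32_t ModByOddTimesPowerOfTwo(uint32_t numerator,
                                        uint32_t odd_number,
                                        int shift_distance) {
  uint32_t mask_bits = numerator & ((1u << shift_distance) - 1);  // low bits, kept as-is
  uint32_t high_bits = numerator >> shift_distance;               // the stub's "lhs"
  return ((high_bits % odd_number) << shift_distance) | mask_bits;
}

static void CheckIntegerModIdentity() {
  assert(ModByOddTimesPowerOfTwo(1000, 3, 2) == 1000u % 12u);        // 12 == 3 << 2
  assert(ModByOddTimesPowerOfTwo(123456, 25, 3) == 123456u % 200u);  // 200 == 25 << 3
}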
-class IntegerModStub : public CodeStub { - public: - IntegerModStub(Register result, - Register shift_distance, - Register odd_number, - Register mask_bits, - Register lhs, - Register scratch) - : result_(result), - shift_distance_(shift_distance), - odd_number_(odd_number), - mask_bits_(mask_bits), - lhs_(lhs), - scratch_(scratch) { - // We don't code these in the minor key, so they should always be the same. - // We don't really want to fix that since this stub is rather large and we - // don't want many copies of it. - ASSERT(shift_distance_.is(r9)); - ASSERT(odd_number_.is(r4)); - ASSERT(mask_bits_.is(r3)); - ASSERT(scratch_.is(r5)); - } - - private: - Register result_; - Register shift_distance_; - Register odd_number_; - Register mask_bits_; - Register lhs_; - Register scratch_; - - // Minor key encoding in 16 bits. - class ResultRegisterBits: public BitField {}; - class LhsRegisterBits: public BitField {}; - - Major MajorKey() { return IntegerMod; } - int MinorKey() { - // Encode the parameters in a unique 16 bit value. - return ResultRegisterBits::encode(result_.code()) - | LhsRegisterBits::encode(lhs_.code()); - } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "IntegerModStub"; } - - // Utility functions. - void DigitSum(MacroAssembler* masm, - Register lhs, - int mask, - int shift, - Label* entry); - void DigitSum(MacroAssembler* masm, - Register lhs, - Register scratch, - int mask, - int shift1, - int shift2, - Label* entry); - void ModGetInRangeBySubtraction(MacroAssembler* masm, - Register lhs, - int shift, - int rhs); - void ModReduce(MacroAssembler* masm, - Register lhs, - int max, - int denominator); - void ModAnswer(MacroAssembler* masm, - Register result, - Register shift_distance, - Register mask_bits, - Register sum_of_digits); - - -#ifdef DEBUG - void Print() { PrintF("IntegerModStub\n"); } -#endif -}; - - -// This stub can convert a signed int32 to a heap number (double). It does -// not work for int32s that are in Smi range! No GC occurs during this stub -// so you don't have to set up the frame. -class WriteInt32ToHeapNumberStub : public CodeStub { - public: - WriteInt32ToHeapNumberStub(Register the_int, - Register the_heap_number, - Register scratch) - : the_int_(the_int), - the_heap_number_(the_heap_number), - scratch_(scratch) { } - - private: - Register the_int_; - Register the_heap_number_; - Register scratch_; - - // Minor key encoding in 16 bits. - class IntRegisterBits: public BitField {}; - class HeapNumberRegisterBits: public BitField {}; - class ScratchRegisterBits: public BitField {}; - - Major MajorKey() { return WriteInt32ToHeapNumber; } - int MinorKey() { - // Encode the parameters in a unique 16 bit value. - return IntRegisterBits::encode(the_int_.code()) - | HeapNumberRegisterBits::encode(the_heap_number_.code()) - | ScratchRegisterBits::encode(scratch_.code()); - } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "WriteInt32ToHeapNumberStub"; } - -#ifdef DEBUG - void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } -#endif -}; - - -class NumberToStringStub: public CodeStub { - public: - NumberToStringStub() { } - - // Generate code to do a lookup in the number string cache. If the number in - // the register object is found in the cache the generated code falls through - // with the result in the result register. The object and the result register - // can be the same. 
If the number is not found in the cache the code jumps to - // the label not_found with only the content of register object unchanged. - static void GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Register scratch3, - bool object_is_smi, - Label* not_found); - - private: - Major MajorKey() { return NumberToString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "NumberToStringStub"; } - -#ifdef DEBUG - void Print() { - PrintF("NumberToStringStub\n"); - } -#endif -}; - - -class RecordWriteStub : public CodeStub { - public: - RecordWriteStub(Register object, Register offset, Register scratch) - : object_(object), offset_(offset), scratch_(scratch) { } - - void Generate(MacroAssembler* masm); - - private: - Register object_; - Register offset_; - Register scratch_; - -#ifdef DEBUG - void Print() { - PrintF("RecordWriteStub (object reg %d), (offset reg %d)," - " (scratch reg %d)\n", - object_.code(), offset_.code(), scratch_.code()); - } -#endif - - // Minor key encoding in 12 bits. 4 bits for each of the three - // registers (object, offset and scratch) OOOOAAAASSSS. - class ScratchBits: public BitField {}; - class OffsetBits: public BitField {}; - class ObjectBits: public BitField {}; - - Major MajorKey() { return RecordWrite; } - - int MinorKey() { - // Encode the registers. - return ObjectBits::encode(object_.code()) | - OffsetBits::encode(offset_.code()) | - ScratchBits::encode(scratch_.code()); - } -}; - - } } // namespace v8::internal #endif // V8_ARM_CODEGEN_ARM_H_ diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 2ac9a41326..b2b5cb56b0 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -194,6 +194,13 @@ enum SoftwareInterruptCodes { }; +// Type of VFP register. Determines register encoding. +enum VFPRegPrecision { + kSinglePrecision = 0, + kDoublePrecision = 1 +}; + + typedef int32_t instr_t; @@ -269,6 +276,15 @@ class Instr { inline int VCField() const { return Bit(8); } inline int VAField() const { return Bits(23, 21); } inline int VBField() const { return Bits(6, 5); } + inline int VFPNRegCode(VFPRegPrecision pre) { + return VFPGlueRegCode(pre, 16, 7); + } + inline int VFPMRegCode(VFPRegPrecision pre) { + return VFPGlueRegCode(pre, 0, 5); + } + inline int VFPDRegCode(VFPRegPrecision pre) { + return VFPGlueRegCode(pre, 12, 22); + } // Fields used in Data processing instructions inline Opcode OpcodeField() const { @@ -343,6 +359,17 @@ class Instr { static Instr* At(byte* pc) { return reinterpret_cast(pc); } private: + // Join split register codes, depending on single or double precision. + // four_bit is the position of the least-significant bit of the four + // bit specifier. one_bit is the position of the additional single bit + // specifier. + inline int VFPGlueRegCode(VFPRegPrecision pre, int four_bit, int one_bit) { + if (pre == kSinglePrecision) { + return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit); + } + return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit); + } + // We need to prevent the creation of instances of class Instr. 
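// A worked example for VFPGlueRegCode above, with Bits()/Bit() replaced by the
// already-extracted field values. ARM VFP splits a register number across a
// four-bit field and one extra bit: for single-precision registers the extra
// bit is the low bit of the number, for double-precision it is the high bit.
static int JoinVFPRegCodeSketch(bool single_precision, int four_bits, int one_bit) {
  if (single_precision) return (four_bits << 1) | one_bit;  // s7: (3 << 1) | 1 == 7
  return (one_bit << 4) | four_bits;                        // d17: (1 << 4) | 1 == 17
}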
DISALLOW_IMPLICIT_CONSTRUCTORS(Instr); }; diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index 3a948451b4..8128f7deaf 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -130,41 +130,58 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { static void Generate_DebugBreakCallHelper(MacroAssembler* masm, - RegList pointer_regs) { - // Save the content of all general purpose registers in memory. This copy in - // memory is later pushed onto the JS expression stack for the fake JS frame - // generated and also to the C frame generated on top of that. In the JS - // frame ONLY the registers containing pointers will be pushed on the - // expression stack. This causes the GC to update these pointers so that - // they will have the correct value when returning from the debugger. - __ SaveRegistersToMemory(kJSCallerSaved); - + RegList object_regs, + RegList non_object_regs) { __ EnterInternalFrame(); - // Store the registers containing object pointers on the expression stack to - // make sure that these are correctly updated during GC. - // Use sp as base to push. - __ CopyRegistersFromMemoryToStack(sp, pointer_regs); + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. + ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + if ((object_regs | non_object_regs) != 0) { + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ tst(reg, Operand(0xc0000000)); + __ Assert(eq, "Unable to encode value as smi"); + } + __ mov(reg, Operand(reg, LSL, kSmiTagSize)); + } + } + __ stm(db_w, sp, object_regs | non_object_regs); + } #ifdef DEBUG __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ mov(r0, Operand(0)); // no arguments + __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments __ mov(r1, Operand(ExternalReference::debug_break())); - CEntryStub ceb(1, ExitFrame::MODE_DEBUG); + CEntryStub ceb(1); __ CallStub(&ceb); - // Restore the register values containing object pointers from the expression - // stack in the reverse order as they where pushed. - // Use sp as base to pop. - __ CopyRegistersFromStackToMemory(sp, r3, pointer_regs); + // Restore the register values from the expression stack. + if ((object_regs | non_object_regs) != 0) { + __ ldm(ia_w, sp, object_regs | non_object_regs); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((non_object_regs & (1 << r)) != 0) { + __ mov(reg, Operand(reg, LSR, kSmiTagSize)); + } + if (FLAG_debug_code && + (((object_regs |non_object_regs) & (1 << r)) == 0)) { + __ mov(reg, Operand(kDebugZapValue)); + } + } + } __ LeaveInternalFrame(); - // Finally restore all registers. - __ RestoreRegistersFromMemory(kJSCallerSaved); - // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. @@ -184,7 +201,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) { // ----------------------------------- // Registers r0 and r2 contain objects that need to be pushed on the // expression stack of the fake JS frame. 
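// Generate_DebugBreakCallHelper above keeps raw (non-object) register values
// alive across the GC that may run during the debug-break call by shifting
// them left by kSmiTagSize, so they look like smis and the GC leaves them
// untouched, then shifting them back afterwards. The debug-mode check that
// the two top bits are clear (the tst against 0xc0000000) guarantees the
// value survives the round trip. A plain C++ sketch, assuming the one-bit
// smi tag used here:
#include <assert.h>
#include <stdint.h>

static uint32_t TagAsSmiSketch(uint32_t raw) {
  assert((raw & 0xc0000000u) == 0);  // must still fit after the shift
  return raw << 1;                   // low bit zero == smi tag
}

static uint32_t UntagSmiSketch(uint32_t tagged) {
  return tagged >> 1;
}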
- Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit()); + Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0); } @@ -198,7 +215,7 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) { // ----------------------------------- // Registers r0, r1, and r2 contain objects that need to be pushed on the // expression stack of the fake JS frame. - Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit()); + Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0); } @@ -206,9 +223,8 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key - // -- sp[0] : key - // -- sp[4] : receiver - Generate_DebugBreakCallHelper(masm, r0.bit()); + // -- r1 : receiver + Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0); } @@ -218,31 +234,24 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { // -- r1 : key // -- r2 : receiver // -- lr : return address - Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit()); + Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0); } void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) { // Calling convention for IC call (from ic-arm.cc) // ----------- S t a t e ------------- - // -- r0: number of arguments - // -- r1: receiver - // -- lr: return address + // -- r2 : name // ----------------------------------- - // Register r1 contains an object that needs to be pushed on the expression - // stack of the fake JS frame. r0 is the actual number of arguments not - // encoded as a smi, therefore it cannot be on the expression stack of the - // fake JS frame as it can easily be an invalid pointer (e.g. 1). r0 will be - // pushed on the stack of the C frame and restored from there. - Generate_DebugBreakCallHelper(masm, r1.bit()); + Generate_DebugBreakCallHelper(masm, r2.bit(), 0); } void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) { - // In places other than IC call sites it is expected that r0 is TOS which - // is an object - this is not generally the case so this should be used with - // care. - Generate_DebugBreakCallHelper(masm, r0.bit()); + // Calling convention for construct call (from builtins-arm.cc) + // -- r0 : number of arguments (not smi) + // -- r1 : constructor function + Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit()); } @@ -250,7 +259,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { // In places other than IC call sites it is expected that r0 is TOS which // is an object - this is not generally the case so this should be used with // care. - Generate_DebugBreakCallHelper(masm, r0.bit()); + Generate_DebugBreakCallHelper(masm, r0.bit(), 0); } @@ -258,7 +267,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) { // ----------- S t a t e ------------- // No registers used on entry. // ----------------------------------- - Generate_DebugBreakCallHelper(masm, 0); + Generate_DebugBreakCallHelper(masm, 0, 0); } @@ -280,7 +289,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) { void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) { // In the places where a debug break slot is inserted no registers can contain // object pointers. 
- Generate_DebugBreakCallHelper(masm, 0); + Generate_DebugBreakCallHelper(masm, 0, 0); } diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 0029ed168b..5122f437b9 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -463,7 +463,7 @@ int Decoder::FormatOption(Instr* instr, const char* format) { ASSERT((width + lsb) <= 32); out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, - "#%d", + "%d", instr->Bits(width + lsb - 1, lsb)); return 8; } @@ -931,7 +931,7 @@ void Decoder::DecodeType3(Instr* instr) { if (instr->HasW()) { ASSERT(instr->Bits(5, 4) == 0x1); if (instr->Bit(22) == 0x1) { - Format(instr, "usat 'rd, 'imm05@16, 'rm'shift_sat"); + Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat"); } else { UNREACHABLE(); // SSAT. } @@ -1269,17 +1269,19 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) { if (instr->CoprocessorField() == 0xA) { switch (instr->OpcodeField()) { case 0x8: + case 0xA: if (instr->HasL()) { - Format(instr, "vldr'cond 'Sd, ['rn - 4*'off8]"); + Format(instr, "vldr'cond 'Sd, ['rn - 4*'imm08@00]"); } else { - Format(instr, "vstr'cond 'Sd, ['rn - 4*'off8]"); + Format(instr, "vstr'cond 'Sd, ['rn - 4*'imm08@00]"); } break; case 0xC: + case 0xE: if (instr->HasL()) { - Format(instr, "vldr'cond 'Sd, ['rn + 4*'off8]"); + Format(instr, "vldr'cond 'Sd, ['rn + 4*'imm08@00]"); } else { - Format(instr, "vstr'cond 'Sd, ['rn + 4*'off8]"); + Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]"); } break; default: @@ -1300,16 +1302,16 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) { break; case 0x8: if (instr->HasL()) { - Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]"); + Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]"); } else { - Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]"); + Format(instr, "vstr'cond 'Dd, ['rn - 4*'imm08@00]"); } break; case 0xC: if (instr->HasL()) { - Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]"); + Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]"); } else { - Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]"); + Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]"); } break; default: diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc index 271e4a6f0a..47434392df 100644 --- a/deps/v8/src/arm/frames-arm.cc +++ b/deps/v8/src/arm/frames-arm.cc @@ -37,87 +37,20 @@ namespace v8 { namespace internal { -StackFrame::Type StackFrame::ComputeType(State* state) { - ASSERT(state->fp != NULL); - if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) { - return ARGUMENTS_ADAPTOR; - } - // The marker and function offsets overlap. If the marker isn't a - // smi then the frame is a JavaScript frame -- and the marker is - // really the function. - const int offset = StandardFrameConstants::kMarkerOffset; - Object* marker = Memory::Object_at(state->fp + offset); - if (!marker->IsSmi()) return JAVA_SCRIPT; - return static_cast(Smi::cast(marker)->value()); -} - - StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) { if (fp == 0) return NONE; // Compute frame type and stack pointer. - Address sp = fp + ExitFrameConstants::kSPDisplacement; - const int offset = ExitFrameConstants::kCodeOffset; - Object* code = Memory::Object_at(fp + offset); - bool is_debug_exit = code->IsSmi(); - if (is_debug_exit) { - sp -= kNumJSCallerSaved * kPointerSize; - } + Address sp = fp + ExitFrameConstants::kSPOffset; + // Fill in the state. 
state->sp = sp; state->fp = fp; state->pc_address = reinterpret_cast(sp - 1 * kPointerSize); + ASSERT(*state->pc_address != NULL); return EXIT; } -void ExitFrame::Iterate(ObjectVisitor* v) const { - v->VisitPointer(&code_slot()); - // The arguments are traversed as part of the expression stack of - // the calling frame. -} - - -int JavaScriptFrame::GetProvidedParametersCount() const { - return ComputeParametersCount(); -} - - -Address JavaScriptFrame::GetCallerStackPointer() const { - int arguments; - if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) { - // The arguments for cooked frames are traversed as if they were - // expression stack elements of the calling frame. The reason for - // this rather strange decision is that we cannot access the - // function during mark-compact GCs when the stack is cooked. - // In fact accessing heap objects (like function->shared() below) - // at all during GC is problematic. - arguments = 0; - } else { - // Compute the number of arguments by getting the number of formal - // parameters of the function. We must remember to take the - // receiver into account (+1). - JSFunction* function = JSFunction::cast(this->function()); - arguments = function->shared()->formal_parameter_count() + 1; - } - const int offset = StandardFrameConstants::kCallerSPOffset; - return fp() + offset + (arguments * kPointerSize); -} - - -Address ArgumentsAdaptorFrame::GetCallerStackPointer() const { - const int arguments = Smi::cast(GetExpression(0))->value(); - const int offset = StandardFrameConstants::kCallerSPOffset; - return fp() + offset + (arguments + 1) * kPointerSize; -} - - -Address InternalFrame::GetCallerStackPointer() const { - // Internal frames have no arguments. The stack pointer of the - // caller is at a fixed offset from the frame pointer. - return fp() + StandardFrameConstants::kCallerSPOffset; -} - - } } // namespace v8::internal #endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index 4924c1aeb9..5847a6a2a0 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -96,11 +96,8 @@ class EntryFrameConstants : public AllStatic { class ExitFrameConstants : public AllStatic { public: - // Exit frames have a debug marker on the stack. - static const int kSPDisplacement = -1 * kPointerSize; - - // The debug marker is just above the frame pointer. static const int kCodeOffset = -1 * kPointerSize; + static const int kSPOffset = -1 * kPointerSize; static const int kSavedRegistersOffset = 0 * kPointerSize; diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index b58a4a5854..f32da6dfb1 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -29,6 +29,7 @@ #if defined(V8_TARGET_ARCH_ARM) +#include "code-stubs.h" #include "codegen-inl.h" #include "compiler.h" #include "debug.h" @@ -245,6 +246,13 @@ void FullCodeGenerator::EmitReturnSequence() { } +FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand( + Token::Value op, Expression* left, Expression* right) { + ASSERT(ShouldInlineSmiCase(op)); + return kNoConstants; +} + + void FullCodeGenerator::Apply(Expression::Context context, Register reg) { switch (context) { case Expression::kUninitialized: @@ -266,16 +274,10 @@ void FullCodeGenerator::Apply(Expression::Context context, Register reg) { } break; - case Expression::kValueTest: - case Expression::kTestValue: - // Push an extra copy of the value in case it's needed. 
- __ push(reg); - // Fall through. - case Expression::kTest: - // We always call the runtime on ARM, so push the value as argument. - __ push(reg); - DoTest(context); + // For simplicity we always test the accumulator register. + if (!reg.is(result_register())) __ mov(result_register(), reg); + DoTest(true_label_, false_label_, fall_through_); break; } } @@ -290,8 +292,6 @@ void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) { break; case Expression::kValue: case Expression::kTest: - case Expression::kValueTest: - case Expression::kTestValue: // On ARM we have to move the value into a register to do anything // with it. Move(result_register(), slot); @@ -310,8 +310,6 @@ void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) { // Nothing to do. case Expression::kValue: case Expression::kTest: - case Expression::kValueTest: - case Expression::kTestValue: // On ARM we have to move the value into a register to do anything // with it. __ mov(result_register(), Operand(lit->handle())); @@ -340,15 +338,9 @@ void FullCodeGenerator::ApplyTOS(Expression::Context context) { } break; - case Expression::kValueTest: - case Expression::kTestValue: - // Duplicate the value on the stack in case it's needed. - __ ldr(ip, MemOperand(sp)); - __ push(ip); - // Fall through. - case Expression::kTest: - DoTest(context); + __ pop(result_register()); + DoTest(true_label_, false_label_, fall_through_); break; } } @@ -381,54 +373,9 @@ void FullCodeGenerator::DropAndApply(int count, break; case Expression::kTest: - if (count > 1) __ Drop(count - 1); - __ str(reg, MemOperand(sp)); - DoTest(context); - break; - - case Expression::kValueTest: - case Expression::kTestValue: - if (count == 1) { - __ str(reg, MemOperand(sp)); - __ push(reg); - } else { // count > 1 - __ Drop(count - 2); - __ str(reg, MemOperand(sp, kPointerSize)); - __ str(reg, MemOperand(sp)); - } - DoTest(context); - break; - } -} - -void FullCodeGenerator::PrepareTest(Label* materialize_true, - Label* materialize_false, - Label** if_true, - Label** if_false) { - switch (context_) { - case Expression::kUninitialized: - UNREACHABLE(); - break; - case Expression::kEffect: - // In an effect context, the true and the false case branch to the - // same label. 
- *if_true = *if_false = materialize_true; - break; - case Expression::kValue: - *if_true = materialize_true; - *if_false = materialize_false; - break; - case Expression::kTest: - *if_true = true_label_; - *if_false = false_label_; - break; - case Expression::kValueTest: - *if_true = materialize_true; - *if_false = false_label_; - break; - case Expression::kTestValue: - *if_true = true_label_; - *if_false = materialize_false; + __ Drop(count); + if (!reg.is(result_register())) __ mov(result_register(), reg); + DoTest(true_label_, false_label_, fall_through_); break; } } @@ -471,34 +418,6 @@ void FullCodeGenerator::Apply(Expression::Context context, case Expression::kTest: break; - - case Expression::kValueTest: - __ bind(materialize_true); - switch (location_) { - case kAccumulator: - __ LoadRoot(result_register(), Heap::kTrueValueRootIndex); - break; - case kStack: - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ push(ip); - break; - } - __ jmp(true_label_); - break; - - case Expression::kTestValue: - __ bind(materialize_false); - switch (location_) { - case kAccumulator: - __ LoadRoot(result_register(), Heap::kFalseValueRootIndex); - break; - case kStack: - __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ push(ip); - break; - } - __ jmp(false_label_); - break; } } @@ -527,103 +446,40 @@ void FullCodeGenerator::Apply(Expression::Context context, bool flag) { break; } case Expression::kTest: - __ b(flag ? true_label_ : false_label_); - break; - case Expression::kTestValue: - switch (location_) { - case kAccumulator: - // If value is false it's needed. - if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex); - break; - case kStack: - // If value is false it's needed. - if (!flag) { - __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ push(ip); - } - break; - } - __ b(flag ? true_label_ : false_label_); - break; - case Expression::kValueTest: - switch (location_) { - case kAccumulator: - // If value is true it's needed. - if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex); - break; - case kStack: - // If value is true it's needed. - if (flag) { - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ push(ip); - } - break; + if (flag) { + if (true_label_ != fall_through_) __ b(true_label_); + } else { + if (false_label_ != fall_through_) __ b(false_label_); } - __ b(flag ? true_label_ : false_label_); break; } } -void FullCodeGenerator::DoTest(Expression::Context context) { - // The value to test is pushed on the stack, and duplicated on the stack - // if necessary (for value/test and test/value contexts). - ASSERT_NE(NULL, true_label_); - ASSERT_NE(NULL, false_label_); - +void FullCodeGenerator::DoTest(Label* if_true, + Label* if_false, + Label* fall_through) { // Call the runtime to find the boolean value of the source and then // translate it into control flow to the pair of labels. + __ push(result_register()); __ CallRuntime(Runtime::kToBool, 1); __ LoadRoot(ip, Heap::kTrueValueRootIndex); __ cmp(r0, ip); + Split(eq, if_true, if_false, fall_through); +} - // Complete based on the context. 
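The new DoTest above no longer emits a branch pair unconditionally; it hands the condition to Split, which omits whichever branch would only jump to the next block anyway. A rough standalone sketch of that fall-through-aware decision, with toy names in place of the real MacroAssembler and Label types:

#include <cstdio>

typedef int Label;  // toy label ids stand in for v8::internal::Label*

void EmitBranch(const char* cond, Label target) { printf("b%s L%d\n", cond, target); }
void EmitJump(Label target) { printf("b L%d\n", target); }

// Emit at most the branches that are actually needed: a branch to the
// fall-through block would be a no-op, so it is simply left out.
void Split(const char* cond, const char* negated_cond,
           Label if_true, Label if_false, Label fall_through) {
  if (if_false == fall_through) {
    EmitBranch(cond, if_true);           // only the taken-on-true branch
  } else if (if_true == fall_through) {
    EmitBranch(negated_cond, if_false);  // branch on the inverted condition
  } else {
    EmitBranch(cond, if_true);           // neither target falls through:
    EmitJump(if_false);                  // branch plus unconditional jump
  }
}

int main() {
  Split("eq", "ne", 1, 2, 2);  // false label falls through: one branch
  Split("eq", "ne", 1, 2, 1);  // true label falls through: inverted branch
  Split("eq", "ne", 1, 2, 3);  // neither falls through: branch + jump
  return 0;
}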
- switch (context) { - case Expression::kUninitialized: - case Expression::kEffect: - case Expression::kValue: - UNREACHABLE(); - - case Expression::kTest: - __ b(eq, true_label_); - __ jmp(false_label_); - break; - - case Expression::kValueTest: { - Label discard; - switch (location_) { - case kAccumulator: - __ b(ne, &discard); - __ pop(result_register()); - __ jmp(true_label_); - break; - case kStack: - __ b(eq, true_label_); - break; - } - __ bind(&discard); - __ Drop(1); - __ jmp(false_label_); - break; - } - case Expression::kTestValue: { - Label discard; - switch (location_) { - case kAccumulator: - __ b(eq, &discard); - __ pop(result_register()); - __ jmp(false_label_); - break; - case kStack: - __ b(ne, false_label_); - break; - } - __ bind(&discard); - __ Drop(1); - __ jmp(true_label_); - break; - } +void FullCodeGenerator::Split(Condition cc, + Label* if_true, + Label* if_false, + Label* fall_through) { + if (if_false == fall_through) { + __ b(cc, if_true); + } else if (if_true == fall_through) { + __ b(NegateCondition(cc), if_false); + } else { + __ b(cc, if_true); + __ b(if_false); } } @@ -816,22 +672,23 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Compile the label expression. VisitForValue(clause->label(), kAccumulator); - // Perform the comparison as if via '==='. The comparison stub expects - // the smi vs. smi case to be handled before it is called. - Label slow_case; + // Perform the comparison as if via '==='. __ ldr(r1, MemOperand(sp, 0)); // Switch value. - __ orr(r2, r1, r0); - __ tst(r2, Operand(kSmiTagMask)); - __ b(ne, &slow_case); - __ cmp(r1, r0); - __ b(ne, &next_test); - __ Drop(1); // Switch value is no longer needed. - __ b(clause->body_target()->entry_label()); - + if (ShouldInlineSmiCase(Token::EQ_STRICT)) { + Label slow_case; + __ orr(r2, r1, r0); + __ tst(r2, Operand(kSmiTagMask)); + __ b(ne, &slow_case); + __ cmp(r1, r0); + __ b(ne, &next_test); + __ Drop(1); // Switch value is no longer needed. + __ b(clause->body_target()->entry_label()); __ bind(&slow_case); + } + CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0); __ CallStub(&stub); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); __ b(ne, &next_test); __ Drop(1); // Switch value is no longer needed. __ b(clause->body_target()->entry_label()); @@ -1107,28 +964,33 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { // r2 = RegExp pattern // r1 = RegExp flags // r0 = temp + materialized value (RegExp literal) - __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); - __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); + __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); int literal_offset = - FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; + FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; __ ldr(r0, FieldMemOperand(r4, literal_offset)); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(r0, ip); __ b(ne, &materialized); + + // Create regexp literal using runtime function. + // Result will be in r0. 
__ mov(r3, Operand(Smi::FromInt(expr->literal_index()))); __ mov(r2, Operand(expr->pattern())); __ mov(r1, Operand(expr->flags())); __ Push(r4, r3, r2, r1); __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); + __ bind(&materialized); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; __ push(r0); __ mov(r0, Operand(Smi::FromInt(size))); __ push(r0); __ CallRuntime(Runtime::kAllocateInNewSpace, 1); + // After this, registers are used as follows: // r0: Newly allocated regexp. - // r1: Materialized regexp + // r1: Materialized regexp. // r2: temp. __ pop(r1); __ CopyFields(r0, r1, r2.bit(), size / kPointerSize); @@ -1223,12 +1085,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ mov(r2, Operand(Smi::FromInt(expr->literal_index()))); __ mov(r1, Operand(expr->constant_elements())); __ Push(r3, r2, r1); - if (expr->depth() > 1) { + if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) { + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); + __ CallStub(&stub); + __ IncrementCounter(&Counters::cow_arrays_created_stub, 1, r1, r2); + } else if (expr->depth() > 1) { __ CallRuntime(Runtime::kCreateArrayLiteral, 3); - } else if (length > FastCloneShallowArrayStub::kMaximumLength) { + } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - FastCloneShallowArrayStub stub(length); + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::CLONE_ELEMENTS, length); __ CallStub(&stub); } @@ -1283,10 +1151,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY. enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; LhsKind assign_type = VARIABLE; - Property* prop = expr->target()->AsProperty(); - if (prop != NULL) { - assign_type = - (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY; + Property* property = expr->target()->AsProperty(); + if (property != NULL) { + assign_type = (property->key()->IsPropertyName()) + ? NAMED_PROPERTY + : KEYED_PROPERTY; } // Evaluate LHS expression. @@ -1297,58 +1166,70 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { case NAMED_PROPERTY: if (expr->is_compound()) { // We need the receiver both on the stack and in the accumulator. - VisitForValue(prop->obj(), kAccumulator); + VisitForValue(property->obj(), kAccumulator); __ push(result_register()); } else { - VisitForValue(prop->obj(), kStack); + VisitForValue(property->obj(), kStack); } break; case KEYED_PROPERTY: - // We need the key and receiver on both the stack and in r0 and r1. if (expr->is_compound()) { - VisitForValue(prop->obj(), kStack); - VisitForValue(prop->key(), kAccumulator); + VisitForValue(property->obj(), kStack); + VisitForValue(property->key(), kAccumulator); __ ldr(r1, MemOperand(sp, 0)); __ push(r0); } else { - VisitForValue(prop->obj(), kStack); - VisitForValue(prop->key(), kStack); + VisitForValue(property->obj(), kStack); + VisitForValue(property->key(), kStack); } break; } - // If we have a compound assignment: Get value of LHS expression and - // store in on top of the stack. 
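The compound-assignment path that follows is essentially a desugaring of `target op= value` into load, binary op, store, with the loaded left operand now kept in the accumulator rather than on the stack. A sketch of that shape in plain C++, using a toy variable environment in place of V8's slots and property ICs:

#include <map>
#include <string>

// Toy environment: variables stored by name, standing in for slots/properties.
static std::map<std::string, double> env;

double ApplyBinaryOp(char op, double left, double right) {
  switch (op) {
    case '+': return left + right;
    case '-': return left - right;
    case '*': return left * right;
    default:  return right;
  }
}

// Shape of a compound assignment such as `x += e`: load the target first,
// evaluate the right-hand side, apply the operator, store the result back.
double CompoundAssign(const std::string& target, char op, double rhs) {
  double left = env[target];                     // variable/property load
  double result = ApplyBinaryOp(op, left, rhs);  // binary op / inline smi case
  env[target] = result;                          // store back to the same target
  return result;                                 // value of the whole expression
}

int main() {
  env["x"] = 40.0;
  return CompoundAssign("x", '+', 2.0) == 42.0 ? 0 : 1;  // x += 2
}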
if (expr->is_compound()) { Location saved_location = location_; - location_ = kStack; + location_ = kAccumulator; switch (assign_type) { case VARIABLE: EmitVariableLoad(expr->target()->AsVariableProxy()->var(), Expression::kValue); break; case NAMED_PROPERTY: - EmitNamedPropertyLoad(prop); - __ push(result_register()); + EmitNamedPropertyLoad(property); break; case KEYED_PROPERTY: - EmitKeyedPropertyLoad(prop); - __ push(result_register()); + EmitKeyedPropertyLoad(property); break; } - location_ = saved_location; - } - // Evaluate RHS expression. - Expression* rhs = expr->value(); - VisitForValue(rhs, kAccumulator); + Token::Value op = expr->binary_op(); + ConstantOperand constant = ShouldInlineSmiCase(op) + ? GetConstantOperand(op, expr->target(), expr->value()) + : kNoConstants; + ASSERT(constant == kRightConstant || constant == kNoConstants); + if (constant == kNoConstants) { + __ push(r0); // Left operand goes on the stack. + VisitForValue(expr->value(), kAccumulator); + } - // If we have a compound assignment: Apply operator. - if (expr->is_compound()) { - Location saved_location = location_; - location_ = kAccumulator; - EmitBinaryOp(expr->binary_op(), Expression::kValue); + OverwriteMode mode = expr->value()->ResultOverwriteAllowed() + ? OVERWRITE_RIGHT + : NO_OVERWRITE; + SetSourcePosition(expr->position() + 1); + if (ShouldInlineSmiCase(op)) { + EmitInlineSmiBinaryOp(expr, + op, + Expression::kValue, + mode, + expr->target(), + expr->value(), + constant); + } else { + EmitBinaryOp(op, Expression::kValue, mode); + } location_ = saved_location; + + } else { + VisitForValue(expr->value(), kAccumulator); } // Record source position before possible IC call. @@ -1389,10 +1270,23 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { } +void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr, + Token::Value op, + Expression::Context context, + OverwriteMode mode, + Expression* left, + Expression* right, + ConstantOperand constant) { + ASSERT(constant == kNoConstants); // Only handled case. + EmitBinaryOp(op, context, mode); +} + + void FullCodeGenerator::EmitBinaryOp(Token::Value op, - Expression::Context context) { + Expression::Context context, + OverwriteMode mode) { __ pop(r1); - GenericBinaryOpStub stub(op, NO_OVERWRITE, r1, r0); + GenericBinaryOpStub stub(op, mode, r1, r0); __ CallStub(&stub); Apply(context, r0); } @@ -1821,12 +1715,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { // According to ECMA-262, section 11.2.2, page 44, the function // expression in new calls must be evaluated before the // arguments. - // Push function on the stack. + + // Push constructor on the stack. If it's not a function it's used as + // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is + // ignored. VisitForValue(expr->expression(), kStack); - // Push global object (receiver). - __ ldr(r0, CodeGenerator::GlobalObject()); - __ push(r0); // Push the arguments ("left-to-right") on the stack. ZoneList* args = expr->arguments(); int arg_count = args->length(); @@ -1838,16 +1732,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { // constructor invocation. SetSourcePosition(expr->position()); - // Load function, arg_count into r1 and r0. + // Load function and argument count into r1 and r0. __ mov(r0, Operand(arg_count)); - // Function is in sp[arg_count + 1]. 
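The change just below reflects the new stack layout for `new` calls: the explicit push of the global object as receiver is gone, so with arg_count arguments the constructor now sits arg_count slots above the stack pointer rather than arg_count + 1. A small worked check of the byte offsets, assuming the 4-byte pointers used on ARM:

#include <cassert>

// With the receiver push removed, the stack around `new f(a, b)` looks like
// (pushes left-to-right, stack growing down): f | a | b <- sp. The constructor
// is therefore found at byte offset arg_count * kPointerSize from sp.
int main() {
  const int kPointerSize = 4;   // ARM word size
  const int arg_count = 2;      // new f(a, b)
  int old_offset = (arg_count + 1) * kPointerSize;  // 12: layout with receiver
  int new_offset = arg_count * kPointerSize;        // 8: receiver push removed
  assert(old_offset - new_offset == kPointerSize);  // exactly one slot fewer
  return 0;
}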
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); + __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); Handle construct_builtin(Builtins::builtin(Builtins::JSConstructCall)); __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL); - - // Replace function on TOS with result in r0, or pop it. - DropAndApply(1, context_, r0); + Apply(context_, r0); } @@ -1859,7 +1750,9 @@ void FullCodeGenerator::EmitIsSmi(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ BranchOnSmi(r0, if_true); __ b(if_false); @@ -1876,11 +1769,12 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ tst(r0, Operand(kSmiTagMask | 0x80000000)); - __ b(eq, if_true); - __ b(if_false); + Split(eq, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -1894,7 +1788,10 @@ void FullCodeGenerator::EmitIsObject(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + __ BranchOnSmi(r0, if_false); __ LoadRoot(ip, Heap::kNullValueRootIndex); __ cmp(r0, ip); @@ -1908,8 +1805,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList* args) { __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE)); __ b(lt, if_false); __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE)); - __ b(le, if_true); - __ b(if_false); + Split(le, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -1923,12 +1819,13 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ BranchOnSmi(r0, if_false); __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE); - __ b(ge, if_true); - __ b(if_false); + Split(ge, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -1942,14 +1839,15 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ BranchOnSmi(r0, if_false); __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset)); __ tst(r1, Operand(1 << Map::kIsUndetectable)); - __ b(ne, if_true); - __ b(if_false); + Split(ne, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -1965,7 +1863,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - 
PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only // used in a few functions in runtime.js which should not normally be hit by @@ -1983,12 +1883,13 @@ void FullCodeGenerator::EmitIsFunction(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ BranchOnSmi(r0, if_false); __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); - __ b(eq, if_true); - __ b(if_false); + Split(eq, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2002,12 +1903,13 @@ void FullCodeGenerator::EmitIsArray(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ BranchOnSmi(r0, if_false); __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); - __ b(eq, if_true); - __ b(if_false); + Split(eq, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2021,12 +1923,13 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ BranchOnSmi(r0, if_false); __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); - __ b(eq, if_true); - __ b(if_false); + Split(eq, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2039,7 +1942,9 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); // Get the frame pointer for the calling frame. 
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); @@ -2055,8 +1960,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList* args) { __ bind(&check_frame_marker); __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset)); __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); - __ b(eq, if_true); - __ b(if_false); + Split(eq, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2072,12 +1976,13 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ pop(r1); __ cmp(r0, r1); - __ b(eq, if_true); - __ b(if_false); + Split(eq, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2227,7 +2132,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList* args) { // Move 0x41300000xxxxxxxx (x = random bits) to VFP. __ vmov(d7, r0, r1); // Move 0x4130000000000000 to VFP. - __ mov(r0, Operand(0)); + __ mov(r0, Operand(0, RelocInfo::NONE)); __ vmov(d8, r0, r1); // Subtract and store the result in the heap number. __ vsub(d7, d7, d8); @@ -2634,6 +2539,35 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList* args) { } +void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList* args) { + VisitForValue(args->at(0), kAccumulator); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset)); + __ tst(r0, Operand(String::kContainsCachedArrayIndexMask)); + + __ b(eq, if_true); + __ b(if_false); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList* args) { + ASSERT(args->length() == 1); + VisitForValue(args->at(0), kAccumulator); + __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset)); + __ IndexFromHash(r0, r0); + Apply(context_, r0); +} + + void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { Handle name = expr->name(); if (name->length() > 0 && name->Get(0) == '_') { @@ -2738,19 +2672,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { break; } break; - case Expression::kTestValue: - // Value is false so it's needed. - __ LoadRoot(result_register(), Heap::kUndefinedValueRootIndex); - switch (location_) { - case kAccumulator: - break; - case kStack: - __ push(result_register()); - break; - } - // Fall through. case Expression::kTest: - case Expression::kValueTest: __ jmp(false_label_); break; } @@ -2762,42 +2684,19 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; + Label* fall_through = NULL; // Notice that the labels are swapped. - PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true); - - VisitForControl(expr->expression(), if_true, if_false); - + PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + VisitForControl(expr->expression(), if_true, if_false, fall_through); Apply(context_, if_false, if_true); // Labels swapped. 
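For `!expr` in a test position no boolean is ever materialized: the sub-expression is compiled for control flow with the true and false targets exchanged, which is what the swapped arguments above express. A standalone sketch of the same trick, with invented node and label types:

#include <cstddef>

// Toy compiler fragment: an expression compiled straight to control flow.
struct Expr {
  bool is_not;          // true if this node is `!operand`
  const Expr* operand;  // operand of the negation, if any
  bool leaf_value;      // value of a leaf node, for illustration
};

// "Branch to if_true if expr is true, otherwise to if_false." Negation is
// free: recurse into the operand with the two targets exchanged.
const char* CompileForControl(const Expr* expr,
                              const char* if_true,
                              const char* if_false) {
  if (expr->is_not) {
    return CompileForControl(expr->operand, if_false, if_true);  // swap
  }
  return expr->leaf_value ? if_true : if_false;  // leaf: pick a target
}

int main() {
  const char* t = "if_true";
  const char* f = "if_false";
  Expr leaf = { false, NULL, true };
  Expr negated = { true, &leaf, false };
  return CompileForControl(&negated, t, f) == f ? 0 : 1;  // `!true` ends up false
}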
break; } case Token::TYPEOF: { Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)"); - VariableProxy* proxy = expr->expression()->AsVariableProxy(); - if (proxy != NULL && - !proxy->var()->is_this() && - proxy->var()->is_global()) { - Comment cmnt(masm_, "Global variable"); - __ ldr(r0, CodeGenerator::GlobalObject()); - __ mov(r2, Operand(proxy->name())); - Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); - // Use a regular load, not a contextual load, to avoid a reference - // error. - __ Call(ic, RelocInfo::CODE_TARGET); - __ push(r0); - } else if (proxy != NULL && - proxy->var()->slot() != NULL && - proxy->var()->slot()->type() == Slot::LOOKUP) { - __ mov(r0, Operand(proxy->name())); - __ Push(cp, r0); - __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); - __ push(r0); - } else { - // This expression cannot throw a reference error at the top level. - VisitForValue(expr->expression(), kStack); - } - + VisitForTypeofValue(expr->expression(), kStack); __ CallRuntime(Runtime::kTypeof, 1); Apply(context_, r0); break; @@ -2818,9 +2717,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::SUB: { Comment cmt(masm_, "[ UnaryOperation (SUB)"); - bool can_overwrite = - (expr->expression()->AsBinaryOperation() != NULL && - expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool can_overwrite = expr->expression()->ResultOverwriteAllowed(); UnaryOverwriteMode overwrite = can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; GenericUnaryOpStub stub(Token::SUB, overwrite); @@ -2834,28 +2731,26 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::BIT_NOT: { Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)"); - bool can_overwrite = - (expr->expression()->AsBinaryOperation() != NULL && - expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); - UnaryOverwriteMode overwrite = - can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); - // GenericUnaryOpStub expects the argument to be in the - // accumulator register r0. + // The generic unary operation stub expects the argument to be + // in the accumulator register r0. VisitForValue(expr->expression(), kAccumulator); - // Avoid calling the stub for Smis. - Label smi, done; - __ BranchOnSmi(result_register(), &smi); - // Non-smi: call stub leaving result in accumulator register. + Label done; + if (ShouldInlineSmiCase(expr->op())) { + Label call_stub; + __ BranchOnNotSmi(r0, &call_stub); + __ mvn(r0, Operand(r0)); + // Bit-clear inverted smi-tag. + __ bic(r0, r0, Operand(kSmiTagMask)); + __ b(&done); + __ bind(&call_stub); + } + bool overwrite = expr->expression()->ResultOverwriteAllowed(); + UnaryOverwriteMode mode = + overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; + GenericUnaryOpStub stub(Token::BIT_NOT, mode); __ CallStub(&stub); - __ b(&done); - // Perform operation directly on Smis. - __ bind(&smi); - __ mvn(result_register(), Operand(result_register())); - // Bit-clear inverted smi-tag. - __ bic(result_register(), result_register(), Operand(kSmiTagMask)); __ bind(&done); - Apply(context_, result_register()); + Apply(context_, r0); break; } @@ -2867,6 +2762,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { Comment cmnt(masm_, "[ CountOperation"); + SetSourcePosition(expr->position()); + // Invalid left-hand sides are rewritten to have a 'throw ReferenceError' // as the left-hand side. 
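Both the inlined BIT_NOT fast path above and the count-operation fast path below operate directly on the tagged representation: a small integer v is stored as v << 1 with a zero tag bit, so bitwise-inverting the word flips the tag bit to 1, and clearing it again leaves exactly the smi encoding of ~v, with no untagging or retagging. A quick standalone check of that identity, assuming the usual 1-bit, zero-valued smi tag:

#include <cassert>
#include <cstdint>

const int32_t kSmiTagSize = 1;
const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;

int32_t SmiFromInt(int32_t value) {
  // Tagging is a logical left shift by one; done on the unsigned pattern to
  // keep the sketch free of signed-shift pitfalls.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
}

int main() {
  for (int32_t v = -1000; v <= 1000; v++) {
    int32_t smi = SmiFromInt(v);
    // mvn: bitwise NOT of the tagged word; bic: clear the (now set) tag bit.
    int32_t result = ~smi & ~kSmiTagMask;
    assert(result == SmiFromInt(~v));
  }
  return 0;
}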
if (!expr->expression()->IsValidLeftHandSide()) { @@ -2931,8 +2828,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { break; case Expression::kValue: case Expression::kTest: - case Expression::kValueTest: - case Expression::kTestValue: // Save the result on the stack. If we have a named or keyed property // we store the result under the receiver that is currently on top // of the stack. @@ -2955,7 +2850,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Inline smi case if we are in a loop. Label stub_call, done; int count_value = expr->op() == Token::INC ? 1 : -1; - if (loop_depth() > 0) { + if (ShouldInlineSmiCase(expr->op())) { __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC); __ b(vs, &stub_call); // We could eliminate this smi check if we split the code at @@ -3020,68 +2915,126 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { } -void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { - Comment cmnt(masm_, "[ BinaryOperation"); - switch (expr->op()) { - case Token::COMMA: - VisitForEffect(expr->left()); - Visit(expr->right()); - break; - - case Token::OR: - case Token::AND: - EmitLogicalOperation(expr); - break; - - case Token::ADD: - case Token::SUB: - case Token::DIV: - case Token::MOD: - case Token::MUL: - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SHL: - case Token::SHR: - case Token::SAR: - VisitForValue(expr->left(), kStack); - VisitForValue(expr->right(), kAccumulator); - EmitBinaryOp(expr->op(), context_); - break; - - default: - UNREACHABLE(); +void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) { + VariableProxy* proxy = expr->AsVariableProxy(); + if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) { + Comment cmnt(masm_, "Global variable"); + __ ldr(r0, CodeGenerator::GlobalObject()); + __ mov(r2, Operand(proxy->name())); + Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + // Use a regular load, not a contextual load, to avoid a reference + // error. + __ Call(ic, RelocInfo::CODE_TARGET); + if (where == kStack) __ push(r0); + } else if (proxy != NULL && + proxy->var()->slot() != NULL && + proxy->var()->slot()->type() == Slot::LOOKUP) { + __ mov(r0, Operand(proxy->name())); + __ Push(cp, r0); + __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + if (where == kStack) __ push(r0); + } else { + // This expression cannot throw a reference error at the top level. + VisitForValue(expr, where); } } -void FullCodeGenerator::EmitNullCompare(bool strict, - Register obj, - Register null_const, - Label* if_true, - Label* if_false, - Register scratch) { - __ cmp(obj, null_const); - if (strict) { +bool FullCodeGenerator::TryLiteralCompare(Token::Value op, + Expression* left, + Expression* right, + Label* if_true, + Label* if_false, + Label* fall_through) { + if (op != Token::EQ && op != Token::EQ_STRICT) return false; + + // Check for the pattern: typeof == . 
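The pattern referred to above is a comparison of the form typeof <expression> == <string literal>, for example `typeof x == "number"`: when the right-hand side is a known string, the code below can test the operand's type directly and never build the typeof result string. A toy version of the pattern match, with invented node classes standing in for V8's AST types:

#include <cstddef>
#include <string>

// Minimal stand-ins for the AST nodes involved in the pattern.
struct Expr { virtual ~Expr() {} };
struct StringLiteral : Expr { std::string value; };
struct TypeofOp : Expr { Expr* operand; };

// Fills in the typeof operand and the literal text if the comparison has the
// shape `typeof <expr> == "<string>"`, otherwise reports no match.
bool MatchTypeofCompare(Expr* left, Expr* right,
                        Expr** typeof_operand, std::string* expected) {
  StringLiteral* lit = dynamic_cast<StringLiteral*>(right);
  if (lit == NULL) return false;   // RHS must be a string literal
  TypeofOp* op = dynamic_cast<TypeofOp*>(left);
  if (op == NULL) return false;    // LHS must be a typeof expression
  *typeof_operand = op->operand;
  *expected = lit->value;          // e.g. "number", "string", "undefined", ...
  return true;
}

int main() {
  StringLiteral lit;  lit.value = "number";
  TypeofOp op;        op.operand = NULL;  // operand irrelevant for the match
  Expr* operand = NULL;
  std::string expected;
  bool matched = MatchTypeofCompare(&op, &lit, &operand, &expected);
  return (matched && expected == "number") ? 0 : 1;
}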
+ Literal* right_literal = right->AsLiteral(); + if (right_literal == NULL) return false; + Handle right_literal_value = right_literal->handle(); + if (!right_literal_value->IsString()) return false; + UnaryOperation* left_unary = left->AsUnaryOperation(); + if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false; + Handle check = Handle::cast(right_literal_value); + + VisitForTypeofValue(left_unary->expression(), kAccumulator); + if (check->Equals(Heap::number_symbol())) { + __ tst(r0, Operand(kSmiTagMask)); __ b(eq, if_true); - } else { + __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(r0, ip); + Split(eq, if_true, if_false, fall_through); + } else if (check->Equals(Heap::string_symbol())) { + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); + // Check for undetectable objects => false. + __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); + __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); + __ cmp(r1, Operand(1 << Map::kIsUndetectable)); + __ b(eq, if_false); + __ ldrb(r1, FieldMemOperand(r0, Map::kInstanceTypeOffset)); + __ cmp(r1, Operand(FIRST_NONSTRING_TYPE)); + Split(lt, if_true, if_false, fall_through); + } else if (check->Equals(Heap::boolean_symbol())) { + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(r0, ip); __ b(eq, if_true); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(r0, ip); + Split(eq, if_true, if_false, fall_through); + } else if (check->Equals(Heap::undefined_symbol())) { __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(obj, ip); + __ cmp(r0, ip); __ b(eq, if_true); - __ BranchOnSmi(obj, if_false); - // It can be an undetectable object. - __ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); - __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); - __ tst(scratch, Operand(1 << Map::kIsUndetectable)); - __ b(ne, if_true); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); + // Check for undetectable objects => true. + __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); + __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); + __ cmp(r1, Operand(1 << Map::kIsUndetectable)); + Split(eq, if_true, if_false, fall_through); + } else if (check->Equals(Heap::function_symbol())) { + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); + __ CompareObjectType(r0, r1, r0, JS_FUNCTION_TYPE); + __ b(eq, if_true); + // Regular expressions => 'function' (they are callable). + __ CompareInstanceType(r1, r0, JS_REGEXP_TYPE); + Split(eq, if_true, if_false, fall_through); + } else if (check->Equals(Heap::object_symbol())) { + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); + __ LoadRoot(ip, Heap::kNullValueRootIndex); + __ cmp(r0, ip); + __ b(eq, if_true); + // Regular expressions => 'function', not 'object'. + __ CompareObjectType(r0, r1, r0, JS_REGEXP_TYPE); + __ b(eq, if_false); + // Check for undetectable objects => false. + __ ldrb(r0, FieldMemOperand(r1, Map::kBitFieldOffset)); + __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); + __ cmp(r0, Operand(1 << Map::kIsUndetectable)); + __ b(eq, if_false); + // Check for JS objects => true. 
+ __ ldrb(r0, FieldMemOperand(r1, Map::kInstanceTypeOffset)); + __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE)); + __ b(lt, if_false); + __ cmp(r0, Operand(LAST_JS_OBJECT_TYPE)); + Split(le, if_true, if_false, fall_through); + } else { + if (if_false != fall_through) __ jmp(if_false); } - __ jmp(if_false); + + return true; } void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); + SetSourcePosition(expr->position()); // Always perform the comparison for its control flow. Pack the result // into the expression's context after the comparison is performed. @@ -3089,26 +3042,37 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + Token::Value op = expr->op(); + Expression* left = expr->left(); + Expression* right = expr->right(); + if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) { + Apply(context_, if_true, if_false); + return; + } VisitForValue(expr->left(), kStack); - switch (expr->op()) { + switch (op) { case Token::IN: VisitForValue(expr->right(), kStack); __ InvokeBuiltin(Builtins::IN, CALL_JS); __ LoadRoot(ip, Heap::kTrueValueRootIndex); __ cmp(r0, ip); - __ b(eq, if_true); - __ jmp(if_false); + Split(eq, if_true, if_false, fall_through); break; case Token::INSTANCEOF: { VisitForValue(expr->right(), kStack); InstanceofStub stub; __ CallStub(&stub); + // The stub returns 0 for true. __ tst(r0, r0); - __ b(eq, if_true); // The stub returns 0 for true. - __ jmp(if_false); + Split(eq, if_true, if_false, fall_through); break; } @@ -3116,28 +3080,14 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { VisitForValue(expr->right(), kAccumulator); Condition cc = eq; bool strict = false; - switch (expr->op()) { + switch (op) { case Token::EQ_STRICT: strict = true; // Fall through - case Token::EQ: { + case Token::EQ: cc = eq; __ pop(r1); - // If either operand is constant null we do a fast compare - // against null. - Literal* right_literal = expr->right()->AsLiteral(); - Literal* left_literal = expr->left()->AsLiteral(); - if (right_literal != NULL && right_literal->handle()->IsNull()) { - EmitNullCompare(strict, r1, r0, if_true, if_false, r2); - Apply(context_, if_true, if_false); - return; - } else if (left_literal != NULL && left_literal->handle()->IsNull()) { - EmitNullCompare(strict, r0, r1, if_true, if_false, r2); - Apply(context_, if_true, if_false); - return; - } break; - } case Token::LT: cc = lt; __ pop(r1); @@ -3164,21 +3114,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { UNREACHABLE(); } - // The comparison stub expects the smi vs. smi case to be handled - // before it is called. 
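The smi-vs-smi fast path kept below relies on two facts: OR-ing two tagged words has a zero tag bit only if both tag bits were zero (both operands are smis), and the smi encoding is order-preserving, so tagged values can be compared directly. A small standalone check of both properties, assuming the 1-bit zero tag used throughout:

#include <cassert>
#include <cstdint>

const int32_t kSmiTagMask = 1;

int32_t SmiFromInt(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);  // tag bit 0
}

bool BothSmi(int32_t a, int32_t b) {
  // One OR plus one bit test replaces two separate tag checks.
  return ((a | b) & kSmiTagMask) == 0;
}

int main() {
  int32_t x = SmiFromInt(-7), y = SmiFromInt(42);
  assert(BothSmi(x, y));
  assert(!BothSmi(x, y | 1));      // a pointer-like (tagged) word fails the test
  assert((x < y) == (-7 < 42));    // tagging preserves signed ordering
  return 0;
}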
- Label slow_case; - __ orr(r2, r0, Operand(r1)); - __ BranchOnNotSmi(r2, &slow_case); - __ cmp(r1, r0); - __ b(cc, if_true); - __ jmp(if_false); + if (ShouldInlineSmiCase(op)) { + Label slow_case; + __ orr(r2, r0, Operand(r1)); + __ BranchOnNotSmi(r2, &slow_case); + __ cmp(r1, r0); + Split(cc, if_true, if_false, NULL); + __ bind(&slow_case); + } - __ bind(&slow_case); CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0); __ CallStub(&stub); - __ cmp(r0, Operand(0)); - __ b(cc, if_true); - __ jmp(if_false); + __ cmp(r0, Operand(0, RelocInfo::NONE)); + Split(cc, if_true, if_false, fall_through); } } @@ -3188,6 +3136,38 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } +void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { + Comment cmnt(masm_, "[ CompareToNull"); + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + VisitForValue(expr->expression(), kAccumulator); + __ LoadRoot(r1, Heap::kNullValueRootIndex); + __ cmp(r0, r1); + if (expr->is_strict()) { + Split(eq, if_true, if_false, fall_through); + } else { + __ b(eq, if_true); + __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r1); + __ b(eq, if_true); + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, if_false); + // It can be an undetectable object. + __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); + __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset)); + __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); + __ cmp(r1, Operand(1 << Map::kIsUndetectable)); + Split(eq, if_true, if_false, fall_through); + } + Apply(context_, if_true, if_false); +} + + void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) { __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); Apply(context_, r0); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 1fd7098254..1a76db2ce3 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -30,7 +30,7 @@ #if defined(V8_TARGET_ARCH_ARM) #include "assembler-arm.h" -#include "codegen.h" +#include "code-stubs.h" #include "codegen-inl.h" #include "disasm.h" #include "ic-inl.h" @@ -414,17 +414,17 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) { // Falls through for regular JS object. static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, Register receiver, - Register scratch1, - Register scratch2, + Register map, + Register scratch, int interceptor_bit, Label* slow) { // Check that the object isn't a smi. __ BranchOnSmi(receiver, slow); // Get the map of the receiver. - __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); // Check bit field. - __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); - __ tst(scratch2, + __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); + __ tst(scratch, Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); __ b(nz, slow); // Check that the object is some kind of JS object EXCEPT JS Value type. @@ -432,13 +432,14 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, // we enter the runtime system to make sure that indexing into string // objects work as intended. 
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); - __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); - __ cmp(scratch1, Operand(JS_OBJECT_TYPE)); + __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); + __ cmp(scratch, Operand(JS_OBJECT_TYPE)); __ b(lt, slow); } // Loads an indexed element from a fast case array. +// If not_fast_array is NULL, doesn't perform the elements map check. static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -471,11 +472,15 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // scratch2 - used to hold the loaded value. __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - // Check that the object is in fast mode (not dictionary). - __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); - __ cmp(scratch1, ip); - __ b(ne, not_fast_array); + if (not_fast_array != NULL) { + // Check that the object is in fast mode and writable. + __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + __ cmp(scratch1, ip); + __ b(ne, not_fast_array); + } else { + __ AssertFastElements(elements); + } // Check that the key (index) is within bounds. __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ cmp(key, Operand(scratch1)); @@ -522,32 +527,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm, } -// Picks out an array index from the hash field. -static void GenerateIndexFromHash(MacroAssembler* masm, - Register key, - Register hash) { - // Register use: - // key - holds the overwritten key on exit. - // hash - holds the key's hash. Clobbered. - - // If the hash field contains an array index pick it out. The assert checks - // that the constants for the maximum number of digits for an array index - // cached in the hash field and the number of bits reserved for it does not - // conflict. - ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < - (1 << String::kArrayIndexValueBits)); - // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in - // the low kHashShift bits. - ASSERT(String::kHashShift >= kSmiTagSize); - // Here we actually clobber the key which will be used if calling into - // runtime later. However as the new key is the numeric value of a string key - // there is no difference in using either key. - ASSERT(String::kHashShift >= kSmiTagSize); - __ Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); - __ mov(key, Operand(hash, LSL, kSmiTagSize)); -} - - // Defined in ic.cc. Object* CallIC_Miss(Arguments args); @@ -847,7 +826,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { GenerateMiss(masm, argc); __ bind(&index_string); - GenerateIndexFromHash(masm, r2, r3); + __ IndexFromHash(r3, r2); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); } @@ -1120,16 +1099,23 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { GenerateKeyedLoadReceiverCheck( masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow); + // Check the "has fast elements" bit in the receiver's map which is + // now in r2. 
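Object maps pack several per-map booleans into byte-wide bit fields, and the keyed load below only has to test one of those bits to know the elements backing store is a plain, writable FixedArray. A tiny illustration of that kind of packed flag test; the bit positions here are invented for the example, not the real Map layout:

#include <cassert>
#include <cstdint>

// Invented bit assignments for the sketch only.
const uint8_t kHasFastElementsBit = 1 << 0;
const uint8_t kIsExtensibleBit    = 1 << 1;

bool HasFastElements(uint8_t bit_field2) {
  return (bit_field2 & kHasFastElementsBit) != 0;  // a single tst instruction
}

int main() {
  uint8_t bit_field2 = kHasFastElementsBit | kIsExtensibleBit;
  assert(HasFastElements(bit_field2));
  bit_field2 = static_cast<uint8_t>(bit_field2 & ~kHasFastElementsBit);
  assert(!HasFastElements(bit_field2));
  return 0;
}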
+ __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset)); + __ tst(r3, Operand(1 << Map::kHasFastElements)); + __ b(eq, &check_pixel_array); + GenerateFastArrayLoad( - masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow); + masm, receiver, key, r4, r3, r2, r0, NULL, &slow); __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3); __ Ret(); // Check whether the elements is a pixel array. // r0: key - // r3: elements map - // r4: elements + // r1: receiver __ bind(&check_pixel_array); + __ ldr(r4, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); __ cmp(r3, ip); __ b(ne, &check_number_dictionary); @@ -1237,7 +1223,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ Ret(); __ bind(&index_string); - GenerateIndexFromHash(masm, key, r3); + __ IndexFromHash(r3, key); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); } @@ -1306,7 +1292,7 @@ static void GenerateUInt2Double(MacroAssembler* masm, __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); } else { - __ mov(loword, Operand(0)); + __ mov(loword, Operand(0, RelocInfo::NONE)); __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); } @@ -1690,7 +1676,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // Object case: Check key against length in the elements array. __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); - // Check that the object is in fast mode (not dictionary). + // Check that the object is in fast mode and writable. __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); __ cmp(r4, ip); @@ -1748,8 +1734,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ b(&fast); // Array case: Get the length and the elements array from the JS - // array. Check that the array is in fast mode; if it is the - // length is always a smi. + // array. Check that the array is in fast mode (and writable); if it + // is the length is always a smi. __ bind(&array); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset)); @@ -1779,19 +1765,22 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { } -// Convert int passed in register ival to IEE 754 single precision -// floating point value and store it into register fval. +// Convert and store int passed in register ival to IEEE 754 single precision +// floating point value at memory location (dst + 4 * wordoffset) // If VFP3 is available use it for conversion. -static void ConvertIntToFloat(MacroAssembler* masm, - Register ival, - Register fval, - Register scratch1, - Register scratch2) { +static void StoreIntAsFloat(MacroAssembler* masm, + Register dst, + Register wordoffset, + Register ival, + Register fval, + Register scratch1, + Register scratch2) { if (CpuFeatures::IsSupported(VFP3)) { CpuFeatures::Scope scope(VFP3); __ vmov(s0, ival); + __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); __ vcvt_f32_s32(s0, s0); - __ vmov(fval, s0); + __ vstr(s0, scratch1, 0); } else { Label not_special, done; // Move sign bit from source to destination. This works because the sign @@ -1801,7 +1790,7 @@ static void ConvertIntToFloat(MacroAssembler* masm, __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); // Negate value if it is negative. 
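The non-VFP path this comment sits in builds an IEEE 754 single-precision value by hand: a sign bit on top, an 8-bit exponent biased by 127, and 23 mantissa bits with an implicit leading one. A standalone sketch of the same encoding for positive integers (truncating low-order bits rather than rounding, which is enough for the illustration):

#include <cassert>
#include <cstdint>
#include <cstring>

// Encode a positive integer as IEEE 754 single-precision bits.
uint32_t PositiveIntToFloatBits(uint32_t value) {
  assert(value != 0);
  int msb = 31;
  while (((value >> msb) & 1) == 0) msb--;        // position of the leading 1
  uint32_t exponent = 127 + msb;                  // biased exponent
  uint32_t mantissa = (msb <= 23)
      ? (value << (23 - msb)) & 0x007FFFFFu       // shift fraction into place
      : (value >> (msb - 23)) & 0x007FFFFFu;      // too wide: drop low bits
  return (exponent << 23) | mantissa;             // sign bit stays 0
}

int main() {
  float f;
  uint32_t bits = PositiveIntToFloatBits(10);
  memcpy(&f, &bits, sizeof(f));
  assert(f == 10.0f);                             // bit pattern 0x41200000
  return 0;
}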
- __ rsb(ival, ival, Operand(0), LeaveCC, ne); + __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne); // We have -1, 0 or 1, which we treat specially. Register ival contains // absolute value: it is either equal to 1 (special case of -1 and 1), @@ -1841,6 +1830,7 @@ static void ConvertIntToFloat(MacroAssembler* masm, Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); __ bind(&done); + __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); } } @@ -1935,9 +1925,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, __ str(r5, MemOperand(r3, r4, LSL, 2)); break; case kExternalFloatArray: - // Need to perform int-to-float conversion. - ConvertIntToFloat(masm, r5, r6, r7, r9); - __ str(r6, MemOperand(r3, r4, LSL, 2)); + // Perform int-to-float conversion and store to memory. + StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); break; default: UNREACHABLE(); @@ -1971,9 +1960,9 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, // include -kHeapObjectTag into it. __ sub(r5, r0, Operand(kHeapObjectTag)); __ vldr(d0, r5, HeapNumber::kValueOffset); + __ add(r5, r3, Operand(r4, LSL, 2)); __ vcvt_f32_f64(s0, d0); - __ vmov(r5, s0); - __ str(r5, MemOperand(r3, r4, LSL, 2)); + __ vstr(s0, r5, 0); } else { // Need to perform float-to-int conversion. // Test for NaN or infinity (both give zero). @@ -2086,18 +2075,18 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, // and infinities. All these should be converted to 0. __ mov(r7, Operand(HeapNumber::kExponentMask)); __ and_(r9, r5, Operand(r7), SetCC); - __ mov(r5, Operand(0), LeaveCC, eq); + __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); __ b(eq, &done); __ teq(r9, Operand(r7)); - __ mov(r5, Operand(0), LeaveCC, eq); + __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); __ b(eq, &done); // Unbias exponent. __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); // If exponent is negative than result is 0. - __ mov(r5, Operand(0), LeaveCC, mi); + __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); __ b(mi, &done); // If exponent is too big than result is minimal value. @@ -2113,14 +2102,14 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); __ b(pl, &sign); - __ rsb(r9, r9, Operand(0)); + __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); __ mov(r5, Operand(r5, LSL, r9)); __ rsb(r9, r9, Operand(meaningfull_bits)); __ orr(r5, r5, Operand(r6, LSR, r9)); __ bind(&sign); - __ teq(r7, Operand(0)); - __ rsb(r5, r5, Operand(0), LeaveCC, ne); + __ teq(r7, Operand(0, RelocInfo::NONE)); + __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); __ bind(&done); switch (array_type) { @@ -2217,6 +2206,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) { __ b(ne, &miss); // Check that elements are FixedArray. + // We rely on StoreIC_ArrayLength below to deal with all types of + // fast elements (including COW). __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE); __ b(ne, &miss); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 38c7c28c9d..35544312f1 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -25,6 +25,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#include // For LONG_MIN, LONG_MAX. 
+ #include "v8.h" #if defined(V8_TARGET_ARCH_ARM) @@ -224,7 +226,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2, } int32_t immediate = src2.immediate(); if (immediate == 0) { - mov(dst, Operand(0), LeaveCC, cond); + mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond); return; } if (IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) { @@ -303,7 +305,7 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, } tst(dst, Operand(~satval)); b(eq, &done); - mov(dst, Operand(0), LeaveCC, mi); // 0 if negative. + mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative. mov(dst, Operand(satval), LeaveCC, pl); // satval if positive. bind(&done); } else { @@ -513,7 +515,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { } -void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) { +void MacroAssembler::EnterExitFrame() { // Compute the argv pointer and keep it in a callee-saved register. // r0 is argc. add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); @@ -556,16 +558,6 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) { // Setup argc and the builtin function in callee-saved registers. mov(r4, Operand(r0)); mov(r5, Operand(r1)); - - -#ifdef ENABLE_DEBUGGER_SUPPORT - // Save the state of all registers to the stack from the memory - // location. This is needed to allow nested break points. - if (mode == ExitFrame::MODE_DEBUG) { - // Use sp as base to push. - CopyRegistersFromMemoryToStack(sp, kJSCallerSaved); - } -#endif } @@ -600,21 +592,9 @@ int MacroAssembler::ActivationFrameAlignment() { } -void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { -#ifdef ENABLE_DEBUGGER_SUPPORT - // Restore the memory copy of the registers by digging them out from - // the stack. This is needed to allow nested break points. - if (mode == ExitFrame::MODE_DEBUG) { - // This code intentionally clobbers r2 and r3. - const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize; - const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize; - add(r3, fp, Operand(kOffset)); - CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved); - } -#endif - +void MacroAssembler::LeaveExitFrame() { // Clear top frame. - mov(r3, Operand(0)); + mov(r3, Operand(0, RelocInfo::NONE)); mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); str(r3, MemOperand(ip)); @@ -757,8 +737,7 @@ void MacroAssembler::InvokeFunction(Register fun, SharedFunctionInfo::kFormalParameterCountOffset)); mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize)); ldr(code_reg, - MemOperand(r1, JSFunction::kCodeOffset - kHeapObjectTag)); - add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); + FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); ParameterCount expected(expected_reg); InvokeCode(code_reg, expected, actual, flag); @@ -780,69 +759,11 @@ void MacroAssembler::InvokeFunction(JSFunction* function, InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag); } -#ifdef ENABLE_DEBUGGER_SUPPORT -void MacroAssembler::SaveRegistersToMemory(RegList regs) { - ASSERT((regs & ~kJSCallerSaved) == 0); - // Copy the content of registers to memory location. 
- for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - if ((regs & (1 << r)) != 0) { - Register reg = { r }; - mov(ip, Operand(ExternalReference(Debug_Address::Register(i)))); - str(reg, MemOperand(ip)); - } - } -} - - -void MacroAssembler::RestoreRegistersFromMemory(RegList regs) { - ASSERT((regs & ~kJSCallerSaved) == 0); - // Copy the content of memory location to registers. - for (int i = kNumJSCallerSaved; --i >= 0;) { - int r = JSCallerSavedCode(i); - if ((regs & (1 << r)) != 0) { - Register reg = { r }; - mov(ip, Operand(ExternalReference(Debug_Address::Register(i)))); - ldr(reg, MemOperand(ip)); - } - } -} - - -void MacroAssembler::CopyRegistersFromMemoryToStack(Register base, - RegList regs) { - ASSERT((regs & ~kJSCallerSaved) == 0); - // Copy the content of the memory location to the stack and adjust base. - for (int i = kNumJSCallerSaved; --i >= 0;) { - int r = JSCallerSavedCode(i); - if ((regs & (1 << r)) != 0) { - mov(ip, Operand(ExternalReference(Debug_Address::Register(i)))); - ldr(ip, MemOperand(ip)); - str(ip, MemOperand(base, 4, NegPreIndex)); - } - } -} - - -void MacroAssembler::CopyRegistersFromStackToMemory(Register base, - Register scratch, - RegList regs) { - ASSERT((regs & ~kJSCallerSaved) == 0); - // Copy the content of the stack to the memory location and adjust base. - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - if ((regs & (1 << r)) != 0) { - mov(ip, Operand(ExternalReference(Debug_Address::Register(i)))); - ldr(scratch, MemOperand(base, 4, PostIndex)); - str(scratch, MemOperand(ip)); - } - } -} - +#ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { ASSERT(allow_stub_calls()); - mov(r0, Operand(0)); + mov(r0, Operand(0, RelocInfo::NONE)); mov(r1, Operand(ExternalReference(Runtime::kDebugBreak))); CEntryStub ces(1); Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); @@ -878,7 +799,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location, // The frame pointer does not point to a JS frame so we save NULL // for fp. We expect the code throwing an exception to check fp // before dereferencing it to restore the context. - mov(ip, Operand(0)); // To save a NULL frame pointer. + mov(ip, Operand(0, RelocInfo::NONE)); // To save a NULL frame pointer. mov(r6, Operand(StackHandler::ENTRY)); ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize && StackHandlerConstants::kFPOffset == 2 * kPointerSize @@ -917,7 +838,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); // In debug mode, make sure the lexical context is set. #ifdef DEBUG - cmp(scratch, Operand(0)); + cmp(scratch, Operand(0, RelocInfo::NONE)); Check(ne, "we should not have an empty lexical context"); #endif @@ -1338,6 +1259,21 @@ void MacroAssembler::IllegalOperation(int num_arguments) { } +void MacroAssembler::IndexFromHash(Register hash, Register index) { + // If the hash field contains an array index pick it out. The assert checks + // that the constants for the maximum number of digits for an array index + // cached in the hash field and the number of bits reserved for it does not + // conflict. + ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + (1 << String::kArrayIndexValueBits)); + // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in + // the low kHashShift bits. 
+ STATIC_ASSERT(kSmiTag == 0); + Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); + mov(index, Operand(hash, LSL, kSmiTagSize)); +} + + void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg, Register outHighReg, Register outLowReg) { @@ -1399,6 +1335,104 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi, } +// Tries to get a signed int32 out of a double precision floating point heap +// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the +// 32bits signed integer range. +void MacroAssembler::ConvertToInt32(Register source, + Register dest, + Register scratch, + Register scratch2, + Label *not_int32) { + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + sub(scratch, source, Operand(kHeapObjectTag)); + vldr(d0, scratch, HeapNumber::kValueOffset); + vcvt_s32_f64(s0, d0); + vmov(dest, s0); + // Signed vcvt instruction will saturate to the minimum (0x80000000) or + // maximum (0x7fffffff) signed 32bits integer when the double is out of + // range. When subtracting one, the minimum signed integer becomes the + // maximum signed integer. + sub(scratch, dest, Operand(1)); + cmp(scratch, Operand(LONG_MAX - 1)); + // If equal then dest was LONG_MAX, if greater dest was LONG_MIN. + b(ge, not_int32); + } else { + // This code is faster for doubles that are in the ranges -0x7fffffff to + // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to + // the range of signed int32 values that are not Smis. Jumps to the label + // 'not_int32' if the double isn't in the range -0x80000000.0 to + // 0x80000000.0 (excluding the endpoints). + Label right_exponent, done; + // Get exponent word. + ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); + // Get exponent alone in scratch2. + Ubfx(scratch2, + scratch, + HeapNumber::kExponentShift, + HeapNumber::kExponentBits); + // Load dest with zero. We use this either for the final shift or + // for the answer. + mov(dest, Operand(0, RelocInfo::NONE)); + // Check whether the exponent matches a 32 bit signed int that is not a Smi. + // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is + // the exponent that we are fastest at and also the highest exponent we can + // handle here. + const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; + // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we + // split it up to avoid a constant pool entry. You can't do that in general + // for cmp because of the overflow flag, but we know the exponent is in the + // range 0-2047 so there is no overflow. + int fudge_factor = 0x400; + sub(scratch2, scratch2, Operand(fudge_factor)); + cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); + // If we have a match of the int32-but-not-Smi exponent then skip some + // logic. + b(eq, &right_exponent); + // If the exponent is higher than that then go to slow case. This catches + // numbers that don't fit in a signed int32, infinities and NaNs. + b(gt, not_int32); + + // We know the exponent is smaller than 30 (biased). If it is less than + // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie + // it rounds to zero. + const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; + sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC); + // Dest already has a Smi zero. + b(lt, &done); + + // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to + // get how much to shift down.
+ rsb(dest, scratch2, Operand(30)); + + bind(&right_exponent); + // Get the top bits of the mantissa. + and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); + // Put back the implicit 1. + orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); + // Shift up the mantissa bits to take up the space the exponent used to + // take. We just orred in the implicit bit so that took care of one and + // we want to leave the sign bit 0 so we subtract 2 bits from the shift + // distance. + const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; + mov(scratch2, Operand(scratch2, LSL, shift_distance)); + // Put sign in zero flag. + tst(scratch, Operand(HeapNumber::kSignMask)); + // Get the second half of the double. For some exponents we don't + // actually need this because the bits get shifted out again, but + // it's probably slower to test than just to do it. + ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); + // Shift down 22 bits to get the last 10 bits. + orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); + // Move down according to the exponent. + mov(dest, Operand(scratch, LSR, dest)); + // Fix sign if sign bit was set. + rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne); + bind(&done); + } +} + + void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits) { @@ -1490,30 +1524,22 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, } -void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { - ASSERT(!target.is(r1)); - +void MacroAssembler::GetBuiltinFunction(Register target, + Builtins::JavaScript id) { // Load the builtins object into target register. ldr(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); - // Load the JavaScript builtin function from the builtins object. - ldr(r1, FieldMemOperand(target, + ldr(target, FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id))); +} + +void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { + ASSERT(!target.is(r1)); + GetBuiltinFunction(r1, id); // Load the code entry point from the builtins object. - ldr(target, FieldMemOperand(target, - JSBuiltinsObject::OffsetOfCodeWithId(id))); - if (FLAG_debug_code) { - // Make sure the code objects in the builtins object and in the - // builtin function are the same. - push(r1); - ldr(r1, FieldMemOperand(r1, JSFunction::kCodeOffset)); - cmp(r1, target); - Assert(eq, "Builtin code object changed"); - pop(r1); - } - add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); + ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); } @@ -1567,6 +1593,25 @@ void MacroAssembler::AssertRegisterIsRoot(Register reg, } +void MacroAssembler::AssertFastElements(Register elements) { + if (FLAG_debug_code) { + ASSERT(!elements.is(ip)); + Label ok; + push(elements); + ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); + LoadRoot(ip, Heap::kFixedArrayMapRootIndex); + cmp(elements, ip); + b(eq, &ok); + LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); + cmp(elements, ip); + b(eq, &ok); + Abort("JSObject with fast elements map has slow elements"); + bind(&ok); + pop(elements); + } +} + + void MacroAssembler::Check(Condition cc, const char* msg) { Label L; b(cc, &L); @@ -1773,7 +1818,7 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. 
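As a cross-check on the ConvertToInt32 helper introduced above: the VFP3 path converts with vcvt_s32_f64 and treats a saturated result (LONG_MIN or LONG_MAX, hence the new limits.h include) as "does not fit", while the fallback path decodes the exponent and mantissa words of the HeapNumber directly. The portable C++ sketch below performs the same truncate-towards-zero decoding on the raw binary64 bit pattern; the function name is illustrative, and it ignores the Smi fast path and the register constraints of the generated code.

#include <stdint.h>
#include <string.h>

// Illustrative only: truncate a double towards zero when it fits in int32.
// Returns false roughly where the generated code branches to 'not_int32'
// (unbiased exponent above 30, which also covers NaN and the infinities).
bool DoubleToInt32(double number, int32_t* result) {
  uint64_t bits;
  memcpy(&bits, &number, sizeof(bits));  // Raw IEEE-754 binary64 bit pattern.
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0) {  // Magnitude below 1.0 truncates to zero.
    *result = 0;
    return true;
  }
  if (exponent > 30) return false;  // May not fit in 31 magnitude bits.
  // Restore the implicit leading one, then shift the 53-bit significand down
  // so that only its integer part remains.
  uint64_t significand = (bits & 0xFFFFFFFFFFFFFull) | (1ull << 52);
  uint32_t magnitude = static_cast<uint32_t>(significand >> (52 - exponent));
  *result = (bits >> 63) != 0 ? -static_cast<int32_t>(magnitude)
                              : static_cast<int32_t>(magnitude);
  return true;
}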
#ifdef CAN_USE_ARMV5_INSTRUCTIONS clz(zeros, source); // This instruction is only supported after ARM5. #else - mov(zeros, Operand(0)); + mov(zeros, Operand(0, RelocInfo::NONE)); Move(scratch, source); // Top 16. tst(scratch, Operand(0xffff0000)); diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 836ed74994..febd87e8a6 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -250,14 +250,14 @@ class MacroAssembler: public Assembler { void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter specific kind of exit frame; either normal or debug mode. + // Enter exit frame. // Expects the number of arguments in register r0 and // the builtin function to call in register r1. Exits with argc in // r4, argv in r6, and and the builtin function to call in r5. - void EnterExitFrame(ExitFrame::Mode mode); + void EnterExitFrame(); // Leave the current exit frame. Expects the return value in r0. - void LeaveExitFrame(ExitFrame::Mode mode); + void LeaveExitFrame(); // Get the actual activation frame alignment for target environment. static int ActivationFrameAlignment(); @@ -294,12 +294,6 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Debugger Support - void SaveRegistersToMemory(RegList regs); - void RestoreRegistersFromMemory(RegList regs); - void CopyRegistersFromMemoryToStack(Register base, RegList regs); - void CopyRegistersFromStackToMemory(Register base, - Register scratch, - RegList regs); void DebugBreak(); #endif @@ -475,6 +469,12 @@ class MacroAssembler: public Assembler { // occurred. void IllegalOperation(int num_arguments); + // Picks out an array index from the hash field. + // Register use: + // hash - holds the index's hash. Clobbered. + // index - holds the overwritten index on exit. + void IndexFromHash(Register hash, Register index); + // Get the number of least significant bits from a register void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); @@ -504,6 +504,15 @@ class MacroAssembler: public Assembler { Register scratch1, SwVfpRegister scratch2); + // Convert the HeapNumber pointed to by source to a 32bits signed integer + // dest. If the HeapNumber does not fit into a 32bits signed integer branch + // to not_int32 label. + void ConvertToInt32(Register source, + Register dest, + Register scratch, + Register scratch2, + Label *not_int32); + // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz // instruction. On pre-ARM5 hardware this routine gives the wrong answer // for 0 (31 instead of 32). Source and scratch can be the same in which case @@ -576,6 +585,9 @@ class MacroAssembler: public Assembler { // setup the function in r1. void GetBuiltinEntry(Register target, Builtins::JavaScript id); + // Store the function for the given builtin in the target register. + void GetBuiltinFunction(Register target, Builtins::JavaScript id); + Handle CodeObject() { return code_object_; } @@ -597,6 +609,7 @@ class MacroAssembler: public Assembler { // Use --debug_code to enable. void Assert(Condition cc, const char* msg); void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index); + void AssertFastElements(Register elements); // Like Assert(), but always enabled. 
void Check(Condition cc, const char* msg); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index c67c7aacaa..8f45886d92 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -31,12 +31,10 @@ #include "unicode.h" #include "log.h" -#include "ast.h" #include "code-stubs.h" #include "regexp-stack.h" #include "macro-assembler.h" #include "regexp-macro-assembler.h" -#include "arm/macro-assembler-arm.h" #include "arm/regexp-macro-assembler-arm.h" namespace v8 { @@ -191,7 +189,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { Label not_at_start; // Did we start the match at the start of the string at all? __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); BranchOrBacktrack(eq, ¬_at_start); // If we did, are we still at the start of the input? @@ -206,7 +204,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) { // Did we start the match at the start of the string at all? __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); BranchOrBacktrack(eq, on_not_at_start); // If we did, are we still at the start of the input? __ ldr(r1, MemOperand(frame_pointer(), kInputStart)); @@ -366,7 +364,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( __ CallCFunction(function, argument_count); // Check if function returned non-zero for success or zero for failure. - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); BranchOrBacktrack(eq, on_no_match); // On success, increment position by length of capture. __ add(current_input_offset(), current_input_offset(), Operand(r4)); @@ -636,7 +634,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ bind(&stack_limit_hit); CallCheckStackGuardState(r0); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); // If returned value is non-zero, we exit with the returned value as result. __ b(ne, &exit_label_); @@ -663,7 +661,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // string, and store that value in a local variable. __ tst(r1, Operand(r1)); __ mov(r1, Operand(1), LeaveCC, eq); - __ mov(r1, Operand(0), LeaveCC, ne); + __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne); __ str(r1, MemOperand(frame_pointer(), kAtStart)); if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. @@ -686,7 +684,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { // Load previous char as initial value of current character register. Label at_start; __ ldr(r0, MemOperand(frame_pointer(), kAtStart)); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); __ b(ne, &at_start); LoadCurrentCharacterUnchecked(-1, 1); // Load previous char. __ jmp(&start_label_); @@ -753,7 +751,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { SafeCallTarget(&check_preempt_label_); CallCheckStackGuardState(r0); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); // If returning non-zero, we should end execution with the given // result as return value. __ b(ne, &exit_label_); @@ -780,7 +778,7 @@ Handle RegExpMacroAssemblerARM::GetCode(Handle source) { __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. 
- __ cmp(r0, Operand(0)); + __ cmp(r0, Operand(0, RelocInfo::NONE)); __ b(eq, &exit_with_exception); // Otherwise use return value as new stack pointer. __ mov(backtrack_stackpointer(), r0); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index 2c0a8d84d4..93a74d7ca4 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -242,22 +242,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { Label stack_overflow_label_; }; - -// Enter C code from generated RegExp code in a way that allows -// the C code to fix the return address in case of a GC. -// Currently only needed on ARM. -class RegExpCEntryStub: public CodeStub { - public: - RegExpCEntryStub() {} - virtual ~RegExpCEntryStub() {} - void Generate(MacroAssembler* masm); - - private: - Major MajorKey() { return RegExpCEntry; } - int MinorKey() { return 0; } - const char* GetName() { return "RegExpCEntryStub"; } -}; - #endif // V8_INTERPRETED_REGEXP diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index c4cc8d46cb..64262b2b81 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -727,6 +727,10 @@ void Simulator::set_register(int reg, int32_t value) { // the special case of accessing the PC register. int32_t Simulator::get_register(int reg) const { ASSERT((reg >= 0) && (reg < num_registers)); + // Stupid code added to avoid bug in GCC. + // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949 + if (reg >= num_registers) return 0; + // End stupid code. return registers_[reg] + ((reg == pc) ? Instr::kPCReadOffset : 0); } @@ -1378,7 +1382,9 @@ void Simulator::HandleRList(Instr* instr, bool load) { } case 3: { // Print("ib"); - UNIMPLEMENTED(); + start_address = rn_val + 4; + end_address = rn_val + (num_regs * 4); + rn_val = end_address; break; } default: { @@ -2275,13 +2281,6 @@ void Simulator::DecodeUnconditional(Instr* instr) { } -// Depending on value of last_bit flag glue register code from vm and m values -// (where m is expected to be a single bit). -static int GlueRegCode(bool last_bit, int vm, int m) { - return last_bit ? ((vm << 1) | m) : ((m << 4) | vm); -} - - // void Simulator::DecodeTypeVFP(Instr* instr) // The Following ARMv7 VFPv instructions are currently supported. // vmov :Sn = Rt @@ -2299,9 +2298,10 @@ void Simulator::DecodeTypeVFP(Instr* instr) { ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) ); ASSERT(instr->Bits(11, 9) == 0x5); - int vm = instr->VmField(); - int vd = instr->VdField(); - int vn = instr->VnField(); + // Obtain double precision register codes. + int vm = instr->VFPMRegCode(kDoublePrecision); + int vd = instr->VFPDRegCode(kDoublePrecision); + int vn = instr->VFPNRegCode(kDoublePrecision); if (instr->Bit(4) == 0) { if (instr->Opc1Field() == 0x7) { @@ -2309,9 +2309,13 @@ void Simulator::DecodeTypeVFP(Instr* instr) { if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) { // vmov register to register. 
if (instr->SzField() == 0x1) { - set_d_register_from_double(vd, get_double_from_d_register(vm)); + int m = instr->VFPMRegCode(kDoublePrecision); + int d = instr->VFPDRegCode(kDoublePrecision); + set_d_register_from_double(d, get_double_from_d_register(m)); } else { - set_s_register_from_float(vd, get_float_from_s_register(vm)); + int m = instr->VFPMRegCode(kSinglePrecision); + int d = instr->VFPDRegCode(kSinglePrecision); + set_s_register_from_float(d, get_float_from_s_register(m)); } } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) { DecodeVCVTBetweenDoubleAndSingle(instr); @@ -2404,7 +2408,7 @@ void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) { (instr->VAField() == 0x0)); int t = instr->RtField(); - int n = GlueRegCode(true, instr->VnField(), instr->NField()); + int n = instr->VFPNRegCode(kSinglePrecision); bool to_arm_register = (instr->VLField() == 0x1); if (to_arm_register) { @@ -2421,22 +2425,25 @@ void Simulator::DecodeVCMP(Instr* instr) { ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7)); ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) && (instr->Opc3Field() & 0x1)); - // Comparison. - bool dp_operation = (instr->SzField() == 1); + + VFPRegPrecision precision = kSinglePrecision; + if (instr->SzField() == 1) { + precision = kDoublePrecision; + } if (instr->Bit(7) != 0) { // Raising exceptions for quiet NaNs are not supported. UNIMPLEMENTED(); // Not used by V8. } - int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField()); + int d = instr->VFPDRegCode(precision); int m = 0; if (instr->Opc2Field() == 0x4) { - m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField()); + m = instr->VFPMRegCode(precision); } - if (dp_operation) { + if (precision == kDoublePrecision) { double dd_value = get_double_from_d_register(d); double dm_value = 0.0; if (instr->Opc2Field() == 0x4) { @@ -2454,11 +2461,17 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) { ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7)); ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)); - bool double_to_single = (instr->SzField() == 1); - int dst = GlueRegCode(double_to_single, instr->VdField(), instr->DField()); - int src = GlueRegCode(!double_to_single, instr->VmField(), instr->MField()); + VFPRegPrecision dst_precision = kDoublePrecision; + VFPRegPrecision src_precision = kSinglePrecision; + if (instr->SzField() == 1) { + dst_precision = kSinglePrecision; + src_precision = kDoublePrecision; + } + + int dst = instr->VFPDRegCode(dst_precision); + int src = instr->VFPMRegCode(src_precision); - if (double_to_single) { + if (dst_precision == kSinglePrecision) { double val = get_double_from_d_register(src); set_s_register_from_float(dst, static_cast(val)); } else { @@ -2474,13 +2487,13 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) { (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1))); // Conversion between floating-point and integer. 
- int vd = instr->VdField(); - int d = instr->DField(); - int vm = instr->VmField(); - int m = instr->MField(); - bool to_integer = (instr->Bit(18) == 1); - bool dp_operation = (instr->SzField() == 1); + + VFPRegPrecision src_precision = kSinglePrecision; + if (instr->SzField() == 1) { + src_precision = kDoublePrecision; + } + if (to_integer) { bool unsigned_integer = (instr->Bit(16) == 0); if (instr->Bit(7) != 1) { @@ -2488,10 +2501,10 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) { UNIMPLEMENTED(); // Not used by V8. } - int dst = GlueRegCode(true, vd, d); - int src = GlueRegCode(!dp_operation, vm, m); + int dst = instr->VFPDRegCode(kSinglePrecision); + int src = instr->VFPMRegCode(src_precision); - if (dp_operation) { + if (src_precision == kDoublePrecision) { double val = get_double_from_d_register(src); int sint = unsigned_integer ? static_cast(val) : @@ -2509,12 +2522,12 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) { } else { bool unsigned_integer = (instr->Bit(7) == 0); - int dst = GlueRegCode(!dp_operation, vd, d); - int src = GlueRegCode(true, vm, m); + int dst = instr->VFPDRegCode(src_precision); + int src = instr->VFPMRegCode(kSinglePrecision); int val = get_sinteger_from_s_register(src); - if (dp_operation) { + if (src_precision == kDoublePrecision) { if (unsigned_integer) { set_d_register_from_double(dst, static_cast((uint32_t)val)); @@ -2545,9 +2558,11 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) { if (instr->CoprocessorField() == 0xA) { switch (instr->OpcodeField()) { case 0x8: - case 0xC: { // Load and store float to memory. + case 0xA: + case 0xC: + case 0xE: { // Load and store single precision float to memory. int rn = instr->RnField(); - int vd = instr->VdField(); + int vd = instr->VFPDRegCode(kSinglePrecision); int offset = instr->Immed8Field(); if (!instr->HasU()) { offset = -offset; diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index fa90ca7d11..344cb6fb90 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -1297,11 +1297,6 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, // Check that the maps haven't changed. CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss); - if (object->IsGlobalObject()) { - __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); - __ str(r3, MemOperand(sp, argc * kPointerSize)); - } - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush), argc + 1, 1); @@ -1349,11 +1344,6 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, // Check that the maps haven't changed. CheckPrototypes(JSObject::cast(object), r1, holder, r3, r0, r4, name, &miss); - if (object->IsGlobalObject()) { - __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); - __ str(r3, MemOperand(sp, argc * kPointerSize)); - } - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop), argc + 1, 1); @@ -1373,8 +1363,68 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object, JSFunction* function, String* name, CheckType check) { - // TODO(722): implement this. - return Heap::undefined_value(); + // ----------- S t a t e ------------- + // -- r2 : function name + // -- lr : return address + // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) + // -- ... + // -- sp[argc * 4] : receiver + // ----------------------------------- + + // If object is not a string, bail out to regular call. 
+ if (!object->IsString()) return Heap::undefined_value(); + + const int argc = arguments().immediate(); + + Label miss; + Label index_out_of_range; + GenerateNameCheck(name, &miss); + + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype(masm(), + Context::STRING_FUNCTION_INDEX, + r0); + ASSERT(object != holder); + CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, + r1, r3, r4, name, &miss); + + Register receiver = r1; + Register index = r4; + Register scratch = r3; + Register result = r0; + __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); + if (argc > 0) { + __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize)); + } else { + __ LoadRoot(index, Heap::kUndefinedValueRootIndex); + } + + StringCharCodeAtGenerator char_code_at_generator(receiver, + index, + scratch, + result, + &miss, // When not a string. + &miss, // When not a number. + &index_out_of_range, + STRING_INDEX_IS_NUMBER); + char_code_at_generator.GenerateFast(masm()); + __ Drop(argc + 1); + __ Ret(); + + ICRuntimeCallHelper call_helper; + char_code_at_generator.GenerateSlow(masm(), call_helper); + + __ bind(&index_out_of_range); + __ LoadRoot(r0, Heap::kNanValueRootIndex); + __ Drop(argc + 1); + __ Ret(); + + __ bind(&miss); + Object* obj = GenerateMissBranch(); + if (obj->IsFailure()) return obj; + + // Return the generated code. + return GetCode(function); } @@ -1383,8 +1433,71 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object, JSFunction* function, String* name, CheckType check) { - // TODO(722): implement this. - return Heap::undefined_value(); + // ----------- S t a t e ------------- + // -- r2 : function name + // -- lr : return address + // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) + // -- ... + // -- sp[argc * 4] : receiver + // ----------------------------------- + + // If object is not a string, bail out to regular call. + if (!object->IsString()) return Heap::undefined_value(); + + const int argc = arguments().immediate(); + + Label miss; + Label index_out_of_range; + + GenerateNameCheck(name, &miss); + + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype(masm(), + Context::STRING_FUNCTION_INDEX, + r0); + ASSERT(object != holder); + CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, + r1, r3, r4, name, &miss); + + Register receiver = r0; + Register index = r4; + Register scratch1 = r1; + Register scratch2 = r3; + Register result = r0; + __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); + if (argc > 0) { + __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize)); + } else { + __ LoadRoot(index, Heap::kUndefinedValueRootIndex); + } + + StringCharAtGenerator char_at_generator(receiver, + index, + scratch1, + scratch2, + result, + &miss, // When not a string. + &miss, // When not a number. + &index_out_of_range, + STRING_INDEX_IS_NUMBER); + char_at_generator.GenerateFast(masm()); + __ Drop(argc + 1); + __ Ret(); + + ICRuntimeCallHelper call_helper; + char_at_generator.GenerateSlow(masm(), call_helper); + + __ bind(&index_out_of_range); + __ LoadRoot(r0, Heap::kEmptyStringRootIndex); + __ Drop(argc + 1); + __ Ret(); + + __ bind(&miss); + Object* obj = GenerateMissBranch(); + if (obj->IsFailure()) return obj; + + // Return the generated code. 
+ return GetCode(function); } diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index f3c0697b99..e12df64143 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -566,13 +566,6 @@ function ArraySlice(start, end) { function ArraySplice(start, delete_count) { var num_arguments = %_ArgumentsLength(); - // SpiderMonkey and JSC return undefined in the case where no - // arguments are given instead of using the implicit undefined - // arguments. This does not follow ECMA-262, but we do the same for - // compatibility. - // TraceMonkey follows ECMA-262 though. - if (num_arguments == 0) return; - var len = TO_UINT32(this.length); var start_i = TO_INTEGER(start); @@ -953,7 +946,8 @@ function ArrayMap(f, receiver) { function ArrayIndexOf(element, index) { - var length = this.length; + var length = TO_UINT32(this.length); + if (length == 0) return -1; if (IS_UNDEFINED(index)) { index = 0; } else { @@ -963,13 +957,13 @@ function ArrayIndexOf(element, index) { // If index is still negative, search the entire array. if (index < 0) index = 0; } + // Lookup through the array. if (!IS_UNDEFINED(element)) { for (var i = index; i < length; i++) { if (this[i] === element) return i; } return -1; } - // Lookup through the array. for (var i = index; i < length; i++) { if (IS_UNDEFINED(this[i]) && i in this) { return i; @@ -980,7 +974,8 @@ function ArrayIndexOf(element, index) { function ArrayLastIndexOf(element, index) { - var length = this.length; + var length = TO_UINT32(this.length); + if (length == 0) return -1; if (%_ArgumentsLength() < 2) { index = length - 1; } else { diff --git a/deps/v8/src/ast-inl.h b/deps/v8/src/ast-inl.h index 717f68d063..f0a25c171f 100644 --- a/deps/v8/src/ast-inl.h +++ b/deps/v8/src/ast-inl.h @@ -64,8 +64,7 @@ ForStatement::ForStatement(ZoneStringList* labels) cond_(NULL), next_(NULL), may_have_function_literal_(true), - loop_variable_(NULL), - peel_this_loop_(false) { + loop_variable_(NULL) { } diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 92df990063..9ff1be73bc 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -28,7 +28,6 @@ #include "v8.h" #include "ast.h" -#include "data-flow.h" #include "parser.h" #include "scopes.h" #include "string-stream.h" @@ -78,18 +77,17 @@ VariableProxy::VariableProxy(Handle name, var_(NULL), is_this_(is_this), inside_with_(inside_with), - is_trivial_(false), - reaching_definitions_(NULL), - is_primitive_(false) { + is_trivial_(false) { // names must be canonicalized for fast equality checks ASSERT(name->IsSymbol()); } VariableProxy::VariableProxy(bool is_this) - : is_this_(is_this), - reaching_definitions_(NULL), - is_primitive_(false) { + : var_(NULL), + is_this_(is_this), + inside_with_(false), + is_trivial_(false) { } @@ -237,6 +235,59 @@ bool Expression::GuaranteedSmiResult() { return false; } + +void Expression::CopyAnalysisResultsFrom(Expression* other) { + bitfields_ = other->bitfields_; + type_ = other->type_; +} + + +bool UnaryOperation::ResultOverwriteAllowed() { + switch (op_) { + case Token::BIT_NOT: + case Token::SUB: + return true; + default: + return false; + } +} + + +bool BinaryOperation::ResultOverwriteAllowed() { + switch (op_) { + case Token::COMMA: + case Token::OR: + case Token::AND: + return false; + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + case Token::SHL: + case Token::SAR: + case Token::SHR: + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: + return true; + default: + UNREACHABLE(); + } + return false; +} + + 
+BinaryOperation::BinaryOperation(Assignment* assignment) { + ASSERT(assignment->is_compound()); + op_ = assignment->binary_op(); + left_ = assignment->target(); + right_ = assignment->value(); + pos_ = assignment->position(); + CopyAnalysisResultsFrom(assignment); +} + + // ---------------------------------------------------------------------------- // Implementation of AstVisitor @@ -575,218 +626,6 @@ RegExpAlternative::RegExpAlternative(ZoneList* nodes) } } -// IsPrimitive implementation. IsPrimitive is true if the value of an -// expression is known at compile-time to be any JS type other than Object -// (e.g, it is Undefined, Null, Boolean, String, or Number). - -// The following expression types are never primitive because they express -// Object values. -bool FunctionLiteral::IsPrimitive() { return false; } -bool SharedFunctionInfoLiteral::IsPrimitive() { return false; } -bool RegExpLiteral::IsPrimitive() { return false; } -bool ObjectLiteral::IsPrimitive() { return false; } -bool ArrayLiteral::IsPrimitive() { return false; } -bool CatchExtensionObject::IsPrimitive() { return false; } -bool CallNew::IsPrimitive() { return false; } -bool ThisFunction::IsPrimitive() { return false; } - - -// The following expression types are not always primitive because we do not -// have enough information to conclude that they are. -bool Property::IsPrimitive() { return false; } -bool Call::IsPrimitive() { return false; } -bool CallRuntime::IsPrimitive() { return false; } - - -// A variable use is not primitive unless the primitive-type analysis -// determines otherwise. -bool VariableProxy::IsPrimitive() { - ASSERT(!is_primitive_ || (var() != NULL && var()->IsStackAllocated())); - return is_primitive_; -} - -// The value of a conditional is the value of one of the alternatives. It's -// always primitive if both alternatives are always primitive. -bool Conditional::IsPrimitive() { - return then_expression()->IsPrimitive() && else_expression()->IsPrimitive(); -} - - -// A literal is primitive when it is not a JSObject. -bool Literal::IsPrimitive() { return !handle()->IsJSObject(); } - - -// The value of an assignment is the value of its right-hand side. -bool Assignment::IsPrimitive() { - switch (op()) { - case Token::INIT_VAR: - case Token::INIT_CONST: - case Token::ASSIGN: - return value()->IsPrimitive(); - - default: - // {|=, ^=, &=, <<=, >>=, >>>=, +=, -=, *=, /=, %=} - // Arithmetic operations are always primitive. They express Numbers - // with the exception of +, which expresses a Number or a String. - return true; - } -} - - -// Throw does not express a value, so it's trivially always primitive. -bool Throw::IsPrimitive() { return true; } - - -// Unary operations always express primitive values. delete and ! express -// Booleans, void Undefined, typeof String, +, -, and ~ Numbers. -bool UnaryOperation::IsPrimitive() { return true; } - - -// Count operations (pre- and post-fix increment and decrement) always -// express primitive values (Numbers). See ECMA-262-3, 11.3.1, 11.3.2, -// 11.4.4, ane 11.4.5. -bool CountOperation::IsPrimitive() { return true; } - - -// Binary operations depend on the operator. -bool BinaryOperation::IsPrimitive() { - switch (op()) { - case Token::COMMA: - // Value is the value of the right subexpression. - return right()->IsPrimitive(); - - case Token::OR: - case Token::AND: - // Value is the value one of the subexpressions. 
- return left()->IsPrimitive() && right()->IsPrimitive(); - - default: - // {|, ^, &, <<, >>, >>>, +, -, *, /, %} - // Arithmetic operations are always primitive. They express Numbers - // with the exception of +, which expresses a Number or a String. - return true; - } -} - - -// Compare operations always express Boolean values. -bool CompareOperation::IsPrimitive() { return true; } - - -// Overridden IsCritical member functions. IsCritical is true for AST nodes -// whose evaluation is absolutely required (they are never dead) because -// they are externally visible. - -// References to global variables or lookup slots are critical because they -// may have getters. All others, including parameters rewritten to explicit -// property references, are not critical. -bool VariableProxy::IsCritical() { - Variable* var = AsVariable(); - return var != NULL && - (var->slot() == NULL || var->slot()->type() == Slot::LOOKUP); -} - - -// Literals are never critical. -bool Literal::IsCritical() { return false; } - - -// Property assignments and throwing of reference errors are always -// critical. Assignments to escaping variables are also critical. In -// addition the operation of compound assignments is critical if either of -// its operands is non-primitive (the arithmetic operations all use one of -// ToPrimitive, ToNumber, ToInt32, or ToUint32 on each of their operands). -// In this case, we mark the entire AST node as critical because there is -// no binary operation node to mark. -bool Assignment::IsCritical() { - Variable* var = AssignedVariable(); - return var == NULL || - !var->IsStackAllocated() || - (is_compound() && (!target()->IsPrimitive() || !value()->IsPrimitive())); -} - - -// Property references are always critical, because they may have getters. -bool Property::IsCritical() { return true; } - - -// Calls are always critical. -bool Call::IsCritical() { return true; } - - -// +,- use ToNumber on the value of their operand. -bool UnaryOperation::IsCritical() { - ASSERT(op() == Token::ADD || op() == Token::SUB); - return !expression()->IsPrimitive(); -} - - -// Count operations targeting properties and reference errors are always -// critical. Count operations on escaping variables are critical. Count -// operations targeting non-primitives are also critical because they use -// ToNumber. -bool CountOperation::IsCritical() { - Variable* var = AssignedVariable(); - return var == NULL || - !var->IsStackAllocated() || - !expression()->IsPrimitive(); -} - - -// Arithmetic operations all use one of ToPrimitive, ToNumber, ToInt32, or -// ToUint32 on each of their operands. -bool BinaryOperation::IsCritical() { - ASSERT(op() != Token::COMMA); - ASSERT(op() != Token::OR); - ASSERT(op() != Token::AND); - return !left()->IsPrimitive() || !right()->IsPrimitive(); -} - - -// <, >, <=, and >= all use ToPrimitive on both their operands. -bool CompareOperation::IsCritical() { - ASSERT(op() != Token::EQ); - ASSERT(op() != Token::NE); - ASSERT(op() != Token::EQ_STRICT); - ASSERT(op() != Token::NE_STRICT); - ASSERT(op() != Token::INSTANCEOF); - ASSERT(op() != Token::IN); - return !left()->IsPrimitive() || !right()->IsPrimitive(); -} - - -// Implementation of a copy visitor. The visitor create a deep copy -// of ast nodes. Nodes that do not require a deep copy are copied -// with the default copy constructor. - -AstNode::AstNode(AstNode* other) : num_(kNoNumber) { - // AST node number should be unique. Assert that we only copy AstNodes - // before node numbers are assigned. 
- ASSERT(other->num_ == kNoNumber); -} - - -Statement::Statement(Statement* other) - : AstNode(other), statement_pos_(other->statement_pos_) {} - - -Expression::Expression(Expression* other) - : AstNode(other), - bitfields_(other->bitfields_), - type_(other->type_) {} - - -BreakableStatement::BreakableStatement(BreakableStatement* other) - : Statement(other), labels_(other->labels_), type_(other->type_) {} - - -Block::Block(Block* other, ZoneList* statements) - : BreakableStatement(other), - statements_(statements->length()), - is_initializer_block_(other->is_initializer_block_) { - statements_.AddAll(*statements); -} - WhileStatement::WhileStatement(ZoneStringList* labels) : IterationStatement(labels), @@ -795,358 +634,8 @@ WhileStatement::WhileStatement(ZoneStringList* labels) } -ExpressionStatement::ExpressionStatement(ExpressionStatement* other, - Expression* expression) - : Statement(other), expression_(expression) {} - - -IfStatement::IfStatement(IfStatement* other, - Expression* condition, - Statement* then_statement, - Statement* else_statement) - : Statement(other), - condition_(condition), - then_statement_(then_statement), - else_statement_(else_statement) {} - - -EmptyStatement::EmptyStatement(EmptyStatement* other) : Statement(other) {} - - -IterationStatement::IterationStatement(IterationStatement* other, - Statement* body) - : BreakableStatement(other), body_(body) {} - - CaseClause::CaseClause(Expression* label, ZoneList* statements) : label_(label), statements_(statements) { } - -ForStatement::ForStatement(ForStatement* other, - Statement* init, - Expression* cond, - Statement* next, - Statement* body) - : IterationStatement(other, body), - init_(init), - cond_(cond), - next_(next), - may_have_function_literal_(other->may_have_function_literal_), - loop_variable_(other->loop_variable_), - peel_this_loop_(other->peel_this_loop_) {} - - -Assignment::Assignment(Assignment* other, - Expression* target, - Expression* value) - : Expression(other), - op_(other->op_), - target_(target), - value_(value), - pos_(other->pos_), - block_start_(other->block_start_), - block_end_(other->block_end_) {} - - -Property::Property(Property* other, Expression* obj, Expression* key) - : Expression(other), - obj_(obj), - key_(key), - pos_(other->pos_), - type_(other->type_) {} - - -Call::Call(Call* other, - Expression* expression, - ZoneList* arguments) - : Expression(other), - expression_(expression), - arguments_(arguments), - pos_(other->pos_) {} - - -UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression) - : Expression(other), op_(other->op_), expression_(expression) {} - - -BinaryOperation::BinaryOperation(Expression* other, - Token::Value op, - Expression* left, - Expression* right) - : Expression(other), op_(op), left_(left), right_(right) {} - - -CountOperation::CountOperation(CountOperation* other, Expression* expression) - : Expression(other), - is_prefix_(other->is_prefix_), - op_(other->op_), - expression_(expression) {} - - -CompareOperation::CompareOperation(CompareOperation* other, - Expression* left, - Expression* right) - : Expression(other), - op_(other->op_), - left_(left), - right_(right) {} - - -Expression* CopyAstVisitor::DeepCopyExpr(Expression* expr) { - expr_ = NULL; - if (expr != NULL) Visit(expr); - return expr_; -} - - -Statement* CopyAstVisitor::DeepCopyStmt(Statement* stmt) { - stmt_ = NULL; - if (stmt != NULL) Visit(stmt); - return stmt_; -} - - -ZoneList* CopyAstVisitor::DeepCopyExprList( - ZoneList* expressions) { - ZoneList* copy = - new 
ZoneList(expressions->length()); - for (int i = 0; i < expressions->length(); i++) { - copy->Add(DeepCopyExpr(expressions->at(i))); - } - return copy; -} - - -ZoneList* CopyAstVisitor::DeepCopyStmtList( - ZoneList* statements) { - ZoneList* copy = new ZoneList(statements->length()); - for (int i = 0; i < statements->length(); i++) { - copy->Add(DeepCopyStmt(statements->at(i))); - } - return copy; -} - - -void CopyAstVisitor::VisitBlock(Block* stmt) { - stmt_ = new Block(stmt, - DeepCopyStmtList(stmt->statements())); -} - - -void CopyAstVisitor::VisitExpressionStatement( - ExpressionStatement* stmt) { - stmt_ = new ExpressionStatement(stmt, DeepCopyExpr(stmt->expression())); -} - - -void CopyAstVisitor::VisitEmptyStatement(EmptyStatement* stmt) { - stmt_ = new EmptyStatement(stmt); -} - - -void CopyAstVisitor::VisitIfStatement(IfStatement* stmt) { - stmt_ = new IfStatement(stmt, - DeepCopyExpr(stmt->condition()), - DeepCopyStmt(stmt->then_statement()), - DeepCopyStmt(stmt->else_statement())); -} - - -void CopyAstVisitor::VisitContinueStatement(ContinueStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitBreakStatement(BreakStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitReturnStatement(ReturnStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitWithEnterStatement( - WithEnterStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitWithExitStatement(WithExitStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitWhileStatement(WhileStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitForStatement(ForStatement* stmt) { - stmt_ = new ForStatement(stmt, - DeepCopyStmt(stmt->init()), - DeepCopyExpr(stmt->cond()), - DeepCopyStmt(stmt->next()), - DeepCopyStmt(stmt->body())); -} - - -void CopyAstVisitor::VisitForInStatement(ForInStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitTryFinallyStatement( - TryFinallyStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitDebuggerStatement( - DebuggerStatement* stmt) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitConditional(Conditional* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitSlot(Slot* expr) { - UNREACHABLE(); -} - - -void CopyAstVisitor::VisitVariableProxy(VariableProxy* expr) { - expr_ = new VariableProxy(*expr); -} - - -void CopyAstVisitor::VisitLiteral(Literal* expr) { - expr_ = new Literal(*expr); -} - - -void CopyAstVisitor::VisitRegExpLiteral(RegExpLiteral* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitObjectLiteral(ObjectLiteral* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitArrayLiteral(ArrayLiteral* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitCatchExtensionObject( - CatchExtensionObject* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitAssignment(Assignment* expr) { - expr_ = new Assignment(expr, - DeepCopyExpr(expr->target()), - DeepCopyExpr(expr->value())); -} - - 
-void CopyAstVisitor::VisitThrow(Throw* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitProperty(Property* expr) { - expr_ = new Property(expr, - DeepCopyExpr(expr->obj()), - DeepCopyExpr(expr->key())); -} - - -void CopyAstVisitor::VisitCall(Call* expr) { - expr_ = new Call(expr, - DeepCopyExpr(expr->expression()), - DeepCopyExprList(expr->arguments())); -} - - -void CopyAstVisitor::VisitCallNew(CallNew* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitCallRuntime(CallRuntime* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitUnaryOperation(UnaryOperation* expr) { - expr_ = new UnaryOperation(expr, DeepCopyExpr(expr->expression())); -} - - -void CopyAstVisitor::VisitCountOperation(CountOperation* expr) { - expr_ = new CountOperation(expr, - DeepCopyExpr(expr->expression())); -} - - -void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) { - expr_ = new BinaryOperation(expr, - expr->op(), - DeepCopyExpr(expr->left()), - DeepCopyExpr(expr->right())); -} - - -void CopyAstVisitor::VisitCompareOperation(CompareOperation* expr) { - expr_ = new CompareOperation(expr, - DeepCopyExpr(expr->left()), - DeepCopyExpr(expr->right())); -} - - -void CopyAstVisitor::VisitThisFunction(ThisFunction* expr) { - SetStackOverflow(); -} - - -void CopyAstVisitor::VisitDeclaration(Declaration* decl) { - UNREACHABLE(); -} - - } } // namespace v8::internal diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index b9a7a3dd7a..9fcf25672f 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -89,9 +89,11 @@ namespace internal { V(CallNew) \ V(CallRuntime) \ V(UnaryOperation) \ + V(IncrementOperation) \ V(CountOperation) \ V(BinaryOperation) \ V(CompareOperation) \ + V(CompareToNull) \ V(ThisFunction) #define AST_NODE_LIST(V) \ @@ -118,12 +120,6 @@ typedef ZoneList > ZoneObjectList; class AstNode: public ZoneObject { public: - static const int kNoNumber = -1; - - AstNode() : num_(kNoNumber) {} - - explicit AstNode(AstNode* other); - virtual ~AstNode() { } virtual void Accept(AstVisitor* v) = 0; @@ -151,20 +147,6 @@ class AstNode: public ZoneObject { virtual ObjectLiteral* AsObjectLiteral() { return NULL; } virtual ArrayLiteral* AsArrayLiteral() { return NULL; } virtual CompareOperation* AsCompareOperation() { return NULL; } - - // True if the AST node is critical (its execution is needed or externally - // visible in some way). - virtual bool IsCritical() { - UNREACHABLE(); - return true; - } - - int num() { return num_; } - void set_num(int n) { num_ = n; } - - private: - // Support for ast node numbering. - int num_; }; @@ -172,8 +154,6 @@ class Statement: public AstNode { public: Statement() : statement_pos_(RelocInfo::kNoPosition) {} - explicit Statement(Statement* other); - virtual Statement* AsStatement() { return this; } virtual ReturnStatement* AsReturnStatement() { return NULL; } @@ -201,48 +181,33 @@ class Expression: public AstNode { // Evaluated for its value (and side effects). kValue, // Evaluated for control flow (and side effects). - kTest, - // Evaluated for control flow and side effects. Value is also - // needed if true. - kValueTest, - // Evaluated for control flow and side effects. Value is also - // needed if false. 
- kTestValue + kTest }; Expression() : bitfields_(0) {} - explicit Expression(Expression* other); - virtual Expression* AsExpression() { return this; } + virtual bool IsTrivial() { return false; } virtual bool IsValidLeftHandSide() { return false; } - virtual Variable* AssignedVariable() { return NULL; } - // Symbols that cannot be parsed as array indices are considered property // names. We do not treat symbols that can be array indexes as property // names because [] for string objects is handled only by keyed ICs. virtual bool IsPropertyName() { return false; } - // True if the expression does not have (evaluated) subexpressions. - // Function literals are leaves because their subexpressions are not - // evaluated. - virtual bool IsLeaf() { return false; } - - // True if the expression has no side effects and is safe to - // evaluate out of order. - virtual bool IsTrivial() { return false; } - - // True if the expression always has one of the non-Object JS types - // (Undefined, Null, Boolean, String, or Number). - virtual bool IsPrimitive() = 0; - // Mark the expression as being compiled as an expression // statement. This is used to transform postfix increments to // (faster) prefix increments. virtual void MarkAsStatement() { /* do nothing */ } + // True iff the result can be safely overwritten (to avoid allocation). + // False for operations that can return one of their operands. + virtual bool ResultOverwriteAllowed() { return false; } + + // True iff the expression is a literal represented as a smi. + virtual bool IsSmiLiteral() { return false; } + // Static type information for this expression. StaticType* type() { return &type_; } @@ -259,7 +224,8 @@ class Expression: public AstNode { // top operation is a bit operation with a mask, or a shift. bool GuaranteedSmiResult(); - // AST analysis results + // AST analysis results. + void CopyAnalysisResultsFrom(Expression* other); // True if the expression rooted at this node can be compiled by the // side-effect free compiler. @@ -320,11 +286,6 @@ class ValidLeftHandSideSentinel: public Expression { virtual void Accept(AstVisitor* v) { UNREACHABLE(); } static ValidLeftHandSideSentinel* instance() { return &instance_; } - virtual bool IsPrimitive() { - UNREACHABLE(); - return false; - } - private: static ValidLeftHandSideSentinel instance_; }; @@ -353,8 +314,6 @@ class BreakableStatement: public Statement { protected: inline BreakableStatement(ZoneStringList* labels, Type type); - explicit BreakableStatement(BreakableStatement* other); - private: ZoneStringList* labels_; Type type_; @@ -366,10 +325,6 @@ class Block: public BreakableStatement { public: inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block); - // Construct a clone initialized from the original block and - // a deep copy of all statements of the original block. - Block(Block* other, ZoneList* statements); - virtual void Accept(AstVisitor* v); virtual Block* AsBlock() { return this; } @@ -433,10 +388,6 @@ class IterationStatement: public BreakableStatement { protected: explicit inline IterationStatement(ZoneStringList* labels); - // Construct a clone initialized from original and - // a deep copy of the original body. 
- IterationStatement(IterationStatement* other, Statement* body); - void Initialize(Statement* body) { body_ = body; } @@ -486,13 +437,14 @@ class WhileStatement: public IterationStatement { bool may_have_function_literal() const { return may_have_function_literal_; } + void set_may_have_function_literal(bool value) { + may_have_function_literal_ = value; + } private: Expression* cond_; // True if there is a function literal subexpression in the condition. bool may_have_function_literal_; - - friend class AstOptimizer; }; @@ -500,14 +452,6 @@ class ForStatement: public IterationStatement { public: explicit inline ForStatement(ZoneStringList* labels); - // Construct a for-statement initialized from another for-statement - // and deep copies of all parts of the original statement. - ForStatement(ForStatement* other, - Statement* init, - Expression* cond, - Statement* next, - Statement* body); - virtual ForStatement* AsForStatement() { return this; } void Initialize(Statement* init, @@ -528,17 +472,18 @@ class ForStatement: public IterationStatement { void set_cond(Expression* expr) { cond_ = expr; } Statement* next() const { return next_; } void set_next(Statement* stmt) { next_ = stmt; } + bool may_have_function_literal() const { return may_have_function_literal_; } + void set_may_have_function_literal(bool value) { + may_have_function_literal_ = value; + } bool is_fast_smi_loop() { return loop_variable_ != NULL; } Variable* loop_variable() { return loop_variable_; } void set_loop_variable(Variable* var) { loop_variable_ = var; } - bool peel_this_loop() { return peel_this_loop_; } - void set_peel_this_loop(bool b) { peel_this_loop_ = b; } - private: Statement* init_; Expression* cond_; @@ -546,9 +491,6 @@ class ForStatement: public IterationStatement { // True if there is a function literal subexpression in the condition. bool may_have_function_literal_; Variable* loop_variable_; - bool peel_this_loop_; - - friend class AstOptimizer; }; @@ -578,10 +520,6 @@ class ExpressionStatement: public Statement { explicit ExpressionStatement(Expression* expression) : expression_(expression) { } - // Construct an expression statement initialized from another - // expression statement and a deep copy of the original expression. - ExpressionStatement(ExpressionStatement* other, Expression* expression); - virtual void Accept(AstVisitor* v); // Type testing & conversion. @@ -721,13 +659,6 @@ class IfStatement: public Statement { then_statement_(then_statement), else_statement_(else_statement) { } - // Construct an if-statement initialized from another if-statement - // and deep copies of all parts of the original. - IfStatement(IfStatement* other, - Expression* condition, - Statement* then_statement, - Statement* else_statement); - virtual void Accept(AstVisitor* v); bool HasThenStatement() const { return !then_statement()->IsEmpty(); } @@ -834,8 +765,6 @@ class EmptyStatement: public Statement { public: EmptyStatement() {} - explicit EmptyStatement(EmptyStatement* other); - virtual void Accept(AstVisitor* v); // Type testing & conversion. @@ -848,6 +777,8 @@ class Literal: public Expression { explicit Literal(Handle handle) : handle_(handle) { } virtual void Accept(AstVisitor* v); + virtual bool IsTrivial() { return true; } + virtual bool IsSmiLiteral() { return handle_->IsSmi(); } // Type testing & conversion. 
virtual Literal* AsLiteral() { return this; } @@ -865,11 +796,6 @@ class Literal: public Expression { return false; } - virtual bool IsLeaf() { return true; } - virtual bool IsTrivial() { return true; } - virtual bool IsPrimitive(); - virtual bool IsCritical(); - // Identity testers. bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); } bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); } @@ -916,7 +842,6 @@ class ObjectLiteral: public MaterializedLiteral { // to the code generator. class Property: public ZoneObject { public: - enum Kind { CONSTANT, // Property with constant value (compile time). COMPUTED, // Property with computed value (execution time). @@ -954,10 +879,6 @@ class ObjectLiteral: public MaterializedLiteral { virtual ObjectLiteral* AsObjectLiteral() { return this; } virtual void Accept(AstVisitor* v); - virtual bool IsLeaf() { return properties()->is_empty(); } - - virtual bool IsPrimitive(); - Handle constant_properties() const { return constant_properties_; } @@ -984,10 +905,6 @@ class RegExpLiteral: public MaterializedLiteral { virtual void Accept(AstVisitor* v); - virtual bool IsLeaf() { return true; } - - virtual bool IsPrimitive(); - Handle pattern() const { return pattern_; } Handle flags() const { return flags_; } @@ -1012,10 +929,6 @@ class ArrayLiteral: public MaterializedLiteral { virtual void Accept(AstVisitor* v); virtual ArrayLiteral* AsArrayLiteral() { return this; } - virtual bool IsLeaf() { return values()->is_empty(); } - - virtual bool IsPrimitive(); - Handle constant_elements() const { return constant_elements_; } ZoneList* values() const { return values_; } @@ -1036,8 +949,6 @@ class CatchExtensionObject: public Expression { virtual void Accept(AstVisitor* v); - virtual bool IsPrimitive(); - Literal* key() const { return key_; } VariableProxy* value() const { return value_; } @@ -1055,7 +966,10 @@ class VariableProxy: public Expression { virtual Property* AsProperty() { return var_ == NULL ? NULL : var_->AsProperty(); } - virtual VariableProxy* AsVariableProxy() { return this; } + + virtual VariableProxy* AsVariableProxy() { + return this; + } Variable* AsVariable() { return this == NULL || var_ == NULL ? NULL : var_->AsVariable(); @@ -1065,20 +979,12 @@ class VariableProxy: public Expression { return var_ == NULL ? true : var_->IsValidLeftHandSide(); } - virtual bool IsLeaf() { - ASSERT(var_ != NULL); // Variable must be resolved. - return var()->is_global() || var()->rewrite()->IsLeaf(); + virtual bool IsTrivial() { + // Reading from a mutable variable is a side effect, but the + // variable for 'this' is immutable. + return is_this_ || is_trivial_; } - // Reading from a mutable variable is a side effect, but 'this' is - // immutable. - virtual bool IsTrivial() { return is_trivial_; } - - virtual bool IsPrimitive(); - virtual bool IsCritical(); - - void SetIsPrimitive(bool value) { is_primitive_ = value; } - bool IsVariable(Handle n) { return !is_this() && name().is_identical_to(n); } @@ -1092,11 +998,8 @@ class VariableProxy: public Expression { Variable* var() const { return var_; } bool is_this() const { return is_this_; } bool inside_with() const { return inside_with_; } - bool is_trivial() { return is_trivial_; } - void set_is_trivial(bool b) { is_trivial_ = b; } - BitVector* reaching_definitions() { return reaching_definitions_; } - void set_reaching_definitions(BitVector* rd) { reaching_definitions_ = rd; } + void MarkAsTrivial() { is_trivial_ = true; } // Bind this proxy to the variable var. 
void BindTo(Variable* var); @@ -1107,8 +1010,6 @@ class VariableProxy: public Expression { bool is_this_; bool inside_with_; bool is_trivial_; - BitVector* reaching_definitions_; - bool is_primitive_; VariableProxy(Handle name, bool is_this, bool inside_with); explicit VariableProxy(bool is_this); @@ -1125,11 +1026,6 @@ class VariableProxySentinel: public VariableProxy { return &identifier_proxy_; } - virtual bool IsPrimitive() { - UNREACHABLE(); - return false; - } - private: explicit VariableProxySentinel(bool is_this) : VariableProxy(is_this) { } static VariableProxySentinel this_proxy_; @@ -1171,13 +1067,6 @@ class Slot: public Expression { // Type testing & conversion virtual Slot* AsSlot() { return this; } - virtual bool IsLeaf() { return true; } - - virtual bool IsPrimitive() { - UNREACHABLE(); - return false; - } - bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; } // Accessors @@ -1203,8 +1092,6 @@ class Property: public Expression { Property(Expression* obj, Expression* key, int pos, Type type = NORMAL) : obj_(obj), key_(key), pos_(pos), type_(type) { } - Property(Property* other, Expression* obj, Expression* key); - virtual void Accept(AstVisitor* v); // Type testing & conversion @@ -1212,9 +1099,6 @@ class Property: public Expression { virtual bool IsValidLeftHandSide() { return true; } - virtual bool IsPrimitive(); - virtual bool IsCritical(); - Expression* obj() const { return obj_; } Expression* key() const { return key_; } int position() const { return pos_; } @@ -1240,16 +1124,11 @@ class Call: public Expression { Call(Expression* expression, ZoneList* arguments, int pos) : expression_(expression), arguments_(arguments), pos_(pos) { } - Call(Call* other, Expression* expression, ZoneList* arguments); - virtual void Accept(AstVisitor* v); // Type testing and conversion. 
virtual Call* AsCall() { return this; } - virtual bool IsPrimitive(); - virtual bool IsCritical(); - Expression* expression() const { return expression_; } ZoneList* arguments() const { return arguments_; } int position() { return pos_; } @@ -1272,8 +1151,6 @@ class CallNew: public Expression { virtual void Accept(AstVisitor* v); - virtual bool IsPrimitive(); - Expression* expression() const { return expression_; } ZoneList* arguments() const { return arguments_; } int position() { return pos_; } @@ -1298,8 +1175,6 @@ class CallRuntime: public Expression { virtual void Accept(AstVisitor* v); - virtual bool IsPrimitive(); - Handle name() const { return name_; } Runtime::Function* function() const { return function_; } ZoneList* arguments() const { return arguments_; } @@ -1319,16 +1194,12 @@ class UnaryOperation: public Expression { ASSERT(Token::IsUnaryOp(op)); } - UnaryOperation(UnaryOperation* other, Expression* expression); - virtual void Accept(AstVisitor* v); + virtual bool ResultOverwriteAllowed(); // Type testing & conversion virtual UnaryOperation* AsUnaryOperation() { return this; } - virtual bool IsPrimitive(); - virtual bool IsCritical(); - Token::Value op() const { return op_; } Expression* expression() const { return expression_; } @@ -1340,120 +1211,102 @@ class UnaryOperation: public Expression { class BinaryOperation: public Expression { public: - BinaryOperation(Token::Value op, Expression* left, Expression* right) - : op_(op), left_(left), right_(right) { + BinaryOperation(Token::Value op, + Expression* left, + Expression* right, + int pos) + : op_(op), left_(left), right_(right), pos_(pos) { ASSERT(Token::IsBinaryOp(op)); } - // Construct a binary operation with a given operator and left and right - // subexpressions. The rest of the expression state is copied from - // another expression. - BinaryOperation(Expression* other, - Token::Value op, - Expression* left, - Expression* right); + // Create the binary operation corresponding to a compound assignment. + explicit BinaryOperation(Assignment* assignment); virtual void Accept(AstVisitor* v); + virtual bool ResultOverwriteAllowed(); // Type testing & conversion virtual BinaryOperation* AsBinaryOperation() { return this; } - virtual bool IsPrimitive(); - virtual bool IsCritical(); - - // True iff the result can be safely overwritten (to avoid allocation). - // False for operations that can return one of their operands. 
- bool ResultOverwriteAllowed() { - switch (op_) { - case Token::COMMA: - case Token::OR: - case Token::AND: - return false; - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: - case Token::SHL: - case Token::SAR: - case Token::SHR: - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: - return true; - default: - UNREACHABLE(); - } - return false; - } - Token::Value op() const { return op_; } Expression* left() const { return left_; } Expression* right() const { return right_; } + int position() const { return pos_; } private: Token::Value op_; Expression* left_; Expression* right_; + int pos_; }; -class CountOperation: public Expression { +class IncrementOperation: public Expression { public: - CountOperation(bool is_prefix, Token::Value op, Expression* expression) - : is_prefix_(is_prefix), op_(op), expression_(expression) { + IncrementOperation(Token::Value op, Expression* expr) + : op_(op), expression_(expr) { ASSERT(Token::IsCountOp(op)); } - CountOperation(CountOperation* other, Expression* expression); + Token::Value op() const { return op_; } + bool is_increment() { return op_ == Token::INC; } + Expression* expression() const { return expression_; } virtual void Accept(AstVisitor* v); - virtual CountOperation* AsCountOperation() { return this; } + private: + Token::Value op_; + Expression* expression_; + int pos_; +}; - virtual Variable* AssignedVariable() { - return expression()->AsVariableProxy()->AsVariable(); - } - virtual bool IsPrimitive(); - virtual bool IsCritical(); +class CountOperation: public Expression { + public: + CountOperation(bool is_prefix, IncrementOperation* increment, int pos) + : is_prefix_(is_prefix), increment_(increment), pos_(pos) { } + + virtual void Accept(AstVisitor* v); + + virtual CountOperation* AsCountOperation() { return this; } bool is_prefix() const { return is_prefix_; } bool is_postfix() const { return !is_prefix_; } - Token::Value op() const { return op_; } + + Token::Value op() const { return increment_->op(); } Token::Value binary_op() { - return op_ == Token::INC ? Token::ADD : Token::SUB; + return (op() == Token::INC) ? 
Token::ADD : Token::SUB; } - Expression* expression() const { return expression_; } + + Expression* expression() const { return increment_->expression(); } + IncrementOperation* increment() const { return increment_; } + int position() const { return pos_; } virtual void MarkAsStatement() { is_prefix_ = true; } private: bool is_prefix_; - Token::Value op_; - Expression* expression_; + IncrementOperation* increment_; + int pos_; }; class CompareOperation: public Expression { public: - CompareOperation(Token::Value op, Expression* left, Expression* right) - : op_(op), left_(left), right_(right) { + CompareOperation(Token::Value op, + Expression* left, + Expression* right, + int pos) + : op_(op), left_(left), right_(right), pos_(pos) { ASSERT(Token::IsCompareOp(op)); } - CompareOperation(CompareOperation* other, - Expression* left, - Expression* right); - virtual void Accept(AstVisitor* v); - virtual bool IsPrimitive(); - virtual bool IsCritical(); - Token::Value op() const { return op_; } Expression* left() const { return left_; } Expression* right() const { return right_; } + int position() const { return pos_; } // Type testing & conversion virtual CompareOperation* AsCompareOperation() { return this; } @@ -1462,6 +1315,24 @@ class CompareOperation: public Expression { Token::Value op_; Expression* left_; Expression* right_; + int pos_; +}; + + +class CompareToNull: public Expression { + public: + CompareToNull(bool is_strict, Expression* expression) + : is_strict_(is_strict), expression_(expression) { } + + virtual void Accept(AstVisitor* v); + + bool is_strict() const { return is_strict_; } + Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; } + Expression* expression() const { return expression_; } + + private: + bool is_strict_; + Expression* expression_; }; @@ -1480,8 +1351,6 @@ class Conditional: public Expression { virtual void Accept(AstVisitor* v); - virtual bool IsPrimitive(); - Expression* condition() const { return condition_; } Expression* then_expression() const { return then_expression_; } Expression* else_expression() const { return else_expression_; } @@ -1506,20 +1375,11 @@ class Assignment: public Expression { ASSERT(Token::IsAssignmentOp(op)); } - Assignment(Assignment* other, Expression* target, Expression* value); - virtual void Accept(AstVisitor* v); virtual Assignment* AsAssignment() { return this; } - virtual bool IsPrimitive(); - virtual bool IsCritical(); - Assignment* AsSimpleAssignment() { return !is_compound() ? 
this : NULL; } - virtual Variable* AssignedVariable() { - return target()->AsVariableProxy()->AsVariable(); - } - Token::Value binary_op() const; Token::Value op() const { return op_; } @@ -1555,8 +1415,6 @@ class Throw: public Expression { virtual void Accept(AstVisitor* v); - virtual bool IsPrimitive(); - Expression* exception() const { return exception_; } int position() const { return pos_; } @@ -1578,7 +1436,8 @@ class FunctionLiteral: public Expression { int num_parameters, int start_position, int end_position, - bool is_expression) + bool is_expression, + bool contains_loops) : name_(name), scope_(scope), body_(body), @@ -1591,6 +1450,7 @@ class FunctionLiteral: public Expression { start_position_(start_position), end_position_(end_position), is_expression_(is_expression), + contains_loops_(contains_loops), function_token_position_(RelocInfo::kNoPosition), inferred_name_(Heap::empty_string()), try_full_codegen_(false) { @@ -1604,10 +1464,6 @@ class FunctionLiteral: public Expression { // Type testing & conversion virtual FunctionLiteral* AsFunctionLiteral() { return this; } - virtual bool IsLeaf() { return true; } - - virtual bool IsPrimitive(); - Handle name() const { return name_; } Scope* scope() const { return scope_; } ZoneList* body() const { return body_; } @@ -1616,6 +1472,7 @@ class FunctionLiteral: public Expression { int start_position() const { return start_position_; } int end_position() const { return end_position_; } bool is_expression() const { return is_expression_; } + bool contains_loops() const { return contains_loops_; } int materialized_literal_count() { return materialized_literal_count_; } int expected_property_count() { return expected_property_count_; } @@ -1656,6 +1513,7 @@ class FunctionLiteral: public Expression { int start_position_; int end_position_; bool is_expression_; + bool contains_loops_; int function_token_position_; Handle inferred_name_; bool try_full_codegen_; @@ -1675,12 +1533,8 @@ class SharedFunctionInfoLiteral: public Expression { return shared_function_info_; } - virtual bool IsLeaf() { return true; } - virtual void Accept(AstVisitor* v); - virtual bool IsPrimitive(); - private: Handle shared_function_info_; }; @@ -1689,8 +1543,6 @@ class SharedFunctionInfoLiteral: public Expression { class ThisFunction: public Expression { public: virtual void Accept(AstVisitor* v); - virtual bool IsLeaf() { return true; } - virtual bool IsPrimitive(); }; @@ -1895,7 +1747,7 @@ class RegExpText: public RegExpTree { void AddElement(TextElement elm) { elements_.Add(elm); length_ += elm.length(); - }; + } ZoneList* elements() { return &elements_; } private: ZoneList elements_; @@ -2078,29 +1930,6 @@ class AstVisitor BASE_EMBEDDED { bool stack_overflow_; }; - -class CopyAstVisitor : public AstVisitor { - public: - Expression* DeepCopyExpr(Expression* expr); - - Statement* DeepCopyStmt(Statement* stmt); - - private: - ZoneList* DeepCopyExprList(ZoneList* expressions); - - ZoneList* DeepCopyStmtList(ZoneList* statements); - - // AST node visit functions. -#define DECLARE_VISIT(type) virtual void Visit##type(type* node); - AST_NODE_LIST(DECLARE_VISIT) -#undef DECLARE_VISIT - - // Holds the result of copying an expression. - Expression* expr_; - // Holds the result of copying a statement. 
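In the reworked count operations above, CountOperation no longer stores its own token; it wraps an IncrementOperation and derives the equivalent binary operator from it (INC lowers to ADD, DEC to SUB). A minimal self-contained sketch of that mapping, with hypothetical enum values standing in for V8's Token constants:

#include <cassert>

// Hypothetical stand-ins for Token::INC / Token::DEC / Token::ADD / Token::SUB.
enum TokenValue { TOKEN_INC, TOKEN_DEC, TOKEN_ADD, TOKEN_SUB };

class IncrementOperation {
 public:
  explicit IncrementOperation(TokenValue op) : op_(op) {}
  TokenValue op() const { return op_; }
  bool is_increment() const { return op_ == TOKEN_INC; }
 private:
  TokenValue op_;
};

class CountOperation {
 public:
  CountOperation(bool is_prefix, IncrementOperation* increment)
      : is_prefix_(is_prefix), increment_(increment) {}
  // x++ / ++x lower to x + 1; x-- / --x lower to x - 1.
  TokenValue binary_op() const {
    return increment_->op() == TOKEN_INC ? TOKEN_ADD : TOKEN_SUB;
  }
  bool is_prefix() const { return is_prefix_; }
 private:
  bool is_prefix_;
  IncrementOperation* increment_;
};

int main() {
  IncrementOperation inc(TOKEN_INC);
  CountOperation post_inc(false, &inc);        // models "x++"
  assert(!post_inc.is_prefix());
  assert(post_inc.binary_op() == TOKEN_ADD);   // rewritten as x + 1
  return 0;
}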
- Statement* stmt_; -}; - } } // namespace v8::internal #endif // V8_AST_H_ diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index ce8e98d6a5..a82d1d6966 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -232,6 +232,7 @@ class Genesis BASE_EMBEDDED { bool InstallNatives(); void InstallCustomCallGenerators(); void InstallJSFunctionResultCaches(); + void InitializeNormalizedMapCaches(); // Used both for deserialized and from-scratch contexts to add the extensions // provided. static bool InstallExtensions(Handle global_context, @@ -719,6 +720,8 @@ void Genesis::InitializeGlobal(Handle inner_global, InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize, Top::initial_object_prototype(), Builtins::Illegal, true); + string_fun->shared()->set_construct_stub( + Builtins::builtin(Builtins::StringConstructCode)); global_context()->set_string_function(*string_fun); // Add 'length' property to strings. Handle string_descriptors = @@ -1400,6 +1403,13 @@ void Genesis::InstallJSFunctionResultCaches() { } +void Genesis::InitializeNormalizedMapCaches() { + Handle array( + Factory::NewFixedArray(NormalizedMapCache::kEntries, TENURED)); + global_context()->set_normalized_map_cache(NormalizedMapCache::cast(*array)); +} + + int BootstrapperActive::nesting_ = 0; @@ -1768,6 +1778,7 @@ Genesis::Genesis(Handle global_object, HookUpGlobalProxy(inner_global, global_proxy); InitializeGlobal(inner_global, empty_function); InstallJSFunctionResultCaches(); + InitializeNormalizedMapCaches(); if (!InstallNatives()) return; MakeFunctionInstancePrototypeWritable(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 3a0393efbc..b4f4a0611a 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -243,7 +243,7 @@ BUILTIN(ArrayCodeGeneric) { } -static Object* AllocateJSArray() { +MUST_USE_RESULT static Object* AllocateJSArray() { JSFunction* array_function = Top::context()->global_context()->array_function(); Object* result = Heap::AllocateJSObject(array_function); @@ -252,7 +252,7 @@ static Object* AllocateJSArray() { } -static Object* AllocateEmptyJSArray() { +MUST_USE_RESULT static Object* AllocateEmptyJSArray() { Object* result = AllocateJSArray(); if (result->IsFailure()) return result; JSArray* result_array = JSArray::cast(result); @@ -269,6 +269,7 @@ static void CopyElements(AssertNoAllocation* no_gc, int src_index, int len) { ASSERT(dst != src); // Use MoveElements instead. + ASSERT(dst->map() != Heap::fixed_cow_array_map()); ASSERT(len > 0); CopyWords(dst->data_start() + dst_index, src->data_start() + src_index, @@ -286,6 +287,7 @@ static void MoveElements(AssertNoAllocation* no_gc, FixedArray* src, int src_index, int len) { + ASSERT(dst->map() != Heap::fixed_cow_array_map()); memmove(dst->data_start() + dst_index, src->data_start() + src_index, len * kPointerSize); @@ -297,17 +299,17 @@ static void MoveElements(AssertNoAllocation* no_gc, static void FillWithHoles(FixedArray* dst, int from, int to) { + ASSERT(dst->map() != Heap::fixed_cow_array_map()); MemsetPointer(dst->data_start() + from, Heap::the_hole_value(), to - from); } static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) { - // For now this trick is only applied to fixed arrays in new space. + ASSERT(elms->map() != Heap::fixed_cow_array_map()); + // For now this trick is only applied to fixed arrays in new and paged space. // In large object space the object's start must coincide with chunk // and thus the trick is just not applicable. 
- // In old space we do not use this trick to avoid dealing with - // region dirty marks. - ASSERT(Heap::new_space()->Contains(elms)); + ASSERT(!Heap::lo_space()->Contains(elms)); STATIC_ASSERT(FixedArray::kMapOffset == 0); STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize); @@ -317,6 +319,17 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) { const int len = elms->length(); + if (to_trim > FixedArray::kHeaderSize / kPointerSize && + !Heap::new_space()->Contains(elms)) { + // If we are doing a big trim in old space then we zap the space that was + // formerly part of the array so that the GC (aided by the card-based + // remembered set) won't find pointers to new-space there. + Object** zap = reinterpret_cast(elms->address()); + zap++; // Header of filler must be at least one word so skip that. + for (int i = 1; i < to_trim; i++) { + *zap++ = Smi::FromInt(0); + } + } // Technically in new space this write might be omitted (except for // debug mode which iterates through the heap), but to play safer // we still do it. @@ -325,9 +338,8 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) { former_start[to_trim] = Heap::fixed_array_map(); former_start[to_trim + 1] = Smi::FromInt(len - to_trim); - ASSERT_EQ(elms->address() + to_trim * kPointerSize, - (elms + to_trim * kPointerSize)->address()); - return elms + to_trim * kPointerSize; + return FixedArray::cast(HeapObject::FromAddress( + elms->address() + to_trim * kPointerSize)); } @@ -348,33 +360,24 @@ static bool ArrayPrototypeHasNoElements(Context* global_context, } -static bool IsJSArrayWithFastElements(Object* receiver, - FixedArray** elements) { - if (!receiver->IsJSArray()) { - return false; - } - +static inline Object* EnsureJSArrayWithWritableFastElements(Object* receiver) { + if (!receiver->IsJSArray()) return NULL; JSArray* array = JSArray::cast(receiver); - HeapObject* elms = HeapObject::cast(array->elements()); - if (elms->map() != Heap::fixed_array_map()) { - return false; + if (elms->map() == Heap::fixed_array_map()) return elms; + if (elms->map() == Heap::fixed_cow_array_map()) { + return array->EnsureWritableFastElements(); } - - *elements = FixedArray::cast(elms); - return true; + return NULL; } -static bool IsFastElementMovingAllowed(Object* receiver, - FixedArray** elements) { - if (!IsJSArrayWithFastElements(receiver, elements)) return false; - +static inline bool IsJSArrayFastElementMovingAllowed(JSArray* receiver) { Context* global_context = Top::context()->global_context(); JSObject* array_proto = JSObject::cast(global_context->array_function()->prototype()); - if (JSArray::cast(receiver)->GetPrototype() != array_proto) return false; - return ArrayPrototypeHasNoElements(global_context, array_proto); + return receiver->GetPrototype() == array_proto && + ArrayPrototypeHasNoElements(global_context, array_proto); } @@ -405,10 +408,10 @@ static Object* CallJsBuiltin(const char* name, BUILTIN(ArrayPush) { Object* receiver = *args.receiver(); - FixedArray* elms = NULL; - if (!IsJSArrayWithFastElements(receiver, &elms)) { - return CallJsBuiltin("ArrayPush", args); - } + Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver); + if (elms_obj == NULL) return CallJsBuiltin("ArrayPush", args); + if (elms_obj->IsFailure()) return elms_obj; + FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); int len = Smi::cast(array->length())->value(); @@ -454,10 +457,10 @@ BUILTIN(ArrayPush) { BUILTIN(ArrayPop) { Object* receiver = *args.receiver(); - 
FixedArray* elms = NULL; - if (!IsJSArrayWithFastElements(receiver, &elms)) { - return CallJsBuiltin("ArrayPop", args); - } + Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver); + if (elms_obj == NULL) return CallJsBuiltin("ArrayPop", args); + if (elms_obj->IsFailure()) return elms_obj; + FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); int len = Smi::cast(array->length())->value(); @@ -483,10 +486,13 @@ BUILTIN(ArrayPop) { BUILTIN(ArrayShift) { Object* receiver = *args.receiver(); - FixedArray* elms = NULL; - if (!IsFastElementMovingAllowed(receiver, &elms)) { + Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver); + if (elms_obj->IsFailure()) return elms_obj; + if (elms_obj == NULL || + !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) { return CallJsBuiltin("ArrayShift", args); } + FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); ASSERT(array->HasFastElements()); @@ -499,8 +505,8 @@ BUILTIN(ArrayShift) { first = Heap::undefined_value(); } - if (Heap::new_space()->Contains(elms)) { - // As elms still in the same space they used to be (new space), + if (!Heap::lo_space()->Contains(elms)) { + // As elms still in the same space they used to be, // there is no need to update region dirty mark. array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER); } else { @@ -519,10 +525,13 @@ BUILTIN(ArrayShift) { BUILTIN(ArrayUnshift) { Object* receiver = *args.receiver(); - FixedArray* elms = NULL; - if (!IsFastElementMovingAllowed(receiver, &elms)) { + Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver); + if (elms_obj->IsFailure()) return elms_obj; + if (elms_obj == NULL || + !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) { return CallJsBuiltin("ArrayUnshift", args); } + FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); ASSERT(array->HasFastElements()); @@ -568,10 +577,13 @@ BUILTIN(ArrayUnshift) { BUILTIN(ArraySlice) { Object* receiver = *args.receiver(); - FixedArray* elms = NULL; - if (!IsFastElementMovingAllowed(receiver, &elms)) { + Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver); + if (elms_obj->IsFailure()) return elms_obj; + if (elms_obj == NULL || + !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) { return CallJsBuiltin("ArraySlice", args); } + FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); ASSERT(array->HasFastElements()); @@ -637,10 +649,13 @@ BUILTIN(ArraySlice) { BUILTIN(ArraySplice) { Object* receiver = *args.receiver(); - FixedArray* elms = NULL; - if (!IsFastElementMovingAllowed(receiver, &elms)) { + Object* elms_obj = EnsureJSArrayWithWritableFastElements(receiver); + if (elms_obj->IsFailure()) return elms_obj; + if (elms_obj == NULL || + !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) { return CallJsBuiltin("ArraySplice", args); } + FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); ASSERT(array->HasFastElements()); @@ -648,13 +663,9 @@ BUILTIN(ArraySplice) { int n_arguments = args.length() - 1; - // SpiderMonkey and JSC return undefined in the case where no - // arguments are given instead of using the implicit undefined - // arguments. This does not follow ECMA-262, but we do the same for - // compatibility. - // TraceMonkey follows ECMA-262 though. + // Return empty array when no arguments are supplied. 
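The array builtins above now call EnsureJSArrayWithWritableFastElements before mutating, so a copy-on-write element store is cloned exactly once, on the first write, and untouched sharers keep the original. A rough standalone model of that idea using plain C++ containers rather than V8's FixedArray and heap types:

#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

// Toy model: writers must make the backing store private before mutating,
// mirroring what EnsureJSArrayWithWritableFastElements does for COW arrays.
class ToyArray {
 public:
  explicit ToyArray(std::shared_ptr<std::vector<int> > elems) : elems_(elems) {}

  void Push(int value) {
    EnsureWritable();          // copy exactly once, on the first write
    elems_->push_back(value);
  }

  std::size_t length() const { return elems_->size(); }

 private:
  void EnsureWritable() {
    if (elems_.use_count() > 1) {
      elems_ = std::make_shared<std::vector<int> >(*elems_);
    }
  }
  std::shared_ptr<std::vector<int> > elems_;
};

int main() {
  std::shared_ptr<std::vector<int> > shared =
      std::make_shared<std::vector<int> >(3, 7);  // three elements, all 7
  ToyArray a(shared);
  ToyArray b(shared);       // a and b share one backing store
  a.Push(42);               // a clones the store, then appends
  assert(a.length() == 4);
  assert(b.length() == 3);  // b still sees the original elements
  return 0;
}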
if (n_arguments == 0) { - return Heap::undefined_value(); + return AllocateEmptyJSArray(); } int relative_start = 0; @@ -717,7 +728,7 @@ BUILTIN(ArraySplice) { if (item_count < actual_delete_count) { // Shrink the array. - const bool trim_array = Heap::new_space()->Contains(elms) && + const bool trim_array = !Heap::lo_space()->Contains(elms) && ((actual_start + item_count) < (len - actual_delete_count - actual_start)); if (trim_array) { diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index 375e8f3f89..7e49f3133a 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -117,7 +117,10 @@ enum BuiltinExtraArguments { V(FunctionApply, BUILTIN, UNINITIALIZED) \ \ V(ArrayCode, BUILTIN, UNINITIALIZED) \ - V(ArrayConstructCode, BUILTIN, UNINITIALIZED) + V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \ + \ + V(StringConstructCode, BUILTIN, UNINITIALIZED) + #ifdef ENABLE_DEBUGGER_SUPPORT // Define list of builtins used by the debugger implemented in assembly. @@ -258,6 +261,8 @@ class Builtins : public AllStatic { static void Generate_ArrayCode(MacroAssembler* masm); static void Generate_ArrayConstructCode(MacroAssembler* masm); + + static void Generate_StringConstructCode(MacroAssembler* masm); }; } } // namespace v8::internal diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h index fadbc9afbe..0dfc80d0b8 100644 --- a/deps/v8/src/char-predicates-inl.h +++ b/deps/v8/src/char-predicates-inl.h @@ -34,6 +34,14 @@ namespace v8 { namespace internal { +// If c is in 'A'-'Z' or 'a'-'z', return its lower-case. +// Else, return something outside of 'A'-'Z' and 'a'-'z'. +// Note: it ignores LOCALE. +inline int AsciiAlphaToLower(uc32 c) { + return c | 0x20; +} + + inline bool IsCarriageReturn(uc32 c) { return c == 0x000D; } @@ -59,12 +67,12 @@ inline bool IsDecimalDigit(uc32 c) { inline bool IsHexDigit(uc32 c) { // ECMA-262, 3rd, 7.6 (p 15) - return IsDecimalDigit(c) || IsInRange(c | 0x20, 'a', 'f'); + return IsDecimalDigit(c) || IsInRange(AsciiAlphaToLower(c), 'a', 'f'); } inline bool IsRegExpWord(uc16 c) { - return IsInRange(c | 0x20, 'a', 'z') + return IsInRange(AsciiAlphaToLower(c), 'a', 'z') || IsDecimalDigit(c) || (c == '_'); } diff --git a/deps/v8/src/circular-queue.cc b/deps/v8/src/circular-queue.cc index af650de5e7..928c3f0c05 100644 --- a/deps/v8/src/circular-queue.cc +++ b/deps/v8/src/circular-queue.cc @@ -47,8 +47,9 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes, producer_consumer_distance_(2 * chunk_size_), buffer_(NewArray(buffer_size_ + 1)) { ASSERT(buffer_size_in_chunks > 2); - // Only need to keep the first cell of a chunk clean. - for (int i = 0; i < buffer_size_; i += chunk_size_) { + // Clean up the whole buffer to avoid encountering a random kEnd + // while enqueuing. + for (int i = 0; i < buffer_size_; ++i) { buffer_[i] = kClear; } buffer_[buffer_size_] = kEnd; diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index e5a222fcda..98a5cf67b9 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -29,6 +29,7 @@ #define V8_CODE_STUBS_H_ #include "globals.h" +#include "macro-assembler.h" namespace v8 { namespace internal { @@ -80,6 +81,14 @@ namespace internal { CODE_STUB_LIST_ALL_PLATFORMS(V) \ CODE_STUB_LIST_ARM(V) +// Types of uncatchable exceptions. +enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; + +// Mode to overwrite BinaryExpression values. 
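The AsciiAlphaToLower helper introduced in the char-predicates hunk above folds case with a single bit-OR: it maps 'A'..'Z' onto 'a'..'z' and pushes every non-letter outside the 'a'..'z' range, so one range check covers both cases. A small compilable illustration (the helper matches the hunk; the rest is a simplified reimplementation for the example):

#include <cassert>

// OR-ing with 0x20 maps 'A'..'Z' (0x41..0x5A) onto 'a'..'z' (0x61..0x7A),
// leaves lower-case letters unchanged, and moves non-letters outside 'a'..'z'.
static inline int AsciiAlphaToLower(int c) { return c | 0x20; }

static inline bool IsInRange(int c, int lo, int hi) {
  return lo <= c && c <= hi;
}

static inline bool IsHexDigit(int c) {
  return IsInRange(c, '0', '9') || IsInRange(AsciiAlphaToLower(c), 'a', 'f');
}

int main() {
  assert(AsciiAlphaToLower('G') == 'g');
  assert(AsciiAlphaToLower('g') == 'g');
  assert(!IsInRange(AsciiAlphaToLower('@'), 'a', 'z'));  // non-letter stays out
  assert(IsHexDigit('7') && IsHexDigit('b') && IsHexDigit('F'));
  assert(!IsHexDigit('g'));
  return 0;
}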
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT }; +enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE }; + + // Stub is base classes of all stubs. class CodeStub BASE_EMBEDDED { public: @@ -101,10 +110,16 @@ class CodeStub BASE_EMBEDDED { static Major MajorKeyFromKey(uint32_t key) { return static_cast(MajorKeyBits::decode(key)); - }; + } static int MinorKeyFromKey(uint32_t key) { return MinorKeyBits::decode(key); - }; + } + + // Gets the major key from a code object that is a code stub or binary op IC. + static Major GetMajorKey(Code* code_stub) { + return static_cast(code_stub->major_key()); + } + static const char* MajorName(Major major_key, bool allow_unknown_keys); virtual ~CodeStub() {} @@ -172,6 +187,609 @@ class CodeStub BASE_EMBEDDED { friend class BreakPointIterator; }; + +// Helper interface to prepare to/restore after making runtime calls. +class RuntimeCallHelper { + public: + virtual ~RuntimeCallHelper() {} + + virtual void BeforeCall(MacroAssembler* masm) const = 0; + + virtual void AfterCall(MacroAssembler* masm) const = 0; + + protected: + RuntimeCallHelper() {} + + private: + DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper); +}; + +} } // namespace v8::internal + +#if V8_TARGET_ARCH_IA32 +#include "ia32/code-stubs-ia32.h" +#elif V8_TARGET_ARCH_X64 +#include "x64/code-stubs-x64.h" +#elif V8_TARGET_ARCH_ARM +#include "arm/code-stubs-arm.h" +#elif V8_TARGET_ARCH_MIPS +#include "mips/code-stubs-mips.h" +#else +#error Unsupported target architecture. +#endif + +namespace v8 { +namespace internal { + + +// RuntimeCallHelper implementation used in IC stubs: enters/leaves a +// newly created internal frame before/after the runtime call. +class ICRuntimeCallHelper : public RuntimeCallHelper { + public: + ICRuntimeCallHelper() {} + + virtual void BeforeCall(MacroAssembler* masm) const; + + virtual void AfterCall(MacroAssembler* masm) const; +}; + + +// Trivial RuntimeCallHelper implementation. +class NopRuntimeCallHelper : public RuntimeCallHelper { + public: + NopRuntimeCallHelper() {} + + virtual void BeforeCall(MacroAssembler* masm) const {} + + virtual void AfterCall(MacroAssembler* masm) const {} +}; + + +class StackCheckStub : public CodeStub { + public: + StackCheckStub() { } + + void Generate(MacroAssembler* masm); + + private: + + const char* GetName() { return "StackCheckStub"; } + + Major MajorKey() { return StackCheck; } + int MinorKey() { return 0; } +}; + + +class FastNewClosureStub : public CodeStub { + public: + void Generate(MacroAssembler* masm); + + private: + const char* GetName() { return "FastNewClosureStub"; } + Major MajorKey() { return FastNewClosure; } + int MinorKey() { return 0; } +}; + + +class FastNewContextStub : public CodeStub { + public: + static const int kMaximumSlots = 64; + + explicit FastNewContextStub(int slots) : slots_(slots) { + ASSERT(slots_ > 0 && slots <= kMaximumSlots); + } + + void Generate(MacroAssembler* masm); + + private: + int slots_; + + const char* GetName() { return "FastNewContextStub"; } + Major MajorKey() { return FastNewContext; } + int MinorKey() { return slots_; } +}; + + +class FastCloneShallowArrayStub : public CodeStub { + public: + // Maximum length of copied elements array. + static const int kMaximumClonedLength = 8; + + enum Mode { + CLONE_ELEMENTS, + COPY_ON_WRITE_ELEMENTS + }; + + FastCloneShallowArrayStub(Mode mode, int length) + : mode_(mode), + length_((mode == COPY_ON_WRITE_ELEMENTS) ? 
0 : length) { + ASSERT(length_ >= 0); + ASSERT(length_ <= kMaximumClonedLength); + } + + void Generate(MacroAssembler* masm); + + private: + Mode mode_; + int length_; + + const char* GetName() { return "FastCloneShallowArrayStub"; } + Major MajorKey() { return FastCloneShallowArray; } + int MinorKey() { + ASSERT(mode_ == 0 || mode_ == 1); + return (length_ << 1) | mode_; + } +}; + + +class InstanceofStub: public CodeStub { + public: + InstanceofStub() { } + + void Generate(MacroAssembler* masm); + + private: + Major MajorKey() { return Instanceof; } + int MinorKey() { return 0; } +}; + + +enum NegativeZeroHandling { + kStrictNegativeZero, + kIgnoreNegativeZero +}; + + +class GenericUnaryOpStub : public CodeStub { + public: + GenericUnaryOpStub(Token::Value op, + UnaryOverwriteMode overwrite, + NegativeZeroHandling negative_zero = kStrictNegativeZero) + : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { } + + private: + Token::Value op_; + UnaryOverwriteMode overwrite_; + NegativeZeroHandling negative_zero_; + + class OverwriteField: public BitField {}; + class NegativeZeroField: public BitField {}; + class OpField: public BitField {}; + + Major MajorKey() { return GenericUnaryOp; } + int MinorKey() { + return OpField::encode(op_) | + OverwriteField::encode(overwrite_) | + NegativeZeroField::encode(negative_zero_); + } + + void Generate(MacroAssembler* masm); + + const char* GetName(); +}; + + +enum NaNInformation { + kBothCouldBeNaN, + kCantBothBeNaN +}; + + +class CompareStub: public CodeStub { + public: + CompareStub(Condition cc, + bool strict, + NaNInformation nan_info = kBothCouldBeNaN, + bool include_number_compare = true, + Register lhs = no_reg, + Register rhs = no_reg) : + cc_(cc), + strict_(strict), + never_nan_nan_(nan_info == kCantBothBeNaN), + include_number_compare_(include_number_compare), + lhs_(lhs), + rhs_(rhs), + name_(NULL) { } + + void Generate(MacroAssembler* masm); + + private: + Condition cc_; + bool strict_; + // Only used for 'equal' comparisons. Tells the stub that we already know + // that at least one side of the comparison is not NaN. This allows the + // stub to use object identity in the positive case. We ignore it when + // generating the minor key for other comparisons to avoid creating more + // stubs. + bool never_nan_nan_; + // Do generate the number comparison code in the stub. Stubs without number + // comparison code is used when the number comparison has been inlined, and + // the stub will be called if one of the operands is not a number. + bool include_number_compare_; + // Register holding the left hand side of the comparison if the stub gives + // a choice, no_reg otherwise. + Register lhs_; + // Register holding the right hand side of the comparison if the stub gives + // a choice, no_reg otherwise. + Register rhs_; + + // Encoding of the minor key CCCCCCCCCCCCRCNS. + class StrictField: public BitField {}; + class NeverNanNanField: public BitField {}; + class IncludeNumberCompareField: public BitField {}; + class RegisterField: public BitField {}; + class ConditionField: public BitField {}; + + Major MajorKey() { return Compare; } + + int MinorKey(); + + // Branch to the label if the given object isn't a symbol. + void BranchIfNonSymbol(MacroAssembler* masm, + Label* label, + Register object, + Register scratch); + + // Unfortunately you have to run without snapshots to see most of these + // names in the profile since most compare stubs end up in the snapshot. 
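Stubs such as GenericUnaryOpStub and CompareStub above pack their configuration into a minor key by OR-ing BitField encodings, so each distinct parameter combination maps to its own compiled stub. A self-contained sketch of that encoding; the field widths and enum names here are invented for the example and do not match V8's actual layout:

#include <cassert>

// Simplified version of the BitField pattern used by the stubs above.
template <class T, int shift, int size>
struct BitField {
  static const unsigned kMask = ((1u << size) - 1) << shift;
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

enum Op { OP_SUB, OP_BIT_NOT, OP_NOT };
enum Overwrite { NO_OVERWRITE_MODE, OVERWRITE_MODE };
enum ZeroHandling { STRICT_ZERO, IGNORE_ZERO };

typedef BitField<Op, 0, 2> OpField;
typedef BitField<Overwrite, 2, 1> OverwriteField;
typedef BitField<ZeroHandling, 3, 1> ZeroField;

static unsigned MinorKey(Op op, Overwrite ov, ZeroHandling z) {
  return OpField::encode(op) | OverwriteField::encode(ov) | ZeroField::encode(z);
}

int main() {
  unsigned key = MinorKey(OP_BIT_NOT, OVERWRITE_MODE, STRICT_ZERO);
  assert(OpField::decode(key) == OP_BIT_NOT);
  assert(OverwriteField::decode(key) == OVERWRITE_MODE);
  assert(ZeroField::decode(key) == STRICT_ZERO);
  return 0;
}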
+ char* name_; + const char* GetName(); +#ifdef DEBUG + void Print() { + PrintF("CompareStub (cc %d), (strict %s), " + "(never_nan_nan %s), (number_compare %s) ", + static_cast(cc_), + strict_ ? "true" : "false", + never_nan_nan_ ? "true" : "false", + include_number_compare_ ? "included" : "not included"); + + if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) { + PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code()); + } else { + PrintF("\n"); + } + } +#endif +}; + + +class CEntryStub : public CodeStub { + public: + explicit CEntryStub(int result_size) : result_size_(result_size) { } + + void Generate(MacroAssembler* masm); + + private: + void GenerateCore(MacroAssembler* masm, + Label* throw_normal_exception, + Label* throw_termination_exception, + Label* throw_out_of_memory_exception, + bool do_gc, + bool always_allocate_scope, + int alignment_skew = 0); + void GenerateThrowTOS(MacroAssembler* masm); + void GenerateThrowUncatchable(MacroAssembler* masm, + UncatchableExceptionType type); + + // Number of pointers/values returned. + const int result_size_; + + Major MajorKey() { return CEntry; } + // Minor key must differ if different result_size_ values means different + // code is generated. + int MinorKey(); + + const char* GetName() { return "CEntryStub"; } +}; + + +class ApiGetterEntryStub : public CodeStub { + public: + ApiGetterEntryStub(Handle info, + ApiFunction* fun) + : info_(info), + fun_(fun) { } + void Generate(MacroAssembler* masm); + virtual bool has_custom_cache() { return true; } + virtual bool GetCustomCache(Code** code_out); + virtual void SetCustomCache(Code* value); + + static const int kStackSpace = 5; + static const int kArgc = 4; + private: + Handle info() { return info_; } + ApiFunction* fun() { return fun_; } + Major MajorKey() { return NoCache; } + int MinorKey() { return 0; } + const char* GetName() { return "ApiEntryStub"; } + // The accessor info associated with the function. + Handle info_; + // The function to be called. 
+ ApiFunction* fun_; +}; + + +class JSEntryStub : public CodeStub { + public: + JSEntryStub() { } + + void Generate(MacroAssembler* masm) { GenerateBody(masm, false); } + + protected: + void GenerateBody(MacroAssembler* masm, bool is_construct); + + private: + Major MajorKey() { return JSEntry; } + int MinorKey() { return 0; } + + const char* GetName() { return "JSEntryStub"; } +}; + + +class JSConstructEntryStub : public JSEntryStub { + public: + JSConstructEntryStub() { } + + void Generate(MacroAssembler* masm) { GenerateBody(masm, true); } + + private: + int MinorKey() { return 1; } + + const char* GetName() { return "JSConstructEntryStub"; } +}; + + +class ArgumentsAccessStub: public CodeStub { + public: + enum Type { + READ_ELEMENT, + NEW_OBJECT + }; + + explicit ArgumentsAccessStub(Type type) : type_(type) { } + + private: + Type type_; + + Major MajorKey() { return ArgumentsAccess; } + int MinorKey() { return type_; } + + void Generate(MacroAssembler* masm); + void GenerateReadElement(MacroAssembler* masm); + void GenerateNewObject(MacroAssembler* masm); + + const char* GetName() { return "ArgumentsAccessStub"; } + +#ifdef DEBUG + void Print() { + PrintF("ArgumentsAccessStub (type %d)\n", type_); + } +#endif +}; + + +class RegExpExecStub: public CodeStub { + public: + RegExpExecStub() { } + + private: + Major MajorKey() { return RegExpExec; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "RegExpExecStub"; } + +#ifdef DEBUG + void Print() { + PrintF("RegExpExecStub\n"); + } +#endif +}; + + +class CallFunctionStub: public CodeStub { + public: + CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags) + : argc_(argc), in_loop_(in_loop), flags_(flags) { } + + void Generate(MacroAssembler* masm); + + private: + int argc_; + InLoopFlag in_loop_; + CallFunctionFlags flags_; + +#ifdef DEBUG + void Print() { + PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n", + argc_, + static_cast(in_loop_), + static_cast(flags_)); + } +#endif + + // Minor key encoding in 32 bits with Bitfield . + class InLoopBits: public BitField {}; + class FlagBits: public BitField {}; + class ArgcBits: public BitField {}; + + Major MajorKey() { return CallFunction; } + int MinorKey() { + // Encode the parameters in a unique 32 bit value. + return InLoopBits::encode(in_loop_) + | FlagBits::encode(flags_) + | ArgcBits::encode(argc_); + } + + InLoopFlag InLoop() { return in_loop_; } + bool ReceiverMightBeValue() { + return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0; + } + + public: + static int ExtractArgcFromMinorKey(int minor_key) { + return ArgcBits::decode(minor_key); + } +}; + + +enum StringIndexFlags { + // Accepts smis or heap numbers. + STRING_INDEX_IS_NUMBER, + + // Accepts smis or heap numbers that are valid array indices + // (ECMA-262 15.4). Invalid indices are reported as being out of + // range. + STRING_INDEX_IS_ARRAY_INDEX +}; + + +// Generates code implementing String.prototype.charCodeAt. +// +// Only supports the case when the receiver is a string and the index +// is a number (smi or heap number) that is a valid index into the +// string. Additional index constraints are specified by the +// flags. Otherwise, bails out to the provided labels. +// +// Register usage: |object| may be changed to another string in a way +// that doesn't affect charCodeAt/charAt semantics, |index| is +// preserved, |scratch| and |result| are clobbered. 
+class StringCharCodeAtGenerator { + public: + StringCharCodeAtGenerator(Register object, + Register index, + Register scratch, + Register result, + Label* receiver_not_string, + Label* index_not_number, + Label* index_out_of_range, + StringIndexFlags index_flags) + : object_(object), + index_(index), + scratch_(scratch), + result_(result), + receiver_not_string_(receiver_not_string), + index_not_number_(index_not_number), + index_out_of_range_(index_out_of_range), + index_flags_(index_flags) { + ASSERT(!scratch_.is(object_)); + ASSERT(!scratch_.is(index_)); + ASSERT(!scratch_.is(result_)); + ASSERT(!result_.is(object_)); + ASSERT(!result_.is(index_)); + } + + // Generates the fast case code. On the fallthrough path |result| + // register contains the result. + void GenerateFast(MacroAssembler* masm); + + // Generates the slow case code. Must not be naturally + // reachable. Expected to be put after a ret instruction (e.g., in + // deferred code). Always jumps back to the fast case. + void GenerateSlow(MacroAssembler* masm, + const RuntimeCallHelper& call_helper); + + private: + Register object_; + Register index_; + Register scratch_; + Register result_; + + Label* receiver_not_string_; + Label* index_not_number_; + Label* index_out_of_range_; + + StringIndexFlags index_flags_; + + Label call_runtime_; + Label index_not_smi_; + Label got_smi_index_; + Label exit_; + + DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator); +}; + + +// Generates code for creating a one-char string from a char code. +class StringCharFromCodeGenerator { + public: + StringCharFromCodeGenerator(Register code, + Register result) + : code_(code), + result_(result) { + ASSERT(!code_.is(result_)); + } + + // Generates the fast case code. On the fallthrough path |result| + // register contains the result. + void GenerateFast(MacroAssembler* masm); + + // Generates the slow case code. Must not be naturally + // reachable. Expected to be put after a ret instruction (e.g., in + // deferred code). Always jumps back to the fast case. + void GenerateSlow(MacroAssembler* masm, + const RuntimeCallHelper& call_helper); + + private: + Register code_; + Register result_; + + Label slow_case_; + Label exit_; + + DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator); +}; + + +// Generates code implementing String.prototype.charAt. +// +// Only supports the case when the receiver is a string and the index +// is a number (smi or heap number) that is a valid index into the +// string. Additional index constraints are specified by the +// flags. Otherwise, bails out to the provided labels. +// +// Register usage: |object| may be changed to another string in a way +// that doesn't affect charCodeAt/charAt semantics, |index| is +// preserved, |scratch1|, |scratch2|, and |result| are clobbered. +class StringCharAtGenerator { + public: + StringCharAtGenerator(Register object, + Register index, + Register scratch1, + Register scratch2, + Register result, + Label* receiver_not_string, + Label* index_not_number, + Label* index_out_of_range, + StringIndexFlags index_flags) + : char_code_at_generator_(object, + index, + scratch1, + scratch2, + receiver_not_string, + index_not_number, + index_out_of_range, + index_flags), + char_from_code_generator_(scratch2, result) {} + + // Generates the fast case code. On the fallthrough path |result| + // register contains the result. + void GenerateFast(MacroAssembler* masm); + + // Generates the slow case code. Must not be naturally + // reachable. 
Expected to be put after a ret instruction (e.g., in + // deferred code). Always jumps back to the fast case. + void GenerateSlow(MacroAssembler* masm, + const RuntimeCallHelper& call_helper); + + private: + StringCharCodeAtGenerator char_code_at_generator_; + StringCharFromCodeGenerator char_from_code_generator_; + + DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator); +}; + } } // namespace v8::internal #endif // V8_CODE_STUBS_H_ diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index a9fab43f39..148cefca40 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -339,6 +339,11 @@ void CodeGenerator::ProcessDeclarations(ZoneList* declarations) { } +void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) { + UNREACHABLE(); +} + + // List of special runtime calls which are generated inline. For some of these // functions the code will be generated inline, and for others a call to a code // stub will be inlined. @@ -380,21 +385,6 @@ bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) { } -bool CodeGenerator::PatchInlineRuntimeEntry(Handle name, - const CodeGenerator::InlineRuntimeLUT& new_entry, - CodeGenerator::InlineRuntimeLUT* old_entry) { - InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name); - if (entry == NULL) return false; - if (old_entry != NULL) { - old_entry->name = entry->name; - old_entry->method = entry->method; - } - entry->name = new_entry.name; - entry->method = new_entry.method; - return true; -} - - int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle name) { CodeGenerator::InlineRuntimeLUT* f = CodeGenerator::FindInlineRuntimeLUT(name); @@ -496,12 +486,11 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) { int CEntryStub::MinorKey() { - ASSERT(result_size_ <= 2); + ASSERT(result_size_ == 1 || result_size_ == 2); #ifdef _WIN64 - return ExitFrameModeBits::encode(mode_) - | IndirectResultBits::encode(result_size_ > 1); + return result_size_ == 1 ? 0 : 1; #else - return ExitFrameModeBits::encode(mode_); + return 0; #endif } diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index 3b31c04f92..aa2d4422c3 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -64,7 +64,6 @@ // DeclareGlobals // FindInlineRuntimeLUT // CheckForInlineRuntimeCall -// PatchInlineRuntimeEntry // AnalyzeCondition // CodeForFunctionPosition // CodeForReturnPosition @@ -73,13 +72,6 @@ // CodeForSourcePosition -// Mode to overwrite BinaryExpression values. -enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT }; -enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE }; - -// Types of uncatchable exceptions. -enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; - #define INLINE_RUNTIME_FUNCTION_LIST(F) \ F(IsSmi, 1, 1) \ F(IsNonNegativeSmi, 1, 1) \ @@ -108,6 +100,7 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; F(StringCompare, 2, 1) \ F(RegExpExec, 4, 1) \ F(RegExpConstructResult, 3, 1) \ + F(RegExpCloneResult, 1, 1) \ F(GetFromCache, 2, 1) \ F(NumberToString, 1, 1) \ F(SwapElements, 3, 1) \ @@ -115,7 +108,9 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; F(MathSin, 1, 1) \ F(MathCos, 1, 1) \ F(MathSqrt, 1, 1) \ - F(IsRegExpEquivalent, 2, 1) + F(IsRegExpEquivalent, 2, 1) \ + F(HasCachedArrayIndex, 1, 1) \ + F(GetCachedArrayIndex, 1, 1) #if V8_TARGET_ARCH_IA32 @@ -135,29 +130,6 @@ enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION }; namespace v8 { namespace internal { -// Support for "structured" code comments. 
-#ifdef DEBUG - -class Comment BASE_EMBEDDED { - public: - Comment(MacroAssembler* masm, const char* msg); - ~Comment(); - - private: - MacroAssembler* masm_; - const char* msg_; -}; - -#else - -class Comment BASE_EMBEDDED { - public: - Comment(MacroAssembler*, const char*) {} -}; - -#endif // DEBUG - - // Code generation can be nested. Code generation scopes form a stack // of active code generators. class CodeGeneratorScope BASE_EMBEDDED { @@ -181,6 +153,7 @@ class CodeGeneratorScope BASE_EMBEDDED { CodeGenerator* previous_; }; + #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 // State of used registers in a virtual frame. @@ -229,23 +202,6 @@ class FrameRegisterState { #endif -// Helper interface to prepare to/restore after making runtime calls. -class RuntimeCallHelper { - public: - virtual ~RuntimeCallHelper() {} - - virtual void BeforeCall(MacroAssembler* masm) const = 0; - - virtual void AfterCall(MacroAssembler* masm) const = 0; - - protected: - RuntimeCallHelper() {} - - private: - DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper); -}; - - // RuntimeCallHelper implementation that saves/restores state of a // virtual frame. class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper { @@ -263,29 +219,6 @@ class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper { }; -// RuntimeCallHelper implementation used in IC stubs: enters/leaves a -// newly created internal frame before/after the runtime call. -class ICRuntimeCallHelper : public RuntimeCallHelper { - public: - ICRuntimeCallHelper() {} - - virtual void BeforeCall(MacroAssembler* masm) const; - - virtual void AfterCall(MacroAssembler* masm) const; -}; - - -// Trivial RuntimeCallHelper implementation. -class NopRuntimeCallHelper : public RuntimeCallHelper { - public: - NopRuntimeCallHelper() {} - - virtual void BeforeCall(MacroAssembler* masm) const {} - - virtual void AfterCall(MacroAssembler* masm) const {} -}; - - // Deferred code objects are small pieces of code that are compiled // out of line. They are used to defer the compilation of uncommon // paths thereby avoiding expensive jumps around uncommon code parts. 
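The comment above summarizes the deferred-code pattern the classic code generators rely on: the fast path is emitted inline with a rarely taken branch to an out-of-line slow path, and all slow paths are generated after the main body. A toy, self-contained illustration of that control flow (a printf stand-in for the assembler, with made-up label names, not V8's MacroAssembler API):

#include <cstddef>
#include <cstdio>
#include <vector>

class DeferredCode {
 public:
  virtual ~DeferredCode() {}
  virtual void Generate() = 0;  // emits the out-of-line slow path
};

class DeferredRuntimeAdd : public DeferredCode {
 public:
  virtual void Generate() {
    std::printf("slow_%p: call runtime Add; jmp done_%p\n",
                static_cast<void*>(this), static_cast<void*>(this));
  }
};

class ToyCodeGenerator {
 public:
  void EmitFastAdd(DeferredCode* slow_path) {
    deferred_.push_back(slow_path);
    std::printf("  add r0, r1\n");
    std::printf("  jo  slow_%p        ; overflow is uncommon\n",
                static_cast<void*>(slow_path));
    std::printf("done_%p:\n", static_cast<void*>(slow_path));
  }
  void EmitDeferred() {
    // Uncommon paths are grouped after the main body, keeping hot code
    // straight-line.
    for (std::size_t i = 0; i < deferred_.size(); ++i) deferred_[i]->Generate();
  }
 private:
  std::vector<DeferredCode*> deferred_;
};

int main() {
  ToyCodeGenerator cgen;
  DeferredRuntimeAdd overflow;
  cgen.EmitFastAdd(&overflow);  // inline fast path with a forward branch
  cgen.EmitDeferred();          // slow paths emitted at the end
  return 0;
}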
@@ -348,547 +281,7 @@ class DeferredCode: public ZoneObject { DISALLOW_COPY_AND_ASSIGN(DeferredCode); }; -class StackCheckStub : public CodeStub { - public: - StackCheckStub() { } - - void Generate(MacroAssembler* masm); - - private: - - const char* GetName() { return "StackCheckStub"; } - - Major MajorKey() { return StackCheck; } - int MinorKey() { return 0; } -}; - - -class FastNewClosureStub : public CodeStub { - public: - void Generate(MacroAssembler* masm); - - private: - const char* GetName() { return "FastNewClosureStub"; } - Major MajorKey() { return FastNewClosure; } - int MinorKey() { return 0; } -}; - - -class FastNewContextStub : public CodeStub { - public: - static const int kMaximumSlots = 64; - - explicit FastNewContextStub(int slots) : slots_(slots) { - ASSERT(slots_ > 0 && slots <= kMaximumSlots); - } - - void Generate(MacroAssembler* masm); - - private: - int slots_; - - const char* GetName() { return "FastNewContextStub"; } - Major MajorKey() { return FastNewContext; } - int MinorKey() { return slots_; } -}; - - -class FastCloneShallowArrayStub : public CodeStub { - public: - static const int kMaximumLength = 8; - - explicit FastCloneShallowArrayStub(int length) : length_(length) { - ASSERT(length >= 0 && length <= kMaximumLength); - } - - void Generate(MacroAssembler* masm); - - private: - int length_; - - const char* GetName() { return "FastCloneShallowArrayStub"; } - Major MajorKey() { return FastCloneShallowArray; } - int MinorKey() { return length_; } -}; - - -class InstanceofStub: public CodeStub { - public: - InstanceofStub() { } - - void Generate(MacroAssembler* masm); - - private: - Major MajorKey() { return Instanceof; } - int MinorKey() { return 0; } -}; - - -enum NegativeZeroHandling { - kStrictNegativeZero, - kIgnoreNegativeZero -}; - - -class GenericUnaryOpStub : public CodeStub { - public: - GenericUnaryOpStub(Token::Value op, - UnaryOverwriteMode overwrite, - NegativeZeroHandling negative_zero = kStrictNegativeZero) - : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { } - - private: - Token::Value op_; - UnaryOverwriteMode overwrite_; - NegativeZeroHandling negative_zero_; - - class OverwriteField: public BitField {}; - class NegativeZeroField: public BitField {}; - class OpField: public BitField {}; - - Major MajorKey() { return GenericUnaryOp; } - int MinorKey() { - return OpField::encode(op_) | - OverwriteField::encode(overwrite_) | - NegativeZeroField::encode(negative_zero_); - } - - void Generate(MacroAssembler* masm); - - const char* GetName(); -}; - - -enum NaNInformation { - kBothCouldBeNaN, - kCantBothBeNaN -}; - - -class CompareStub: public CodeStub { - public: - CompareStub(Condition cc, - bool strict, - NaNInformation nan_info = kBothCouldBeNaN, - bool include_number_compare = true, - Register lhs = no_reg, - Register rhs = no_reg) : - cc_(cc), - strict_(strict), - never_nan_nan_(nan_info == kCantBothBeNaN), - include_number_compare_(include_number_compare), - lhs_(lhs), - rhs_(rhs), - name_(NULL) { } - - void Generate(MacroAssembler* masm); - - private: - Condition cc_; - bool strict_; - // Only used for 'equal' comparisons. Tells the stub that we already know - // that at least one side of the comparison is not NaN. This allows the - // stub to use object identity in the positive case. We ignore it when - // generating the minor key for other comparisons to avoid creating more - // stubs. - bool never_nan_nan_; - // Do generate the number comparison code in the stub. 
Stubs without number - // comparison code is used when the number comparison has been inlined, and - // the stub will be called if one of the operands is not a number. - bool include_number_compare_; - // Register holding the left hand side of the comparison if the stub gives - // a choice, no_reg otherwise. - Register lhs_; - // Register holding the right hand side of the comparison if the stub gives - // a choice, no_reg otherwise. - Register rhs_; - - // Encoding of the minor key CCCCCCCCCCCCRCNS. - class StrictField: public BitField {}; - class NeverNanNanField: public BitField {}; - class IncludeNumberCompareField: public BitField {}; - class RegisterField: public BitField {}; - class ConditionField: public BitField {}; - - Major MajorKey() { return Compare; } - - int MinorKey(); - - // Branch to the label if the given object isn't a symbol. - void BranchIfNonSymbol(MacroAssembler* masm, - Label* label, - Register object, - Register scratch); - - // Unfortunately you have to run without snapshots to see most of these - // names in the profile since most compare stubs end up in the snapshot. - char* name_; - const char* GetName(); -#ifdef DEBUG - void Print() { - PrintF("CompareStub (cc %d), (strict %s), " - "(never_nan_nan %s), (number_compare %s) ", - static_cast(cc_), - strict_ ? "true" : "false", - never_nan_nan_ ? "true" : "false", - include_number_compare_ ? "included" : "not included"); - - if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) { - PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code()); - } else { - PrintF("\n"); - } - } -#endif -}; - - -class CEntryStub : public CodeStub { - public: - explicit CEntryStub(int result_size, - ExitFrame::Mode mode = ExitFrame::MODE_NORMAL) - : result_size_(result_size), mode_(mode) { } - - void Generate(MacroAssembler* masm); - - private: - void GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - Label* throw_out_of_memory_exception, - bool do_gc, - bool always_allocate_scope, - int alignment_skew = 0); - void GenerateThrowTOS(MacroAssembler* masm); - void GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type); - - // Number of pointers/values returned. - const int result_size_; - const ExitFrame::Mode mode_; - - // Minor key encoding - class ExitFrameModeBits: public BitField {}; - class IndirectResultBits: public BitField {}; - - Major MajorKey() { return CEntry; } - // Minor key must differ if different result_size_ values means different - // code is generated. - int MinorKey(); - - const char* GetName() { return "CEntryStub"; } -}; - - -class ApiGetterEntryStub : public CodeStub { - public: - ApiGetterEntryStub(Handle info, - ApiFunction* fun) - : info_(info), - fun_(fun) { } - void Generate(MacroAssembler* masm); - virtual bool has_custom_cache() { return true; } - virtual bool GetCustomCache(Code** code_out); - virtual void SetCustomCache(Code* value); - - static const int kStackSpace = 5; - static const int kArgc = 4; - private: - Handle info() { return info_; } - ApiFunction* fun() { return fun_; } - Major MajorKey() { return NoCache; } - int MinorKey() { return 0; } - const char* GetName() { return "ApiEntryStub"; } - // The accessor info associated with the function. - Handle info_; - // The function to be called. 
- ApiFunction* fun_; -}; - - -class JSEntryStub : public CodeStub { - public: - JSEntryStub() { } - - void Generate(MacroAssembler* masm) { GenerateBody(masm, false); } - - protected: - void GenerateBody(MacroAssembler* masm, bool is_construct); - - private: - Major MajorKey() { return JSEntry; } - int MinorKey() { return 0; } - - const char* GetName() { return "JSEntryStub"; } -}; - - -class JSConstructEntryStub : public JSEntryStub { - public: - JSConstructEntryStub() { } - - void Generate(MacroAssembler* masm) { GenerateBody(masm, true); } - - private: - int MinorKey() { return 1; } - - const char* GetName() { return "JSConstructEntryStub"; } -}; - - -class ArgumentsAccessStub: public CodeStub { - public: - enum Type { - READ_ELEMENT, - NEW_OBJECT - }; - - explicit ArgumentsAccessStub(Type type) : type_(type) { } - - private: - Type type_; - - Major MajorKey() { return ArgumentsAccess; } - int MinorKey() { return type_; } - - void Generate(MacroAssembler* masm); - void GenerateReadElement(MacroAssembler* masm); - void GenerateNewObject(MacroAssembler* masm); - - const char* GetName() { return "ArgumentsAccessStub"; } - -#ifdef DEBUG - void Print() { - PrintF("ArgumentsAccessStub (type %d)\n", type_); - } -#endif -}; - - -class RegExpExecStub: public CodeStub { - public: - RegExpExecStub() { } - - private: - Major MajorKey() { return RegExpExec; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "RegExpExecStub"; } - -#ifdef DEBUG - void Print() { - PrintF("RegExpExecStub\n"); - } -#endif -}; - - -class CallFunctionStub: public CodeStub { - public: - CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags) - : argc_(argc), in_loop_(in_loop), flags_(flags) { } - - void Generate(MacroAssembler* masm); - - private: - int argc_; - InLoopFlag in_loop_; - CallFunctionFlags flags_; - -#ifdef DEBUG - void Print() { - PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n", - argc_, - static_cast(in_loop_), - static_cast(flags_)); - } -#endif - - // Minor key encoding in 32 bits with Bitfield . - class InLoopBits: public BitField {}; - class FlagBits: public BitField {}; - class ArgcBits: public BitField {}; - - Major MajorKey() { return CallFunction; } - int MinorKey() { - // Encode the parameters in a unique 32 bit value. - return InLoopBits::encode(in_loop_) - | FlagBits::encode(flags_) - | ArgcBits::encode(argc_); - } - - InLoopFlag InLoop() { return in_loop_; } - bool ReceiverMightBeValue() { - return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0; - } - - public: - static int ExtractArgcFromMinorKey(int minor_key) { - return ArgcBits::decode(minor_key); - } -}; - - -enum StringIndexFlags { - // Accepts smis or heap numbers. - STRING_INDEX_IS_NUMBER, - - // Accepts smis or heap numbers that are valid array indices - // (ECMA-262 15.4). Invalid indices are reported as being out of - // range. - STRING_INDEX_IS_ARRAY_INDEX -}; - - -// Generates code implementing String.prototype.charCodeAt. -// -// Only supports the case when the receiver is a string and the index -// is a number (smi or heap number) that is a valid index into the -// string. Additional index constraints are specified by the -// flags. Otherwise, bails out to the provided labels. -// -// Register usage: |object| may be changed to another string in a way -// that doesn't affect charCodeAt/charAt semantics, |index| is -// preserved, |scratch| and |result| are clobbered. 
-class StringCharCodeAtGenerator { - public: - StringCharCodeAtGenerator(Register object, - Register index, - Register scratch, - Register result, - Label* receiver_not_string, - Label* index_not_number, - Label* index_out_of_range, - StringIndexFlags index_flags) - : object_(object), - index_(index), - scratch_(scratch), - result_(result), - receiver_not_string_(receiver_not_string), - index_not_number_(index_not_number), - index_out_of_range_(index_out_of_range), - index_flags_(index_flags) { - ASSERT(!scratch_.is(object_)); - ASSERT(!scratch_.is(index_)); - ASSERT(!scratch_.is(result_)); - ASSERT(!result_.is(object_)); - ASSERT(!result_.is(index_)); - } - - // Generates the fast case code. On the fallthrough path |result| - // register contains the result. - void GenerateFast(MacroAssembler* masm); - - // Generates the slow case code. Must not be naturally - // reachable. Expected to be put after a ret instruction (e.g., in - // deferred code). Always jumps back to the fast case. - void GenerateSlow(MacroAssembler* masm, - const RuntimeCallHelper& call_helper); - - private: - Register object_; - Register index_; - Register scratch_; - Register result_; - - Label* receiver_not_string_; - Label* index_not_number_; - Label* index_out_of_range_; - - StringIndexFlags index_flags_; - - Label call_runtime_; - Label index_not_smi_; - Label got_smi_index_; - Label exit_; - - DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator); -}; - - -// Generates code for creating a one-char string from a char code. -class StringCharFromCodeGenerator { - public: - StringCharFromCodeGenerator(Register code, - Register result) - : code_(code), - result_(result) { - ASSERT(!code_.is(result_)); - } - - // Generates the fast case code. On the fallthrough path |result| - // register contains the result. - void GenerateFast(MacroAssembler* masm); - - // Generates the slow case code. Must not be naturally - // reachable. Expected to be put after a ret instruction (e.g., in - // deferred code). Always jumps back to the fast case. - void GenerateSlow(MacroAssembler* masm, - const RuntimeCallHelper& call_helper); - - private: - Register code_; - Register result_; - - Label slow_case_; - Label exit_; - - DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator); -}; - - -// Generates code implementing String.prototype.charAt. -// -// Only supports the case when the receiver is a string and the index -// is a number (smi or heap number) that is a valid index into the -// string. Additional index constraints are specified by the -// flags. Otherwise, bails out to the provided labels. -// -// Register usage: |object| may be changed to another string in a way -// that doesn't affect charCodeAt/charAt semantics, |index| is -// preserved, |scratch1|, |scratch2|, and |result| are clobbered. -class StringCharAtGenerator { - public: - StringCharAtGenerator(Register object, - Register index, - Register scratch1, - Register scratch2, - Register result, - Label* receiver_not_string, - Label* index_not_number, - Label* index_out_of_range, - StringIndexFlags index_flags) - : char_code_at_generator_(object, - index, - scratch1, - scratch2, - receiver_not_string, - index_not_number, - index_out_of_range, - index_flags), - char_from_code_generator_(scratch2, result) {} - - // Generates the fast case code. On the fallthrough path |result| - // register contains the result. - void GenerateFast(MacroAssembler* masm); - - // Generates the slow case code. Must not be naturally - // reachable. 
Expected to be put after a ret instruction (e.g., in - // deferred code). Always jumps back to the fast case. - void GenerateSlow(MacroAssembler* masm, - const RuntimeCallHelper& call_helper); - - private: - StringCharCodeAtGenerator char_code_at_generator_; - StringCharFromCodeGenerator char_from_code_generator_; - - DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator); -}; - -} // namespace internal -} // namespace v8 +} } // namespace v8::internal #endif // V8_CODEGEN_H_ diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc index 14252a5892..7402e6857d 100644 --- a/deps/v8/src/compilation-cache.cc +++ b/deps/v8/src/compilation-cache.cc @@ -79,10 +79,9 @@ class CompilationSubCache { // young generation. void Age(); - bool HasFunction(SharedFunctionInfo* function_info); - // GC support. void Iterate(ObjectVisitor* v); + void IterateFunctions(ObjectVisitor* v); // Clear this sub-cache evicting all its content. void Clear(); @@ -206,27 +205,6 @@ Handle CompilationSubCache::GetTable(int generation) { } -bool CompilationSubCache::HasFunction(SharedFunctionInfo* function_info) { - if (function_info->script()->IsUndefined() || - Script::cast(function_info->script())->source()->IsUndefined()) { - return false; - } - - String* source = - String::cast(Script::cast(function_info->script())->source()); - // Check all generations. - for (int generation = 0; generation < generations(); generation++) { - if (tables_[generation]->IsUndefined()) continue; - - CompilationCacheTable* table = - CompilationCacheTable::cast(tables_[generation]); - Object* object = table->Lookup(source); - if (object->IsSharedFunctionInfo()) return true; - } - return false; -} - - void CompilationSubCache::Age() { // Age the generations implicitly killing off the oldest. for (int i = generations_ - 1; i > 0; i--) { @@ -238,6 +216,16 @@ void CompilationSubCache::Age() { } +void CompilationSubCache::IterateFunctions(ObjectVisitor* v) { + Object* undefined = Heap::raw_unchecked_undefined_value(); + for (int i = 0; i < generations_; i++) { + if (tables_[i] != undefined) { + reinterpret_cast(tables_[i])->IterateElements(v); + } + } +} + + void CompilationSubCache::Iterate(ObjectVisitor* v) { v->VisitPointers(&tables_[0], &tables_[generations_]); } @@ -528,15 +516,16 @@ void CompilationCache::Clear() { } } - -bool CompilationCache::HasFunction(SharedFunctionInfo* function_info) { - return script.HasFunction(function_info); +void CompilationCache::Iterate(ObjectVisitor* v) { + for (int i = 0; i < kSubCacheCount; i++) { + subcaches[i]->Iterate(v); + } } -void CompilationCache::Iterate(ObjectVisitor* v) { +void CompilationCache::IterateFunctions(ObjectVisitor* v) { for (int i = 0; i < kSubCacheCount; i++) { - subcaches[i]->Iterate(v); + subcaches[i]->IterateFunctions(v); } } diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h index 583f04c48a..22ecff8358 100644 --- a/deps/v8/src/compilation-cache.h +++ b/deps/v8/src/compilation-cache.h @@ -79,11 +79,9 @@ class CompilationCache { // Clear the cache - also used to initialize the cache at startup. static void Clear(); - - static bool HasFunction(SharedFunctionInfo* function_info); - // GC support. static void Iterate(ObjectVisitor* v); + static void IterateFunctions(ObjectVisitor* v); // Notify the cache that a mark-sweep garbage collection is about to // take place. 
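The CompilationSubCache changes above (Age and the new IterateFunctions) operate on a small array of generational tables. A toy model of the aging scheme, with hypothetical types rather than the V8 classes: entries enter generation 0, each Age() shifts every table one generation older, and whatever sat in the oldest generation is dropped.

// Toy generational cache; assumes at least one generation, illustrative only.
#include <map>
#include <string>
#include <vector>

class GenerationalCache {
 public:
  explicit GenerationalCache(int generations) : tables_(generations) {}

  void Put(const std::string& source, int value) { tables_[0][source] = value; }

  bool Lookup(const std::string& source, int* value) const {
    for (const auto& table : tables_) {          // check all generations
      auto it = table.find(source);
      if (it != table.end()) { *value = it->second; return true; }
    }
    return false;
  }

  void Age() {
    for (size_t i = tables_.size(); i > 1; i--) {
      tables_[i - 1] = tables_[i - 2];           // survivors move one slot older
    }
    if (!tables_.empty()) tables_[0].clear();    // youngest starts empty again
  }

 private:
  std::vector<std::map<std::string, int>> tables_;
};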
This is used to retire entries from the cache to diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 9f0162ea7e..bf6d41d854 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -33,7 +33,6 @@ #include "compiler.h" #include "data-flow.h" #include "debug.h" -#include "flow-graph.h" #include "full-codegen.h" #include "liveedit.h" #include "oprofile-agent.h" @@ -92,27 +91,6 @@ static Handle MakeCode(Handle context, CompilationInfo* info) { return Handle::null(); } - if (function->scope()->num_parameters() > 0 || - function->scope()->num_stack_slots()) { - AssignedVariablesAnalyzer ava(function); - ava.Analyze(); - if (ava.HasStackOverflow()) { - return Handle::null(); - } - } - - if (FLAG_use_flow_graph) { - FlowGraphBuilder builder; - FlowGraph* graph = builder.Build(function); - USE(graph); - -#ifdef DEBUG - if (FLAG_print_graph_text && !builder.HasStackOverflow()) { - graph->PrintAsText(function->name()); - } -#endif - } - // Generate code and return it. Code generator selection is governed by // which backends are enabled and whether the function is considered // run-once code or not: @@ -126,17 +104,13 @@ static Handle MakeCode(Handle context, CompilationInfo* info) { bool is_run_once = (shared.is_null()) ? info->scope()->is_global_scope() : (shared->is_toplevel() || shared->try_full_codegen()); - - if (AlwaysFullCompiler()) { + bool use_full = FLAG_full_compiler && !function->contains_loops(); + if (AlwaysFullCompiler() || (use_full && is_run_once)) { return FullCodeGenerator::MakeCode(info); - } else if (FLAG_full_compiler && is_run_once) { - FullCodeGenSyntaxChecker checker; - checker.Check(function); - if (checker.has_supported_syntax()) { - return FullCodeGenerator::MakeCode(info); - } } + AssignedVariablesAnalyzer ava(function); + if (!ava.Analyze()) return Handle::null(); return CodeGenerator::MakeCode(info); } @@ -442,6 +416,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) { // object last we avoid this. shared->set_scope_info(*SerializedScopeInfo::Create(info->scope())); shared->set_code(*code); + if (!info->closure().is_null()) { + info->closure()->set_code(*code); + } // Set the expected number of properties for instances. SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count()); @@ -454,6 +431,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) { // Check the function has compiled code. ASSERT(shared->is_compiled()); + shared->set_code_age(0); return true; } @@ -489,49 +467,19 @@ Handle Compiler::BuildFunctionInfo(FunctionLiteral* literal, return Handle::null(); } - if (literal->scope()->num_parameters() > 0 || - literal->scope()->num_stack_slots()) { - AssignedVariablesAnalyzer ava(literal); - ava.Analyze(); - if (ava.HasStackOverflow()) { - return Handle::null(); - } - } - - if (FLAG_use_flow_graph) { - FlowGraphBuilder builder; - FlowGraph* graph = builder.Build(literal); - USE(graph); - -#ifdef DEBUG - if (FLAG_print_graph_text && !builder.HasStackOverflow()) { - graph->PrintAsText(literal->name()); - } -#endif - } - // Generate code and return it. The way that the compilation mode // is controlled by the command-line flags is described in // the static helper function MakeCode. 
CompilationInfo info(literal, script, false); bool is_run_once = literal->try_full_codegen(); - bool is_compiled = false; - - if (AlwaysFullCompiler()) { + bool use_full = FLAG_full_compiler && !literal->contains_loops(); + if (AlwaysFullCompiler() || (use_full && is_run_once)) { code = FullCodeGenerator::MakeCode(&info); - is_compiled = true; - } else if (FLAG_full_compiler && is_run_once) { - FullCodeGenSyntaxChecker checker; - checker.Check(literal); - if (checker.has_supported_syntax()) { - code = FullCodeGenerator::MakeCode(&info); - is_compiled = true; - } - } - - if (!is_compiled) { + } else { // We fall back to the classic V8 code generator. + AssignedVariablesAnalyzer ava(literal); + if (!ava.Analyze()) return Handle::null(); code = CodeGenerator::MakeCode(&info); } diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index d1c98bd954..78dda6a64a 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -28,6 +28,9 @@ #ifndef V8_CONTEXTS_H_ #define V8_CONTEXTS_H_ +#include "heap.h" +#include "objects.h" + namespace v8 { namespace internal { @@ -86,6 +89,7 @@ enum ContextLookupFlags { V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \ V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \ V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \ + V(NORMALIZED_MAP_CACHE_INDEX, NormalizedMapCache, normalized_map_cache) \ V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \ V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \ V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \ @@ -211,6 +215,7 @@ class Context: public FixedArray { CONFIGURE_GLOBAL_INDEX, FUNCTION_CACHE_INDEX, JSFUNCTION_RESULT_CACHES_INDEX, + NORMALIZED_MAP_CACHE_INDEX, RUNTIME_CONTEXT_INDEX, CALL_AS_FUNCTION_DELEGATE_INDEX, CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, @@ -243,7 +248,8 @@ class Context: public FixedArray { GlobalObject* global() { Object* result = get(GLOBAL_INDEX); - ASSERT(IsBootstrappingOrGlobalObject(result)); + ASSERT(Heap::gc_state() != Heap::NOT_IN_GC || + IsBootstrappingOrGlobalObject(result)); return reinterpret_cast(result); } void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); } diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index 1e2bb20c4f..90cdc773ea 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -733,11 +733,18 @@ double StringToInt(String* str, int radix) { double StringToDouble(const char* str, int flags, double empty_string_val) { const char* end = str + StrLength(str); - return InternalStringToDouble(str, end, flags, empty_string_val); } +double StringToDouble(Vector str, + int flags, + double empty_string_val) { + const char* end = str.start() + str.length(); + return InternalStringToDouble(str.start(), end, flags, empty_string_val); +} + + extern "C" char* dtoa(double d, int mode, int ndigits, int* decpt, int* sign, char** rve); diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h index c4ceea6b90..9e32a0cdb5 100644 --- a/deps/v8/src/conversions.h +++ b/deps/v8/src/conversions.h @@ -96,8 +96,12 @@ static inline uint32_t NumberToUint32(Object* number); // Converts a string into a double value according to ECMA-262 9.3.1 -double StringToDouble(const char* str, int flags, double empty_string_val = 0); double StringToDouble(String* str, int flags, double empty_string_val = 0); +double StringToDouble(Vector str, + int flags, + double empty_string_val = 0); +// This version expects a zero-terminated character array. 
+double StringToDouble(const char* str, int flags, double empty_string_val = 0); // Converts a string into an integer. double StringToInt(String* str, int radix); diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index 3e554ccebd..4248a64338 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -46,7 +46,7 @@ static const int kTickSamplesBufferChunksCount = 16; ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) : generator_(generator), - running_(false), + running_(true), ticks_buffer_(sizeof(TickSampleEventRecord), kTickSamplesBufferChunkSize, kTickSamplesBufferChunksCount), @@ -235,8 +235,19 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { const TickSampleEventRecord* rec = TickSampleEventRecord::cast(ticks_buffer_.StartDequeue()); if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty(); - if (rec->order == dequeue_order) { - generator_->RecordTickSample(rec->sample); + // Make a local copy of tick sample record to ensure that it won't + // be modified as we are processing it. This is possible as the + // sampler writes w/o any sync to the queue, so if the processor + // will get far behind, a record may be modified right under its + // feet. + TickSampleEventRecord record = *rec; + if (record.order == dequeue_order) { + // A paranoid check to make sure that we don't get a memory overrun + // in case of frames_count having a wild value. + if (record.sample.frames_count < 0 + || record.sample.frames_count >= TickSample::kMaxFramesCount) + record.sample.frames_count = 0; + generator_->RecordTickSample(record.sample); ticks_buffer_.FinishDequeue(); } else { return true; @@ -247,7 +258,6 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { void ProfilerEventsProcessor::Run() { unsigned dequeue_order = 0; - running_ = true; while (running_) { // Process ticks until we have any. diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index 7fd7925baa..5a1e63a763 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -486,7 +486,7 @@ void Shell::Initialize() { // Start the debugger agent if requested. if (i::FLAG_debugger_agent) { - v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port); + v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true); } // Start the in-process debugger if requested. diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc index 55d85825b2..d480c1bcf9 100644 --- a/deps/v8/src/data-flow.cc +++ b/deps/v8/src/data-flow.cc @@ -50,258 +50,13 @@ void BitVector::Print() { #endif -void AstLabeler::Label(CompilationInfo* info) { - info_ = info; - VisitStatements(info_->function()->body()); -} - - -void AstLabeler::VisitStatements(ZoneList* stmts) { - for (int i = 0, len = stmts->length(); i < len; i++) { - Visit(stmts->at(i)); - } -} - - -void AstLabeler::VisitDeclarations(ZoneList* decls) { - UNREACHABLE(); -} - - -void AstLabeler::VisitBlock(Block* stmt) { - VisitStatements(stmt->statements()); -} - - -void AstLabeler::VisitExpressionStatement( - ExpressionStatement* stmt) { - Visit(stmt->expression()); -} - - -void AstLabeler::VisitEmptyStatement(EmptyStatement* stmt) { - // Do nothing. 
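The cpu-profiler.cc change above copies each tick record out of the lock-free buffer before using it, because the sampler writes into the queue without synchronization and may overwrite a slot while the processor thread is still reading it. A minimal sketch of that copy-then-validate pattern, with illustrative types and limits (not the V8 ones), mirroring the pragmatic approach taken in the patch:

// Copy-then-validate: snapshot the shared record first, then sanity-check
// only the private copy, so a concurrent writer cannot invalidate a field
// after it has been checked.
static const int kMaxFramesCount = 64;

struct TickSample {
  int frames_count;
  void* frames[kMaxFramesCount];
};

int ProcessTick(const TickSample* shared_slot, void** out_frames) {
  TickSample local = *shared_slot;   // snapshot before any checks
  if (local.frames_count < 0 || local.frames_count >= kMaxFramesCount) {
    local.frames_count = 0;          // drop a wild value instead of overrunning
  }
  for (int i = 0; i < local.frames_count; i++) {
    out_frames[i] = local.frames[i];
  }
  return local.frames_count;
}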
-} - - -void AstLabeler::VisitIfStatement(IfStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitContinueStatement(ContinueStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitBreakStatement(BreakStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitReturnStatement(ReturnStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitWithEnterStatement( - WithEnterStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitWithExitStatement(WithExitStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitSwitchStatement(SwitchStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitDoWhileStatement(DoWhileStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitWhileStatement(WhileStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitForStatement(ForStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitForInStatement(ForInStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitTryCatchStatement(TryCatchStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitTryFinallyStatement( - TryFinallyStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitDebuggerStatement( - DebuggerStatement* stmt) { - UNREACHABLE(); -} - - -void AstLabeler::VisitFunctionLiteral(FunctionLiteral* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitConditional(Conditional* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitSlot(Slot* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitVariableProxy(VariableProxy* expr) { - expr->set_num(next_number_++); - Variable* var = expr->var(); - if (var->is_global() && !var->is_this()) { - info_->set_has_globals(true); - } -} - - -void AstLabeler::VisitLiteral(Literal* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitRegExpLiteral(RegExpLiteral* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitObjectLiteral(ObjectLiteral* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitArrayLiteral(ArrayLiteral* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitCatchExtensionObject( - CatchExtensionObject* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitAssignment(Assignment* expr) { - Property* prop = expr->target()->AsProperty(); - ASSERT(prop != NULL); - ASSERT(prop->key()->IsPropertyName()); - VariableProxy* proxy = prop->obj()->AsVariableProxy(); - USE(proxy); - ASSERT(proxy != NULL && proxy->var()->is_this()); - info()->set_has_this_properties(true); - - prop->obj()->set_num(AstNode::kNoNumber); - prop->key()->set_num(AstNode::kNoNumber); - Visit(expr->value()); - expr->set_num(next_number_++); -} - - -void AstLabeler::VisitThrow(Throw* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitProperty(Property* expr) { - ASSERT(expr->key()->IsPropertyName()); - VariableProxy* proxy = expr->obj()->AsVariableProxy(); - USE(proxy); - ASSERT(proxy != NULL && proxy->var()->is_this()); - info()->set_has_this_properties(true); - - expr->obj()->set_num(AstNode::kNoNumber); - expr->key()->set_num(AstNode::kNoNumber); - expr->set_num(next_number_++); -} - - -void AstLabeler::VisitCall(Call* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitCallNew(CallNew* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitCallRuntime(CallRuntime* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitUnaryOperation(UnaryOperation* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitCountOperation(CountOperation* expr) { - 
UNREACHABLE(); -} - - -void AstLabeler::VisitBinaryOperation(BinaryOperation* expr) { - Visit(expr->left()); - Visit(expr->right()); - expr->set_num(next_number_++); -} - - -void AstLabeler::VisitCompareOperation(CompareOperation* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitThisFunction(ThisFunction* expr) { - UNREACHABLE(); -} - - -void AstLabeler::VisitDeclaration(Declaration* decl) { - UNREACHABLE(); -} - - -AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(FunctionLiteral* fun) - : fun_(fun), - av_(fun->scope()->num_parameters() + fun->scope()->num_stack_slots()) {} - - -void AssignedVariablesAnalyzer::Analyze() { - ASSERT(av_.length() > 0); +bool AssignedVariablesAnalyzer::Analyze() { + Scope* scope = fun_->scope(); + int variables = scope->num_parameters() + scope->num_stack_slots(); + if (variables == 0) return true; + av_.ExpandTo(variables); VisitStatements(fun_->body()); + return !HasStackOverflow(); } @@ -394,7 +149,7 @@ void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) { !var->is_arguments() && var->mode() != Variable::CONST && (var->is_this() || !av_.Contains(BitIndex(var)))) { - expr->AsVariableProxy()->set_is_trivial(true); + expr->AsVariableProxy()->MarkAsTrivial(); } } @@ -489,9 +244,7 @@ void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) { void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) { if (stmt->init() != NULL) Visit(stmt->init()); - if (stmt->cond() != NULL) ProcessExpression(stmt->cond()); - if (stmt->next() != NULL) Visit(stmt->next()); // Process loop body. After visiting the loop body av_ contains @@ -504,7 +257,6 @@ void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) { if (var != NULL && !av_.Contains(BitIndex(var))) { stmt->set_loop_variable(var); } - av_.Union(saved_av); } @@ -712,13 +464,20 @@ void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) { void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) { ASSERT(av_.IsEmpty()); + MarkIfTrivial(expr->expression()); Visit(expr->expression()); } +void AssignedVariablesAnalyzer::VisitIncrementOperation( + IncrementOperation* expr) { + UNREACHABLE(); +} + + void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) { ASSERT(av_.IsEmpty()); - + if (expr->is_prefix()) MarkIfTrivial(expr->expression()); Visit(expr->expression()); Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); @@ -744,6 +503,13 @@ void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) { } +void AssignedVariablesAnalyzer::VisitCompareToNull(CompareToNull* expr) { + ASSERT(av_.IsEmpty()); + MarkIfTrivial(expr->expression()); + Visit(expr->expression()); +} + + void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) { // Nothing to do. 
ASSERT(av_.IsEmpty()); diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h index 079da65b4d..540db162f6 100644 --- a/deps/v8/src/data-flow.h +++ b/deps/v8/src/data-flow.h @@ -42,12 +42,10 @@ class Node; class BitVector: public ZoneObject { public: - explicit BitVector(int length) - : length_(length), - data_length_(SizeFor(length)), - data_(Zone::NewArray(data_length_)) { - ASSERT(length > 0); - Clear(); + BitVector() : length_(0), data_length_(0), data_(NULL) { } + + explicit BitVector(int length) { + ExpandTo(length); } BitVector(const BitVector& other) @@ -57,8 +55,12 @@ class BitVector: public ZoneObject { CopyFrom(other); } - static int SizeFor(int length) { - return 1 + ((length - 1) / 32); + void ExpandTo(int length) { + ASSERT(length > 0); + length_ = length; + data_length_ = SizeFor(length); + data_ = Zone::NewArray(data_length_); + Clear(); } BitVector& operator=(const BitVector& rhs) { @@ -137,6 +139,10 @@ class BitVector: public ZoneObject { #endif private: + static int SizeFor(int length) { + return 1 + ((length - 1) / 32); + } + int length_; int data_length_; uint32_t* data_; @@ -187,63 +193,13 @@ class WorkList BASE_EMBEDDED { }; -struct ReachingDefinitionsData BASE_EMBEDDED { - public: - ReachingDefinitionsData() : rd_in_(NULL), kill_(NULL), gen_(NULL) {} - - void Initialize(int definition_count) { - rd_in_ = new BitVector(definition_count); - kill_ = new BitVector(definition_count); - gen_ = new BitVector(definition_count); - } - - BitVector* rd_in() { return rd_in_; } - BitVector* kill() { return kill_; } - BitVector* gen() { return gen_; } - - private: - BitVector* rd_in_; - BitVector* kill_; - BitVector* gen_; -}; - - -// This class is used to number all expressions in the AST according to -// their evaluation order (post-order left-to-right traversal). -class AstLabeler: public AstVisitor { - public: - AstLabeler() : next_number_(0) {} - - void Label(CompilationInfo* info); - - private: - CompilationInfo* info() { return info_; } - - void VisitDeclarations(ZoneList* decls); - void VisitStatements(ZoneList* stmts); - - // AST node visit functions. -#define DECLARE_VISIT(type) virtual void Visit##type(type* node); - AST_NODE_LIST(DECLARE_VISIT) -#undef DECLARE_VISIT - - // Traversal number for labelling AST nodes. - int next_number_; - - CompilationInfo* info_; - - DISALLOW_COPY_AND_ASSIGN(AstLabeler); -}; - - // Computes the set of assigned variables and annotates variables proxies // that are trivial sub-expressions and for-loops where the loop variable // is guaranteed to be a smi. class AssignedVariablesAnalyzer : public AstVisitor { public: - explicit AssignedVariablesAnalyzer(FunctionLiteral* fun); - - void Analyze(); + explicit AssignedVariablesAnalyzer(FunctionLiteral* fun) : fun_(fun) { } + bool Analyze(); private: Variable* FindSmiLoopVariable(ForStatement* stmt); diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js index 9c42a04f67..b101ea66b5 100644 --- a/deps/v8/src/date.js +++ b/deps/v8/src/date.js @@ -137,12 +137,18 @@ var DST_offset_cache = { // Time interval where the cached offset is valid. start: 0, end: -1, // Size of next interval expansion. - increment: 0 + increment: 0, + initial_increment: 19 * msPerDay }; // NOTE: The implementation relies on the fact that no time zones have -// more than one daylight savings offset change per month. +// more than one daylight savings offset change per 19 days. +// +// In Egypt in 2010 they decided to suspend DST during Ramadan. 
This +// led to a short interval where DST is in effect from September 10 to +// September 30. +// // If this function is called with NaN it returns NaN. function DaylightSavingsOffset(t) { // Load the cache object from the builtins object. @@ -171,7 +177,7 @@ function DaylightSavingsOffset(t) { // the offset in the cache, we grow the cached time interval // and return the offset. cache.end = new_end; - cache.increment = msPerMonth; + cache.increment = cache.initial_increment; return end_offset; } else { var offset = %DateDaylightSavingsOffset(EquivalentTime(t)); @@ -182,7 +188,7 @@ function DaylightSavingsOffset(t) { // the interval to reflect this and reset the increment. cache.start = t; cache.end = new_end; - cache.increment = msPerMonth; + cache.increment = cache.initial_increment; } else { // The interval contains a DST offset change and the given time is // before it. Adjust the increment to avoid a linear search for @@ -207,7 +213,7 @@ function DaylightSavingsOffset(t) { var offset = %DateDaylightSavingsOffset(EquivalentTime(t)); cache.offset = offset; cache.start = cache.end = t; - cache.increment = msPerMonth; + cache.increment = cache.initial_increment; return offset; } diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h index d999d9ca7d..cae9b08d5b 100644 --- a/deps/v8/src/dateparser.h +++ b/deps/v8/src/dateparser.h @@ -92,7 +92,7 @@ class DateParser : public AllStatic { int ReadWord(uint32_t* prefix, int prefix_size) { int len; for (len = 0; IsAsciiAlphaOrAbove(); Next(), len++) { - if (len < prefix_size) prefix[len] = GetAsciiAlphaLower(); + if (len < prefix_size) prefix[len] = AsciiAlphaToLower(ch_); } for (int i = len; i < prefix_size; i++) prefix[i] = 0; return len; @@ -130,10 +130,6 @@ class DateParser : public AllStatic { bool HasReadNumber() const { return has_read_number_; } private: - // If current character is in 'A'-'Z' or 'a'-'z', return its lower-case. - // Else, return something outside of 'A'-'Z' and 'a'-'z'. - uint32_t GetAsciiAlphaLower() const { return ch_ | 32; } - int index_; Vector buffer_; bool has_read_number_; diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index c13c8c9878..87780d350c 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -461,6 +461,8 @@ void BreakLocationIterator::SetDebugBreakAtIC() { KeyedStoreIC::ClearInlinedVersion(pc()); } else if (code->is_load_stub()) { LoadIC::ClearInlinedVersion(pc()); + } else if (code->is_store_stub()) { + StoreIC::ClearInlinedVersion(pc()); } } } @@ -549,6 +551,7 @@ void Debug::ThreadInit() { thread_local_.after_break_target_ = 0; thread_local_.debugger_entry_ = NULL; thread_local_.pending_interrupts_ = 0; + thread_local_.restarter_frame_function_pointer_ = NULL; } @@ -1004,17 +1007,18 @@ Handle Debug::CheckBreakPoints(Handle break_point_objects) { for (int i = 0; i < array->length(); i++) { Handle o(array->get(i)); if (CheckBreakPoint(o)) { - break_points_hit->SetElement(break_points_hit_count++, *o); + SetElement(break_points_hit, break_points_hit_count++, o); } } } else { if (CheckBreakPoint(break_point_objects)) { - break_points_hit->SetElement(break_points_hit_count++, - *break_point_objects); + SetElement(break_points_hit, + break_points_hit_count++, + break_point_objects); } } - // Return undefined if no break points where triggered. + // Return undefined if no break points were triggered. 
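The date.js change above caps the cache growth step at 19 days so that at most one DST transition can fall inside a single probe interval; the Egypt 2010 suspension broke the old once-per-month assumption. A minimal sketch of the interval-cache idea, written in C++ with hypothetical names and a stubbed-out offset query, not the actual date.js logic:

// Interval cache: remember one offset plus the time range over which it was
// observed, and grow that range in bounded probes so a DST change cannot be
// skipped over. ComputeOffset stands in for the expensive runtime query.
#include <cstdint>

const int64_t kMsPerDay = 24LL * 60 * 60 * 1000;
const int64_t kInitialIncrement = 19 * kMsPerDay;

int64_t ComputeOffset(int64_t time_ms) {
  // Placeholder for the real (slow) OS/runtime lookup.
  return (time_ms / (180 * kMsPerDay)) % 2 == 0 ? 0 : 3600 * 1000;
}

struct DstCache {
  int64_t start = 0, end = -1;  // range where 'offset' is known to be valid
  int64_t offset = 0;
  int64_t increment = kInitialIncrement;
};

int64_t DaylightSavingsOffset(DstCache* cache, int64_t t) {
  if (cache->start <= t && t <= cache->end) return cache->offset;  // fast path
  int64_t probe = cache->end + cache->increment;
  if (cache->end >= cache->start && t <= probe &&
      ComputeOffset(probe) == cache->offset) {
    cache->end = probe;  // same offset at the probe point: extend the range
    return cache->offset;
  }
  // Miss: recompute at t and restart the cached range around it.
  cache->offset = ComputeOffset(t);
  cache->start = cache->end = t;
  cache->increment = kInitialIncrement;
  return cache->offset;
}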
if (break_points_hit_count == 0) { return Factory::undefined_value(); } @@ -1440,7 +1444,7 @@ bool Debug::IsDebugBreak(Address addr) { // Check whether a code stub with the specified major key is a possible break // point location when looking for source break locations. bool Debug::IsSourceBreakStub(Code* code) { - CodeStub::Major major_key = code->major_key(); + CodeStub::Major major_key = CodeStub::GetMajorKey(code); return major_key == CodeStub::CallFunction; } @@ -1448,7 +1452,7 @@ bool Debug::IsSourceBreakStub(Code* code) { // Check whether a code stub with the specified major key is a possible break // location. bool Debug::IsBreakStub(Code* code) { - CodeStub::Major major_key = code->major_key(); + CodeStub::Major major_key = CodeStub::GetMajorKey(code); return major_key == CodeStub::CallFunction || major_key == CodeStub::StackCheck; } diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index 98d1919423..8b3b29e636 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -29,7 +29,6 @@ #define V8_DEBUG_H_ #include "assembler.h" -#include "code-stubs.h" #include "debug-agent.h" #include "execution.h" #include "factory.h" @@ -332,8 +331,7 @@ class Debug { k_after_break_target_address, k_debug_break_return_address, k_debug_break_slot_address, - k_restarter_frame_function_pointer, - k_register_address + k_restarter_frame_function_pointer }; // Support for setting the address to jump to when returning from break point. @@ -953,10 +951,7 @@ class DisableBreak BASE_EMBEDDED { // code. class Debug_Address { public: - Debug_Address(Debug::AddressId id, int reg = 0) - : id_(id), reg_(reg) { - ASSERT(reg == 0 || id == Debug::k_register_address); - } + explicit Debug_Address(Debug::AddressId id) : id_(id) { } static Debug_Address AfterBreakTarget() { return Debug_Address(Debug::k_after_break_target_address); @@ -970,10 +965,6 @@ class Debug_Address { return Debug_Address(Debug::k_restarter_frame_function_pointer); } - static Debug_Address Register(int reg) { - return Debug_Address(Debug::k_register_address, reg); - } - Address address() const { switch (id_) { case Debug::k_after_break_target_address: @@ -985,8 +976,6 @@ class Debug_Address { case Debug::k_restarter_frame_function_pointer: return reinterpret_cast
( Debug::restarter_frame_function_pointer_address()); - case Debug::k_register_address: - return reinterpret_cast<Address>
(Debug::register_address(reg_)); default: UNREACHABLE(); return NULL; @@ -994,7 +983,6 @@ class Debug_Address { } private: Debug::AddressId id_; - int reg_; }; // The optional thread that Debug Agent may use to temporary call V8 to process diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc index 19cb6af728..e79421fe2f 100644 --- a/deps/v8/src/disassembler.cc +++ b/deps/v8/src/disassembler.cc @@ -258,11 +258,12 @@ static int DecodeIt(FILE* f, // Get the STUB key and extract major and minor key. uint32_t key = Smi::cast(obj)->value(); uint32_t minor_key = CodeStub::MinorKeyFromKey(key); - ASSERT(code->major_key() == CodeStub::MajorKeyFromKey(key)); + CodeStub::Major major_key = CodeStub::GetMajorKey(code); + ASSERT(major_key == CodeStub::MajorKeyFromKey(key)); out.AddFormatted(" %s, %s, ", Code::Kind2String(kind), - CodeStub::MajorName(code->major_key(), false)); - switch (code->major_key()) { + CodeStub::MajorName(major_key, false)); + switch (major_key) { case CodeStub::CallFunction: out.AddFormatted("argc = %d", minor_key); break; diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index a6b15ccb45..54216784a7 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -710,7 +710,7 @@ class SimpleStringResource : public Base { : data_(data), length_(length) {} - virtual ~SimpleStringResource() { delete data_; } + virtual ~SimpleStringResource() { delete[] data_; } virtual const Char* data() const { return data_; } diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index a143bcd6f5..a63088d5a5 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -283,7 +283,7 @@ DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the " "debugger agent in another process") DEFINE_bool(debugger_agent, false, "Enable debugger agent") DEFINE_int(debugger_port, 5858, "Port to use for remote debugging") -DEFINE_string(map_counters, false, "Map counters to a file") +DEFINE_string(map_counters, NULL, "Map counters to a file") DEFINE_args(js_arguments, JSArguments(), "Pass all remaining arguments to the script. Alias for \"--\".") diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags.h index a8eca95c2c..f9cbde0bf7 100644 --- a/deps/v8/src/flags.h +++ b/deps/v8/src/flags.h @@ -27,8 +27,6 @@ #ifndef V8_FLAGS_H_ #define V8_FLAGS_H_ -#include "checks.h" - namespace v8 { namespace internal { diff --git a/deps/v8/src/flow-graph.cc b/deps/v8/src/flow-graph.cc deleted file mode 100644 index 02a2cd9cfe..0000000000 --- a/deps/v8/src/flow-graph.cc +++ /dev/null @@ -1,763 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
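The one-character fix in execution.cc above (delete changed to delete[]) matters because SimpleStringResource owns a buffer that, as the fix implies, was allocated with new[]. A tiny, hypothetical illustration of the correct pairing, not the V8 class:

// Memory obtained with new[] must be released with delete[]; pairing new[]
// with plain delete is undefined behavior and skips element destructors for
// non-trivial types. (Copying is deliberately left out of this sketch.)
#include <cstring>

class OwnedBuffer {
 public:
  OwnedBuffer(const char* src, size_t length)
      : data_(new char[length]), length_(length) {
    std::memcpy(data_, src, length);
  }
  ~OwnedBuffer() { delete[] data_; }   // matches the new[] in the constructor

  const char* data() const { return data_; }
  size_t length() const { return length_; }

 private:
  char* data_;
  size_t length_;
};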
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include "flow-graph.h" -#include "scopes.h" - -namespace v8 { -namespace internal { - -void BasicBlock::BuildTraversalOrder(ZoneList* preorder, - ZoneList* postorder, - bool mark) { - if (mark_ == mark) return; - mark_ = mark; - preorder->Add(this); - if (right_successor_ != NULL) { - right_successor_->BuildTraversalOrder(preorder, postorder, mark); - } - if (left_successor_ != NULL) { - left_successor_->BuildTraversalOrder(preorder, postorder, mark); - } - postorder->Add(this); -} - - -FlowGraph* FlowGraphBuilder::Build(FunctionLiteral* lit) { - // Create new entry and exit nodes. These will not change during - // construction. - entry_ = new BasicBlock(NULL); - exit_ = new BasicBlock(NULL); - // Begin accumulating instructions in the entry block. - current_ = entry_; - - VisitDeclarations(lit->scope()->declarations()); - VisitStatements(lit->body()); - // In the event of stack overflow or failure to handle a syntactic - // construct, return an invalid flow graph. - if (HasStackOverflow()) return new FlowGraph(NULL, NULL); - - // If current is not the exit, add a link to the exit. - if (current_ != exit_) { - // If current already has a successor (i.e., will be a branch node) and - // if the exit already has a predecessor, insert an empty block to - // maintain edge split form. - if (current_->HasSuccessor() && exit_->HasPredecessor()) { - current_ = new BasicBlock(current_); - } - Literal* undefined = new Literal(Factory::undefined_value()); - current_->AddInstruction(new ReturnStatement(undefined)); - exit_->AddPredecessor(current_); - } - - FlowGraph* graph = new FlowGraph(entry_, exit_); - bool mark = !entry_->GetMark(); - entry_->BuildTraversalOrder(graph->preorder(), graph->postorder(), mark); - -#ifdef DEBUG - // Number the nodes in reverse postorder. - int n = 0; - for (int i = graph->postorder()->length() - 1; i >= 0; --i) { - graph->postorder()->at(i)->set_number(n++); - } -#endif - - return graph; -} - - -void FlowGraphBuilder::VisitDeclaration(Declaration* decl) { - Variable* var = decl->proxy()->AsVariable(); - Slot* slot = var->slot(); - // We allow only declarations that do not require code generation. - // The following all require code generation: global variables and - // functions, variables with slot type LOOKUP, declarations with - // mode CONST, and functions. - - if (var->is_global() || - (slot != NULL && slot->type() == Slot::LOOKUP) || - decl->mode() == Variable::CONST || - decl->fun() != NULL) { - // Here and in the rest of the flow graph builder we indicate an - // unsupported syntactic construct by setting the stack overflow - // flag on the visitor. This causes bailout of the visitor. 
- SetStackOverflow(); - } -} - - -void FlowGraphBuilder::VisitBlock(Block* stmt) { - VisitStatements(stmt->statements()); -} - - -void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) { - Visit(stmt->expression()); -} - - -void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) { - // Nothing to do. -} - - -void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) { - // Build a diamond in the flow graph. First accumulate the instructions - // of the test in the current basic block. - Visit(stmt->condition()); - - // Remember the branch node and accumulate the true branch as its left - // successor. This relies on the successors being added left to right. - BasicBlock* branch = current_; - current_ = new BasicBlock(branch); - Visit(stmt->then_statement()); - - // Construct a join node and then accumulate the false branch in a fresh - // successor of the branch node. - BasicBlock* join = new BasicBlock(current_); - current_ = new BasicBlock(branch); - Visit(stmt->else_statement()); - join->AddPredecessor(current_); - - current_ = join; -} - - -void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) { - // Build a loop in the flow graph. First accumulate the instructions of - // the initializer in the current basic block. - if (stmt->init() != NULL) Visit(stmt->init()); - - // Create a new basic block for the test. This will be the join node. - BasicBlock* join = new BasicBlock(current_); - current_ = join; - if (stmt->cond() != NULL) Visit(stmt->cond()); - - // The current node is the branch node. Create a new basic block to begin - // the body. - BasicBlock* branch = current_; - current_ = new BasicBlock(branch); - Visit(stmt->body()); - if (stmt->next() != NULL) Visit(stmt->next()); - - // Add the backward edge from the end of the body and continue with the - // false arm of the branch. - join->AddPredecessor(current_); - current_ = new BasicBlock(branch); -} - - -void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitConditional(Conditional* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitSlot(Slot* expr) { - // Slots do not appear in the AST. 
- UNREACHABLE(); -} - - -void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) { - current_->AddInstruction(expr); -} - - -void FlowGraphBuilder::VisitLiteral(Literal* expr) { - current_->AddInstruction(expr); -} - - -void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitAssignment(Assignment* expr) { - // There are three basic kinds of assignment: variable assignments, - // property assignments, and invalid left-hand sides (which are translated - // to "throw ReferenceError" by the parser). - Variable* var = expr->target()->AsVariableProxy()->AsVariable(); - Property* prop = expr->target()->AsProperty(); - ASSERT(var == NULL || prop == NULL); - if (var != NULL) { - if (expr->is_compound() && !expr->target()->IsTrivial()) { - Visit(expr->target()); - } - if (!expr->value()->IsTrivial()) Visit(expr->value()); - current_->AddInstruction(expr); - - } else if (prop != NULL) { - if (!prop->obj()->IsTrivial()) Visit(prop->obj()); - if (!prop->key()->IsPropertyName() && !prop->key()->IsTrivial()) { - Visit(prop->key()); - } - if (!expr->value()->IsTrivial()) Visit(expr->value()); - current_->AddInstruction(expr); - - } else { - Visit(expr->target()); - } -} - - -void FlowGraphBuilder::VisitThrow(Throw* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitProperty(Property* expr) { - if (!expr->obj()->IsTrivial()) Visit(expr->obj()); - if (!expr->key()->IsPropertyName() && !expr->key()->IsTrivial()) { - Visit(expr->key()); - } - current_->AddInstruction(expr); -} - - -void FlowGraphBuilder::VisitCall(Call* expr) { - Visit(expr->expression()); - VisitExpressions(expr->arguments()); - current_->AddInstruction(expr); -} - - -void FlowGraphBuilder::VisitCallNew(CallNew* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) { - SetStackOverflow(); -} - - -void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { - switch (expr->op()) { - case Token::NOT: - case Token::BIT_NOT: - case Token::DELETE: - case Token::TYPEOF: - case Token::VOID: - SetStackOverflow(); - break; - - case Token::ADD: - case Token::SUB: - Visit(expr->expression()); - current_->AddInstruction(expr); - break; - - default: - UNREACHABLE(); - } -} - - -void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) { - Visit(expr->expression()); - current_->AddInstruction(expr); -} - - -void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { - switch (expr->op()) { - case Token::COMMA: - case Token::OR: - case Token::AND: - SetStackOverflow(); - break; - - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: - case Token::SHL: - case Token::SAR: - case Token::SHR: - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: - if (!expr->left()->IsTrivial()) Visit(expr->left()); - if (!expr->right()->IsTrivial()) Visit(expr->right()); - current_->AddInstruction(expr); - break; - - default: - UNREACHABLE(); - } -} - - -void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) { - switch (expr->op()) { - case Token::EQ: - case Token::NE: - case Token::EQ_STRICT: - case Token::NE_STRICT: - case Token::INSTANCEOF: - case Token::IN: - 
SetStackOverflow(); - break; - - case Token::LT: - case Token::GT: - case Token::LTE: - case Token::GTE: - if (!expr->left()->IsTrivial()) Visit(expr->left()); - if (!expr->right()->IsTrivial()) Visit(expr->right()); - current_->AddInstruction(expr); - break; - - default: - UNREACHABLE(); - } -} - - -void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) { - SetStackOverflow(); -} - - -#ifdef DEBUG - -// Print a textual representation of an instruction in a flow graph. -class InstructionPrinter: public AstVisitor { - public: - InstructionPrinter() {} - - private: - // Overridden from the base class. - virtual void VisitExpressions(ZoneList* exprs); - - // AST node visit functions. -#define DECLARE_VISIT(type) virtual void Visit##type(type* node); - AST_NODE_LIST(DECLARE_VISIT) -#undef DECLARE_VISIT - - DISALLOW_COPY_AND_ASSIGN(InstructionPrinter); -}; - - -static void PrintSubexpression(Expression* expr) { - if (!expr->IsTrivial()) { - PrintF("@%d", expr->num()); - } else if (expr->AsLiteral() != NULL) { - expr->AsLiteral()->handle()->Print(); - } else if (expr->AsVariableProxy() != NULL) { - PrintF("%s", *expr->AsVariableProxy()->name()->ToCString()); - } else { - UNREACHABLE(); - } -} - - -void InstructionPrinter::VisitExpressions(ZoneList* exprs) { - for (int i = 0; i < exprs->length(); ++i) { - if (i != 0) PrintF(", "); - PrintF("@%d", exprs->at(i)->num()); - } -} - - -// We only define printing functions for the node types that can occur as -// instructions in a flow graph. The rest are unreachable. -void InstructionPrinter::VisitDeclaration(Declaration* decl) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitBlock(Block* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitExpressionStatement(ExpressionStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitEmptyStatement(EmptyStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitIfStatement(IfStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitContinueStatement(ContinueStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitBreakStatement(BreakStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitReturnStatement(ReturnStatement* stmt) { - PrintF("return "); - PrintSubexpression(stmt->expression()); -} - - -void InstructionPrinter::VisitWithEnterStatement(WithEnterStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitWithExitStatement(WithExitStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitSwitchStatement(SwitchStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitDoWhileStatement(DoWhileStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitWhileStatement(WhileStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitForStatement(ForStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitForInStatement(ForInStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitTryCatchStatement(TryCatchStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitTryFinallyStatement(TryFinallyStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitDebuggerStatement(DebuggerStatement* stmt) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - UNREACHABLE(); -} - - -void 
InstructionPrinter::VisitConditional(Conditional* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitSlot(Slot* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitVariableProxy(VariableProxy* expr) { - Variable* var = expr->AsVariable(); - if (var != NULL) { - PrintF("%s", *var->name()->ToCString()); - } else { - ASSERT(expr->AsProperty() != NULL); - Visit(expr->AsProperty()); - } -} - - -void InstructionPrinter::VisitLiteral(Literal* expr) { - expr->handle()->Print(); -} - - -void InstructionPrinter::VisitRegExpLiteral(RegExpLiteral* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitObjectLiteral(ObjectLiteral* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitArrayLiteral(ArrayLiteral* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitCatchExtensionObject( - CatchExtensionObject* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitAssignment(Assignment* expr) { - Variable* var = expr->target()->AsVariableProxy()->AsVariable(); - Property* prop = expr->target()->AsProperty(); - - // Print the left-hand side. - Visit(expr->target()); - if (var == NULL && prop == NULL) return; // Throw reference error. - PrintF(" = "); - // For compound assignments, print the left-hand side again and the - // corresponding binary operator. - if (expr->is_compound()) { - PrintSubexpression(expr->target()); - PrintF(" %s ", Token::String(expr->binary_op())); - } - - // Print the right-hand side. - PrintSubexpression(expr->value()); -} - - -void InstructionPrinter::VisitThrow(Throw* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitProperty(Property* expr) { - PrintSubexpression(expr->obj()); - if (expr->key()->IsPropertyName()) { - PrintF("."); - ASSERT(expr->key()->AsLiteral() != NULL); - expr->key()->AsLiteral()->handle()->Print(); - } else { - PrintF("["); - PrintSubexpression(expr->key()); - PrintF("]"); - } -} - - -void InstructionPrinter::VisitCall(Call* expr) { - PrintF("@%d(", expr->expression()->num()); - VisitExpressions(expr->arguments()); - PrintF(")"); -} - - -void InstructionPrinter::VisitCallNew(CallNew* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitCallRuntime(CallRuntime* expr) { - UNREACHABLE(); -} - - -void InstructionPrinter::VisitUnaryOperation(UnaryOperation* expr) { - PrintF("%s(@%d)", Token::String(expr->op()), expr->expression()->num()); -} - - -void InstructionPrinter::VisitCountOperation(CountOperation* expr) { - if (expr->is_prefix()) { - PrintF("%s@%d", Token::String(expr->op()), expr->expression()->num()); - } else { - PrintF("@%d%s", expr->expression()->num(), Token::String(expr->op())); - } -} - - -void InstructionPrinter::VisitBinaryOperation(BinaryOperation* expr) { - PrintSubexpression(expr->left()); - PrintF(" %s ", Token::String(expr->op())); - PrintSubexpression(expr->right()); -} - - -void InstructionPrinter::VisitCompareOperation(CompareOperation* expr) { - PrintSubexpression(expr->left()); - PrintF(" %s ", Token::String(expr->op())); - PrintSubexpression(expr->right()); -} - - -void InstructionPrinter::VisitThisFunction(ThisFunction* expr) { - UNREACHABLE(); -} - - -int BasicBlock::PrintAsText(int instruction_number) { - // Print a label for all blocks except the entry. - if (HasPredecessor()) { - PrintF("L%d:", number()); - } - - // Number and print the instructions. Since AST child nodes are visited - // before their parents, the parent nodes can refer to them by number. 
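The deleted PrintAsText code above numbers instructions in evaluation order, so by the time a parent node is printed its operands already have numbers and can be referenced as @n. A toy illustration of that printing scheme with hypothetical types, unrelated to the deleted AST visitor:

// Instructions are numbered sequentially; operands refer back by index.
#include <cstdio>
#include <string>
#include <vector>

struct Instr {
  std::string op;
  std::vector<int> operands;  // indices of earlier instructions
};

void PrintAsText(const std::vector<Instr>& block) {
  for (size_t i = 0; i < block.size(); ++i) {
    std::printf("%zu %s", i, block[i].op.c_str());
    for (int operand : block[i].operands) std::printf(" @%d", operand);
    std::printf("\n");
  }
}

int main() {
  // a + b * 2 becomes: 0 load a, 1 load b, 2 load 2, 3 mul @1 @2, 4 add @0 @3
  std::vector<Instr> block = {
      {"load a", {}}, {"load b", {}}, {"load 2", {}},
      {"mul", {1, 2}}, {"add", {0, 3}}};
  PrintAsText(block);
  return 0;
}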
- InstructionPrinter printer; - for (int i = 0; i < instructions_.length(); ++i) { - PrintF("\n%d ", instruction_number); - instructions_[i]->set_num(instruction_number++); - instructions_[i]->Accept(&printer); - } - - // If this is the exit, print "exit". If there is a single successor, - // print "goto" successor on a separate line. If there are two - // successors, print "goto" successor on the same line as the last - // instruction in the block. There is a blank line between blocks (and - // after the last one). - if (left_successor_ == NULL) { - PrintF("\nexit\n\n"); - } else if (right_successor_ == NULL) { - PrintF("\ngoto L%d\n\n", left_successor_->number()); - } else { - PrintF(", goto (L%d, L%d)\n\n", - left_successor_->number(), - right_successor_->number()); - } - - return instruction_number; -} - - -void FlowGraph::PrintAsText(Handle name) { - PrintF("\n==== name = \"%s\" ====\n", *name->ToCString()); - // Print nodes in reverse postorder. Note that AST node numbers are used - // during printing of instructions and thus their current values are - // destroyed. - int number = 0; - for (int i = postorder_.length() - 1; i >= 0; --i) { - number = postorder_[i]->PrintAsText(number); - } -} - -#endif // DEBUG - - -} } // namespace v8::internal diff --git a/deps/v8/src/flow-graph.h b/deps/v8/src/flow-graph.h deleted file mode 100644 index f6af8410ae..0000000000 --- a/deps/v8/src/flow-graph.h +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2010 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_FLOW_GRAPH_H_ -#define V8_FLOW_GRAPH_H_ - -#include "v8.h" - -#include "data-flow.h" -#include "zone.h" - -namespace v8 { -namespace internal { - -// The nodes of a flow graph are basic blocks. Basic blocks consist of -// instructions represented as pointers to AST nodes in the order that they -// would be visited by the code generator. A block can have arbitrarily many -// (even zero) predecessors and up to two successors. 
Blocks with multiple -// predecessors are "join nodes" and blocks with multiple successors are -// "branch nodes". A block can be both a branch and a join node. -// -// Flow graphs are in edge split form: a branch node is never the -// predecessor of a merge node. Empty basic blocks are inserted to maintain -// edge split form. -class BasicBlock: public ZoneObject { - public: - // Construct a basic block with a given predecessor. NULL indicates no - // predecessor or that the predecessor will be set later. - explicit BasicBlock(BasicBlock* predecessor) - : predecessors_(2), - instructions_(8), - left_successor_(NULL), - right_successor_(NULL), - mark_(false) { - if (predecessor != NULL) AddPredecessor(predecessor); - } - - bool HasPredecessor() { return !predecessors_.is_empty(); } - bool HasSuccessor() { return left_successor_ != NULL; } - - // Add a given basic block as a predecessor of this block. This function - // also adds this block as a successor of the given block. - void AddPredecessor(BasicBlock* predecessor) { - ASSERT(predecessor != NULL); - predecessors_.Add(predecessor); - predecessor->AddSuccessor(this); - } - - // Add an instruction to the end of this block. The block must be "open" - // by not having a successor yet. - void AddInstruction(AstNode* instruction) { - ASSERT(!HasSuccessor() && instruction != NULL); - instructions_.Add(instruction); - } - - // Perform a depth-first traversal of graph rooted at this node, - // accumulating pre- and postorder traversal orders. Visited nodes are - // marked with mark. - void BuildTraversalOrder(ZoneList* preorder, - ZoneList* postorder, - bool mark); - bool GetMark() { return mark_; } - -#ifdef DEBUG - // In debug mode, blocks are numbered in reverse postorder to help with - // printing. - int number() { return number_; } - void set_number(int n) { number_ = n; } - - // Print a basic block, given the number of the first instruction. - // Returns the next number after the number of the last instruction. - int PrintAsText(int instruction_number); -#endif - - private: - // Add a given basic block as successor to this block. This function does - // not add this block as a predecessor of the given block so as to avoid - // circularity. - void AddSuccessor(BasicBlock* successor) { - ASSERT(right_successor_ == NULL && successor != NULL); - if (HasSuccessor()) { - right_successor_ = successor; - } else { - left_successor_ = successor; - } - } - - ZoneList predecessors_; - ZoneList instructions_; - BasicBlock* left_successor_; - BasicBlock* right_successor_; - - // Support for graph traversal. Before traversal, all nodes in the graph - // have the same mark (true or false). Traversal marks already-visited - // nodes with the opposite mark. After traversal, all nodes again have - // the same mark. Traversal of the same graph is not reentrant. - bool mark_; - -#ifdef DEBUG - int number_; -#endif - - DISALLOW_COPY_AND_ASSIGN(BasicBlock); -}; - - -// A flow graph has distinguished entry and exit blocks. The entry block is -// the only one with no predecessors and the exit block is the only one with -// no successors. 
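The edge-split invariant described in the header comment above (a branch node is never the direct predecessor of a join node; empty blocks are inserted to keep it that way) can be shown with a small stand-alone sketch. The Block type below is a simplified stand-in for the class being removed, not V8 code.

#include <vector>

// Minimal basic block: any number of predecessors, at most two successors.
struct Block {
  std::vector<Block*> predecessors;
  Block* left = nullptr;    // first successor
  Block* right = nullptr;   // second successor (branch nodes only)

  bool IsBranch() const { return right != nullptr; }
  bool IsJoin() const { return predecessors.size() > 1; }

  void AddSuccessor(Block* succ) {
    if (left == nullptr) left = succ; else right = succ;
    succ->predecessors.push_back(this);
  }
};

// Re-establish edge-split form on one edge (slot is &from->left or
// &from->right): if a branch node feeds a join node directly, route the
// edge through a freshly inserted empty block.
void SplitEdge(Block* from, Block** slot, std::vector<Block*>* new_blocks) {
  Block* to = *slot;
  if (!from->IsBranch() || !to->IsJoin()) return;
  Block* empty = new Block();
  new_blocks->push_back(empty);
  for (Block*& pred : to->predecessors) {
    if (pred == from) pred = empty;       // 'to' now sees the empty block
  }
  *slot = empty;                          // 'from' now branches to it
  empty->predecessors.push_back(from);
  empty->left = to;                       // and it falls through to 'to'
}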
-class FlowGraph: public ZoneObject { - public: - FlowGraph(BasicBlock* entry, BasicBlock* exit) - : entry_(entry), exit_(exit), preorder_(8), postorder_(8) { - } - - ZoneList* preorder() { return &preorder_; } - ZoneList* postorder() { return &postorder_; } - -#ifdef DEBUG - void PrintAsText(Handle name); -#endif - - private: - BasicBlock* entry_; - BasicBlock* exit_; - ZoneList preorder_; - ZoneList postorder_; -}; - - -// The flow graph builder walks the AST adding reachable AST nodes to the -// flow graph as instructions. It remembers the entry and exit nodes of the -// graph, and keeps a pointer to the current block being constructed. -class FlowGraphBuilder: public AstVisitor { - public: - FlowGraphBuilder() {} - - FlowGraph* Build(FunctionLiteral* lit); - - private: - // AST node visit functions. -#define DECLARE_VISIT(type) virtual void Visit##type(type* node); - AST_NODE_LIST(DECLARE_VISIT) -#undef DECLARE_VISIT - - BasicBlock* entry_; - BasicBlock* exit_; - BasicBlock* current_; - - DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder); -}; - - -} } // namespace v8::internal - -#endif // V8_FLOW_GRAPH_H_ diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h index 7221851325..78bb646c78 100644 --- a/deps/v8/src/frames-inl.h +++ b/deps/v8/src/frames-inl.h @@ -64,9 +64,8 @@ inline bool StackHandler::includes(Address address) const { } -inline void StackHandler::Iterate(ObjectVisitor* v) const { - // Stack handlers do not contain any pointers that need to be - // traversed. +inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const { + StackFrame::IteratePc(v, pc_address(), holder); } @@ -81,15 +80,9 @@ inline StackHandler::State StackHandler::state() const { } -inline Address StackHandler::pc() const { +inline Address* StackHandler::pc_address() const { const int offset = StackHandlerConstants::kPCOffset; - return Memory::Address_at(address() + offset); -} - - -inline void StackHandler::set_pc(Address value) { - const int offset = StackHandlerConstants::kPCOffset; - Memory::Address_at(address() + offset) = value; + return reinterpret_cast(address() + offset); } diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index bdd5100ed8..76a441b64d 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -36,6 +36,11 @@ namespace v8 { namespace internal { +PcToCodeCache::PcToCodeCacheEntry + PcToCodeCache::cache_[PcToCodeCache::kPcToCodeCacheSize]; + +int SafeStackFrameIterator::active_count_ = 0; + // Iterator that supports traversing the stack handlers of a // particular frame. Needs to know the top of the handler chain. class StackHandlerIterator BASE_EMBEDDED { @@ -88,7 +93,6 @@ StackFrameIterator::StackFrameIterator(bool use_top, Address fp, Address sp) if (use_top || fp != NULL) { Reset(); } - JavaScriptFrame_.DisableHeapAccess(); } #undef INITIALIZE_SINGLETON @@ -201,7 +205,7 @@ bool StackTraceFrameIterator::IsValidFrame() { SafeStackFrameIterator::SafeStackFrameIterator( Address fp, Address sp, Address low_bound, Address high_bound) : - low_bound_(low_bound), high_bound_(high_bound), + maintainer_(), low_bound_(low_bound), high_bound_(high_bound), is_valid_top_( IsWithinBounds(low_bound, high_bound, Top::c_entry_fp(Top::GetCurrentThread())) && @@ -302,69 +306,42 @@ void SafeStackTraceFrameIterator::Advance() { #endif -// ------------------------------------------------------------------------- - - -void StackHandler::Cook(Code* code) { - ASSERT(code->contains(pc())); - set_pc(AddressFrom
(pc() - code->instruction_start())); -} - - -void StackHandler::Uncook(Code* code) { - set_pc(code->instruction_start() + OffsetFrom(pc())); - ASSERT(code->contains(pc())); -} - - -// ------------------------------------------------------------------------- - - bool StackFrame::HasHandler() const { StackHandlerIterator it(this, top_handler()); return !it.done(); } - -void StackFrame::CookFramesForThread(ThreadLocalTop* thread) { - ASSERT(!thread->stack_is_cooked()); - for (StackFrameIterator it(thread); !it.done(); it.Advance()) { - it.frame()->Cook(); +void StackFrame::IteratePc(ObjectVisitor* v, + Address* pc_address, + Code* holder) { + Address pc = *pc_address; + ASSERT(holder->contains(pc)); + unsigned pc_offset = static_cast(pc - holder->instruction_start()); + Object* code = holder; + v->VisitPointer(&code); + if (code != holder) { + holder = reinterpret_cast(code); + pc = holder->instruction_start() + pc_offset; + *pc_address = pc; } - thread->set_stack_is_cooked(true); } -void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) { - ASSERT(thread->stack_is_cooked()); - for (StackFrameIterator it(thread); !it.done(); it.Advance()) { - it.frame()->Uncook(); +StackFrame::Type StackFrame::ComputeType(State* state) { + ASSERT(state->fp != NULL); + if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) { + return ARGUMENTS_ADAPTOR; } - thread->set_stack_is_cooked(false); + // The marker and function offsets overlap. If the marker isn't a + // smi then the frame is a JavaScript frame -- and the marker is + // really the function. + const int offset = StandardFrameConstants::kMarkerOffset; + Object* marker = Memory::Object_at(state->fp + offset); + if (!marker->IsSmi()) return JAVA_SCRIPT; + return static_cast(Smi::cast(marker)->value()); } -void StackFrame::Cook() { - Code* code = this->code(); - ASSERT(code->IsCode()); - for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) { - it.handler()->Cook(code); - } - ASSERT(code->contains(pc())); - set_pc(AddressFrom
(pc() - code->instruction_start())); -} - - -void StackFrame::Uncook() { - Code* code = this->code(); - ASSERT(code->IsCode()); - for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) { - it.handler()->Uncook(code); - } - set_pc(code->instruction_start() + OffsetFrom(pc())); - ASSERT(code->contains(pc())); -} - StackFrame::Type StackFrame::GetCallerState(State* state) const { ComputeCallerState(state); @@ -372,8 +349,8 @@ StackFrame::Type StackFrame::GetCallerState(State* state) const { } -Code* EntryFrame::code() const { - return Heap::js_entry_code(); +Code* EntryFrame::unchecked_code() const { + return Heap::raw_unchecked_js_entry_code(); } @@ -395,8 +372,8 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const { } -Code* EntryConstructFrame::code() const { - return Heap::js_construct_entry_code(); +Code* EntryConstructFrame::unchecked_code() const { + return Heap::raw_unchecked_js_construct_entry_code(); } @@ -406,8 +383,8 @@ Object*& ExitFrame::code_slot() const { } -Code* ExitFrame::code() const { - return Code::cast(code_slot()); +Code* ExitFrame::unchecked_code() const { + return reinterpret_cast(code_slot()); } @@ -425,6 +402,14 @@ void ExitFrame::SetCallerFp(Address caller_fp) { } +void ExitFrame::Iterate(ObjectVisitor* v) const { + // The arguments are traversed as part of the expression stack of + // the calling frame. + IteratePc(v, pc_address(), code()); + v->VisitPointer(&code_slot()); +} + + Address ExitFrame::GetCallerStackPointer() const { return fp() + ExitFrameConstants::kCallerSPDisplacement; } @@ -493,22 +478,65 @@ bool JavaScriptFrame::IsConstructor() const { } -Code* JavaScriptFrame::code() const { +Code* JavaScriptFrame::unchecked_code() const { JSFunction* function = JSFunction::cast(this->function()); - return function->shared()->code(); + return function->unchecked_code(); } -Code* ArgumentsAdaptorFrame::code() const { +int JavaScriptFrame::GetProvidedParametersCount() const { + return ComputeParametersCount(); +} + + +Address JavaScriptFrame::GetCallerStackPointer() const { + int arguments; + if (Heap::gc_state() != Heap::NOT_IN_GC || + SafeStackFrameIterator::is_active()) { + // If the we are currently iterating the safe stack the + // arguments for frames are traversed as if they were + // expression stack elements of the calling frame. The reason for + // this rather strange decision is that we cannot access the + // function during mark-compact GCs when objects may have been marked. + // In fact accessing heap objects (like function->shared() below) + // at all during GC is problematic. + arguments = 0; + } else { + // Compute the number of arguments by getting the number of formal + // parameters of the function. We must remember to take the + // receiver into account (+1). + JSFunction* function = JSFunction::cast(this->function()); + arguments = function->shared()->formal_parameter_count() + 1; + } + const int offset = StandardFrameConstants::kCallerSPOffset; + return fp() + offset + (arguments * kPointerSize); +} + + +Address ArgumentsAdaptorFrame::GetCallerStackPointer() const { + const int arguments = Smi::cast(GetExpression(0))->value(); + const int offset = StandardFrameConstants::kCallerSPOffset; + return fp() + offset + (arguments + 1) * kPointerSize; +} + + +Address InternalFrame::GetCallerStackPointer() const { + // Internal frames have no arguments. The stack pointer of the + // caller is at a fixed offset from the frame pointer. 
+ return fp() + StandardFrameConstants::kCallerSPOffset; +} + + +Code* ArgumentsAdaptorFrame::unchecked_code() const { return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline); } -Code* InternalFrame::code() const { +Code* InternalFrame::unchecked_code() const { const int offset = InternalFrameConstants::kCodeOffset; Object* code = Memory::Object_at(fp() + offset); ASSERT(code != NULL); - return Code::cast(code); + return reinterpret_cast(code); } @@ -694,13 +722,14 @@ void EntryFrame::Iterate(ObjectVisitor* v) const { ASSERT(!it.done()); StackHandler* handler = it.handler(); ASSERT(handler->is_entry()); - handler->Iterate(v); - // Make sure that there's the entry frame does not contain more than - // one stack handler. + handler->Iterate(v, code()); #ifdef DEBUG + // Make sure that the entry frame does not contain more than one + // stack handler. it.Advance(); ASSERT(it.done()); #endif + IteratePc(v, pc_address(), code()); } @@ -717,7 +746,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const { v->VisitPointers(base, reinterpret_cast(address)); base = reinterpret_cast(address + StackHandlerConstants::kSize); // Traverse the pointers in the handler itself. - handler->Iterate(v); + handler->Iterate(v, code()); } v->VisitPointers(base, limit); } @@ -725,6 +754,7 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const { void JavaScriptFrame::Iterate(ObjectVisitor* v) const { IterateExpressions(v); + IteratePc(v, pc_address(), code()); // Traverse callee-saved registers, receiver, and parameters. const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset; @@ -739,6 +769,7 @@ void InternalFrame::Iterate(ObjectVisitor* v) const { // Internal frames only have object pointers on the expression stack // as they never have any arguments. IterateExpressions(v); + IteratePc(v, pc_address(), code()); } @@ -760,6 +791,56 @@ JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) { // ------------------------------------------------------------------------- +Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) { + Code* code = reinterpret_cast(object); + ASSERT(code != NULL && code->contains(pc)); + return code; +} + + +Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) { + // Check if the pc points into a large object chunk. + LargeObjectChunk* chunk = Heap::lo_space()->FindChunkContainingPc(pc); + if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc); + + // Iterate through the 8K page until we reach the end or find an + // object starting after the pc. + Page* page = Page::FromAddress(pc); + HeapObjectIterator iterator(page, Heap::GcSafeSizeOfOldObjectFunction()); + HeapObject* previous = NULL; + while (true) { + HeapObject* next = iterator.next(); + if (next == NULL || next->address() >= pc) { + return GcSafeCastToCode(previous, pc); + } + previous = next; + } +} + +PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) { + Counters::pc_to_code.Increment(); + ASSERT(IsPowerOf2(kPcToCodeCacheSize)); + uint32_t hash = ComputeIntegerHash( + static_cast(reinterpret_cast(pc))); + uint32_t index = hash & (kPcToCodeCacheSize - 1); + PcToCodeCacheEntry* entry = cache(index); + if (entry->pc == pc) { + Counters::pc_to_code_cached.Increment(); + ASSERT(entry->code == GcSafeFindCodeForPc(pc)); + } else { + // Because this code may be interrupted by a profiling signal that + // also queries the cache, we cannot update pc before the code has + // been set. 
Otherwise, we risk trying to use a cache entry before + // the code has been computed. + entry->code = GcSafeFindCodeForPc(pc); + entry->pc = pc; + } + return entry; +} + + +// ------------------------------------------------------------------------- + int NumRegs(RegList reglist) { int n = 0; while (reglist != 0) { diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h index 102244c9ba..20111904f5 100644 --- a/deps/v8/src/frames.h +++ b/deps/v8/src/frames.h @@ -46,6 +46,32 @@ class Top; class ThreadLocalTop; +class PcToCodeCache : AllStatic { + public: + struct PcToCodeCacheEntry { + Address pc; + Code* code; + }; + + static PcToCodeCacheEntry* cache(int index) { + return &cache_[index]; + } + + static Code* GcSafeFindCodeForPc(Address pc); + static Code* GcSafeCastToCode(HeapObject* object, Address pc); + + static void FlushPcToCodeCache() { + memset(&cache_[0], 0, sizeof(cache_)); + } + + static PcToCodeCacheEntry* GetCacheEntry(Address pc); + + private: + static const int kPcToCodeCacheSize = 256; + static PcToCodeCacheEntry cache_[kPcToCodeCacheSize]; +}; + + class StackHandler BASE_EMBEDDED { public: enum State { @@ -64,7 +90,7 @@ class StackHandler BASE_EMBEDDED { inline bool includes(Address address) const; // Garbage collection support. - inline void Iterate(ObjectVisitor* v) const; + inline void Iterate(ObjectVisitor* v, Code* holder) const; // Conversion support. static inline StackHandler* FromAddress(Address address); @@ -74,16 +100,11 @@ class StackHandler BASE_EMBEDDED { bool is_try_catch() { return state() == TRY_CATCH; } bool is_try_finally() { return state() == TRY_FINALLY; } - // Garbage collection support. - void Cook(Code* code); - void Uncook(Code* code); - private: // Accessors. inline State state() const; - inline Address pc() const; - inline void set_pc(Address value); + inline Address* pc_address() const; DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler); }; @@ -112,7 +133,13 @@ class StackFrame BASE_EMBEDDED { // Opaque data type for identifying stack frames. Used extensively // by the debugger. - enum Id { NO_ID = 0 }; + // ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type + // has correct value range (see Issue 830 for more details). + enum Id { + ID_MIN_VALUE = kMinInt, + ID_MAX_VALUE = kMaxInt, + NO_ID = 0 + }; // Copy constructor; it breaks the connection to host iterator. StackFrame(const StackFrame& original) { @@ -152,13 +179,20 @@ class StackFrame BASE_EMBEDDED { virtual Type type() const = 0; // Get the code associated with this frame. - virtual Code* code() const = 0; + // This method could be called during marking phase of GC. + virtual Code* unchecked_code() const = 0; - // Garbage collection support. - static void CookFramesForThread(ThreadLocalTop* thread); - static void UncookFramesForThread(ThreadLocalTop* thread); + // Get the code associated with this frame. + Code* code() const { return GetContainingCode(pc()); } + + // Get the code object that contains the given pc. + Code* GetContainingCode(Address pc) const { + return PcToCodeCache::GetCacheEntry(pc)->code; + } + + virtual void Iterate(ObjectVisitor* v) const = 0; + static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder); - virtual void Iterate(ObjectVisitor* v) const { } // Printing support. enum PrintMode { OVERVIEW, DETAILS }; @@ -200,10 +234,6 @@ class StackFrame BASE_EMBEDDED { // Get the type and the state of the calling frame. virtual Type GetCallerState(State* state) const; - // Cooking/uncooking support. 
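The pc-to-code cache introduced above is essentially a direct-mapped, power-of-two-sized table. A self-contained sketch of the pattern follows; the hash, the slow-path lookup, and the opaque Code type are simplified placeholders rather than V8's actual implementation.

#include <cstdint>

struct Code;  // opaque stand-in for v8::internal::Code

// Slow-path stand-in: the real GcSafeFindCodeForPc walks the page that
// contains 'pc' without touching object maps; here it is only a stub so
// the sketch stays self-contained.
Code* SlowFindCodeForPc(uintptr_t pc) {
  (void)pc;
  return nullptr;
}

struct CacheEntry {
  uintptr_t pc;
  Code* code;
};

const int kCacheSize = 256;           // must remain a power of two
static CacheEntry cache[kCacheSize];  // zero-initialized, like FlushPcToCodeCache

Code* CachedFindCodeForPc(uintptr_t pc) {
  uint32_t hash = static_cast<uint32_t>(pc) * 2654435761u;  // any cheap hash
  CacheEntry* entry = &cache[hash & (kCacheSize - 1)];      // direct-mapped
  if (entry->pc == pc) return entry->code;                  // hit
  // Miss: write the value before the key so that a profiling signal which
  // reads this slot concurrently never pairs the new pc with stale code.
  entry->code = SlowFindCodeForPc(pc);
  entry->pc = pc;
  return entry->code;
}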
- void Cook(); - void Uncook(); - friend class StackFrameIterator; friend class StackHandlerIterator; friend class SafeStackFrameIterator; @@ -218,7 +248,7 @@ class EntryFrame: public StackFrame { public: virtual Type type() const { return ENTRY; } - virtual Code* code() const; + virtual Code* unchecked_code() const; // Garbage collection support. virtual void Iterate(ObjectVisitor* v) const; @@ -249,7 +279,7 @@ class EntryConstructFrame: public EntryFrame { public: virtual Type type() const { return ENTRY_CONSTRUCT; } - virtual Code* code() const; + virtual Code* unchecked_code() const; static EntryConstructFrame* cast(StackFrame* frame) { ASSERT(frame->is_entry_construct()); @@ -268,10 +298,9 @@ class EntryConstructFrame: public EntryFrame { // Exit frames are used to exit JavaScript execution and go to C. class ExitFrame: public StackFrame { public: - enum Mode { MODE_NORMAL, MODE_DEBUG }; virtual Type type() const { return EXIT; } - virtual Code* code() const; + virtual Code* unchecked_code() const; Object*& code_slot() const; @@ -397,7 +426,7 @@ class JavaScriptFrame: public StandardFrame { int index) const; // Determine the code for the frame. - virtual Code* code() const; + virtual Code* unchecked_code() const; static JavaScriptFrame* cast(StackFrame* frame) { ASSERT(frame->is_java_script()); @@ -406,19 +435,11 @@ class JavaScriptFrame: public StandardFrame { protected: explicit JavaScriptFrame(StackFrameIterator* iterator) - : StandardFrame(iterator), disable_heap_access_(false) { } + : StandardFrame(iterator) { } virtual Address GetCallerStackPointer() const; - // When this mode is enabled it is not allowed to access heap objects. - // This is a special mode used when gathering stack samples in profiler. - // A shortcoming is that caller's SP value will be calculated incorrectly - // (see GetCallerStackPointer implementation), but it is not used for stack - // sampling. - void DisableHeapAccess() { disable_heap_access_ = true; } - private: - bool disable_heap_access_; inline Object* function_slot_object() const; friend class StackFrameIterator; @@ -433,7 +454,7 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame { virtual Type type() const { return ARGUMENTS_ADAPTOR; } // Determine the code for the frame. - virtual Code* code() const; + virtual Code* unchecked_code() const; static ArgumentsAdaptorFrame* cast(StackFrame* frame) { ASSERT(frame->is_arguments_adaptor()); @@ -463,7 +484,7 @@ class InternalFrame: public StandardFrame { virtual void Iterate(ObjectVisitor* v) const; // Determine the code for the frame. - virtual Code* code() const; + virtual Code* unchecked_code() const; static InternalFrame* cast(StackFrame* frame) { ASSERT(frame->is_internal()); @@ -625,6 +646,8 @@ class SafeStackFrameIterator BASE_EMBEDDED { void Advance(); void Reset(); + static bool is_active() { return active_count_ > 0; } + static bool IsWithinBounds( Address low_bound, Address high_bound, Address addr) { return low_bound <= addr && addr <= high_bound; @@ -638,6 +661,19 @@ class SafeStackFrameIterator BASE_EMBEDDED { bool IsValidFrame(StackFrame* frame) const; bool IsValidCaller(StackFrame* frame); + // This is a nasty hack to make sure the active count is incremented + // before the constructor for the embedded iterator is invoked. This + // is needed because the constructor will start looking at frames + // right away and we need to make sure it doesn't start inspecting + // heap objects. 
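The ActiveCountMaintainer declared just below is a small RAII trick that relies on member-initialization order. A generic stand-alone version of the pattern (all names here are illustrative) might look like this:

// A scoped counter: constructing it bumps a static count, destroying it
// drops the count again. Declared as the *first* data member of a class,
// it is initialized before the remaining members, so code running in their
// constructors already observes is_active() == true.
class ScopedActiveCount {
 public:
  ScopedActiveCount() { ++active_count_; }
  ~ScopedActiveCount() { --active_count_; }
  static bool is_active() { return active_count_ > 0; }

 private:
  static int active_count_;
};

int ScopedActiveCount::active_count_ = 0;

struct EmbeddedIterator {
  EmbeddedIterator() {
    // Stands in for the embedded frame iterator: by the time this runs,
    // the enclosing object's guard_ member has already been constructed.
    bool active = ScopedActiveCount::is_active();  // true
    (void)active;
  }
};

class SafeIterator {
 private:
  ScopedActiveCount guard_;    // must be declared before iterator_
  EmbeddedIterator iterator_;  // members initialize in declaration order
};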
+ class ActiveCountMaintainer BASE_EMBEDDED { + public: + ActiveCountMaintainer() { active_count_++; } + ~ActiveCountMaintainer() { active_count_--; } + }; + + ActiveCountMaintainer maintainer_; + static int active_count_; Address low_bound_; Address high_bound_; const bool is_valid_top_; diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index e97ed76072..5ffebfb53b 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -30,6 +30,7 @@ #include "codegen-inl.h" #include "compiler.h" #include "full-codegen.h" +#include "macro-assembler.h" #include "scopes.h" #include "stub-cache.h" #include "debug.h" @@ -38,407 +39,6 @@ namespace v8 { namespace internal { -#define BAILOUT(reason) \ - do { \ - if (FLAG_trace_bailout) { \ - PrintF("%s\n", reason); \ - } \ - has_supported_syntax_ = false; \ - return; \ - } while (false) - - -#define CHECK_BAILOUT \ - do { \ - if (!has_supported_syntax_) return; \ - } while (false) - - -void FullCodeGenSyntaxChecker::Check(FunctionLiteral* fun) { - Scope* scope = fun->scope(); - VisitDeclarations(scope->declarations()); - CHECK_BAILOUT; - - VisitStatements(fun->body()); -} - - -void FullCodeGenSyntaxChecker::VisitDeclarations( - ZoneList* decls) { - for (int i = 0; i < decls->length(); i++) { - Visit(decls->at(i)); - CHECK_BAILOUT; - } -} - - -void FullCodeGenSyntaxChecker::VisitStatements(ZoneList* stmts) { - for (int i = 0, len = stmts->length(); i < len; i++) { - Visit(stmts->at(i)); - CHECK_BAILOUT; - } -} - - -void FullCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) { - Property* prop = decl->proxy()->AsProperty(); - if (prop != NULL) { - Visit(prop->obj()); - Visit(prop->key()); - } - - if (decl->fun() != NULL) { - Visit(decl->fun()); - } -} - - -void FullCodeGenSyntaxChecker::VisitBlock(Block* stmt) { - VisitStatements(stmt->statements()); -} - - -void FullCodeGenSyntaxChecker::VisitExpressionStatement( - ExpressionStatement* stmt) { - Visit(stmt->expression()); -} - - -void FullCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) { - // Supported. -} - - -void FullCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) { - Visit(stmt->condition()); - CHECK_BAILOUT; - Visit(stmt->then_statement()); - CHECK_BAILOUT; - Visit(stmt->else_statement()); -} - - -void FullCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) { - // Supported. -} - - -void FullCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) { - // Supported. -} - - -void FullCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) { - Visit(stmt->expression()); -} - - -void FullCodeGenSyntaxChecker::VisitWithEnterStatement( - WithEnterStatement* stmt) { - Visit(stmt->expression()); -} - - -void FullCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) { - // Supported. 
-} - - -void FullCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) { - BAILOUT("SwitchStatement"); -} - - -void FullCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) { - Visit(stmt->cond()); - CHECK_BAILOUT; - Visit(stmt->body()); -} - - -void FullCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) { - Visit(stmt->cond()); - CHECK_BAILOUT; - Visit(stmt->body()); -} - - -void FullCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) { - if (!FLAG_always_full_compiler) BAILOUT("ForStatement"); - if (stmt->init() != NULL) { - Visit(stmt->init()); - CHECK_BAILOUT; - } - if (stmt->cond() != NULL) { - Visit(stmt->cond()); - CHECK_BAILOUT; - } - Visit(stmt->body()); - if (stmt->next() != NULL) { - CHECK_BAILOUT; - Visit(stmt->next()); - } -} - - -void FullCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) { - BAILOUT("ForInStatement"); -} - - -void FullCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) { - Visit(stmt->try_block()); - CHECK_BAILOUT; - Visit(stmt->catch_block()); -} - - -void FullCodeGenSyntaxChecker::VisitTryFinallyStatement( - TryFinallyStatement* stmt) { - Visit(stmt->try_block()); - CHECK_BAILOUT; - Visit(stmt->finally_block()); -} - - -void FullCodeGenSyntaxChecker::VisitDebuggerStatement( - DebuggerStatement* stmt) { - // Supported. -} - - -void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) { - // Supported. -} - - -void FullCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - BAILOUT("SharedFunctionInfoLiteral"); -} - - -void FullCodeGenSyntaxChecker::VisitConditional(Conditional* expr) { - Visit(expr->condition()); - CHECK_BAILOUT; - Visit(expr->then_expression()); - CHECK_BAILOUT; - Visit(expr->else_expression()); -} - - -void FullCodeGenSyntaxChecker::VisitSlot(Slot* expr) { - UNREACHABLE(); -} - - -void FullCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) { - // Supported. -} - - -void FullCodeGenSyntaxChecker::VisitLiteral(Literal* expr) { - // Supported. -} - - -void FullCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) { - // Supported. -} - - -void FullCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) { - ZoneList* properties = expr->properties(); - - for (int i = 0, len = properties->length(); i < len; i++) { - ObjectLiteral::Property* property = properties->at(i); - if (property->IsCompileTimeValue()) continue; - Visit(property->key()); - CHECK_BAILOUT; - Visit(property->value()); - CHECK_BAILOUT; - } -} - - -void FullCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) { - ZoneList* subexprs = expr->values(); - for (int i = 0, len = subexprs->length(); i < len; i++) { - Expression* subexpr = subexprs->at(i); - if (subexpr->AsLiteral() != NULL) continue; - if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue; - Visit(subexpr); - CHECK_BAILOUT; - } -} - - -void FullCodeGenSyntaxChecker::VisitCatchExtensionObject( - CatchExtensionObject* expr) { - Visit(expr->key()); - CHECK_BAILOUT; - Visit(expr->value()); -} - - -void FullCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) { - Token::Value op = expr->op(); - if (op == Token::INIT_CONST) BAILOUT("initialize constant"); - - Variable* var = expr->target()->AsVariableProxy()->AsVariable(); - Property* prop = expr->target()->AsProperty(); - ASSERT(var == NULL || prop == NULL); - if (var != NULL) { - if (var->mode() == Variable::CONST) BAILOUT("Assignment to const"); - // All other variables are supported. 
- } else if (prop != NULL) { - Visit(prop->obj()); - CHECK_BAILOUT; - Visit(prop->key()); - CHECK_BAILOUT; - } else { - // This is a throw reference error. - BAILOUT("non-variable/non-property assignment"); - } - - Visit(expr->value()); -} - - -void FullCodeGenSyntaxChecker::VisitThrow(Throw* expr) { - Visit(expr->exception()); -} - - -void FullCodeGenSyntaxChecker::VisitProperty(Property* expr) { - Visit(expr->obj()); - CHECK_BAILOUT; - Visit(expr->key()); -} - - -void FullCodeGenSyntaxChecker::VisitCall(Call* expr) { - Expression* fun = expr->expression(); - ZoneList* args = expr->arguments(); - Variable* var = fun->AsVariableProxy()->AsVariable(); - - // Check for supported calls - if (var != NULL && var->is_possibly_eval()) { - BAILOUT("call to the identifier 'eval'"); - } else if (var != NULL && !var->is_this() && var->is_global()) { - // Calls to global variables are supported. - } else if (var != NULL && var->slot() != NULL && - var->slot()->type() == Slot::LOOKUP) { - BAILOUT("call to a lookup slot"); - } else if (fun->AsProperty() != NULL) { - Property* prop = fun->AsProperty(); - Visit(prop->obj()); - CHECK_BAILOUT; - Visit(prop->key()); - CHECK_BAILOUT; - } else { - // Otherwise the call is supported if the function expression is. - Visit(fun); - } - // Check all arguments to the call. - for (int i = 0; i < args->length(); i++) { - Visit(args->at(i)); - CHECK_BAILOUT; - } -} - - -void FullCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) { - Visit(expr->expression()); - CHECK_BAILOUT; - ZoneList* args = expr->arguments(); - // Check all arguments to the call - for (int i = 0; i < args->length(); i++) { - Visit(args->at(i)); - CHECK_BAILOUT; - } -} - - -void FullCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) { - // Check for inline runtime call - if (expr->name()->Get(0) == '_' && - CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) { - BAILOUT("inlined runtime call"); - } - // Check all arguments to the call. (Relies on TEMP meaning STACK.) - for (int i = 0; i < expr->arguments()->length(); i++) { - Visit(expr->arguments()->at(i)); - CHECK_BAILOUT; - } -} - - -void FullCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) { - switch (expr->op()) { - case Token::ADD: - case Token::BIT_NOT: - case Token::NOT: - case Token::SUB: - case Token::TYPEOF: - case Token::VOID: - Visit(expr->expression()); - break; - case Token::DELETE: - BAILOUT("UnaryOperation: DELETE"); - default: - UNREACHABLE(); - } -} - - -void FullCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) { - Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); - Property* prop = expr->expression()->AsProperty(); - ASSERT(var == NULL || prop == NULL); - if (var != NULL) { - // All global variables are supported. - if (!var->is_global()) { - ASSERT(var->slot() != NULL); - Slot::Type type = var->slot()->type(); - if (type == Slot::LOOKUP) { - BAILOUT("CountOperation with lookup slot"); - } - } - } else if (prop != NULL) { - Visit(prop->obj()); - CHECK_BAILOUT; - Visit(prop->key()); - CHECK_BAILOUT; - } else { - // This is a throw reference error. 
- BAILOUT("CountOperation non-variable/non-property expression"); - } -} - - -void FullCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) { - Visit(expr->left()); - CHECK_BAILOUT; - Visit(expr->right()); -} - - -void FullCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) { - Visit(expr->left()); - CHECK_BAILOUT; - Visit(expr->right()); -} - - -void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) { - // Supported. -} - -#undef BAILOUT -#undef CHECK_BAILOUT - - void BreakableStatementChecker::Check(Statement* stmt) { Visit(stmt); } @@ -616,6 +216,12 @@ void BreakableStatementChecker::VisitThrow(Throw* expr) { } +void BreakableStatementChecker::VisitIncrementOperation( + IncrementOperation* expr) { + UNREACHABLE(); +} + + void BreakableStatementChecker::VisitProperty(Property* expr) { // Property load is breakable. is_breakable_ = true; @@ -654,6 +260,11 @@ void BreakableStatementChecker::VisitBinaryOperation(BinaryOperation* expr) { } +void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) { + Visit(expr->expression()); +} + + void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) { Visit(expr->left()); Visit(expr->right()); @@ -707,6 +318,46 @@ int FullCodeGenerator::SlotOffset(Slot* slot) { } +bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) { + // TODO(kasperl): Once the compare stub allows leaving out the + // inlined smi case, we should get rid of this check. + if (Token::IsCompareOp(op)) return true; + // TODO(kasperl): Once the unary bit not stub allows leaving out + // the inlined smi case, we should get rid of this check. + if (op == Token::BIT_NOT) return true; + // Inline smi case inside loops, but not division and modulo which + // are too complicated and take up too much space. + return (op != Token::DIV) && (op != Token::MOD) && (loop_depth_ > 0); +} + + +void FullCodeGenerator::PrepareTest(Label* materialize_true, + Label* materialize_false, + Label** if_true, + Label** if_false, + Label** fall_through) { + switch (context_) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + case Expression::kEffect: + // In an effect context, the true and the false case branch to the + // same label. 
+ *if_true = *if_false = *fall_through = materialize_true; + break; + case Expression::kValue: + *if_true = *fall_through = materialize_true; + *if_false = materialize_false; + break; + case Expression::kTest: + *if_true = true_label_; + *if_false = false_label_; + *fall_through = fall_through_; + break; + } +} + + void FullCodeGenerator::VisitDeclarations( ZoneList* declarations) { int length = declarations->length(); @@ -851,79 +502,81 @@ void FullCodeGenerator::SetSourcePosition(int pos) { void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) { Handle name = expr->name(); - if (strcmp("_IsSmi", *name->ToCString()) == 0) { - EmitIsSmi(expr->arguments()); - } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) { - EmitIsNonNegativeSmi(expr->arguments()); - } else if (strcmp("_IsObject", *name->ToCString()) == 0) { - EmitIsObject(expr->arguments()); - } else if (strcmp("_IsSpecObject", *name->ToCString()) == 0) { - EmitIsSpecObject(expr->arguments()); - } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) { - EmitIsUndetectableObject(expr->arguments()); - } else if (strcmp("_IsFunction", *name->ToCString()) == 0) { - EmitIsFunction(expr->arguments()); - } else if (strcmp("_IsArray", *name->ToCString()) == 0) { - EmitIsArray(expr->arguments()); - } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) { - EmitIsRegExp(expr->arguments()); - } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) { - EmitIsConstructCall(expr->arguments()); - } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) { - EmitObjectEquals(expr->arguments()); - } else if (strcmp("_Arguments", *name->ToCString()) == 0) { - EmitArguments(expr->arguments()); - } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) { - EmitArgumentsLength(expr->arguments()); - } else if (strcmp("_ClassOf", *name->ToCString()) == 0) { - EmitClassOf(expr->arguments()); - } else if (strcmp("_Log", *name->ToCString()) == 0) { - EmitLog(expr->arguments()); - } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) { - EmitRandomHeapNumber(expr->arguments()); - } else if (strcmp("_SubString", *name->ToCString()) == 0) { - EmitSubString(expr->arguments()); - } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) { - EmitRegExpExec(expr->arguments()); - } else if (strcmp("_ValueOf", *name->ToCString()) == 0) { - EmitValueOf(expr->arguments()); - } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) { - EmitSetValueOf(expr->arguments()); - } else if (strcmp("_NumberToString", *name->ToCString()) == 0) { - EmitNumberToString(expr->arguments()); - } else if (strcmp("_StringCharFromCode", *name->ToCString()) == 0) { - EmitStringCharFromCode(expr->arguments()); - } else if (strcmp("_StringCharCodeAt", *name->ToCString()) == 0) { - EmitStringCharCodeAt(expr->arguments()); - } else if (strcmp("_StringCharAt", *name->ToCString()) == 0) { - EmitStringCharAt(expr->arguments()); - } else if (strcmp("_StringAdd", *name->ToCString()) == 0) { - EmitStringAdd(expr->arguments()); - } else if (strcmp("_StringCompare", *name->ToCString()) == 0) { - EmitStringCompare(expr->arguments()); - } else if (strcmp("_MathPow", *name->ToCString()) == 0) { - EmitMathPow(expr->arguments()); - } else if (strcmp("_MathSin", *name->ToCString()) == 0) { - EmitMathSin(expr->arguments()); - } else if (strcmp("_MathCos", *name->ToCString()) == 0) { - EmitMathCos(expr->arguments()); - } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) { - EmitMathSqrt(expr->arguments()); - } else if 
(strcmp("_CallFunction", *name->ToCString()) == 0) { - EmitCallFunction(expr->arguments()); - } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) { - EmitRegExpConstructResult(expr->arguments()); - } else if (strcmp("_SwapElements", *name->ToCString()) == 0) { - EmitSwapElements(expr->arguments()); - } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) { - EmitGetFromCache(expr->arguments()); - } else if (strcmp("_IsRegExpEquivalent", *name->ToCString()) == 0) { - EmitIsRegExpEquivalent(expr->arguments()); - } else if (strcmp("_IsStringWrapperSafeForDefaultValueOf", - *name->ToCString()) == 0) { - EmitIsStringWrapperSafeForDefaultValueOf(expr->arguments()); - } else { - UNREACHABLE(); + SmartPointer cstring = name->ToCString(); + +#define CHECK_EMIT_INLINE_CALL(name, x, y) \ + if (strcmp("_"#name, *cstring) == 0) { \ + Emit##name(expr->arguments()); \ + return; \ + } + INLINE_RUNTIME_FUNCTION_LIST(CHECK_EMIT_INLINE_CALL) +#undef CHECK_EMIT_INLINE_CALL + UNREACHABLE(); +} + + +void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { + Comment cmnt(masm_, "[ BinaryOperation"); + Token::Value op = expr->op(); + Expression* left = expr->left(); + Expression* right = expr->right(); + + OverwriteMode mode = NO_OVERWRITE; + if (left->ResultOverwriteAllowed()) { + mode = OVERWRITE_LEFT; + } else if (right->ResultOverwriteAllowed()) { + mode = OVERWRITE_RIGHT; + } + + switch (op) { + case Token::COMMA: + VisitForEffect(left); + Visit(right); + break; + + case Token::OR: + case Token::AND: + EmitLogicalOperation(expr); + break; + + case Token::ADD: + case Token::SUB: + case Token::DIV: + case Token::MOD: + case Token::MUL: + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SHL: + case Token::SHR: + case Token::SAR: { + // Figure out if either of the operands is a constant. + ConstantOperand constant = ShouldInlineSmiCase(op) + ? GetConstantOperand(op, left, right) + : kNoConstants; + + // Load only the operands that we need to materialize. 
+ if (constant == kNoConstants) { + VisitForValue(left, kStack); + VisitForValue(right, kAccumulator); + } else if (constant == kRightConstant) { + VisitForValue(left, kAccumulator); + } else { + ASSERT(constant == kLeftConstant); + VisitForValue(right, kAccumulator); + } + + SetSourcePosition(expr->position()); + if (ShouldInlineSmiCase(op)) { + EmitInlineSmiBinaryOp(expr, op, context_, mode, left, right, constant); + } else { + EmitBinaryOp(op, context_, mode); + } + break; + } + + default: + UNREACHABLE(); } } @@ -939,25 +592,13 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) { case Expression::kUninitialized: UNREACHABLE(); case Expression::kEffect: - VisitForControl(expr->left(), &done, &eval_right); + VisitForControl(expr->left(), &done, &eval_right, &eval_right); break; case Expression::kValue: - VisitForValueControl(expr->left(), - location_, - &done, - &eval_right); + VisitLogicalForValue(expr->left(), expr->op(), location_, &done); break; case Expression::kTest: - VisitForControl(expr->left(), true_label_, &eval_right); - break; - case Expression::kValueTest: - VisitForValueControl(expr->left(), - location_, - true_label_, - &eval_right); - break; - case Expression::kTestValue: - VisitForControl(expr->left(), true_label_, &eval_right); + VisitForControl(expr->left(), true_label_, &eval_right, &eval_right); break; } } else { @@ -966,25 +607,13 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) { case Expression::kUninitialized: UNREACHABLE(); case Expression::kEffect: - VisitForControl(expr->left(), &eval_right, &done); + VisitForControl(expr->left(), &eval_right, &done, &eval_right); break; case Expression::kValue: - VisitForControlValue(expr->left(), - location_, - &eval_right, - &done); + VisitLogicalForValue(expr->left(), expr->op(), location_, &done); break; case Expression::kTest: - VisitForControl(expr->left(), &eval_right, false_label_); - break; - case Expression::kValueTest: - VisitForControl(expr->left(), &eval_right, false_label_); - break; - case Expression::kTestValue: - VisitForControlValue(expr->left(), - location_, - &eval_right, - false_label_); + VisitForControl(expr->left(), &eval_right, false_label_, &eval_right); break; } } @@ -996,6 +625,43 @@ void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) { } +void FullCodeGenerator::VisitLogicalForValue(Expression* expr, + Token::Value op, + Location where, + Label* done) { + ASSERT(op == Token::AND || op == Token::OR); + VisitForValue(expr, kAccumulator); + __ push(result_register()); + + Label discard; + switch (where) { + case kAccumulator: { + Label restore; + if (op == Token::OR) { + DoTest(&restore, &discard, &restore); + } else { + DoTest(&discard, &restore, &restore); + } + __ bind(&restore); + __ pop(result_register()); + __ jmp(done); + break; + } + case kStack: { + if (op == Token::OR) { + DoTest(done, &discard, &discard); + } else { + DoTest(&discard, done, &discard); + } + break; + } + } + + __ bind(&discard); + __ Drop(1); +} + + void FullCodeGenerator::VisitBlock(Block* stmt) { Comment cmnt(masm_, "[ Block"); Breakable nested_statement(this, stmt); @@ -1023,16 +689,19 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) { SetStatementPosition(stmt); Label then_part, else_part, done; - // Do not worry about optimizing for empty then or else bodies. 
- VisitForControl(stmt->condition(), &then_part, &else_part); - - __ bind(&then_part); - Visit(stmt->then_statement()); - __ jmp(&done); - - __ bind(&else_part); - Visit(stmt->else_statement()); + if (stmt->HasElseStatement()) { + VisitForControl(stmt->condition(), &then_part, &else_part, &then_part); + __ bind(&then_part); + Visit(stmt->then_statement()); + __ jmp(&done); + __ bind(&else_part); + Visit(stmt->else_statement()); + } else { + VisitForControl(stmt->condition(), &then_part, &done, &then_part); + __ bind(&then_part); + Visit(stmt->then_statement()); + } __ bind(&done); } @@ -1120,7 +789,7 @@ void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) { void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { Comment cmnt(masm_, "[ DoWhileStatement"); SetStatementPosition(stmt); - Label body, stack_limit_hit, stack_check_success; + Label body, stack_limit_hit, stack_check_success, done; Iteration loop_statement(this, stmt); increment_loop_depth(); @@ -1132,28 +801,31 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { __ StackLimitCheck(&stack_limit_hit); __ bind(&stack_check_success); + // Record the position of the do while condition and make sure it is + // possible to break on the condition. __ bind(loop_statement.continue_target()); - - // Record the position of the do while condition and make sure it is possible - // to break on the condition. SetExpressionPosition(stmt->cond(), stmt->condition_position()); + VisitForControl(stmt->cond(), + &body, + loop_statement.break_target(), + loop_statement.break_target()); - VisitForControl(stmt->cond(), &body, loop_statement.break_target()); + __ bind(loop_statement.break_target()); + __ jmp(&done); __ bind(&stack_limit_hit); StackCheckStub stack_stub; __ CallStub(&stack_stub); __ jmp(&stack_check_success); - __ bind(loop_statement.break_target()); - + __ bind(&done); decrement_loop_depth(); } void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) { Comment cmnt(masm_, "[ WhileStatement"); - Label body, stack_limit_hit, stack_check_success; + Label body, stack_limit_hit, stack_check_success, done; Iteration loop_statement(this, stmt); increment_loop_depth(); @@ -1163,24 +835,30 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) { __ bind(&body); Visit(stmt->body()); - __ bind(loop_statement.continue_target()); - // Emit the statement position here as this is where the while statement code - // starts. + + // Emit the statement position here as this is where the while + // statement code starts. SetStatementPosition(stmt); // Check stack before looping. __ StackLimitCheck(&stack_limit_hit); __ bind(&stack_check_success); - VisitForControl(stmt->cond(), &body, loop_statement.break_target()); + VisitForControl(stmt->cond(), + &body, + loop_statement.break_target(), + loop_statement.break_target()); + + __ bind(loop_statement.break_target()); + __ jmp(&done); __ bind(&stack_limit_hit); StackCheckStub stack_stub; __ CallStub(&stack_stub); __ jmp(&stack_check_success); - __ bind(loop_statement.break_target()); + __ bind(&done); decrement_loop_depth(); } @@ -1198,6 +876,11 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) { // Emit the test at the bottom of the loop (even if empty). 
__ jmp(&test); + __ bind(&stack_limit_hit); + StackCheckStub stack_stub; + __ CallStub(&stack_stub); + __ jmp(&stack_check_success); + __ bind(&body); Visit(stmt->body()); @@ -1209,8 +892,8 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) { } __ bind(&test); - // Emit the statement position here as this is where the for statement code - // starts. + // Emit the statement position here as this is where the for + // statement code starts. SetStatementPosition(stmt); // Check stack before looping. @@ -1218,16 +901,14 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) { __ bind(&stack_check_success); if (stmt->cond() != NULL) { - VisitForControl(stmt->cond(), &body, loop_statement.break_target()); + VisitForControl(stmt->cond(), + &body, + loop_statement.break_target(), + loop_statement.break_target()); } else { __ jmp(&body); } - __ bind(&stack_limit_hit); - StackCheckStub stack_stub; - __ CallStub(&stack_stub); - __ jmp(&stack_check_success); - __ bind(loop_statement.break_target()); decrement_loop_depth(); } @@ -1354,7 +1035,7 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) { void FullCodeGenerator::VisitConditional(Conditional* expr) { Comment cmnt(masm_, "[ Conditional"); Label true_case, false_case, done; - VisitForControl(expr->condition(), &true_case, &false_case); + VisitForControl(expr->condition(), &true_case, &false_case, &true_case); __ bind(&true_case); SetExpressionPosition(expr->then_expression(), @@ -1426,6 +1107,11 @@ void FullCodeGenerator::VisitThrow(Throw* expr) { } +void FullCodeGenerator::VisitIncrementOperation(IncrementOperation* expr) { + UNREACHABLE(); +} + + int FullCodeGenerator::TryFinally::Exit(int stack_depth) { // The macros used here must preserve the result register. __ Drop(stack_depth); @@ -1442,6 +1128,14 @@ int FullCodeGenerator::TryCatch::Exit(int stack_depth) { return 0; } + +void FullCodeGenerator::EmitRegExpCloneResult(ZoneList* args) { + ASSERT(args->length() == 1); + VisitForValue(args->at(0), kStack); + __ CallRuntime(Runtime::kRegExpCloneResult, 1); + Apply(context_, result_register()); +} + #undef __ diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index 00f4c06e29..840c825014 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -36,29 +36,6 @@ namespace v8 { namespace internal { -class FullCodeGenSyntaxChecker: public AstVisitor { - public: - FullCodeGenSyntaxChecker() : has_supported_syntax_(true) {} - - void Check(FunctionLiteral* fun); - - bool has_supported_syntax() { return has_supported_syntax_; } - - private: - void VisitDeclarations(ZoneList* decls); - void VisitStatements(ZoneList* stmts); - - // AST node visit functions. -#define DECLARE_VISIT(type) virtual void Visit##type(type* node); - AST_NODE_LIST(DECLARE_VISIT) -#undef DECLARE_VISIT - - bool has_supported_syntax_; - - DISALLOW_COPY_AND_ASSIGN(FullCodeGenSyntaxChecker); -}; - - // AST node visitor which can tell whether a given statement will be breakable // when the code is compiled by the full compiler in the debugger. 
This means // that there will be an IC (load/store/call) in the code generated for the @@ -96,7 +73,8 @@ class FullCodeGenerator: public AstVisitor { loop_depth_(0), location_(kStack), true_label_(NULL), - false_label_(NULL) { + false_label_(NULL), + fall_through_(NULL) { } static Handle MakeCode(CompilationInfo* info); @@ -259,8 +237,25 @@ class FullCodeGenerator: public AstVisitor { kStack }; + enum ConstantOperand { + kNoConstants, + kLeftConstant, + kRightConstant + }; + + // Compute the frame pointer relative offset for a given local or + // parameter slot. int SlotOffset(Slot* slot); + // Determine whether or not to inline the smi case for the given + // operation. + bool ShouldInlineSmiCase(Token::Value op); + + // Compute which (if any) of the operands is a compile-time constant. + ConstantOperand GetConstantOperand(Token::Value op, + Expression* left, + Expression* right); + // Emit code to convert a pure value (in a register, slot, as a literal, // or on top of the stack) into the result expected according to an // expression context. @@ -281,7 +276,8 @@ class FullCodeGenerator: public AstVisitor { void PrepareTest(Label* materialize_true, Label* materialize_false, Label** if_true, - Label** if_false); + Label** if_false, + Label** fall_through); // Emit code to convert pure control flow to a pair of labels into the // result expected according to an expression context. @@ -296,7 +292,14 @@ class FullCodeGenerator: public AstVisitor { // Helper function to convert a pure value into a test context. The value // is expected on the stack or the accumulator, depending on the platform. // See the platform-specific implementation for details. - void DoTest(Expression::Context context); + void DoTest(Label* if_true, Label* if_false, Label* fall_through); + + // Helper function to split control flow and avoid a branch to the + // fall-through label if it is set up. 
+ void Split(Condition cc, + Label* if_true, + Label* if_false, + Label* fall_through); void Move(Slot* dst, Register source, Register scratch1, Register scratch2); void Move(Register dst, Slot* source); @@ -323,60 +326,38 @@ class FullCodeGenerator: public AstVisitor { location_ = saved_location; } - void VisitForControl(Expression* expr, Label* if_true, Label* if_false) { + void VisitForControl(Expression* expr, + Label* if_true, + Label* if_false, + Label* fall_through) { Expression::Context saved_context = context_; Label* saved_true = true_label_; Label* saved_false = false_label_; + Label* saved_fall_through = fall_through_; context_ = Expression::kTest; true_label_ = if_true; false_label_ = if_false; + fall_through_ = fall_through; Visit(expr); context_ = saved_context; true_label_ = saved_true; false_label_ = saved_false; - } - - void VisitForValueControl(Expression* expr, - Location where, - Label* if_true, - Label* if_false) { - Expression::Context saved_context = context_; - Location saved_location = location_; - Label* saved_true = true_label_; - Label* saved_false = false_label_; - context_ = Expression::kValueTest; - location_ = where; - true_label_ = if_true; - false_label_ = if_false; - Visit(expr); - context_ = saved_context; - location_ = saved_location; - true_label_ = saved_true; - false_label_ = saved_false; - } - - void VisitForControlValue(Expression* expr, - Location where, - Label* if_true, - Label* if_false) { - Expression::Context saved_context = context_; - Location saved_location = location_; - Label* saved_true = true_label_; - Label* saved_false = false_label_; - context_ = Expression::kTestValue; - location_ = where; - true_label_ = if_true; - false_label_ = if_false; - Visit(expr); - context_ = saved_context; - location_ = saved_location; - true_label_ = saved_true; - false_label_ = saved_false; + fall_through_ = saved_fall_through; } void VisitDeclarations(ZoneList* declarations); void DeclareGlobals(Handle pairs); + // Try to perform a comparison as a fast inlined literal compare if + // the operands allow it. Returns true if the compare operations + // has been matched and all code generated; false otherwise. + bool TryLiteralCompare(Token::Value op, + Expression* left, + Expression* right, + Label* if_true, + Label* if_false, + Label* fall_through); + // Platform-specific code for a variable, constant, or function // declaration. Functions have an initial value. void EmitDeclaration(Variable* variable, @@ -391,45 +372,13 @@ class FullCodeGenerator: public AstVisitor { void EmitCallWithIC(Call* expr, Handle name, RelocInfo::Mode mode); void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode); - // Platform-specific code for inline runtime calls. 
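The Split helper declared earlier in this header is what makes the new fall_through label pay off: it avoids emitting a jump to the label that control would reach anyway. A stand-alone sketch of the branch-selection logic, with a toy "assembler" that just prints, under the assumption that this mirrors the per-platform implementations:

#include <cstdio>

struct Label { const char* name; };
enum Condition { kEqual, kNotEqual };

Condition Negate(Condition cc) { return cc == kEqual ? kNotEqual : kEqual; }

// Toy emitter: prints what would be generated.
void EmitBranch(Condition cc, Label* target) {
  std::printf("  j%s %s\n", cc == kEqual ? "e" : "ne", target->name);
}
void EmitJump(Label* target) { std::printf("  jmp %s\n", target->name); }

// Branch on cc, but never jump to the label that immediately follows.
void Split(Condition cc, Label* if_true, Label* if_false,
           Label* fall_through) {
  if (if_false == fall_through) {
    EmitBranch(cc, if_true);
  } else if (if_true == fall_through) {
    EmitBranch(Negate(cc), if_false);
  } else {
    EmitBranch(cc, if_true);
    EmitJump(if_false);
  }
}

Callers such as VisitForControl pass one of the two target labels as fall_through, so straight-line cases cost a single conditional branch instead of a branch plus an unconditional jump.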
void EmitInlineRuntimeCall(CallRuntime* expr); - void EmitIsSmi(ZoneList* arguments); - void EmitIsNonNegativeSmi(ZoneList* arguments); - void EmitIsObject(ZoneList* arguments); - void EmitIsSpecObject(ZoneList* arguments); - void EmitIsUndetectableObject(ZoneList* arguments); - void EmitIsFunction(ZoneList* arguments); - void EmitIsArray(ZoneList* arguments); - void EmitIsRegExp(ZoneList* arguments); - void EmitIsConstructCall(ZoneList* arguments); - void EmitIsStringWrapperSafeForDefaultValueOf( - ZoneList* arguments); - void EmitObjectEquals(ZoneList* arguments); - void EmitArguments(ZoneList* arguments); - void EmitArgumentsLength(ZoneList* arguments); - void EmitClassOf(ZoneList* arguments); - void EmitValueOf(ZoneList* arguments); - void EmitSetValueOf(ZoneList* arguments); - void EmitNumberToString(ZoneList* arguments); - void EmitStringCharFromCode(ZoneList* arguments); - void EmitStringCharCodeAt(ZoneList* arguments); - void EmitStringCharAt(ZoneList* arguments); - void EmitStringCompare(ZoneList* arguments); - void EmitStringAdd(ZoneList* arguments); - void EmitLog(ZoneList* arguments); - void EmitRandomHeapNumber(ZoneList* arguments); - void EmitSubString(ZoneList* arguments); - void EmitRegExpExec(ZoneList* arguments); - void EmitMathPow(ZoneList* arguments); - void EmitMathSin(ZoneList* arguments); - void EmitMathCos(ZoneList* arguments); - void EmitMathSqrt(ZoneList* arguments); - void EmitCallFunction(ZoneList* arguments); - void EmitRegExpConstructResult(ZoneList* arguments); - void EmitSwapElements(ZoneList* arguments); - void EmitGetFromCache(ZoneList* arguments); - void EmitIsRegExpEquivalent(ZoneList* arguments); + +#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \ + void Emit##name(ZoneList* arguments); + INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL) +#undef EMIT_INLINE_RUNTIME_CALL // Platform-specific code for loading variables. void EmitVariableLoad(Variable* expr, Expression::Context context); @@ -450,7 +399,50 @@ class FullCodeGenerator: public AstVisitor { // Apply the compound assignment operator. Expects the left operand on top // of the stack and the right one in the accumulator. - void EmitBinaryOp(Token::Value op, Expression::Context context); + void EmitBinaryOp(Token::Value op, + Expression::Context context, + OverwriteMode mode); + + // Helper functions for generating inlined smi code for certain + // binary operations. + void EmitInlineSmiBinaryOp(Expression* expr, + Token::Value op, + Expression::Context context, + OverwriteMode mode, + Expression* left, + Expression* right, + ConstantOperand constant); + + void EmitConstantSmiBinaryOp(Expression* expr, + Token::Value op, + Expression::Context context, + OverwriteMode mode, + bool left_is_constant_smi, + Smi* value); + + void EmitConstantSmiBitOp(Expression* expr, + Token::Value op, + Expression::Context context, + OverwriteMode mode, + Smi* value); + + void EmitConstantSmiShiftOp(Expression* expr, + Token::Value op, + Expression::Context context, + OverwriteMode mode, + Smi* value); + + void EmitConstantSmiAdd(Expression* expr, + Expression::Context context, + OverwriteMode mode, + bool left_is_constant_smi, + Smi* value); + + void EmitConstantSmiSub(Expression* expr, + Expression::Context context, + OverwriteMode mode, + bool left_is_constant_smi, + Smi* value); // Assign to the given expression as if via '='. The right-hand-side value // is expected in the accumulator. @@ -471,14 +463,6 @@ class FullCodeGenerator: public AstVisitor { // accumulator. 
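The INLINE_RUNTIME_FUNCTION_LIST trick used above — the same list macro expanded once to declare the Emit* handlers and once to generate the name-dispatch chain that replaced the long strcmp cascade — is the classic X-macro pattern. A self-contained sketch with a made-up, single-argument list (the real V8 list takes three arguments per entry):

#include <cstdio>
#include <cstring>

// One list, expanded twice: once for declarations, once for dispatch.
#define DEMO_FUNCTION_LIST(F) \
  F(IsSmi)                    \
  F(IsArray)                  \
  F(StringAdd)

#define DECLARE_HANDLER(name) void Emit##name();
DEMO_FUNCTION_LIST(DECLARE_HANDLER)
#undef DECLARE_HANDLER

void EmitIsSmi()     { std::printf("inline IsSmi\n"); }
void EmitIsArray()   { std::printf("inline IsArray\n"); }
void EmitStringAdd() { std::printf("inline StringAdd\n"); }

// Dispatch by runtime-call name ("_IsSmi", "_IsArray", ...).
void EmitInlineRuntimeCall(const char* name) {
#define CHECK_EMIT(fname)               \
  if (strcmp("_" #fname, name) == 0) {  \
    Emit##fname();                      \
    return;                             \
  }
  DEMO_FUNCTION_LIST(CHECK_EMIT)
#undef CHECK_EMIT
  std::printf("unsupported inline call: %s\n", name);
}

Keeping the list in one place means adding an inline runtime function cannot silently miss either the declaration or the dispatch arm.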
void EmitKeyedPropertyAssignment(Assignment* expr); - // Helper for compare operations. Expects the null-value in a register. - void EmitNullCompare(bool strict, - Register obj, - Register null_const, - Label* if_true, - Label* if_false, - Register scratch); - void SetFunctionPosition(FunctionLiteral* fun); void SetReturnPosition(FunctionLiteral* fun); void SetStatementPosition(Statement* stmt); @@ -523,6 +507,14 @@ class FullCodeGenerator: public AstVisitor { // Handles the shortcutted logical binary operations in VisitBinaryOperation. void EmitLogicalOperation(BinaryOperation* expr); + void VisitForTypeofValue(Expression* expr, Location where); + + void VisitLogicalForValue(Expression* expr, + Token::Value op, + Location where, + Label* done); + + MacroAssembler* masm_; CompilationInfo* info_; @@ -534,6 +526,7 @@ class FullCodeGenerator: public AstVisitor { Location location_; Label* true_label_; Label* false_label_; + Label* fall_through_; friend class NestedStatement; diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc index 2d6a86a6f7..f12d026bdb 100644 --- a/deps/v8/src/func-name-inferrer.cc +++ b/deps/v8/src/func-name-inferrer.cc @@ -44,6 +44,20 @@ void FuncNameInferrer::PushEnclosingName(Handle name) { } +void FuncNameInferrer::PushLiteralName(Handle name) { + if (IsOpen() && !Heap::prototype_symbol()->Equals(*name)) { + names_stack_.Add(name); + } +} + + +void FuncNameInferrer::PushVariableName(Handle name) { + if (IsOpen() && !Heap::result_symbol()->Equals(*name)) { + names_stack_.Add(name); + } +} + + Handle FuncNameInferrer::MakeNameFromStack() { if (names_stack_.is_empty()) { return Factory::empty_string(); diff --git a/deps/v8/src/func-name-inferrer.h b/deps/v8/src/func-name-inferrer.h index e88586a445..a35034ecb5 100644 --- a/deps/v8/src/func-name-inferrer.h +++ b/deps/v8/src/func-name-inferrer.h @@ -36,11 +36,12 @@ namespace internal { // Inference is performed in cases when an anonymous function is assigned // to a variable or a property (see test-func-name-inference.cc for examples.) // -// The basic idea is that during AST traversal LHSs of expressions are -// always visited before RHSs. Thus, during visiting the LHS, a name can be -// collected, and during visiting the RHS, a function literal can be collected. -// Inference is performed while leaving the assignment node. -class FuncNameInferrer BASE_EMBEDDED { +// The basic idea is that during parsing of LHSs of certain expressions +// (assignments, declarations, object literals) we collect name strings, +// and during parsing of the RHS, a function literal can be collected. After +// parsing the RHS we can infer a name for function literals that do not have +// a name. +class FuncNameInferrer : public ZoneObject { public: FuncNameInferrer() : entries_stack_(10), @@ -61,11 +62,9 @@ class FuncNameInferrer BASE_EMBEDDED { } // Pushes an encountered name onto names stack when in collection state. - void PushName(Handle name) { - if (IsOpen()) { - names_stack_.Add(name); - } - } + void PushLiteralName(Handle name); + + void PushVariableName(Handle name); // Adds a function to infer name for. void AddFunction(FunctionLiteral* func_to_infer) { @@ -75,11 +74,16 @@ class FuncNameInferrer BASE_EMBEDDED { } // Infers a function name and leaves names collection state. - void InferAndLeave() { + void Infer() { ASSERT(IsOpen()); if (!funcs_to_infer_.is_empty()) { InferFunctionsNames(); } + } + + // Infers a function name and leaves names collection state. 
+ void Leave() { + ASSERT(IsOpen()); names_stack_.Rewind(entries_stack_.RemoveLast()); } @@ -102,34 +106,6 @@ class FuncNameInferrer BASE_EMBEDDED { }; -// A wrapper class that automatically calls InferAndLeave when -// leaving scope. -class ScopedFuncNameInferrer BASE_EMBEDDED { - public: - explicit ScopedFuncNameInferrer(FuncNameInferrer* inferrer) - : inferrer_(inferrer), - is_entered_(false) {} - - ~ScopedFuncNameInferrer() { - if (is_entered_) { - inferrer_->InferAndLeave(); - } - } - - // Triggers the wrapped inferrer into name collection state. - void Enter() { - inferrer_->Enter(); - is_entered_ = true; - } - - private: - FuncNameInferrer* inferrer_; - bool is_entered_; - - DISALLOW_COPY_AND_ASSIGN(ScopedFuncNameInferrer); -}; - - } } // namespace v8::internal #endif // V8_FUNC_NAME_INFERRER_H_ diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index 3fe9e240b5..f168d6eb14 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -244,10 +244,12 @@ const Address kHandleZapValue = reinterpret_cast
(V8_UINT64_C(0x1baddead0baddead)); const Address kFromSpaceZapValue = reinterpret_cast<Address>
(V8_UINT64_C(0x1beefdad0beefdad)); +const uint64_t kDebugZapValue = 0xbadbaddbbadbaddb; #else const Address kZapValue = reinterpret_cast<Address>
(0xdeadbeed); const Address kHandleZapValue = reinterpret_cast<Address>
(0xbaddead); const Address kFromSpaceZapValue = reinterpret_cast<Address>
(0xbeefdad); +const uint32_t kDebugZapValue = 0xbadbaddb; #endif @@ -662,7 +664,7 @@ F FUNCTION_CAST(Address addr) { #define TRACK_MEMORY(name) #endif -// define used for helping GCC to make better inlining. Don't bother for debug +// Define used for helping GCC to make better inlining. Don't bother for debug // builds. On GCC 3.4.5 using __attribute__((always_inline)) causes compilation // errors in debug build. #if defined(__GNUC__) && !defined(DEBUG) @@ -678,6 +680,14 @@ F FUNCTION_CAST(Address addr) { #define NO_INLINE(header) header #endif + +#if defined(__GNUC__) && __GNUC__ >= 4 +#define MUST_USE_RESULT __attribute__ ((warn_unused_result)) +#else +#define MUST_USE_RESULT +#endif + + // Feature flags bit positions. They are mostly based on the CPUID spec. // (We assign CPUID itself to one of the currently reserved bits -- // feel free to change this if needed.) diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 7b76e923fa..01464016be 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -773,6 +773,7 @@ bool CompileLazy(Handle function, ClearExceptionFlag flag) { if (function->shared()->is_compiled()) { function->set_code(function->shared()->code()); + function->shared()->set_code_age(0); return true; } else { CompilationInfo info(function, 0, receiver); @@ -788,6 +789,7 @@ bool CompileLazyInLoop(Handle function, ClearExceptionFlag flag) { if (function->shared()->is_compiled()) { function->set_code(function->shared()->code()); + function->shared()->set_code_age(0); return true; } else { CompilationInfo info(function, 1, receiver); diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index 656c5546b2..0d1ad5ada9 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -28,7 +28,8 @@ #ifndef V8_HEAP_INL_H_ #define V8_HEAP_INL_H_ -#include "log.h" +#include "heap.h" +#include "objects.h" #include "v8-counters.h" namespace v8 { diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc index 7668bbc150..e47d66f984 100644 --- a/deps/v8/src/heap-profiler.cc +++ b/deps/v8/src/heap-profiler.cc @@ -280,10 +280,12 @@ void AggregatingRetainerTreePrinter::Call(const JSObjectsCluster& cluster, printer_->PrintRetainers(cluster, stream); } +} // namespace + // A helper class for building a retainers tree, that aggregates // all equivalent clusters. 
-class RetainerTreeAggregator BASE_EMBEDDED { +class RetainerTreeAggregator { public: explicit RetainerTreeAggregator(ClustersCoarser* coarser) : coarser_(coarser) {} @@ -311,8 +313,6 @@ void RetainerTreeAggregator::Call(const JSObjectsCluster& cluster, tree->ForEach(&retainers_aggregator); } -} // namespace - HeapProfiler* HeapProfiler::singleton_ = NULL; @@ -347,30 +347,46 @@ void HeapProfiler::TearDown() { #ifdef ENABLE_LOGGING_AND_PROFILING -HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name) { +HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name, int type) { ASSERT(singleton_ != NULL); - return singleton_->TakeSnapshotImpl(name); + return singleton_->TakeSnapshotImpl(name, type); } -HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) { +HeapSnapshot* HeapProfiler::TakeSnapshot(String* name, int type) { ASSERT(singleton_ != NULL); - return singleton_->TakeSnapshotImpl(name); + return singleton_->TakeSnapshotImpl(name, type); } -HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) { +HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, int type) { Heap::CollectAllGarbage(true); - HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++); - HeapSnapshotGenerator generator(result); - generator.GenerateSnapshot(); + HeapSnapshot::Type s_type = static_cast(type); + HeapSnapshot* result = + snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++); + switch (s_type) { + case HeapSnapshot::kFull: { + HeapSnapshotGenerator generator(result); + generator.GenerateSnapshot(); + break; + } + case HeapSnapshot::kAggregated: { + AggregatedHeapSnapshot agg_snapshot; + AggregatedHeapSnapshotGenerator generator(&agg_snapshot); + generator.GenerateSnapshot(); + generator.FillHeapSnapshot(result); + break; + } + default: + UNREACHABLE(); + } snapshots_->SnapshotGenerationFinished(); return result; } -HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name) { - return TakeSnapshotImpl(snapshots_->GetName(name)); +HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name, int type) { + return TakeSnapshotImpl(snapshots_->GetName(name), type); } @@ -433,16 +449,25 @@ static const char* GetConstructorName(const char* name) { } -void JSObjectsCluster::Print(StringStream* accumulator) const { - ASSERT(!is_null()); +const char* JSObjectsCluster::GetSpecialCaseName() const { if (constructor_ == FromSpecialCase(ROOTS)) { - accumulator->Add("(roots)"); + return "(roots)"; } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) { - accumulator->Add("(global property)"); + return "(global property)"; } else if (constructor_ == FromSpecialCase(CODE)) { - accumulator->Add("(code)"); + return "(code)"; } else if (constructor_ == FromSpecialCase(SELF)) { - accumulator->Add("(self)"); + return "(self)"; + } + return NULL; +} + + +void JSObjectsCluster::Print(StringStream* accumulator) const { + ASSERT(!is_null()); + const char* special_case_name = GetSpecialCaseName(); + if (special_case_name != NULL) { + accumulator->Add(special_case_name); } else { SmartPointer s_name( constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)); @@ -618,13 +643,19 @@ const JSObjectsRetainerTreeConfig::Value JSObjectsRetainerTreeConfig::kNoValue = RetainerHeapProfile::RetainerHeapProfile() - : zscope_(DELETE_ON_EXIT) { + : zscope_(DELETE_ON_EXIT), + aggregator_(NULL) { JSObjectsCluster roots(JSObjectsCluster::ROOTS); ReferencesExtractor extractor(roots, this); Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG); } +RetainerHeapProfile::~RetainerHeapProfile() { + delete 
aggregator_; +} + + void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster, HeapObject* ref) { JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref); @@ -646,18 +677,22 @@ void RetainerHeapProfile::CollectStats(HeapObject* obj) { } +void RetainerHeapProfile::CoarseAndAggregate() { + coarser_.Process(&retainers_tree_); + ASSERT(aggregator_ == NULL); + aggregator_ = new RetainerTreeAggregator(&coarser_); + aggregator_->Process(&retainers_tree_); +} + + void RetainerHeapProfile::DebugPrintStats( RetainerHeapProfile::Printer* printer) { - coarser_.Process(&retainers_tree_); // Print clusters that have no equivalents, aggregating their retainers. AggregatingRetainerTreePrinter agg_printer(&coarser_, printer); retainers_tree_.ForEach(&agg_printer); - // Now aggregate clusters that have equivalents... - RetainerTreeAggregator aggregator(&coarser_); - aggregator.Process(&retainers_tree_); - // ...and print them. + // Print clusters that have equivalents. SimpleRetainerTreePrinter s_printer(printer); - aggregator.output_tree().ForEach(&s_printer); + aggregator_->output_tree().ForEach(&s_printer); } @@ -670,16 +705,6 @@ void RetainerHeapProfile::PrintStats() { // // HeapProfiler class implementation. // -void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) { - InstanceType type = obj->map()->instance_type(); - ASSERT(0 <= type && type <= LAST_TYPE); - if (!FreeListNode::IsFreeListNode(obj)) { - info[type].increment_number(1); - info[type].increment_bytes(obj->Size()); - } -} - - static void StackWeakReferenceCallback(Persistent object, void* trace) { DeleteArray(static_cast(trace)); @@ -702,46 +727,339 @@ void HeapProfiler::WriteSample() { LOG(HeapSampleStats( "Heap", "allocated", Heap::CommittedMemory(), Heap::SizeOfObjects())); - HistogramInfo info[LAST_TYPE+1]; -#define DEF_TYPE_NAME(name) info[name].set_name(#name); - INSTANCE_TYPE_LIST(DEF_TYPE_NAME) -#undef DEF_TYPE_NAME + AggregatedHeapSnapshot snapshot; + AggregatedHeapSnapshotGenerator generator(&snapshot); + generator.GenerateSnapshot(); - ConstructorHeapProfile js_cons_profile; - RetainerHeapProfile js_retainer_profile; - HeapIterator iterator; - for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { - CollectStats(obj, info); - js_cons_profile.CollectStats(obj); - js_retainer_profile.CollectStats(obj); + HistogramInfo* info = snapshot.info(); + for (int i = FIRST_NONSTRING_TYPE; + i <= AggregatedHeapSnapshotGenerator::kAllStringsType; + ++i) { + if (info[i].bytes() > 0) { + LOG(HeapSampleItemEvent(info[i].name(), info[i].number(), + info[i].bytes())); + } } + snapshot.js_cons_profile()->PrintStats(); + snapshot.js_retainer_profile()->PrintStats(); + + GlobalHandles::IterateWeakRoots(PrintProducerStackTrace, + StackWeakReferenceCallback); + + LOG(HeapSampleEndEvent("Heap", "allocated")); +} + + +AggregatedHeapSnapshot::AggregatedHeapSnapshot() + : info_(NewArray( + AggregatedHeapSnapshotGenerator::kAllStringsType + 1)) { +#define DEF_TYPE_NAME(name) info_[name].set_name(#name); + INSTANCE_TYPE_LIST(DEF_TYPE_NAME); +#undef DEF_TYPE_NAME + info_[AggregatedHeapSnapshotGenerator::kAllStringsType].set_name( + "STRING_TYPE"); +} + + +AggregatedHeapSnapshot::~AggregatedHeapSnapshot() { + DeleteArray(info_); +} + + +AggregatedHeapSnapshotGenerator::AggregatedHeapSnapshotGenerator( + AggregatedHeapSnapshot* agg_snapshot) + : agg_snapshot_(agg_snapshot) { +} + + +void AggregatedHeapSnapshotGenerator::CalculateStringsStats() { + HistogramInfo* info = agg_snapshot_->info(); + 
HistogramInfo& strings = info[kAllStringsType]; // Lump all the string types together. - int string_number = 0; - int string_bytes = 0; #define INCREMENT_SIZE(type, size, name, camel_name) \ - string_number += info[type].number(); \ - string_bytes += info[type].bytes(); - STRING_TYPE_LIST(INCREMENT_SIZE) + strings.increment_number(info[type].number()); \ + strings.increment_bytes(info[type].bytes()); + STRING_TYPE_LIST(INCREMENT_SIZE); #undef INCREMENT_SIZE - if (string_bytes > 0) { - LOG(HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); +} + + +void AggregatedHeapSnapshotGenerator::CollectStats(HeapObject* obj) { + InstanceType type = obj->map()->instance_type(); + ASSERT(0 <= type && type <= LAST_TYPE); + if (!FreeListNode::IsFreeListNode(obj)) { + agg_snapshot_->info()[type].increment_number(1); + agg_snapshot_->info()[type].increment_bytes(obj->Size()); } +} - for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { - if (info[i].bytes() > 0) { - LOG(HeapSampleItemEvent(info[i].name(), info[i].number(), - info[i].bytes())); + +void AggregatedHeapSnapshotGenerator::GenerateSnapshot() { + HeapIterator iterator; + for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { + CollectStats(obj); + agg_snapshot_->js_cons_profile()->CollectStats(obj); + agg_snapshot_->js_retainer_profile()->CollectStats(obj); + } + CalculateStringsStats(); + agg_snapshot_->js_retainer_profile()->CoarseAndAggregate(); +} + + +class CountingConstructorHeapProfileIterator { + public: + CountingConstructorHeapProfileIterator() + : entities_count_(0), children_count_(0) { + } + + void Call(const JSObjectsCluster& cluster, + const NumberAndSizeInfo& number_and_size) { + ++entities_count_; + children_count_ += number_and_size.number(); + } + + int entities_count() { return entities_count_; } + int children_count() { return children_count_; } + + private: + int entities_count_; + int children_count_; +}; + + +static HeapEntry* AddEntryFromAggregatedSnapshot(HeapSnapshot* snapshot, + int* root_child_index, + HeapEntry::Type type, + const char* name, + int count, + int size, + int children_count, + int retainers_count) { + HeapEntry* entry = snapshot->AddEntry( + type, name, count, size, children_count, retainers_count); + ASSERT(entry != NULL); + snapshot->root()->SetUnidirElementReference(*root_child_index, + *root_child_index + 1, + entry); + *root_child_index = *root_child_index + 1; + return entry; +} + + +class AllocatingConstructorHeapProfileIterator { + public: + AllocatingConstructorHeapProfileIterator(HeapSnapshot* snapshot, + int* root_child_index) + : snapshot_(snapshot), + root_child_index_(root_child_index) { + } + + void Call(const JSObjectsCluster& cluster, + const NumberAndSizeInfo& number_and_size) { + const char* name = cluster.GetSpecialCaseName(); + if (name == NULL) { + name = snapshot_->collection()->GetFunctionName(cluster.constructor()); } + AddEntryFromAggregatedSnapshot(snapshot_, + root_child_index_, + HeapEntry::kObject, + name, + number_and_size.number(), + number_and_size.bytes(), + 0, + 0); } - js_cons_profile.PrintStats(); - js_retainer_profile.PrintStats(); + private: + HeapSnapshot* snapshot_; + int* root_child_index_; +}; - GlobalHandles::IterateWeakRoots(PrintProducerStackTrace, - StackWeakReferenceCallback); - LOG(HeapSampleEndEvent("Heap", "allocated")); +static HeapObject* ClusterAsHeapObject(const JSObjectsCluster& cluster) { + return cluster.can_be_coarsed() ? 
+ reinterpret_cast(cluster.instance()) : cluster.constructor(); +} + + +static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) { + if (object->IsString()) { + return JSObjectsCluster(String::cast(object)); + } else { + JSObject* js_obj = JSObject::cast(object); + String* constructor = JSObject::cast(js_obj)->constructor_name(); + return JSObjectsCluster(constructor, object); + } +} + + +class CountingRetainersIterator { + public: + CountingRetainersIterator(const JSObjectsCluster& child_cluster, + HeapEntriesMap* map) + : child_(ClusterAsHeapObject(child_cluster)), map_(map) { + if (map_->Map(child_) == NULL) + map_->Pair(child_, HeapEntriesMap::kHeapEntryPlaceholder); + } + + void Call(const JSObjectsCluster& cluster, + const NumberAndSizeInfo& number_and_size) { + if (map_->Map(ClusterAsHeapObject(cluster)) == NULL) + map_->Pair(ClusterAsHeapObject(cluster), + HeapEntriesMap::kHeapEntryPlaceholder); + map_->CountReference(ClusterAsHeapObject(cluster), child_); + } + + private: + HeapObject* child_; + HeapEntriesMap* map_; +}; + + +class AllocatingRetainersIterator { + public: + AllocatingRetainersIterator(const JSObjectsCluster& child_cluster, + HeapEntriesMap* map) + : child_(ClusterAsHeapObject(child_cluster)), map_(map) { + child_entry_ = map_->Map(child_); + ASSERT(child_entry_ != NULL); + } + + void Call(const JSObjectsCluster& cluster, + const NumberAndSizeInfo& number_and_size) { + int child_index, retainer_index; + map_->CountReference(ClusterAsHeapObject(cluster), child_, + &child_index, &retainer_index); + map_->Map(ClusterAsHeapObject(cluster))->SetElementReference( + child_index, number_and_size.number(), child_entry_, retainer_index); + } + + private: + HeapObject* child_; + HeapEntriesMap* map_; + HeapEntry* child_entry_; +}; + + +template +class AggregatingRetainerTreeIterator { + public: + explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser, + HeapEntriesMap* map) + : coarser_(coarser), map_(map) { + } + + void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) { + if (coarser_ != NULL && + !coarser_->GetCoarseEquivalent(cluster).is_null()) return; + JSObjectsClusterTree* tree_to_iterate = tree; + ZoneScope zs(DELETE_ON_EXIT); + JSObjectsClusterTree dest_tree_; + if (coarser_ != NULL) { + RetainersAggregator retainers_aggregator(coarser_, &dest_tree_); + tree->ForEach(&retainers_aggregator); + tree_to_iterate = &dest_tree_; + } + RetainersIterator iterator(cluster, map_); + tree_to_iterate->ForEach(&iterator); + } + + private: + ClustersCoarser* coarser_; + HeapEntriesMap* map_; +}; + + +class AggregatedRetainerTreeAllocator { + public: + AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot, + int* root_child_index) + : snapshot_(snapshot), root_child_index_(root_child_index) { + } + + HeapEntry* GetEntry( + HeapObject* obj, int children_count, int retainers_count) { + JSObjectsCluster cluster = HeapObjectAsCluster(obj); + const char* name = cluster.GetSpecialCaseName(); + if (name == NULL) { + name = snapshot_->collection()->GetFunctionName(cluster.constructor()); + } + return AddEntryFromAggregatedSnapshot( + snapshot_, root_child_index_, HeapEntry::kObject, name, + 0, 0, children_count, retainers_count); + } + + private: + HeapSnapshot* snapshot_; + int* root_child_index_; +}; + + +template +void AggregatedHeapSnapshotGenerator::IterateRetainers( + HeapEntriesMap* entries_map) { + RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile(); + AggregatingRetainerTreeIterator agg_ret_iter_1( + p->coarser(), entries_map); + 
p->retainers_tree()->ForEach(&agg_ret_iter_1); + AggregatingRetainerTreeIterator agg_ret_iter_2(NULL, entries_map); + p->aggregator()->output_tree().ForEach(&agg_ret_iter_2); +} + + +void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) { + // Count the number of entities. + int histogram_entities_count = 0; + int histogram_children_count = 0; + int histogram_retainers_count = 0; + for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) { + if (agg_snapshot_->info()[i].bytes() > 0) { + ++histogram_entities_count; + } + } + CountingConstructorHeapProfileIterator counting_cons_iter; + agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter); + histogram_entities_count += counting_cons_iter.entities_count(); + HeapEntriesMap entries_map; + IterateRetainers(&entries_map); + histogram_entities_count += entries_map.entries_count(); + histogram_children_count += entries_map.total_children_count(); + histogram_retainers_count += entries_map.total_retainers_count(); + + // Root entry references all other entries. + histogram_children_count += histogram_entities_count; + int root_children_count = histogram_entities_count; + ++histogram_entities_count; + + // Allocate and fill entries in the snapshot, allocate references. + snapshot->AllocateEntries(histogram_entities_count, + histogram_children_count, + histogram_retainers_count); + snapshot->AddEntry(HeapSnapshot::kInternalRootObject, + root_children_count, + 0); + int root_child_index = 0; + for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) { + if (agg_snapshot_->info()[i].bytes() > 0) { + AddEntryFromAggregatedSnapshot(snapshot, + &root_child_index, + HeapEntry::kInternal, + agg_snapshot_->info()[i].name(), + agg_snapshot_->info()[i].number(), + agg_snapshot_->info()[i].bytes(), + 0, + 0); + } + } + AllocatingConstructorHeapProfileIterator alloc_cons_iter( + snapshot, &root_child_index); + agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter); + AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index); + entries_map.UpdateEntries(&allocator); + + // Fill up references. + IterateRetainers(&entries_map); } diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h index dac488e9fe..2ef081ee29 100644 --- a/deps/v8/src/heap-profiler.h +++ b/deps/v8/src/heap-profiler.h @@ -56,8 +56,8 @@ class HeapProfiler { static void TearDown(); #ifdef ENABLE_LOGGING_AND_PROFILING - static HeapSnapshot* TakeSnapshot(const char* name); - static HeapSnapshot* TakeSnapshot(String* name); + static HeapSnapshot* TakeSnapshot(const char* name, int type); + static HeapSnapshot* TakeSnapshot(String* name, int type); static int GetSnapshotsCount(); static HeapSnapshot* GetSnapshot(int index); static HeapSnapshot* FindSnapshot(unsigned uid); @@ -75,12 +75,8 @@ class HeapProfiler { private: HeapProfiler(); ~HeapProfiler(); - HeapSnapshot* TakeSnapshotImpl(const char* name); - HeapSnapshot* TakeSnapshotImpl(String* name); - - // Obsolete interface. - // Update the array info with stats from obj. 
- static void CollectStats(HeapObject* obj, HistogramInfo* info); + HeapSnapshot* TakeSnapshotImpl(const char* name, int type); + HeapSnapshot* TakeSnapshotImpl(String* name, int type); HeapSnapshotsCollection* snapshots_; unsigned next_snapshot_uid_; @@ -132,7 +128,9 @@ class JSObjectsCluster BASE_EMBEDDED { bool is_null() const { return constructor_ == NULL; } bool can_be_coarsed() const { return instance_ != NULL; } String* constructor() const { return constructor_; } + Object* instance() const { return instance_; } + const char* GetSpecialCaseName() const; void Print(StringStream* accumulator) const; // Allows null clusters to be printed. void DebugPrint(StringStream* accumulator) const; @@ -179,6 +177,9 @@ class ConstructorHeapProfile BASE_EMBEDDED { virtual ~ConstructorHeapProfile() {} void CollectStats(HeapObject* obj); void PrintStats(); + + template + void ForEach(Callback* callback) { js_objects_info_tree_.ForEach(callback); } // Used by ZoneSplayTree::ForEach. Made virtual to allow overriding in tests. virtual void Call(const JSObjectsCluster& cluster, const NumberAndSizeInfo& number_and_size); @@ -282,6 +283,8 @@ class ClustersCoarser BASE_EMBEDDED { // "retainer profile" of JS objects allocated on heap. // It is run during garbage collection cycle, thus it doesn't need // to use handles. +class RetainerTreeAggregator; + class RetainerHeapProfile BASE_EMBEDDED { public: class Printer { @@ -292,7 +295,14 @@ class RetainerHeapProfile BASE_EMBEDDED { }; RetainerHeapProfile(); + ~RetainerHeapProfile(); + + RetainerTreeAggregator* aggregator() { return aggregator_; } + ClustersCoarser* coarser() { return &coarser_; } + JSObjectsRetainerTree* retainers_tree() { return &retainers_tree_; } + void CollectStats(HeapObject* obj); + void CoarseAndAggregate(); void PrintStats(); void DebugPrintStats(Printer* printer); void StoreReference(const JSObjectsCluster& cluster, HeapObject* ref); @@ -301,6 +311,44 @@ class RetainerHeapProfile BASE_EMBEDDED { ZoneScope zscope_; JSObjectsRetainerTree retainers_tree_; ClustersCoarser coarser_; + RetainerTreeAggregator* aggregator_; +}; + + +class AggregatedHeapSnapshot { + public: + AggregatedHeapSnapshot(); + ~AggregatedHeapSnapshot(); + + HistogramInfo* info() { return info_; } + ConstructorHeapProfile* js_cons_profile() { return &js_cons_profile_; } + RetainerHeapProfile* js_retainer_profile() { return &js_retainer_profile_; } + + private: + HistogramInfo* info_; + ConstructorHeapProfile js_cons_profile_; + RetainerHeapProfile js_retainer_profile_; +}; + + +class HeapEntriesMap; +class HeapSnapshot; + +class AggregatedHeapSnapshotGenerator { + public: + explicit AggregatedHeapSnapshotGenerator(AggregatedHeapSnapshot* snapshot); + void GenerateSnapshot(); + void FillHeapSnapshot(HeapSnapshot* snapshot); + + static const int kAllStringsType = LAST_TYPE + 1; + + private: + void CalculateStringsStats(); + void CollectStats(HeapObject* obj); + template + void IterateRetainers(HeapEntriesMap* entries_map); + + AggregatedHeapSnapshot* agg_snapshot_; }; diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 1d696c7a16..443c926d95 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -104,6 +104,7 @@ List Heap::gc_epilogue_callbacks_; GCCallback Heap::global_gc_prologue_callback_ = NULL; GCCallback Heap::global_gc_epilogue_callback_ = NULL; +HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL; // Variables set based on semispace_size_ and old_generation_size_ in // ConfigureHeap. 
@@ -193,6 +194,33 @@ bool Heap::HasBeenSetup() { } +int Heap::GcSafeSizeOfOldObject(HeapObject* object) { + ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects. + ASSERT(!MarkCompactCollector::are_map_pointers_encoded()); + MapWord map_word = object->map_word(); + map_word.ClearMark(); + map_word.ClearOverflow(); + return object->SizeFromMap(map_word.ToMap()); +} + + +int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) { + ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects. + ASSERT(MarkCompactCollector::are_map_pointers_encoded()); + uint32_t marker = Memory::uint32_at(object->address()); + if (marker == MarkCompactCollector::kSingleFreeEncoding) { + return kIntSize; + } else if (marker == MarkCompactCollector::kMultiFreeEncoding) { + return Memory::int_at(object->address() + kIntSize); + } else { + MapWord map_word = object->map_word(); + Address map_address = map_word.DecodeMapAddress(Heap::map_space()); + Map* map = reinterpret_cast(HeapObject::FromAddress(map_address)); + return object->SizeFromMap(map); + } +} + + GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) { // Is global GC requested? if (space != NEW_SPACE || FLAG_gc_global) { @@ -540,6 +568,13 @@ void Heap::EnsureFromSpaceIsCommitted() { // Committing memory to from space failed. // Try shrinking and try again. + PagedSpaces spaces; + for (PagedSpace* space = spaces.next(); + space != NULL; + space = spaces.next()) { + space->RelinkPageListInChunkOrder(true); + } + Shrink(); if (new_space_.CommitFromSpaceIfNeeded()) return; @@ -571,6 +606,22 @@ void Heap::ClearJSFunctionResultCaches() { } +class ClearThreadNormalizedMapCachesVisitor: public ThreadVisitor { + virtual void VisitThread(ThreadLocalTop* top) { + Context* context = top->context_; + if (context == NULL) return; + context->global()->global_context()->normalized_map_cache()->Clear(); + } +}; + + +void Heap::ClearNormalizedMapCaches() { + if (Bootstrapper::IsActive()) return; + ClearThreadNormalizedMapCachesVisitor visitor; + ThreadManager::IterateArchivedThreads(&visitor); +} + + #ifdef DEBUG enum PageWatermarkValidity { @@ -637,12 +688,6 @@ void Heap::PerformGarbageCollection(AllocationSpace space, int start_new_space_size = Heap::new_space()->Size(); if (collector == MARK_COMPACTOR) { - if (FLAG_flush_code) { - // Flush all potentially unused code. - GCTracer::Scope gc_scope(tracer, GCTracer::Scope::MC_FLUSH_CODE); - FlushCode(); - } - // Perform mark-sweep with optional compaction. 
MarkCompact(tracer); @@ -732,8 +777,6 @@ void Heap::MarkCompact(GCTracer* tracer) { MarkCompactCollector::CollectGarbage(); - MarkCompactEpilogue(is_compacting); - LOG(ResourceEvent("markcompact", "end")); gc_state_ = NOT_IN_GC; @@ -755,18 +798,11 @@ void Heap::MarkCompactPrologue(bool is_compacting) { CompilationCache::MarkCompactPrologue(); - Top::MarkCompactPrologue(is_compacting); - ThreadManager::MarkCompactPrologue(is_compacting); - CompletelyClearInstanceofCache(); if (is_compacting) FlushNumberStringCache(); -} - -void Heap::MarkCompactEpilogue(bool is_compacting) { - Top::MarkCompactEpilogue(is_compacting); - ThreadManager::MarkCompactEpilogue(is_compacting); + ClearNormalizedMapCaches(); } @@ -1100,6 +1136,10 @@ class ScavengingVisitor : public StaticVisitorBase { &ObjectEvacuationStrategy:: VisitSpecialized); + table_.Register(kVisitJSFunction, + &ObjectEvacuationStrategy:: + VisitSpecialized); + table_.RegisterSpecializations, kVisitDataObject, kVisitDataObjectGeneric>(); @@ -1415,7 +1455,7 @@ bool Heap::CreateInitialMaps() { set_meta_map(new_meta_map); new_meta_map->set_map(new_meta_map); - obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize); + obj = AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel); if (obj->IsFailure()) return false; set_fixed_array_map(Map::cast(obj)); @@ -1457,6 +1497,11 @@ bool Heap::CreateInitialMaps() { oddball_map()->set_prototype(null_value()); oddball_map()->set_constructor(null_value()); + obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel); + if (obj->IsFailure()) return false; + set_fixed_cow_array_map(Map::cast(obj)); + ASSERT(fixed_array_map() != fixed_cow_array_map()); + obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize); if (obj->IsFailure()) return false; set_heap_number_map(Map::cast(obj)); @@ -1472,17 +1517,17 @@ bool Heap::CreateInitialMaps() { roots_[entry.index] = Map::cast(obj); } - obj = AllocateMap(STRING_TYPE, SeqTwoByteString::kAlignedSize); + obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel); if (obj->IsFailure()) return false; set_undetectable_string_map(Map::cast(obj)); Map::cast(obj)->set_is_undetectable(); - obj = AllocateMap(ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize); + obj = AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel); if (obj->IsFailure()) return false; set_undetectable_ascii_string_map(Map::cast(obj)); Map::cast(obj)->set_is_undetectable(); - obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize); + obj = AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel); if (obj->IsFailure()) return false; set_byte_array_map(Map::cast(obj)); @@ -1525,7 +1570,7 @@ bool Heap::CreateInitialMaps() { if (obj->IsFailure()) return false; set_external_float_array_map(Map::cast(obj)); - obj = AllocateMap(CODE_TYPE, Code::kHeaderSize); + obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel); if (obj->IsFailure()) return false; set_code_map(Map::cast(obj)); @@ -1549,19 +1594,19 @@ bool Heap::CreateInitialMaps() { roots_[entry.index] = Map::cast(obj); } - obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); + obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel); if (obj->IsFailure()) return false; set_hash_table_map(Map::cast(obj)); - obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); + obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel); if (obj->IsFailure()) return false; set_context_map(Map::cast(obj)); - obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); + obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel); if (obj->IsFailure()) return 
false; set_catch_context_map(Map::cast(obj)); - obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); + obj = AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel); if (obj->IsFailure()) return false; set_global_context_map(Map::cast(obj)); @@ -2354,109 +2399,6 @@ Object* Heap::AllocateExternalArray(int length, } -// The StackVisitor is used to traverse all the archived threads to see if -// there are activations on any of the stacks corresponding to the code. -class FlushingStackVisitor : public ThreadVisitor { - public: - explicit FlushingStackVisitor(Code* code) : found_(false), code_(code) {} - - void VisitThread(ThreadLocalTop* top) { - // If we already found the code in a previous traversed thread we return. - if (found_) return; - - for (StackFrameIterator it(top); !it.done(); it.Advance()) { - if (code_->contains(it.frame()->pc())) { - found_ = true; - return; - } - } - } - bool FoundCode() {return found_;} - - private: - bool found_; - Code* code_; -}; - - -static bool CodeIsActive(Code* code) { - // Make sure we are not referencing the code from the stack. - for (StackFrameIterator it; !it.done(); it.Advance()) { - if (code->contains(it.frame()->pc())) return true; - } - // Iterate the archived stacks in all threads to check if - // the code is referenced. - FlushingStackVisitor threadvisitor(code); - ThreadManager::IterateArchivedThreads(&threadvisitor); - if (threadvisitor.FoundCode()) return true; - return false; -} - - -static void FlushCodeForFunction(JSFunction* function) { - SharedFunctionInfo* shared_info = function->shared(); - - // Special handling if the function and shared info objects - // have different code objects. - if (function->code() != shared_info->code()) { - // If the shared function has been flushed but the function has not, - // we flush the function if possible. - if (!shared_info->is_compiled() && function->is_compiled() && - !CodeIsActive(function->code())) { - function->set_code(shared_info->code()); - } - return; - } - - // The function must be compiled and have the source code available, - // to be able to recompile it in case we need the function again. - if (!(shared_info->is_compiled() && shared_info->HasSourceCode())) return; - - // We never flush code for Api functions. - if (shared_info->IsApiFunction()) return; - - // Only flush code for functions. - if (!shared_info->code()->kind() == Code::FUNCTION) return; - - // Function must be lazy compilable. - if (!shared_info->allows_lazy_compilation()) return; - - // If this is a full script wrapped in a function we do no flush the code. - if (shared_info->is_toplevel()) return; - - // If this function is in the compilation cache we do not flush the code. - if (CompilationCache::HasFunction(shared_info)) return; - - // Check stack and archived threads for the code. - if (CodeIsActive(shared_info->code())) return; - - // Compute the lazy compilable version of the code. - Code* code = Builtins::builtin(Builtins::LazyCompile); - shared_info->set_code(code); - function->set_code(code); -} - - -void Heap::FlushCode() { -#ifdef ENABLE_DEBUGGER_SUPPORT - // Do not flush code if the debugger is loaded or there are breakpoints. - if (Debug::IsLoaded() || Debug::has_break_points()) return; -#endif - HeapObjectIterator it(old_pointer_space()); - for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { - if (obj->IsJSFunction()) { - JSFunction* function = JSFunction::cast(obj); - - // The function must have a valid context and not be a builtin. 
- if (function->unchecked_context()->IsContext() && - !function->IsBuiltin()) { - FlushCodeForFunction(function); - } - } - } -} - - Object* Heap::CreateCode(const CodeDesc& desc, Code::Flags flags, Handle self_reference) { @@ -2910,7 +2852,9 @@ Object* Heap::CopyJSObject(JSObject* source) { FixedArray* properties = FixedArray::cast(source->properties()); // Update elements if necessary. if (elements->length() > 0) { - Object* elem = CopyFixedArray(elements); + Object* elem = + (elements->map() == fixed_cow_array_map()) ? + elements : CopyFixedArray(elements); if (elem->IsFailure()) return elem; JSObject::cast(clone)->set_elements(FixedArray::cast(elem)); } @@ -4057,8 +4001,8 @@ bool Heap::ConfigureHeapDefault() { void Heap::RecordStats(HeapStats* stats, bool take_snapshot) { - *stats->start_marker = 0xDECADE00; - *stats->end_marker = 0xDECADE01; + *stats->start_marker = HeapStats::kStartMarker; + *stats->end_marker = HeapStats::kEndMarker; *stats->new_space_size = new_space_.Size(); *stats->new_space_capacity = new_space_.Capacity(); *stats->old_pointer_space_size = old_pointer_space_->Size(); @@ -4129,6 +4073,8 @@ bool Heap::Setup(bool create_heap_objects) { NewSpaceScavenger::Initialize(); MarkCompactCollector::Initialize(); + MarkMapPointersAsEncoded(false); + // Setup memory allocator and reserve a chunk of memory for new // space. The chunk is double the size of the requested reserved // new space size to ensure that we can find a pair of semispaces that @@ -4815,7 +4761,6 @@ GCTracer::~GCTracer() { PrintF("sweep=%d ", static_cast(scopes_[Scope::MC_SWEEP])); PrintF("sweepns=%d ", static_cast(scopes_[Scope::MC_SWEEP_NEWSPACE])); PrintF("compact=%d ", static_cast(scopes_[Scope::MC_COMPACT])); - PrintF("flushcode=%d ", static_cast(scopes_[Scope::MC_FLUSH_CODE])); PrintF("total_size_before=%d ", start_size_); PrintF("total_size_after=%d ", Heap::SizeOfObjects()); diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 93b90b1846..484cd22bdf 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -30,6 +30,7 @@ #include +#include "spaces.h" #include "splay-tree-inl.h" #include "v8-counters.h" @@ -55,6 +56,7 @@ namespace internal { V(Map, heap_number_map, HeapNumberMap) \ V(Map, global_context_map, GlobalContextMap) \ V(Map, fixed_array_map, FixedArrayMap) \ + V(Map, fixed_cow_array_map, FixedCOWArrayMap) \ V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ V(Map, meta_map, MetaMap) \ V(Object, termination_exception, TerminationException) \ @@ -312,61 +314,64 @@ class Heap : public AllStatic { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateJSObject(JSFunction* constructor, - PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT static Object* AllocateJSObject( + JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED); // Allocates and initializes a new global object based on a constructor. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateGlobalObject(JSFunction* constructor); + MUST_USE_RESULT static Object* AllocateGlobalObject(JSFunction* constructor); // Returns a deep copy of the JavaScript object. // Properties and elements are copied too. // Returns failure if allocation failed. 
- static Object* CopyJSObject(JSObject* source); + MUST_USE_RESULT static Object* CopyJSObject(JSObject* source); // Allocates the function prototype. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateFunctionPrototype(JSFunction* function); + MUST_USE_RESULT static Object* AllocateFunctionPrototype( + JSFunction* function); // Reinitialize an JSGlobalProxy based on a constructor. The object // must have the same size as objects allocated using the // constructor. The object is reinitialized and behaves as an // object that has been freshly allocated using the constructor. - static Object* ReinitializeJSGlobalProxy(JSFunction* constructor, - JSGlobalProxy* global); + MUST_USE_RESULT static Object* ReinitializeJSGlobalProxy( + JSFunction* constructor, + JSGlobalProxy* global); // Allocates and initializes a new JavaScript object based on a map. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateJSObjectFromMap(Map* map, - PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT static Object* AllocateJSObjectFromMap( + Map* map, PretenureFlag pretenure = NOT_TENURED); // Allocates a heap object based on the map. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this function does not perform a garbage collection. - static Object* Allocate(Map* map, AllocationSpace space); + MUST_USE_RESULT static Object* Allocate(Map* map, AllocationSpace space); // Allocates a JS Map in the heap. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this function does not perform a garbage collection. - static Object* AllocateMap(InstanceType instance_type, int instance_size); + MUST_USE_RESULT static Object* AllocateMap(InstanceType instance_type, + int instance_size); // Allocates a partial map for bootstrapping. - static Object* AllocatePartialMap(InstanceType instance_type, - int instance_size); + MUST_USE_RESULT static Object* AllocatePartialMap(InstanceType instance_type, + int instance_size); // Allocate a map for the specified function - static Object* AllocateInitialMap(JSFunction* fun); + MUST_USE_RESULT static Object* AllocateInitialMap(JSFunction* fun); // Allocates an empty code cache. - static Object* AllocateCodeCache(); + MUST_USE_RESULT static Object* AllocateCodeCache(); // Clear the Instanceof cache (used when a prototype changes). static void ClearInstanceofCache() { @@ -391,13 +396,13 @@ class Heap : public AllStatic { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateStringFromAscii( + MUST_USE_RESULT static Object* AllocateStringFromAscii( Vector str, PretenureFlag pretenure = NOT_TENURED); - static Object* AllocateStringFromUtf8( + MUST_USE_RESULT static Object* AllocateStringFromUtf8( Vector str, PretenureFlag pretenure = NOT_TENURED); - static Object* AllocateStringFromTwoByte( + MUST_USE_RESULT static Object* AllocateStringFromTwoByte( Vector str, PretenureFlag pretenure = NOT_TENURED); @@ -405,16 +410,15 @@ class Heap : public AllStatic { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this function does not perform a garbage collection. 
- static inline Object* AllocateSymbol(Vector str, - int chars, - uint32_t hash_field); + MUST_USE_RESULT static inline Object* AllocateSymbol(Vector str, + int chars, + uint32_t hash_field); - static Object* AllocateInternalSymbol(unibrow::CharacterStream* buffer, - int chars, - uint32_t hash_field); + MUST_USE_RESULT static Object* AllocateInternalSymbol( + unibrow::CharacterStream* buffer, int chars, uint32_t hash_field); - static Object* AllocateExternalSymbol(Vector str, - int chars); + MUST_USE_RESULT static Object* AllocateExternalSymbol(Vector str, + int chars); // Allocates and partially initializes a String. There are two String @@ -424,10 +428,10 @@ class Heap : public AllStatic { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateRawAsciiString( + MUST_USE_RESULT static Object* AllocateRawAsciiString( int length, PretenureFlag pretenure = NOT_TENURED); - static Object* AllocateRawTwoByteString( + MUST_USE_RESULT static Object* AllocateRawTwoByteString( int length, PretenureFlag pretenure = NOT_TENURED); @@ -435,97 +439,103 @@ class Heap : public AllStatic { // A cache is used for ascii codes. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. Please note this does not perform a garbage collection. - static Object* LookupSingleCharacterStringFromCode(uint16_t code); + MUST_USE_RESULT static Object* LookupSingleCharacterStringFromCode( + uint16_t code); // Allocate a byte array of the specified length // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateByteArray(int length, PretenureFlag pretenure); + MUST_USE_RESULT static Object* AllocateByteArray(int length, + PretenureFlag pretenure); // Allocate a non-tenured byte array of the specified length // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateByteArray(int length); + MUST_USE_RESULT static Object* AllocateByteArray(int length); // Allocate a pixel array of the specified length // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocatePixelArray(int length, - uint8_t* external_pointer, - PretenureFlag pretenure); + MUST_USE_RESULT static Object* AllocatePixelArray(int length, + uint8_t* external_pointer, + PretenureFlag pretenure); // Allocates an external array of the specified length and type. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateExternalArray(int length, - ExternalArrayType array_type, - void* external_pointer, - PretenureFlag pretenure); + MUST_USE_RESULT static Object* AllocateExternalArray( + int length, + ExternalArrayType array_type, + void* external_pointer, + PretenureFlag pretenure); // Allocate a tenured JS global property cell. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. 
- static Object* AllocateJSGlobalPropertyCell(Object* value); + MUST_USE_RESULT static Object* AllocateJSGlobalPropertyCell(Object* value); // Allocates a fixed array initialized with undefined values // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateFixedArray(int length, PretenureFlag pretenure); + MUST_USE_RESULT static Object* AllocateFixedArray(int length, + PretenureFlag pretenure); // Allocates a fixed array initialized with undefined values - static Object* AllocateFixedArray(int length); + MUST_USE_RESULT static Object* AllocateFixedArray(int length); // Allocates an uninitialized fixed array. It must be filled by the caller. // // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateUninitializedFixedArray(int length); + MUST_USE_RESULT static Object* AllocateUninitializedFixedArray(int length); // Make a copy of src and return it. Returns // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. - static Object* CopyFixedArray(FixedArray* src); + MUST_USE_RESULT static Object* CopyFixedArray(FixedArray* src); // Allocates a fixed array initialized with the hole values. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateFixedArrayWithHoles( + MUST_USE_RESULT static Object* AllocateFixedArrayWithHoles( int length, PretenureFlag pretenure = NOT_TENURED); // AllocateHashTable is identical to AllocateFixedArray except // that the resulting object has hash_table_map as map. - static Object* AllocateHashTable(int length, - PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT static Object* AllocateHashTable( + int length, PretenureFlag pretenure = NOT_TENURED); // Allocate a global (but otherwise uninitialized) context. - static Object* AllocateGlobalContext(); + MUST_USE_RESULT static Object* AllocateGlobalContext(); // Allocate a function context. - static Object* AllocateFunctionContext(int length, JSFunction* closure); + MUST_USE_RESULT static Object* AllocateFunctionContext(int length, + JSFunction* closure); // Allocate a 'with' context. - static Object* AllocateWithContext(Context* previous, - JSObject* extension, - bool is_catch_context); + MUST_USE_RESULT static Object* AllocateWithContext(Context* previous, + JSObject* extension, + bool is_catch_context); // Allocates a new utility object in the old generation. - static Object* AllocateStruct(InstanceType type); + MUST_USE_RESULT static Object* AllocateStruct(InstanceType type); // Allocates a function initialized with a shared part. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateFunction(Map* function_map, - SharedFunctionInfo* shared, - Object* prototype, - PretenureFlag pretenure = TENURED); + MUST_USE_RESULT static Object* AllocateFunction( + Map* function_map, + SharedFunctionInfo* shared, + Object* prototype, + PretenureFlag pretenure = TENURED); // Indicies for direct access into argument objects. static const int kArgumentsObjectSize = @@ -537,47 +547,52 @@ class Heap : public AllStatic { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. 
// Please note this does not perform a garbage collection. - static Object* AllocateArgumentsObject(Object* callee, int length); + MUST_USE_RESULT static Object* AllocateArgumentsObject(Object* callee, + int length); // Same as NewNumberFromDouble, but may return a preallocated/immutable // number object (e.g., minus_zero_value_, nan_value_) - static Object* NumberFromDouble(double value, - PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT static Object* NumberFromDouble( + double value, PretenureFlag pretenure = NOT_TENURED); // Allocated a HeapNumber from value. - static Object* AllocateHeapNumber(double value, PretenureFlag pretenure); - static Object* AllocateHeapNumber(double value); // pretenure = NOT_TENURED + MUST_USE_RESULT static Object* AllocateHeapNumber(double value, + PretenureFlag pretenure); + // pretenure = NOT_TENURED. + MUST_USE_RESULT static Object* AllocateHeapNumber(double value); // Converts an int into either a Smi or a HeapNumber object. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static inline Object* NumberFromInt32(int32_t value); + MUST_USE_RESULT static inline Object* NumberFromInt32(int32_t value); // Converts an int into either a Smi or a HeapNumber object. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static inline Object* NumberFromUint32(uint32_t value); + MUST_USE_RESULT static inline Object* NumberFromUint32(uint32_t value); // Allocates a new proxy object. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateProxy(Address proxy, - PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT static Object* AllocateProxy( + Address proxy, + PretenureFlag pretenure = NOT_TENURED); // Allocates a new SharedFunctionInfo object. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateSharedFunctionInfo(Object* name); + MUST_USE_RESULT static Object* AllocateSharedFunctionInfo(Object* name); // Allocates a new cons string object. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateConsString(String* first, String* second); + MUST_USE_RESULT static Object* AllocateConsString(String* first, + String* second); // Allocates a new sub string object which is a substring of an underlying // string buffer stretching from the index start (inclusive) to the index @@ -585,19 +600,20 @@ class Heap : public AllStatic { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - static Object* AllocateSubString(String* buffer, - int start, - int end, - PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT static Object* AllocateSubString( + String* buffer, + int start, + int end, + PretenureFlag pretenure = NOT_TENURED); // Allocate a new external string object, which is backed by a string // resource that resides outside the V8 heap. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. 
- static Object* AllocateExternalStringFromAscii( + MUST_USE_RESULT static Object* AllocateExternalStringFromAscii( ExternalAsciiString::Resource* resource); - static Object* AllocateExternalStringFromTwoByte( + MUST_USE_RESULT static Object* AllocateExternalStringFromTwoByte( ExternalTwoByteString::Resource* resource); // Finalizes an external string by deleting the associated external @@ -609,9 +625,10 @@ class Heap : public AllStatic { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this function does not perform a garbage collection. - static inline Object* AllocateRaw(int size_in_bytes, - AllocationSpace space, - AllocationSpace retry_space); + MUST_USE_RESULT static inline Object* AllocateRaw( + int size_in_bytes, + AllocationSpace space, + AllocationSpace retry_space); // Initialize a filler object to keep the ability to iterate over the heap // when shortening objects. @@ -623,26 +640,26 @@ class Heap : public AllStatic { // self_reference. This allows generated code to reference its own Code // object by containing this pointer. // Please note this function does not perform a garbage collection. - static Object* CreateCode(const CodeDesc& desc, - Code::Flags flags, - Handle self_reference); + MUST_USE_RESULT static Object* CreateCode(const CodeDesc& desc, + Code::Flags flags, + Handle self_reference); - static Object* CopyCode(Code* code); + MUST_USE_RESULT static Object* CopyCode(Code* code); // Copy the code and scope info part of the code object, but insert // the provided data as the relocation information. - static Object* CopyCode(Code* code, Vector reloc_info); + MUST_USE_RESULT static Object* CopyCode(Code* code, Vector reloc_info); // Finds the symbol for string in the symbol table. // If not found, a new symbol is added to the table and returned. // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation // failed. // Please note this function does not perform a garbage collection. - static Object* LookupSymbol(Vector str); - static Object* LookupAsciiSymbol(const char* str) { + MUST_USE_RESULT static Object* LookupSymbol(Vector str); + MUST_USE_RESULT static Object* LookupAsciiSymbol(const char* str) { return LookupSymbol(CStrVector(str)); } - static Object* LookupSymbol(String* str); + MUST_USE_RESULT static Object* LookupSymbol(String* str); static bool LookupSymbolIfExists(String* str, String** symbol); static bool LookupTwoCharsSymbolIfExists(String* str, String** symbol); @@ -657,7 +674,7 @@ class Heap : public AllStatic { // string might stay non-flat even when not a failure is returned. // // Please note this function does not perform a garbage collection. - static inline Object* PrepareForCompare(String* str); + MUST_USE_RESULT static inline Object* PrepareForCompare(String* str); // Converts the given boolean condition to JavaScript boolean value. static Object* ToBoolean(bool condition) { @@ -817,6 +834,13 @@ class Heap : public AllStatic { roots_[kCodeStubsRootIndex] = value; } + // Support for computing object sizes for old objects during GCs. Returns + // a function that is guaranteed to be safe for computing object sizes in + // the current GC phase. + static HeapObjectCallback GcSafeSizeOfOldObjectFunction() { + return gc_safe_size_of_old_object_; + } + // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). 
static void public_set_non_monomorphic_cache(NumberDictionary* value) { roots_[kNonMonomorphicCacheRootIndex] = value; @@ -856,8 +880,10 @@ class Heap : public AllStatic { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this function does not perform a garbage collection. - static Object* CreateSymbol(const char* str, int length, int hash); - static Object* CreateSymbol(String* str); + MUST_USE_RESULT static Object* CreateSymbol(const char* str, + int length, + int hash); + MUST_USE_RESULT static Object* CreateSymbol(String* str); // Write barrier support for address[offset] = o. static inline void RecordWrite(Address address, int offset); @@ -929,9 +955,9 @@ class Heap : public AllStatic { static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes); // Allocate uninitialized fixed array. - static Object* AllocateRawFixedArray(int length); - static Object* AllocateRawFixedArray(int length, - PretenureFlag pretenure); + MUST_USE_RESULT static Object* AllocateRawFixedArray(int length); + MUST_USE_RESULT static Object* AllocateRawFixedArray(int length, + PretenureFlag pretenure); // True if we have reached the allocation limit in the old generation that // should force the next GC (caused normally) to be a full one. @@ -974,8 +1000,9 @@ class Heap : public AllStatic { kRootListLength }; - static Object* NumberToString(Object* number, - bool check_number_string_cache = true); + MUST_USE_RESULT static Object* NumberToString( + Object* number, + bool check_number_string_cache = true); static Map* MapForExternalArrayType(ExternalArrayType array_type); static RootListIndex RootIndexForExternalArrayType( @@ -1020,6 +1047,8 @@ class Heap : public AllStatic { static void ClearJSFunctionResultCaches(); + static void ClearNormalizedMapCaches(); + static GCTracer* tracer() { return tracer_; } private: @@ -1168,6 +1197,18 @@ class Heap : public AllStatic { static GCCallback global_gc_prologue_callback_; static GCCallback global_gc_epilogue_callback_; + // Support for computing object sizes during GC. + static HeapObjectCallback gc_safe_size_of_old_object_; + static int GcSafeSizeOfOldObject(HeapObject* object); + static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object); + + // Update the GC state. Called from the mark-compact collector. + static void MarkMapPointersAsEncoded(bool encoded) { + gc_safe_size_of_old_object_ = encoded + ? &GcSafeSizeOfOldObjectWithEncodedMap + : &GcSafeSizeOfOldObject; + } + // Checks whether a global GC is necessary static GarbageCollector SelectGarbageCollector(AllocationSpace space); @@ -1180,10 +1221,10 @@ class Heap : public AllStatic { // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't // have to test the allocation space argument and (b) can reduce code size // (since both AllocateRaw and AllocateRawMap are inlined). - static inline Object* AllocateRawMap(); + MUST_USE_RESULT static inline Object* AllocateRawMap(); // Allocate an uninitialized object in the global property cell space. - static inline Object* AllocateRawCell(); + MUST_USE_RESULT static inline Object* AllocateRawCell(); // Initializes a JSObject based on its map. static void InitializeJSObjectFromMap(JSObject* obj, @@ -1221,7 +1262,6 @@ class Heap : public AllStatic { // Code to be run before and after mark-compact. 
static void MarkCompactPrologue(bool is_compacting); - static void MarkCompactEpilogue(bool is_compacting); // Completely clear the Instanceof cache (to stop it keeping objects alive // around a GC). @@ -1245,9 +1285,10 @@ class Heap : public AllStatic { // other parts of the VM could use it. Specifically, a function that creates // instances of type JS_FUNCTION_TYPE benefit from the use of this function. // Please note this does not perform a garbage collection. - static inline Object* InitializeFunction(JSFunction* function, - SharedFunctionInfo* shared, - Object* prototype); + MUST_USE_RESULT static inline Object* InitializeFunction( + JSFunction* function, + SharedFunctionInfo* shared, + Object* prototype); static GCTracer* tracer_; @@ -1257,10 +1298,6 @@ class Heap : public AllStatic { // Flush the number to string cache. static void FlushNumberStringCache(); - // Flush code from functions we do not expect to use again. The code will - // be replaced with a lazy compilable version. - static void FlushCode(); - static void UpdateSurvivalRateTrend(int start_new_space_size); enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING }; @@ -1317,11 +1354,15 @@ class Heap : public AllStatic { friend class DisallowAllocationFailure; friend class AlwaysAllocateScope; friend class LinearAllocationScope; + friend class MarkCompactCollector; }; class HeapStats { public: + static const int kStartMarker = 0xDECADE00; + static const int kEndMarker = 0xDECADE01; + int* start_marker; // 0 int* new_space_size; // 1 int* new_space_capacity; // 2 @@ -1861,7 +1902,7 @@ class TranscendentalCache { // Returns a heap number with f(input), where f is a math function specified // by the 'type' argument. - static inline Object* Get(Type type, double input) { + MUST_USE_RESULT static inline Object* Get(Type type, double input) { TranscendentalCache* cache = caches_[type]; if (cache == NULL) { caches_[type] = cache = new TranscendentalCache(type); @@ -1874,7 +1915,7 @@ class TranscendentalCache { static void Clear(); private: - inline Object* Get(double input) { + MUST_USE_RESULT inline Object* Get(double input) { Converter c; c.dbl = input; int hash = Hash(c); diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 35a90a4aca..a095ef7bf4 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -29,6 +29,7 @@ #if defined(V8_TARGET_ARCH_IA32) +#include "code-stubs.h" #include "codegen-inl.h" namespace v8 { @@ -95,10 +96,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { // edi: called object // eax: number of arguments __ bind(&non_function_call); - // CALL_NON_FUNCTION expects the non-function constructor as receiver - // (instead of the original receiver from the call site). The receiver is - // stack element argc+1. - __ mov(Operand(esp, eax, times_4, kPointerSize), edi); // Set expected number of arguments to zero (not changing eax). 
__ Set(ebx, Immediate(0)); __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); @@ -567,9 +564,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); + __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset)); __ SmiUntag(ebx); - __ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset)); - __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); __ cmp(eax, Operand(ebx)); __ j(not_equal, Handle(builtin(ArgumentsAdaptorTrampoline))); @@ -700,17 +696,6 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { } -// Load the built-in Array function from the current context. -static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { - // Load the global context. - __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ mov(result, FieldOperand(result, GlobalObject::kGlobalContextOffset)); - // Load the Array function from the global context. - __ mov(result, - Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); -} - - // Number of empty elements to allocate for an empty array. static const int kPreallocatedArrayElements = 4; @@ -1100,7 +1085,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { Label generic_array_code; // Get the Array function. - GenerateLoadArrayFunction(masm, edi); + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi); if (FLAG_debug_code) { // Initial map for the builtin Array function shoud be a map. @@ -1136,7 +1121,7 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { if (FLAG_debug_code) { // The array construct code is only set for the builtin Array function which // does always have a map. - GenerateLoadArrayFunction(masm, ebx); + __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ebx); __ cmp(edi, Operand(ebx)); __ Assert(equal, "Unexpected Array function"); // Initial map for the builtin Array function should be a map. @@ -1160,6 +1145,131 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { } +void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : number of arguments + // -- edi : constructor function + // -- esp[0] : return address + // -- esp[(argc - n) * 4] : arg[n] (zero-based) + // -- esp[(argc + 1) * 4] : receiver + // ----------------------------------- + __ IncrementCounter(&Counters::string_ctor_calls, 1); + + if (FLAG_debug_code) { + __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx); + __ cmp(edi, Operand(ecx)); + __ Assert(equal, "Unexpected String function"); + } + + // Load the first argument into eax and get rid of the rest + // (including the receiver). + Label no_arguments; + __ test(eax, Operand(eax)); + __ j(zero, &no_arguments); + __ mov(ebx, Operand(esp, eax, times_pointer_size, 0)); + __ pop(ecx); + __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize)); + __ push(ecx); + __ mov(eax, ebx); + + // Lookup the argument in the number to string cache. + Label not_cached, argument_is_string; + NumberToStringStub::GenerateLookupNumberStringCache( + masm, + eax, // Input. + ebx, // Result. + ecx, // Scratch 1. + edx, // Scratch 2. + false, // Input is known to be smi? 
+ &not_cached);
+ __ IncrementCounter(&Counters::string_ctor_cached_number, 1);
+ __ bind(&argument_is_string);
+ // ----------- S t a t e -------------
+ // -- ebx : argument converted to string
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate a JSValue and put the tagged pointer into eax.
+ Label gc_required;
+ __ AllocateInNewSpace(JSValue::kSize,
+ eax, // Result.
+ ecx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
+
+ // Set the map.
+ __ LoadGlobalFunctionInitialMap(edi, ecx);
+ if (FLAG_debug_code) {
+ __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
+ JSValue::kSize >> kPointerSizeLog2);
+ __ Assert(equal, "Unexpected string wrapper instance size");
+ __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
+ __ Assert(equal, "Unexpected unused properties of string wrapper");
+ }
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
+
+ // Set properties and elements.
+ __ Set(ecx, Immediate(Factory::empty_fixed_array()));
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
+
+ // Set the value.
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+
+ // Ensure the object is fully initialized.
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ // We're done. Return.
+ __ ret(0);
+
+ // The argument was not found in the number to string cache. Check
+ // if it's a string already before calling the conversion builtin.
+ Label convert_argument;
+ __ bind(&not_cached);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &convert_argument);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
+ __ j(NegateCondition(is_string), &convert_argument);
+ __ mov(ebx, eax);
+ __ IncrementCounter(&Counters::string_ctor_string_value, 1);
+ __ jmp(&argument_is_string);
+
+ // Invoke the conversion builtin and put the result into ebx.
+ __ bind(&convert_argument);
+ __ IncrementCounter(&Counters::string_ctor_conversions, 1);
+ __ EnterInternalFrame();
+ __ push(edi); // Preserve the function.
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ __ pop(edi);
+ __ LeaveInternalFrame();
+ __ mov(ebx, eax);
+ __ jmp(&argument_is_string);
+
+ // Load the empty string into ebx, remove the receiver from the
+ // stack, and jump back to the case where the argument is a string.
+ __ bind(&no_arguments);
+ __ Set(ebx, Immediate(Factory::empty_string()));
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, kPointerSize));
+ __ push(ecx);
+ __ jmp(&argument_is_string);
+
+ // At this point the argument is already a string. Call runtime to
+ // create a string wrapper.
+ __ bind(&gc_required);
+ __ IncrementCounter(&Counters::string_ctor_gc_required, 1);
+ __ EnterInternalFrame();
+ __ push(ebx);
+ __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ __ LeaveInternalFrame();
+ __ ret(0);
+}
+
+
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
 __ push(ebp);
 __ mov(ebp, Operand(esp));
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
new file mode 100644
index 0000000000..366b91ef60
--- /dev/null
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -0,0 +1,4615 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#if defined(V8_TARGET_ARCH_IA32) + +#include "code-stubs.h" +#include "bootstrapper.h" +#include "jsregexp.h" +#include "regexp-macro-assembler.h" + +namespace v8 { +namespace internal { + +#define __ ACCESS_MASM(masm) +void FastNewClosureStub::Generate(MacroAssembler* masm) { + // Create a new closure from the given function info in new + // space. Set the context to the current context in esi. + Label gc; + __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT); + + // Get the function info from the stack. + __ mov(edx, Operand(esp, 1 * kPointerSize)); + + // Compute the function map in the current global context and set that + // as the map of the allocated object. + __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset)); + __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); + __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx); + + // Initialize the rest of the function. We don't have to update the + // write barrier because the allocated object is in new space. + __ mov(ebx, Immediate(Factory::empty_fixed_array())); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx); + __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset), + Immediate(Factory::the_hole_value())); + __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx); + __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi); + __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx); + + // Initialize the code pointer in the function to be the one + // found in the shared function info object. + __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); + __ lea(edx, FieldOperand(edx, Code::kHeaderSize)); + __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx); + + // Return and remove the on-stack parameter. 
+ __ ret(1 * kPointerSize); + + // Create a new closure through the slower runtime call. + __ bind(&gc); + __ pop(ecx); // Temporarily remove return address. + __ pop(edx); + __ push(esi); + __ push(edx); + __ push(ecx); // Restore return address. + __ TailCallRuntime(Runtime::kNewClosure, 2, 1); +} + + +void FastNewContextStub::Generate(MacroAssembler* masm) { + // Try to allocate the context in new space. + Label gc; + int length = slots_ + Context::MIN_CONTEXT_SLOTS; + __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, + eax, ebx, ecx, &gc, TAG_OBJECT); + + // Get the function from the stack. + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + + // Setup the object header. + __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map()); + __ mov(FieldOperand(eax, Context::kLengthOffset), + Immediate(Smi::FromInt(length))); + + // Setup the fixed slots. + __ xor_(ebx, Operand(ebx)); // Set to NULL. + __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx); + __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax); + __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx); + __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx); + + // Copy the global object from the surrounding context. We go through the + // context in the function (ecx) to match the allocation behavior we have + // in the runtime system (see Heap::AllocateFunctionContext). + __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset)); + __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx); + + // Initialize the rest of the slots to undefined. + __ mov(ebx, Factory::undefined_value()); + for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { + __ mov(Operand(eax, Context::SlotOffset(i)), ebx); + } + + // Return and remove the on-stack parameter. + __ mov(esi, Operand(eax)); + __ ret(1 * kPointerSize); + + // Need to collect. Call into runtime system. + __ bind(&gc); + __ TailCallRuntime(Runtime::kNewContext, 1, 1); +} + + +void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { + // Stack layout on entry: + // + // [esp + kPointerSize]: constant elements. + // [esp + (2 * kPointerSize)]: literal index. + // [esp + (3 * kPointerSize)]: literals array. + + // All sizes here are multiples of kPointerSize. + int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; + int size = JSArray::kSize + elements_size; + + // Load boilerplate object into ecx and check if we need to create a + // boilerplate. 
+ Label slow_case; + __ mov(ecx, Operand(esp, 3 * kPointerSize)); + __ mov(eax, Operand(esp, 2 * kPointerSize)); + STATIC_ASSERT(kPointerSize == 4); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); + __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(ecx, Factory::undefined_value()); + __ j(equal, &slow_case); + + if (FLAG_debug_code) { + const char* message; + Handle expected_map; + if (mode_ == CLONE_ELEMENTS) { + message = "Expected (writable) fixed array"; + expected_map = Factory::fixed_array_map(); + } else { + ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS); + message = "Expected copy-on-write fixed array"; + expected_map = Factory::fixed_cow_array_map(); + } + __ push(ecx); + __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); + __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map); + __ Assert(equal, message); + __ pop(ecx); + } + + // Allocate both the JS array and the elements array in one big + // allocation. This avoids multiple limit checks. + __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT); + + // Copy the JS array part. + for (int i = 0; i < JSArray::kSize; i += kPointerSize) { + if ((i != JSArray::kElementsOffset) || (length_ == 0)) { + __ mov(ebx, FieldOperand(ecx, i)); + __ mov(FieldOperand(eax, i), ebx); + } + } + + if (length_ > 0) { + // Get hold of the elements array of the boilerplate and setup the + // elements pointer in the resulting object. + __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); + __ lea(edx, Operand(eax, JSArray::kSize)); + __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx); + + // Copy the elements array. + for (int i = 0; i < elements_size; i += kPointerSize) { + __ mov(ebx, FieldOperand(ecx, i)); + __ mov(FieldOperand(edx, i), ebx); + } + } + + // Return and remove the on-stack parameters. + __ ret(3 * kPointerSize); + + __ bind(&slow_case); + __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); +} + + +// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined). +void ToBooleanStub::Generate(MacroAssembler* masm) { + Label false_result, true_result, not_string; + __ mov(eax, Operand(esp, 1 * kPointerSize)); + + // 'null' => false. + __ cmp(eax, Factory::null_value()); + __ j(equal, &false_result); + + // Get the map and type of the heap object. + __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset)); + + // Undetectable => false. + __ test_b(FieldOperand(edx, Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + __ j(not_zero, &false_result); + + // JavaScript object => true. + __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE); + __ j(above_equal, &true_result); + + // String value => false iff empty. + __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE); + __ j(above_equal, ¬_string); + STATIC_ASSERT(kSmiTag == 0); + __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0)); + __ j(zero, &false_result); + __ jmp(&true_result); + + __ bind(¬_string); + // HeapNumber => false iff +0, -0, or NaN. + __ cmp(edx, Factory::heap_number_map()); + __ j(not_equal, &true_result); + __ fldz(); + __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); + __ FCmp(); + __ j(zero, &false_result); + // Fall through to |true_result|. + + // Return 1/0 for true/false in eax. 
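Before the true/false return sequence that follows, a summary of the ladder this stub implements may help: it is ECMAScript ToBoolean for the cases the inlined fast path does not cover. The plain C++ restatement below is an editor's sketch built on a hypothetical Value struct, not V8's object model.

#include <cmath>
#include <string>

// Hypothetical tagged value; each flag mirrors a check made by the stub above.
struct Value {
  bool is_null, is_undetectable, is_js_object, is_string, is_heap_number;
  std::string string_value;
  double number_value;
};

bool ToBoolean(const Value& v) {
  if (v.is_null) return false;                        // 'null' => false
  if (v.is_undetectable) return false;                // undetectable => false
  if (v.is_js_object) return true;                    // JavaScript object => true
  if (v.is_string) return !v.string_value.empty();    // string => false iff empty
  if (v.is_heap_number)                               // HeapNumber => false iff +0, -0 or NaN
    return v.number_value != 0.0 && !std::isnan(v.number_value);
  return true;
}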
+ __ bind(&true_result); + __ mov(eax, 1); + __ ret(1 * kPointerSize); + __ bind(&false_result); + __ mov(eax, 0); + __ ret(1 * kPointerSize); +} + + +const char* GenericBinaryOpStub::GetName() { + if (name_ != NULL) return name_; + const int kMaxNameLength = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); + if (name_ == NULL) return "OOM"; + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; + } + + OS::SNPrintF(Vector(name_, kMaxNameLength), + "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", + op_name, + overwrite_name, + (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", + args_in_registers_ ? "RegArgs" : "StackArgs", + args_reversed_ ? "_R" : "", + static_operands_type_.ToString(), + BinaryOpIC::GetName(runtime_operands_type_)); + return name_; +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Register left, + Register right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(left); + __ push(right); + } else { + // The calling convention with registers is left in edx and right in eax. + Register left_arg = edx; + Register right_arg = eax; + if (!(left.is(left_arg) && right.is(right_arg))) { + if (left.is(right_arg) && right.is(left_arg)) { + if (IsOperationCommutative()) { + SetArgsReversed(); + } else { + __ xchg(left, right); + } + } else if (left.is(left_arg)) { + __ mov(right_arg, right); + } else if (right.is(right_arg)) { + __ mov(left_arg, left); + } else if (left.is(right_arg)) { + if (IsOperationCommutative()) { + __ mov(left_arg, right); + SetArgsReversed(); + } else { + // Order of moves important to avoid destroying left argument. + __ mov(left_arg, left); + __ mov(right_arg, right); + } + } else if (right.is(left_arg)) { + if (IsOperationCommutative()) { + __ mov(right_arg, left); + SetArgsReversed(); + } else { + // Order of moves important to avoid destroying right argument. + __ mov(right_arg, right); + __ mov(left_arg, left); + } + } else { + // Order of moves is not important. + __ mov(left_arg, left); + __ mov(right_arg, right); + } + } + + // Update flags to indicate that arguments are in registers. + SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Register left, + Smi* right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(left); + __ push(Immediate(right)); + } else { + // The calling convention with registers is left in edx and right in eax. + Register left_arg = edx; + Register right_arg = eax; + if (left.is(left_arg)) { + __ mov(right_arg, Immediate(right)); + } else if (left.is(right_arg) && IsOperationCommutative()) { + __ mov(left_arg, Immediate(right)); + SetArgsReversed(); + } else { + // For non-commutative operations, left and right_arg might be + // the same register. Therefore, the order of the moves is + // important here in order to not overwrite left before moving + // it to left_arg. + __ mov(left_arg, left); + __ mov(right_arg, Immediate(right)); + } + + // Update flags to indicate that arguments are in registers. 
+ SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +void GenericBinaryOpStub::GenerateCall( + MacroAssembler* masm, + Smi* left, + Register right) { + if (!ArgsInRegistersSupported()) { + // Pass arguments on the stack. + __ push(Immediate(left)); + __ push(right); + } else { + // The calling convention with registers is left in edx and right in eax. + Register left_arg = edx; + Register right_arg = eax; + if (right.is(right_arg)) { + __ mov(left_arg, Immediate(left)); + } else if (right.is(left_arg) && IsOperationCommutative()) { + __ mov(right_arg, Immediate(left)); + SetArgsReversed(); + } else { + // For non-commutative operations, right and left_arg might be + // the same register. Therefore, the order of the moves is + // important here in order to not overwrite right before moving + // it to right_arg. + __ mov(right_arg, right); + __ mov(left_arg, Immediate(left)); + } + // Update flags to indicate that arguments are in registers. + SetArgsInRegisters(); + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); + } + + // Call the stub. + __ CallStub(this); +} + + +class FloatingPointHelper : public AllStatic { + public: + + enum ArgLocation { + ARGS_ON_STACK, + ARGS_IN_REGISTERS + }; + + // Code pattern for loading a floating point value. Input value must + // be either a smi or a heap number object (fp value). Requirements: + // operand in register number. Returns operand as floating point number + // on FPU stack. + static void LoadFloatOperand(MacroAssembler* masm, Register number); + + // Code pattern for loading floating point values. Input values must + // be either smi or heap number objects (fp values). Requirements: + // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax. + // Returns operands as floating point numbers on FPU stack. + static void LoadFloatOperands(MacroAssembler* masm, + Register scratch, + ArgLocation arg_location = ARGS_ON_STACK); + + // Similar to LoadFloatOperand but assumes that both operands are smis. + // Expects operands in edx, eax. + static void LoadFloatSmis(MacroAssembler* masm, Register scratch); + + // Test if operands are smi or number objects (fp). Requirements: + // operand_1 in eax, operand_2 in edx; falls through on float + // operands, jumps to the non_float label otherwise. + static void CheckFloatOperands(MacroAssembler* masm, + Label* non_float, + Register scratch); + + // Takes the operands in edx and eax and loads them as integers in eax + // and ecx. + static void LoadAsIntegers(MacroAssembler* masm, + TypeInfo type_info, + bool use_sse3, + Label* operand_conversion_failure); + static void LoadNumbersAsIntegers(MacroAssembler* masm, + TypeInfo type_info, + bool use_sse3, + Label* operand_conversion_failure); + static void LoadUnknownsAsIntegers(MacroAssembler* masm, + bool use_sse3, + Label* operand_conversion_failure); + + // Test if operands are smis or heap numbers and load them + // into xmm0 and xmm1 if they are. Operands are in edx and eax. + // Leaves operands unchanged. + static void LoadSSE2Operands(MacroAssembler* masm); + + // Test if operands are numbers (smi or HeapNumber objects), and load + // them into xmm0 and xmm1 if they are. Jump to label not_numbers if + // either operand is not a number. Operands are in edx and eax. + // Leaves operands unchanged. + static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); + + // Similar to LoadSSE2Operands but assumes that both operands are smis. 
+ // Expects operands in edx, eax. + static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); +}; + + +void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { + // 1. Move arguments into edx, eax except for DIV and MOD, which need the + // dividend in eax and edx free for the division. Use eax, ebx for those. + Comment load_comment(masm, "-- Load arguments"); + Register left = edx; + Register right = eax; + if (op_ == Token::DIV || op_ == Token::MOD) { + left = eax; + right = ebx; + if (HasArgsInRegisters()) { + __ mov(ebx, eax); + __ mov(eax, edx); + } + } + if (!HasArgsInRegisters()) { + __ mov(right, Operand(esp, 1 * kPointerSize)); + __ mov(left, Operand(esp, 2 * kPointerSize)); + } + + if (static_operands_type_.IsSmi()) { + if (FLAG_debug_code) { + __ AbortIfNotSmi(left); + __ AbortIfNotSmi(right); + } + if (op_ == Token::BIT_OR) { + __ or_(right, Operand(left)); + GenerateReturn(masm); + return; + } else if (op_ == Token::BIT_AND) { + __ and_(right, Operand(left)); + GenerateReturn(masm); + return; + } else if (op_ == Token::BIT_XOR) { + __ xor_(right, Operand(left)); + GenerateReturn(masm); + return; + } + } + + // 2. Prepare the smi check of both operands by oring them together. + Comment smi_check_comment(masm, "-- Smi check arguments"); + Label not_smis; + Register combined = ecx; + ASSERT(!left.is(combined) && !right.is(combined)); + switch (op_) { + case Token::BIT_OR: + // Perform the operation into eax and smi check the result. Preserve + // eax in case the result is not a smi. + ASSERT(!left.is(ecx) && !right.is(ecx)); + __ mov(ecx, right); + __ or_(right, Operand(left)); // Bitwise or is commutative. + combined = right; + break; + + case Token::BIT_XOR: + case Token::BIT_AND: + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: + __ mov(combined, right); + __ or_(combined, Operand(left)); + break; + + case Token::SHL: + case Token::SAR: + case Token::SHR: + // Move the right operand into ecx for the shift operation, use eax + // for the smi check register. + ASSERT(!left.is(ecx) && !right.is(ecx)); + __ mov(ecx, right); + __ or_(right, Operand(left)); + combined = right; + break; + + default: + break; + } + + // 3. Perform the smi check of the operands. + STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case. + __ test(combined, Immediate(kSmiTagMask)); + __ j(not_zero, ¬_smis, not_taken); + + // 4. Operands are both smis, perform the operation leaving the result in + // eax and check the result if necessary. + Comment perform_smi(masm, "-- Perform smi operation"); + Label use_fp_on_smis; + switch (op_) { + case Token::BIT_OR: + // Nothing to do. + break; + + case Token::BIT_XOR: + ASSERT(right.is(eax)); + __ xor_(right, Operand(left)); // Bitwise xor is commutative. + break; + + case Token::BIT_AND: + ASSERT(right.is(eax)); + __ and_(right, Operand(left)); // Bitwise and is commutative. + break; + + case Token::SHL: + // Remove tags from operands (but keep sign). + __ SmiUntag(left); + __ SmiUntag(ecx); + // Perform the operation. + __ shl_cl(left); + // Check that the *signed* result fits in a smi. + __ cmp(left, 0xc0000000); + __ j(sign, &use_fp_on_smis, not_taken); + // Tag the result and store it in register eax. + __ SmiTag(left); + __ mov(eax, left); + break; + + case Token::SAR: + // Remove tags from operands (but keep sign). + __ SmiUntag(left); + __ SmiUntag(ecx); + // Perform the operation. + __ sar_cl(left); + // Tag the result and store it in register eax. 
+ __ SmiTag(left); + __ mov(eax, left); + break; + + case Token::SHR: + // Remove tags from operands (but keep sign). + __ SmiUntag(left); + __ SmiUntag(ecx); + // Perform the operation. + __ shr_cl(left); + // Check that the *unsigned* result fits in a smi. + // Neither of the two high-order bits can be set: + // - 0x80000000: high bit would be lost when smi tagging. + // - 0x40000000: this number would convert to negative when + // Smi tagging these two cases can only happen with shifts + // by 0 or 1 when handed a valid smi. + __ test(left, Immediate(0xc0000000)); + __ j(not_zero, slow, not_taken); + // Tag the result and store it in register eax. + __ SmiTag(left); + __ mov(eax, left); + break; + + case Token::ADD: + ASSERT(right.is(eax)); + __ add(right, Operand(left)); // Addition is commutative. + __ j(overflow, &use_fp_on_smis, not_taken); + break; + + case Token::SUB: + __ sub(left, Operand(right)); + __ j(overflow, &use_fp_on_smis, not_taken); + __ mov(eax, left); + break; + + case Token::MUL: + // If the smi tag is 0 we can just leave the tag on one operand. + STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case. + // We can't revert the multiplication if the result is not a smi + // so save the right operand. + __ mov(ebx, right); + // Remove tag from one of the operands (but keep sign). + __ SmiUntag(right); + // Do multiplication. + __ imul(right, Operand(left)); // Multiplication is commutative. + __ j(overflow, &use_fp_on_smis, not_taken); + // Check for negative zero result. Use combined = left | right. + __ NegativeZeroTest(right, combined, &use_fp_on_smis); + break; + + case Token::DIV: + // We can't revert the division if the result is not a smi so + // save the left operand. + __ mov(edi, left); + // Check for 0 divisor. + __ test(right, Operand(right)); + __ j(zero, &use_fp_on_smis, not_taken); + // Sign extend left into edx:eax. + ASSERT(left.is(eax)); + __ cdq(); + // Divide edx:eax by right. + __ idiv(right); + // Check for the corner case of dividing the most negative smi by + // -1. We cannot use the overflow flag, since it is not set by idiv + // instruction. + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + __ cmp(eax, 0x40000000); + __ j(equal, &use_fp_on_smis); + // Check for negative zero result. Use combined = left | right. + __ NegativeZeroTest(eax, combined, &use_fp_on_smis); + // Check that the remainder is zero. + __ test(edx, Operand(edx)); + __ j(not_zero, &use_fp_on_smis); + // Tag the result and store it in register eax. + __ SmiTag(eax); + break; + + case Token::MOD: + // Check for 0 divisor. + __ test(right, Operand(right)); + __ j(zero, ¬_smis, not_taken); + + // Sign extend left into edx:eax. + ASSERT(left.is(eax)); + __ cdq(); + // Divide edx:eax by right. + __ idiv(right); + // Check for negative zero result. Use combined = left | right. + __ NegativeZeroTest(edx, combined, slow); + // Move remainder to register eax. + __ mov(eax, edx); + break; + + default: + UNREACHABLE(); + } + + // 5. Emit return of result in eax. + GenerateReturn(masm); + + // 6. For some operations emit inline code to perform floating point + // operations on known smis (e.g., if the result of the operation + // overflowed the smi range). + switch (op_) { + case Token::SHL: { + Comment perform_float(masm, "-- Perform float operation on smis"); + __ bind(&use_fp_on_smis); + // Result we want is in left == edx, so we can put the allocated heap + // number in eax. 
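Before the heap-number allocation that follows, a note on the 0xc0000000 checks used in the shift and overflow paths of this stub: on ia32 a smi is a 31-bit signed integer shifted left by one with a zero tag bit (kSmiTag == 0, kSmiTagSize == 1, as the STATIC_ASSERTs state), so an untagged result is representable only if neither of its two high-order bits is set. The snippet below is an editor's sketch of that invariant, not V8's actual Smi class.

#include <cassert>
#include <cstdint>

inline int32_t SmiTag(int32_t value) {                     // value << 1, tag bit = 0
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}
inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }  // arithmetic shift restores the sign

inline bool FitsInSmi(uint32_t value) {
  // Mirrors `test value, 0xc0000000`: bit 31 would be lost by the tag shift,
  // and bit 30 would make the tagged value read back as negative.
  return (value & 0xc0000000u) == 0;
}

int main() {
  assert(SmiUntag(SmiTag(12345)) == 12345);
  assert(FitsInSmi(0x3fffffffu));    // 2^30 - 1, the largest unsigned value that fits
  assert(!FitsInSmi(0x40000000u));   // would flip sign after tagging and untagging
  return 0;
}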
+ __ AllocateHeapNumber(eax, ecx, ebx, slow); + // Store the result in the HeapNumber and return. + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + __ cvtsi2sd(xmm0, Operand(left)); + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + } else { + // It's OK to overwrite the right argument on the stack because we + // are about to return. + __ mov(Operand(esp, 1 * kPointerSize), left); + __ fild_s(Operand(esp, 1 * kPointerSize)); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + } + GenerateReturn(masm); + break; + } + + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: { + Comment perform_float(masm, "-- Perform float operation on smis"); + __ bind(&use_fp_on_smis); + // Restore arguments to edx, eax. + switch (op_) { + case Token::ADD: + // Revert right = right + left. + __ sub(right, Operand(left)); + break; + case Token::SUB: + // Revert left = left - right. + __ add(left, Operand(right)); + break; + case Token::MUL: + // Right was clobbered but a copy is in ebx. + __ mov(right, ebx); + break; + case Token::DIV: + // Left was clobbered but a copy is in edi. Right is in ebx for + // division. + __ mov(edx, edi); + __ mov(eax, right); + break; + default: UNREACHABLE(); + break; + } + __ AllocateHeapNumber(ecx, ebx, no_reg, slow); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + FloatingPointHelper::LoadSSE2Smis(masm, ebx); + switch (op_) { + case Token::ADD: __ addsd(xmm0, xmm1); break; + case Token::SUB: __ subsd(xmm0, xmm1); break; + case Token::MUL: __ mulsd(xmm0, xmm1); break; + case Token::DIV: __ divsd(xmm0, xmm1); break; + default: UNREACHABLE(); + } + __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); + } else { // SSE2 not available, use FPU. + FloatingPointHelper::LoadFloatSmis(masm, ebx); + switch (op_) { + case Token::ADD: __ faddp(1); break; + case Token::SUB: __ fsubp(1); break; + case Token::MUL: __ fmulp(1); break; + case Token::DIV: __ fdivp(1); break; + default: UNREACHABLE(); + } + __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset)); + } + __ mov(eax, ecx); + GenerateReturn(masm); + break; + } + + default: + break; + } + + // 7. Non-smi operands, fall out to the non-smi code with the operands in + // edx and eax. + Comment done_comment(masm, "-- Enter non-smi code"); + __ bind(¬_smis); + switch (op_) { + case Token::BIT_OR: + case Token::SHL: + case Token::SAR: + case Token::SHR: + // Right operand is saved in ecx and eax was destroyed by the smi + // check. + __ mov(eax, ecx); + break; + + case Token::DIV: + case Token::MOD: + // Operands are in eax, ebx at this point. + __ mov(edx, eax); + __ mov(eax, ebx); + break; + + default: + break; + } +} + + +void GenericBinaryOpStub::Generate(MacroAssembler* masm) { + Label call_runtime; + + __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); + + // Generate fast case smi code if requested. This flag is set when the fast + // case smi code is not generated by the caller. Generating it here will speed + // up common operations. + if (ShouldGenerateSmiCode()) { + GenerateSmiCode(masm, &call_runtime); + } else if (op_ != Token::MOD) { // MOD goes straight to runtime. + if (!HasArgsInRegisters()) { + GenerateLoadArguments(masm); + } + } + + // Floating point case. 
+ if (ShouldGenerateFPCode()) { + switch (op_) { + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: { + if (runtime_operands_type_ == BinaryOpIC::DEFAULT && + HasSmiCodeInStub()) { + // Execution reaches this point when the first non-smi argument occurs + // (and only if smi code is generated). This is the right moment to + // patch to HEAP_NUMBERS state. The transition is attempted only for + // the four basic operations. The stub stays in the DEFAULT state + // forever for all other operations (also if smi code is skipped). + GenerateTypeTransition(masm); + break; + } + + Label not_floats; + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + if (static_operands_type_.IsNumber()) { + if (FLAG_debug_code) { + // Assert at runtime that inputs are only numbers. + __ AbortIfNotNumber(edx); + __ AbortIfNotNumber(eax); + } + if (static_operands_type_.IsSmi()) { + if (FLAG_debug_code) { + __ AbortIfNotSmi(edx); + __ AbortIfNotSmi(eax); + } + FloatingPointHelper::LoadSSE2Smis(masm, ecx); + } else { + FloatingPointHelper::LoadSSE2Operands(masm); + } + } else { + FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); + } + + switch (op_) { + case Token::ADD: __ addsd(xmm0, xmm1); break; + case Token::SUB: __ subsd(xmm0, xmm1); break; + case Token::MUL: __ mulsd(xmm0, xmm1); break; + case Token::DIV: __ divsd(xmm0, xmm1); break; + default: UNREACHABLE(); + } + GenerateHeapResultAllocation(masm, &call_runtime); + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + GenerateReturn(masm); + } else { // SSE2 not available, use FPU. + if (static_operands_type_.IsNumber()) { + if (FLAG_debug_code) { + // Assert at runtime that inputs are only numbers. + __ AbortIfNotNumber(edx); + __ AbortIfNotNumber(eax); + } + } else { + FloatingPointHelper::CheckFloatOperands(masm, ¬_floats, ebx); + } + FloatingPointHelper::LoadFloatOperands( + masm, + ecx, + FloatingPointHelper::ARGS_IN_REGISTERS); + switch (op_) { + case Token::ADD: __ faddp(1); break; + case Token::SUB: __ fsubp(1); break; + case Token::MUL: __ fmulp(1); break; + case Token::DIV: __ fdivp(1); break; + default: UNREACHABLE(); + } + Label after_alloc_failure; + GenerateHeapResultAllocation(masm, &after_alloc_failure); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + GenerateReturn(masm); + __ bind(&after_alloc_failure); + __ ffree(); + __ jmp(&call_runtime); + } + __ bind(¬_floats); + if (runtime_operands_type_ == BinaryOpIC::DEFAULT && + !HasSmiCodeInStub()) { + // Execution reaches this point when the first non-number argument + // occurs (and only if smi code is skipped from the stub, otherwise + // the patching has already been done earlier in this case branch). + // Try patching to STRINGS for ADD operation. + if (op_ == Token::ADD) { + GenerateTypeTransition(masm); + } + } + break; + } + case Token::MOD: { + // For MOD we go directly to runtime in the non-smi case. 
+ break; + } + case Token::BIT_OR: + case Token::BIT_AND: + case Token::BIT_XOR: + case Token::SAR: + case Token::SHL: + case Token::SHR: { + Label non_smi_result; + FloatingPointHelper::LoadAsIntegers(masm, + static_operands_type_, + use_sse3_, + &call_runtime); + switch (op_) { + case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; + case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; + case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; + case Token::SAR: __ sar_cl(eax); break; + case Token::SHL: __ shl_cl(eax); break; + case Token::SHR: __ shr_cl(eax); break; + default: UNREACHABLE(); + } + if (op_ == Token::SHR) { + // Check if result is non-negative and fits in a smi. + __ test(eax, Immediate(0xc0000000)); + __ j(not_zero, &call_runtime); + } else { + // Check if result fits in a smi. + __ cmp(eax, 0xc0000000); + __ j(negative, &non_smi_result); + } + // Tag smi result and return. + __ SmiTag(eax); + GenerateReturn(masm); + + // All ops except SHR return a signed int32 that we load in + // a HeapNumber. + if (op_ != Token::SHR) { + __ bind(&non_smi_result); + // Allocate a heap number if needed. + __ mov(ebx, Operand(eax)); // ebx: result + Label skip_allocation; + switch (mode_) { + case OVERWRITE_LEFT: + case OVERWRITE_RIGHT: + // If the operand was an object, we skip the + // allocation of a heap number. + __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? + 1 * kPointerSize : 2 * kPointerSize)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &skip_allocation, not_taken); + // Fall through! + case NO_OVERWRITE: + __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); + __ bind(&skip_allocation); + break; + default: UNREACHABLE(); + } + // Store the result in the HeapNumber and return. + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + __ cvtsi2sd(xmm0, Operand(ebx)); + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + } else { + __ mov(Operand(esp, 1 * kPointerSize), ebx); + __ fild_s(Operand(esp, 1 * kPointerSize)); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + } + GenerateReturn(masm); + } + break; + } + default: UNREACHABLE(); break; + } + } + + // If all else fails, use the runtime system to get the correct + // result. If arguments was passed in registers now place them on the + // stack in the correct order below the return address. + + // Avoid hitting the string ADD code below when allocation fails in + // the floating point code above. + if (op_ != Token::ADD) { + __ bind(&call_runtime); + } + + if (HasArgsInRegisters()) { + GenerateRegisterArgsPush(masm); + } + + switch (op_) { + case Token::ADD: { + // Test for string arguments before calling runtime. + + // If this stub has already generated FP-specific code then the arguments + // are already in edx, eax + if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { + GenerateLoadArguments(masm); + } + + // Registers containing left and right operands respectively. + Register lhs, rhs; + if (HasArgsReversed()) { + lhs = eax; + rhs = edx; + } else { + lhs = edx; + rhs = eax; + } + + // Test if left operand is a string. + Label lhs_not_string; + __ test(lhs, Immediate(kSmiTagMask)); + __ j(zero, &lhs_not_string); + __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx); + __ j(above_equal, &lhs_not_string); + + StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); + __ TailCallStub(&string_add_left_stub); + + Label call_runtime_with_args; + // Left operand is not a string, test right. 
+ __ bind(&lhs_not_string); + __ test(rhs, Immediate(kSmiTagMask)); + __ j(zero, &call_runtime_with_args); + __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx); + __ j(above_equal, &call_runtime_with_args); + + StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); + __ TailCallStub(&string_add_right_stub); + + // Neither argument is a string. + __ bind(&call_runtime); + if (HasArgsInRegisters()) { + GenerateRegisterArgsPush(masm); + } + __ bind(&call_runtime_with_args); + __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + break; + } + case Token::SUB: + __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); + break; + case Token::MUL: + __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + break; + case Token::DIV: + __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + break; + case Token::MOD: + __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + break; + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, + Label* alloc_failure) { + Label skip_allocation; + OverwriteMode mode = mode_; + if (HasArgsReversed()) { + if (mode == OVERWRITE_RIGHT) { + mode = OVERWRITE_LEFT; + } else if (mode == OVERWRITE_LEFT) { + mode = OVERWRITE_RIGHT; + } + } + switch (mode) { + case OVERWRITE_LEFT: { + // If the argument in edx is already an object, we skip the + // allocation of a heap number. + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &skip_allocation, not_taken); + // Allocate a heap number for the result. Keep eax and edx intact + // for the possible runtime call. + __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); + // Now edx can be overwritten losing one of the arguments as we are + // now done and will not need it any more. + __ mov(edx, Operand(ebx)); + __ bind(&skip_allocation); + // Use object in edx as a result holder + __ mov(eax, Operand(edx)); + break; + } + case OVERWRITE_RIGHT: + // If the argument in eax is already an object, we skip the + // allocation of a heap number. + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &skip_allocation, not_taken); + // Fall through! + case NO_OVERWRITE: + // Allocate a heap number for the result. Keep eax and edx intact + // for the possible runtime call. + __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); + // Now eax can be overwritten losing one of the arguments as we are + // now done and will not need it any more. + __ mov(eax, ebx); + __ bind(&skip_allocation); + break; + default: UNREACHABLE(); + } +} + + +void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { + // If arguments are not passed in registers read them from the stack. + ASSERT(!HasArgsInRegisters()); + __ mov(eax, Operand(esp, 1 * kPointerSize)); + __ mov(edx, Operand(esp, 2 * kPointerSize)); +} + + +void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { + // If arguments are not passed in registers remove them from the stack before + // returning. 
+ if (!HasArgsInRegisters()) { + __ ret(2 * kPointerSize); // Remove both operands + } else { + __ ret(0); + } +} + + +void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { + ASSERT(HasArgsInRegisters()); + __ pop(ecx); + if (HasArgsReversed()) { + __ push(eax); + __ push(edx); + } else { + __ push(edx); + __ push(eax); + } + __ push(ecx); +} + + +void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { + // Ensure the operands are on the stack. + if (HasArgsInRegisters()) { + GenerateRegisterArgsPush(masm); + } + + __ pop(ecx); // Save return address. + + // Left and right arguments are now on top. + // Push this stub's key. Although the operation and the type info are + // encoded into the key, the encoding is opaque, so push them too. + __ push(Immediate(Smi::FromInt(MinorKey()))); + __ push(Immediate(Smi::FromInt(op_))); + __ push(Immediate(Smi::FromInt(runtime_operands_type_))); + + __ push(ecx); // Push return address. + + // Patch the caller to an appropriate specialized stub and return the + // operation result to the caller of the stub. + __ TailCallExternalReference( + ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), + 5, + 1); +} + + +Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { + GenericBinaryOpStub stub(key, type_info); + return stub.GetCode(); +} + + +void TranscendentalCacheStub::Generate(MacroAssembler* masm) { + // Input on stack: + // esp[4]: argument (should be number). + // esp[0]: return address. + // Test that eax is a number. + Label runtime_call; + Label runtime_call_clear_stack; + Label input_not_smi; + Label loaded; + __ mov(eax, Operand(esp, kPointerSize)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &input_not_smi); + // Input is a smi. Untag and load it onto the FPU stack. + // Then load the low and high words of the double into ebx, edx. + STATIC_ASSERT(kSmiTagSize == 1); + __ sar(eax, 1); + __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ mov(Operand(esp, 0), eax); + __ fild_s(Operand(esp, 0)); + __ fst_d(Operand(esp, 0)); + __ pop(edx); + __ pop(ebx); + __ jmp(&loaded); + __ bind(&input_not_smi); + // Check if input is a HeapNumber. + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(Operand(ebx), Immediate(Factory::heap_number_map())); + __ j(not_equal, &runtime_call); + // Input is a HeapNumber. Push it on the FPU stack and load its + // low and high words into ebx, edx. + __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); + __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); + __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); + + __ bind(&loaded); + // ST[0] == double value + // ebx = low 32 bits of double value + // edx = high 32 bits of double value + // Compute hash (the shifts are arithmetic): + // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); + __ mov(ecx, ebx); + __ xor_(ecx, Operand(edx)); + __ mov(eax, ecx); + __ sar(eax, 16); + __ xor_(ecx, Operand(eax)); + __ mov(eax, ecx); + __ sar(eax, 8); + __ xor_(ecx, Operand(eax)); + ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); + __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1)); + + // ST[0] == double value. + // ebx = low 32 bits of double value. + // edx = high 32 bits of double value. + // ecx = TranscendentalCache::hash(double value). + __ mov(eax, + Immediate(ExternalReference::transcendental_cache_array_address())); + // Eax points to cache array. 
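As an aside on the hash just computed: it mixes the two 32-bit halves of the input double and reduces the result modulo the cache size, exactly as the comment above spells out. The C++ restatement below is an editor's sketch; kCacheSize is passed in as a placeholder because the real power-of-two constant is defined on TranscendentalCache itself.

#include <cstdint>
#include <cstring>

int TranscendentalHash(double input, int kCacheSize /* power of two */) {
  uint32_t halves[2];
  std::memcpy(halves, &input, sizeof(halves));   // halves[0] = low word, halves[1] = high word (little-endian)
  int32_t h = static_cast<int32_t>(halves[0] ^ halves[1]);
  h ^= h >> 16;                                  // arithmetic shifts, matching the sar instructions above
  h ^= h >> 8;
  return h & (kCacheSize - 1);
}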
+ __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0]))); + // Eax points to the cache for the type type_. + // If NULL, the cache hasn't been initialized yet, so go through runtime. + __ test(eax, Operand(eax)); + __ j(zero, &runtime_call_clear_stack); +#ifdef DEBUG + // Check that the layout of cache elements match expectations. + { TranscendentalCache::Element test_elem[2]; + char* elem_start = reinterpret_cast(&test_elem[0]); + char* elem2_start = reinterpret_cast(&test_elem[1]); + char* elem_in0 = reinterpret_cast(&(test_elem[0].in[0])); + char* elem_in1 = reinterpret_cast(&(test_elem[0].in[1])); + char* elem_out = reinterpret_cast(&(test_elem[0].output)); + CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. + CHECK_EQ(0, elem_in0 - elem_start); + CHECK_EQ(kIntSize, elem_in1 - elem_start); + CHECK_EQ(2 * kIntSize, elem_out - elem_start); + } +#endif + // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12]. + __ lea(ecx, Operand(ecx, ecx, times_2, 0)); + __ lea(ecx, Operand(eax, ecx, times_4, 0)); + // Check if cache matches: Double value is stored in uint32_t[2] array. + Label cache_miss; + __ cmp(ebx, Operand(ecx, 0)); + __ j(not_equal, &cache_miss); + __ cmp(edx, Operand(ecx, kIntSize)); + __ j(not_equal, &cache_miss); + // Cache hit! + __ mov(eax, Operand(ecx, 2 * kIntSize)); + __ fstp(0); + __ ret(kPointerSize); + + __ bind(&cache_miss); + // Update cache with new value. + // We are short on registers, so use no_reg as scratch. + // This gives slightly larger code. + __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); + GenerateOperation(masm); + __ mov(Operand(ecx, 0), ebx); + __ mov(Operand(ecx, kIntSize), edx); + __ mov(Operand(ecx, 2 * kIntSize), eax); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + __ ret(kPointerSize); + + __ bind(&runtime_call_clear_stack); + __ fstp(0); + __ bind(&runtime_call); + __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); +} + + +Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { + switch (type_) { + // Add more cases when necessary. + case TranscendentalCache::SIN: return Runtime::kMath_sin; + case TranscendentalCache::COS: return Runtime::kMath_cos; + default: + UNIMPLEMENTED(); + return Runtime::kAbort; + } +} + + +void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { + // Only free register is edi. + Label done; + ASSERT(type_ == TranscendentalCache::SIN || + type_ == TranscendentalCache::COS); + // More transcendental types can be added later. + + // Both fsin and fcos require arguments in the range +/-2^63 and + // return NaN for infinities and NaN. They can share all code except + // the actual fsin/fcos operation. + Label in_range; + // If argument is outside the range -2^63..2^63, fsin/cos doesn't + // work. We must reduce it to the appropriate range. + __ mov(edi, edx); + __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. + int supported_exponent_limit = + (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; + __ cmp(Operand(edi), Immediate(supported_exponent_limit)); + __ j(below, &in_range, taken); + // Check for infinity and NaN. Both return NaN for sin. + __ cmp(Operand(edi), Immediate(0x7ff00000)); + Label non_nan_result; + __ j(not_equal, &non_nan_result, taken); + // Input is +/-Infinity or NaN. Result is NaN. + __ fstp(0); + // NaN is represented by 0x7ff8000000000000. 
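The bit patterns used here and in the conversion code further down (the 0x7ff00000 exponent mask, the exponent bias, and the 0x7ff8000000000000 quiet-NaN value about to be pushed) all follow from the IEEE-754 double layout: 1 sign bit, 11 exponent bits biased by 1023, and 52 mantissa bits, with the upper 32-bit word holding sign, exponent and the top 20 mantissa bits. The snippet below is an editor's sketch that decodes the upper word the same way the generated code does.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

void DumpDoubleFields(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t high = static_cast<uint32_t>(bits >> 32);        // sign + exponent + top mantissa bits
  uint32_t sign = high >> 31;
  uint32_t biased_exponent = (high & 0x7ff00000u) >> 20;    // same mask the stubs use for "exponent only"
  std::printf("sign=%u biased_exponent=%u unbiased=%d\n",
              sign, biased_exponent, static_cast<int>(biased_exponent) - 1023);
}

int main() {
  DumpDoubleFields(1.5);            // biased exponent 1023, unbiased 0
  DumpDoubleFields(std::nan(""));   // NaN and infinities have all exponent bits set (0x7ff)
  return 0;
}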
+ __ push(Immediate(0x7ff80000)); + __ push(Immediate(0)); + __ fld_d(Operand(esp, 0)); + __ add(Operand(esp), Immediate(2 * kPointerSize)); + __ jmp(&done); + + __ bind(&non_nan_result); + + // Use fpmod to restrict argument to the range +/-2*PI. + __ mov(edi, eax); // Save eax before using fnstsw_ax. + __ fldpi(); + __ fadd(0); + __ fld(1); + // FPU Stack: input, 2*pi, input. + { + Label no_exceptions; + __ fwait(); + __ fnstsw_ax(); + // Clear if Illegal Operand or Zero Division exceptions are set. + __ test(Operand(eax), Immediate(5)); + __ j(zero, &no_exceptions); + __ fnclex(); + __ bind(&no_exceptions); + } + + // Compute st(0) % st(1) + { + Label partial_remainder_loop; + __ bind(&partial_remainder_loop); + __ fprem1(); + __ fwait(); + __ fnstsw_ax(); + __ test(Operand(eax), Immediate(0x400 /* C2 */)); + // If C2 is set, computation only has partial result. Loop to + // continue computation. + __ j(not_zero, &partial_remainder_loop); + } + // FPU Stack: input, 2*pi, input % 2*pi + __ fstp(2); + __ fstp(0); + __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer). + + // FPU Stack: input % 2*pi + __ bind(&in_range); + switch (type_) { + case TranscendentalCache::SIN: + __ fsin(); + break; + case TranscendentalCache::COS: + __ fcos(); + break; + default: + UNREACHABLE(); + } + __ bind(&done); +} + + +// Get the integer part of a heap number. Surprisingly, all this bit twiddling +// is faster than using the built-in instructions on floating point registers. +// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the +// trashed registers. +void IntegerConvert(MacroAssembler* masm, + Register source, + TypeInfo type_info, + bool use_sse3, + Label* conversion_failure) { + ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); + Label done, right_exponent, normal_exponent; + Register scratch = ebx; + Register scratch2 = edi; + if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) { + CpuFeatures::Scope scope(SSE2); + __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset)); + return; + } + if (!type_info.IsInteger32() || !use_sse3) { + // Get exponent word. + __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); + // Get exponent alone in scratch2. + __ mov(scratch2, scratch); + __ and_(scratch2, HeapNumber::kExponentMask); + } + if (use_sse3) { + CpuFeatures::Scope scope(SSE3); + if (!type_info.IsInteger32()) { + // Check whether the exponent is too big for a 64 bit signed integer. + static const uint32_t kTooBigExponent = + (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; + __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); + __ j(greater_equal, conversion_failure); + } + // Load x87 register with heap number. + __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); + // Reserve space for 64 bit answer. + __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. + // Do conversion, which cannot fail because we checked the exponent. + __ fisttp_d(Operand(esp, 0)); + __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. + __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. + } else { + // Load ecx with zero. We use this either for the final shift or + // for the answer. + __ xor_(ecx, Operand(ecx)); + // Check whether the exponent matches a 32 bit signed int that cannot be + // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the + // exponent is 30 (biased). This is the exponent that we are fastest at and + // also the highest exponent we can handle here. 
+ const uint32_t non_smi_exponent = + (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; + __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); + // If we have a match of the int32-but-not-Smi exponent then skip some + // logic. + __ j(equal, &right_exponent); + // If the exponent is higher than that then go to slow case. This catches + // numbers that don't fit in a signed int32, infinities and NaNs. + __ j(less, &normal_exponent); + + { + // Handle a big exponent. The only reason we have this code is that the + // >>> operator has a tendency to generate numbers with an exponent of 31. + const uint32_t big_non_smi_exponent = + (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; + __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); + __ j(not_equal, conversion_failure); + // We have the big exponent, typically from >>>. This means the number is + // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. + __ mov(scratch2, scratch); + __ and_(scratch2, HeapNumber::kMantissaMask); + // Put back the implicit 1. + __ or_(scratch2, 1 << HeapNumber::kExponentShift); + // Shift up the mantissa bits to take up the space the exponent used to + // take. We just orred in the implicit bit so that took care of one and + // we want to use the full unsigned range so we subtract 1 bit from the + // shift distance. + const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; + __ shl(scratch2, big_shift_distance); + // Get the second half of the double. + __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset)); + // Shift down 21 bits to get the most significant 11 bits or the low + // mantissa word. + __ shr(ecx, 32 - big_shift_distance); + __ or_(ecx, Operand(scratch2)); + // We have the answer in ecx, but we may need to negate it. + __ test(scratch, Operand(scratch)); + __ j(positive, &done); + __ neg(ecx); + __ jmp(&done); + } + + __ bind(&normal_exponent); + // Exponent word in scratch, exponent part of exponent word in scratch2. + // Zero in ecx. + // We know the exponent is smaller than 30 (biased). If it is less than + // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie + // it rounds to zero. + const uint32_t zero_exponent = + (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; + __ sub(Operand(scratch2), Immediate(zero_exponent)); + // ecx already has a Smi zero. + __ j(less, &done); + + // We have a shifted exponent between 0 and 30 in scratch2. + __ shr(scratch2, HeapNumber::kExponentShift); + __ mov(ecx, Immediate(30)); + __ sub(ecx, Operand(scratch2)); + + __ bind(&right_exponent); + // Here ecx is the shift, scratch is the exponent word. + // Get the top bits of the mantissa. + __ and_(scratch, HeapNumber::kMantissaMask); + // Put back the implicit 1. + __ or_(scratch, 1 << HeapNumber::kExponentShift); + // Shift up the mantissa bits to take up the space the exponent used to + // take. We have kExponentShift + 1 significant bits int he low end of the + // word. Shift them to the top bits. + const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; + __ shl(scratch, shift_distance); + // Get the second half of the double. For some exponents we don't + // actually need this because the bits get shifted out again, but + // it's probably slower to test than just to do it. + __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); + // Shift down 22 bits to get the most significant 10 bits or the low + // mantissa word. 
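+    // With 1 sign bit and 11 exponent bits, kNonMantissaBitsInTopWord is 12,
+    // so shift_distance is 10 and the shift below is by 22 bits, matching the
+    // "most significant 10 bits" mentioned above.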
+ __ shr(scratch2, 32 - shift_distance); + __ or_(scratch2, Operand(scratch)); + // Move down according to the exponent. + __ shr_cl(scratch2); + // Now the unsigned answer is in scratch2. We need to move it to ecx and + // we may need to fix the sign. + Label negative; + __ xor_(ecx, Operand(ecx)); + __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); + __ j(greater, &negative); + __ mov(ecx, scratch2); + __ jmp(&done); + __ bind(&negative); + __ sub(ecx, Operand(scratch2)); + __ bind(&done); + } +} + + +// Input: edx, eax are the left and right objects of a bit op. +// Output: eax, ecx are left and right integers for a bit op. +void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, + TypeInfo type_info, + bool use_sse3, + Label* conversion_failure) { + // Check float operands. + Label arg1_is_object, check_undefined_arg1; + Label arg2_is_object, check_undefined_arg2; + Label load_arg2, done; + + if (!type_info.IsDouble()) { + if (!type_info.IsSmi()) { + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &arg1_is_object); + } else { + if (FLAG_debug_code) __ AbortIfNotSmi(edx); + } + __ SmiUntag(edx); + __ jmp(&load_arg2); + } + + __ bind(&arg1_is_object); + + // Get the untagged integer version of the edx heap number in ecx. + IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure); + __ mov(edx, ecx); + + // Here edx has the untagged integer, eax has a Smi or a heap number. + __ bind(&load_arg2); + if (!type_info.IsDouble()) { + // Test if arg2 is a Smi. + if (!type_info.IsSmi()) { + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &arg2_is_object); + } else { + if (FLAG_debug_code) __ AbortIfNotSmi(eax); + } + __ SmiUntag(eax); + __ mov(ecx, eax); + __ jmp(&done); + } + + __ bind(&arg2_is_object); + + // Get the untagged integer version of the eax heap number in ecx. + IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure); + __ bind(&done); + __ mov(eax, edx); +} + + +// Input: edx, eax are the left and right objects of a bit op. +// Output: eax, ecx are left and right integers for a bit op. +void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, + bool use_sse3, + Label* conversion_failure) { + // Check float operands. + Label arg1_is_object, check_undefined_arg1; + Label arg2_is_object, check_undefined_arg2; + Label load_arg2, done; + + // Test if arg1 is a Smi. + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &arg1_is_object); + + __ SmiUntag(edx); + __ jmp(&load_arg2); + + // If the argument is undefined it converts to zero (ECMA-262, section 9.5). + __ bind(&check_undefined_arg1); + __ cmp(edx, Factory::undefined_value()); + __ j(not_equal, conversion_failure); + __ mov(edx, Immediate(0)); + __ jmp(&load_arg2); + + __ bind(&arg1_is_object); + __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); + __ cmp(ebx, Factory::heap_number_map()); + __ j(not_equal, &check_undefined_arg1); + + // Get the untagged integer version of the edx heap number in ecx. + IntegerConvert(masm, + edx, + TypeInfo::Unknown(), + use_sse3, + conversion_failure); + __ mov(edx, ecx); + + // Here edx has the untagged integer, eax has a Smi or a heap number. + __ bind(&load_arg2); + + // Test if arg2 is a Smi. + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &arg2_is_object); + + __ SmiUntag(eax); + __ mov(ecx, eax); + __ jmp(&done); + + // If the argument is undefined it converts to zero (ECMA-262, section 9.5). 
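+  // For example, (undefined | 0) evaluates to 0, so the check below simply
+  // loads an immediate zero for an undefined argument.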
+ __ bind(&check_undefined_arg2); + __ cmp(eax, Factory::undefined_value()); + __ j(not_equal, conversion_failure); + __ mov(ecx, Immediate(0)); + __ jmp(&done); + + __ bind(&arg2_is_object); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(ebx, Factory::heap_number_map()); + __ j(not_equal, &check_undefined_arg2); + + // Get the untagged integer version of the eax heap number in ecx. + IntegerConvert(masm, + eax, + TypeInfo::Unknown(), + use_sse3, + conversion_failure); + __ bind(&done); + __ mov(eax, edx); +} + + +void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, + TypeInfo type_info, + bool use_sse3, + Label* conversion_failure) { + if (type_info.IsNumber()) { + LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure); + } else { + LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure); + } +} + + +void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, + Register number) { + Label load_smi, done; + + __ test(number, Immediate(kSmiTagMask)); + __ j(zero, &load_smi, not_taken); + __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi); + __ SmiUntag(number); + __ push(number); + __ fild_s(Operand(esp, 0)); + __ pop(number); + + __ bind(&done); +} + + +void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { + Label load_smi_edx, load_eax, load_smi_eax, done; + // Load operand in edx into xmm0. + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. + __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); + + __ bind(&load_eax); + // Load operand in eax into xmm1. + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. + __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi_edx); + __ SmiUntag(edx); // Untag smi before converting to float. + __ cvtsi2sd(xmm0, Operand(edx)); + __ SmiTag(edx); // Retag smi for heap number overwriting test. + __ jmp(&load_eax); + + __ bind(&load_smi_eax); + __ SmiUntag(eax); // Untag smi before converting to float. + __ cvtsi2sd(xmm1, Operand(eax)); + __ SmiTag(eax); // Retag smi for heap number overwriting test. + + __ bind(&done); +} + + +void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, + Label* not_numbers) { + Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done; + // Load operand in edx into xmm0, or branch to not_numbers. + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. + __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map()); + __ j(not_equal, not_numbers); // Argument in edx is not a number. + __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); + __ bind(&load_eax); + // Load operand in eax into xmm1, or branch to not_numbers. + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map()); + __ j(equal, &load_float_eax); + __ jmp(not_numbers); // Argument in eax is not a number. + __ bind(&load_smi_edx); + __ SmiUntag(edx); // Untag smi before converting to float. + __ cvtsi2sd(xmm0, Operand(edx)); + __ SmiTag(edx); // Retag smi for heap number overwriting test. + __ jmp(&load_eax); + __ bind(&load_smi_eax); + __ SmiUntag(eax); // Untag smi before converting to float. 
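+  // A smi stores its value shifted left by one with a zero tag bit, so
+  // untagging is an arithmetic shift right that leaves the plain int32 value
+  // cvtsi2sd expects.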
+ __ cvtsi2sd(xmm1, Operand(eax)); + __ SmiTag(eax); // Retag smi for heap number overwriting test. + __ jmp(&done); + __ bind(&load_float_eax); + __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ bind(&done); +} + + +void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, + Register scratch) { + const Register left = edx; + const Register right = eax; + __ mov(scratch, left); + ASSERT(!scratch.is(right)); // We're about to clobber scratch. + __ SmiUntag(scratch); + __ cvtsi2sd(xmm0, Operand(scratch)); + + __ mov(scratch, right); + __ SmiUntag(scratch); + __ cvtsi2sd(xmm1, Operand(scratch)); +} + + +void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, + Register scratch, + ArgLocation arg_location) { + Label load_smi_1, load_smi_2, done_load_1, done; + if (arg_location == ARGS_IN_REGISTERS) { + __ mov(scratch, edx); + } else { + __ mov(scratch, Operand(esp, 2 * kPointerSize)); + } + __ test(scratch, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_1, not_taken); + __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); + __ bind(&done_load_1); + + if (arg_location == ARGS_IN_REGISTERS) { + __ mov(scratch, eax); + } else { + __ mov(scratch, Operand(esp, 1 * kPointerSize)); + } + __ test(scratch, Immediate(kSmiTagMask)); + __ j(zero, &load_smi_2, not_taken); + __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&load_smi_1); + __ SmiUntag(scratch); + __ push(scratch); + __ fild_s(Operand(esp, 0)); + __ pop(scratch); + __ jmp(&done_load_1); + + __ bind(&load_smi_2); + __ SmiUntag(scratch); + __ push(scratch); + __ fild_s(Operand(esp, 0)); + __ pop(scratch); + + __ bind(&done); +} + + +void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, + Register scratch) { + const Register left = edx; + const Register right = eax; + __ mov(scratch, left); + ASSERT(!scratch.is(right)); // We're about to clobber scratch. + __ SmiUntag(scratch); + __ push(scratch); + __ fild_s(Operand(esp, 0)); + + __ mov(scratch, right); + __ SmiUntag(scratch); + __ mov(Operand(esp, 0), scratch); + __ fild_s(Operand(esp, 0)); + __ pop(scratch); +} + + +void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, + Label* non_float, + Register scratch) { + Label test_other, done; + // Test if both operands are floats or smi -> scratch=k_is_float; + // Otherwise scratch = k_not_float. + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &test_other, not_taken); // argument in edx is OK + __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); + __ cmp(scratch, Factory::heap_number_map()); + __ j(not_equal, non_float); // argument in edx is not a number -> NaN + + __ bind(&test_other); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &done); // argument in eax is OK + __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(scratch, Factory::heap_number_map()); + __ j(not_equal, non_float); // argument in eax is not a number -> NaN + + // Fall-through: Both operands are numbers. + __ bind(&done); +} + + +void GenericUnaryOpStub::Generate(MacroAssembler* masm) { + Label slow, done; + + if (op_ == Token::SUB) { + // Check whether the value is a smi. + Label try_float; + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &try_float, not_taken); + + if (negative_zero_ == kStrictNegativeZero) { + // Go slow case if the value of the expression is zero + // to make sure that we switch between 0 and -0. 
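+      // For example, -(0) must yield the heap number -0, which a smi cannot
+      // represent, so a zero input is sent to the slow case.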
+ __ test(eax, Operand(eax)); + __ j(zero, &slow, not_taken); + } + + // The value of the expression is a smi that is not zero. Try + // optimistic subtraction '0 - value'. + Label undo; + __ mov(edx, Operand(eax)); + __ Set(eax, Immediate(0)); + __ sub(eax, Operand(edx)); + __ j(no_overflow, &done, taken); + + // Restore eax and go slow case. + __ bind(&undo); + __ mov(eax, Operand(edx)); + __ jmp(&slow); + + // Try floating point case. + __ bind(&try_float); + __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(edx, Factory::heap_number_map()); + __ j(not_equal, &slow); + if (overwrite_ == UNARY_OVERWRITE) { + __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); + __ xor_(edx, HeapNumber::kSignMask); // Flip sign. + __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx); + } else { + __ mov(edx, Operand(eax)); + // edx: operand + __ AllocateHeapNumber(eax, ebx, ecx, &undo); + // eax: allocated 'empty' number + __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); + __ xor_(ecx, HeapNumber::kSignMask); // Flip sign. + __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx); + __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset)); + __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx); + } + } else if (op_ == Token::BIT_NOT) { + // Check if the operand is a heap number. + __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ cmp(edx, Factory::heap_number_map()); + __ j(not_equal, &slow, not_taken); + + // Convert the heap number in eax to an untagged integer in ecx. + IntegerConvert(masm, + eax, + TypeInfo::Unknown(), + CpuFeatures::IsSupported(SSE3), + &slow); + + // Do the bitwise operation and check if the result fits in a smi. + Label try_float; + __ not_(ecx); + __ cmp(ecx, 0xc0000000); + __ j(sign, &try_float, not_taken); + + // Tag the result as a smi and we're done. + STATIC_ASSERT(kSmiTagSize == 1); + __ lea(eax, Operand(ecx, times_2, kSmiTag)); + __ jmp(&done); + + // Try to store the result in a heap number. + __ bind(&try_float); + if (overwrite_ == UNARY_NO_OVERWRITE) { + // Allocate a fresh heap number, but don't overwrite eax until + // we're sure we can do it without going through the slow case + // that needs the value in eax. + __ AllocateHeapNumber(ebx, edx, edi, &slow); + __ mov(eax, Operand(ebx)); + } + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + __ cvtsi2sd(xmm0, Operand(ecx)); + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + } else { + __ push(ecx); + __ fild_s(Operand(esp, 0)); + __ pop(ecx); + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); + } + } else { + UNIMPLEMENTED(); + } + + // Return from the stub. + __ bind(&done); + __ StubReturn(1); + + // Handle the slow case by jumping to the JavaScript builtin. + __ bind(&slow); + __ pop(ecx); // pop return address. + __ push(eax); + __ push(ecx); // push return address + switch (op_) { + case Token::SUB: + __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); + break; + case Token::BIT_NOT: + __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { + // The key is in edx and the parameter count is in eax. + + // The displacement is used for skipping the frame pointer on the + // stack. It is the offset of the last parameter (if any) relative + // to the frame pointer. + static const int kDisplacement = 1 * kPointerSize; + + // Check that the key is a smi. 
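+  // Smis have a zero tag bit, so testing against kSmiTagMask (0x1) sets the
+  // zero flag exactly for smi keys.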
+ Label slow; + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &slow, not_taken); + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor; + __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); + __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(equal, &adaptor); + + // Check index against formal parameters count limit passed in + // through register eax. Use unsigned comparison to get negative + // check for free. + __ cmp(edx, Operand(eax)); + __ j(above_equal, &slow, not_taken); + + // Read the argument from the stack and return it. + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. + __ lea(ebx, Operand(ebp, eax, times_2, 0)); + __ neg(edx); + __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); + __ ret(0); + + // Arguments adaptor case: Check index against actual arguments + // limit found in the arguments adaptor frame. Use unsigned + // comparison to get negative check for free. + __ bind(&adaptor); + __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ cmp(edx, Operand(ecx)); + __ j(above_equal, &slow, not_taken); + + // Read the argument from the stack and return it. + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. + __ lea(ebx, Operand(ebx, ecx, times_2, 0)); + __ neg(edx); + __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); + __ ret(0); + + // Slow-case: Handle non-smi or out-of-bounds access to arguments + // by calling the runtime system. + __ bind(&slow); + __ pop(ebx); // Return address. + __ push(edx); + __ push(ebx); + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); +} + + +void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { + // esp[0] : return address + // esp[4] : number of parameters + // esp[8] : receiver displacement + // esp[16] : function + + // The displacement is used for skipping the return address and the + // frame pointer on the stack. It is the offset of the last + // parameter (if any) relative to the frame pointer. + static const int kDisplacement = 2 * kPointerSize; + + // Check if the calling frame is an arguments adaptor frame. + Label adaptor_frame, try_allocate, runtime; + __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); + __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); + __ j(equal, &adaptor_frame); + + // Get the length from the frame. + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + __ jmp(&try_allocate); + + // Patch the arguments.length and the parameters pointer. + __ bind(&adaptor_frame); + __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); + __ mov(Operand(esp, 1 * kPointerSize), ecx); + __ lea(edx, Operand(edx, ecx, times_2, kDisplacement)); + __ mov(Operand(esp, 2 * kPointerSize), edx); + + // Try the new space allocation. Start out with computing the size of + // the arguments object and the elements array. + Label add_arguments_object; + __ bind(&try_allocate); + __ test(ecx, Operand(ecx)); + __ j(zero, &add_arguments_object); + __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize)); + __ bind(&add_arguments_object); + __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize)); + + // Do the allocation of both objects in one go. 
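+  // At this point ecx holds Heap::kArgumentsObjectSize plus, when there are
+  // arguments, FixedArray::kHeaderSize + argc * kPointerSize: the smi-tagged
+  // length scaled by times_2 above already equals argc * 4.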
+ __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); + + // Get the arguments boilerplate from the current (global) context. + int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); + __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); + __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset)); + __ mov(edi, Operand(edi, offset)); + + // Copy the JS object part. + for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { + __ mov(ebx, FieldOperand(edi, i)); + __ mov(FieldOperand(eax, i), ebx); + } + + // Setup the callee in-object property. + STATIC_ASSERT(Heap::arguments_callee_index == 0); + __ mov(ebx, Operand(esp, 3 * kPointerSize)); + __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx); + + // Get the length (smi tagged) and set that as an in-object property too. + STATIC_ASSERT(Heap::arguments_length_index == 1); + __ mov(ecx, Operand(esp, 1 * kPointerSize)); + __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx); + + // If there are no actual arguments, we're done. + Label done; + __ test(ecx, Operand(ecx)); + __ j(zero, &done); + + // Get the parameters pointer from the stack. + __ mov(edx, Operand(esp, 2 * kPointerSize)); + + // Setup the elements pointer in the allocated arguments object and + // initialize the header in the elements fixed array. + __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize)); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); + __ mov(FieldOperand(edi, FixedArray::kMapOffset), + Immediate(Factory::fixed_array_map())); + __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx); + // Untag the length for the loop below. + __ SmiUntag(ecx); + + // Copy the fixed array slots. + Label loop; + __ bind(&loop); + __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. + __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); + __ add(Operand(edi), Immediate(kPointerSize)); + __ sub(Operand(edx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &loop); + + // Return and remove the on-stack parameters. + __ bind(&done); + __ ret(3 * kPointerSize); + + // Do the runtime call to allocate the arguments object. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); +} + + +void RegExpExecStub::Generate(MacroAssembler* masm) { + // Just jump directly to runtime if native RegExp is not selected at compile + // time or if regexp entry in generated code is turned off runtime switch or + // at compilation. +#ifdef V8_INTERPRETED_REGEXP + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#else // V8_INTERPRETED_REGEXP + if (!FLAG_regexp_entry_native) { + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + return; + } + + // Stack frame on entry. + // esp[0]: return address + // esp[4]: last_match_info (expected JSArray) + // esp[8]: previous index + // esp[12]: subject string + // esp[16]: JSRegExp object + + static const int kLastMatchInfoOffset = 1 * kPointerSize; + static const int kPreviousIndexOffset = 2 * kPointerSize; + static const int kSubjectOffset = 3 * kPointerSize; + static const int kJSRegExpOffset = 4 * kPointerSize; + + Label runtime, invoke_regexp; + + // Ensure that a RegExp stack is allocated. 
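+  // A regexp stack size of zero means the stack has not been allocated yet;
+  // in that case the stub falls back to the runtime below.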
+ ExternalReference address_of_regexp_stack_memory_address = + ExternalReference::address_of_regexp_stack_memory_address(); + ExternalReference address_of_regexp_stack_memory_size = + ExternalReference::address_of_regexp_stack_memory_size(); + __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); + __ test(ebx, Operand(ebx)); + __ j(zero, &runtime, not_taken); + + // Check that the first argument is a JSRegExp object. + __ mov(eax, Operand(esp, kJSRegExpOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &runtime); + __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx); + __ j(not_equal, &runtime); + // Check that the RegExp has been compiled (data contains a fixed array). + __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); + if (FLAG_debug_code) { + __ test(ecx, Immediate(kSmiTagMask)); + __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected"); + __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx); + __ Check(equal, "Unexpected type for RegExp data, FixedArray expected"); + } + + // ecx: RegExp data (FixedArray) + // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. + __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset)); + __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); + __ j(not_equal, &runtime); + + // ecx: RegExp data (FixedArray) + // Check that the number of captures fit in the static offsets vector buffer. + __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); + // Calculate number of capture registers (number_of_captures + 1) * 2. This + // uses the asumption that smis are 2 * their untagged value. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(Operand(edx), Immediate(2)); // edx was a smi. + // Check that the static offsets vector buffer is large enough. + __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize); + __ j(above, &runtime); + + // ecx: RegExp data (FixedArray) + // edx: Number of capture registers + // Check that the second argument is a string. + __ mov(eax, Operand(esp, kSubjectOffset)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &runtime); + Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); + __ j(NegateCondition(is_string), &runtime); + // Get the length of the string to ebx. + __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); + + // ebx: Length of subject string as a smi + // ecx: RegExp data (FixedArray) + // edx: Number of capture registers + // Check that the third argument is a positive smi less than the subject + // string length. A negative value will be greater (unsigned comparison). + __ mov(eax, Operand(esp, kPreviousIndexOffset)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &runtime); + __ cmp(eax, Operand(ebx)); + __ j(above_equal, &runtime); + + // ecx: RegExp data (FixedArray) + // edx: Number of capture registers + // Check that the fourth object is a JSArray object. + __ mov(eax, Operand(esp, kLastMatchInfoOffset)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &runtime); + __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); + __ j(not_equal, &runtime); + // Check that the JSArray is in fast case. + __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); + __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(eax, Factory::fixed_array_map()); + __ j(not_equal, &runtime); + // Check that the last match info has space for the capture registers and the + // additional information. 
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ SmiUntag(eax); + __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead)); + __ cmp(edx, Operand(eax)); + __ j(greater, &runtime); + + // ecx: RegExp data (FixedArray) + // Check the representation and encoding of the subject string. + Label seq_ascii_string, seq_two_byte_string, check_code; + __ mov(eax, Operand(esp, kSubjectOffset)); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + // First check for flat two byte string. + __ and_(ebx, + kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); + STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); + __ j(zero, &seq_two_byte_string); + // Any other flat string must be a flat ascii string. + __ test(Operand(ebx), + Immediate(kIsNotStringMask | kStringRepresentationMask)); + __ j(zero, &seq_ascii_string); + + // Check for flat cons string. + // A flat cons string is a cons string where the second part is the empty + // string. In that case the subject string is just the first part of the cons + // string. Also in this case the first part of the cons string is known to be + // a sequential string or an external string. + STATIC_ASSERT(kExternalStringTag != 0); + STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); + __ test(Operand(ebx), + Immediate(kIsNotStringMask | kExternalStringTag)); + __ j(not_zero, &runtime); + // String is a cons string. + __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset)); + __ cmp(Operand(edx), Factory::empty_string()); + __ j(not_equal, &runtime); + __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset)); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + // String is a cons string with empty second part. + // eax: first part of cons string. + // ebx: map of first part of cons string. + // Is first part a flat two byte string? + __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), + kStringRepresentationMask | kStringEncodingMask); + STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); + __ j(zero, &seq_two_byte_string); + // Any other flat string must be ascii. + __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), + kStringRepresentationMask); + __ j(not_zero, &runtime); + + __ bind(&seq_ascii_string); + // eax: subject string (flat ascii) + // ecx: RegExp data (FixedArray) + __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset)); + __ Set(edi, Immediate(1)); // Type is ascii. + __ jmp(&check_code); + + __ bind(&seq_two_byte_string); + // eax: subject string (flat two byte) + // ecx: RegExp data (FixedArray) + __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset)); + __ Set(edi, Immediate(0)); // Type is two byte. + + __ bind(&check_code); + // Check that the irregexp code has been generated for the actual string + // encoding. If it has, the field contains a code object otherwise it contains + // the hole. + __ CmpObjectType(edx, CODE_TYPE, ebx); + __ j(not_equal, &runtime); + + // eax: subject string + // edx: code + // edi: encoding of subject string (1 if ascii, 0 if two_byte); + // Load used arguments before starting to push arguments for call to native + // RegExp code to avoid handling changing stack height. + __ mov(ebx, Operand(esp, kPreviousIndexOffset)); + __ SmiUntag(ebx); // Previous index from smi. + + // eax: subject string + // ebx: previous index + // edx: code + // edi: encoding of subject string (1 if ascii 0 if two_byte); + // All checks done. 
+  // Now push arguments for native regexp code.
+  __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+  static const int kRegExpExecuteArguments = 7;
+  __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
+
+  // Argument 7: Indicate that this is a direct call from JavaScript.
+  __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
+
+  // Argument 6: Start (high end) of backtracking stack memory area.
+  __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+  __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+  __ mov(Operand(esp, 5 * kPointerSize), ecx);
+
+  // Argument 5: static offsets vector buffer.
+  __ mov(Operand(esp, 4 * kPointerSize),
+         Immediate(ExternalReference::address_of_static_offsets_vector()));
+
+  // Argument 4: End of string data
+  // Argument 3: Start of string data
+  Label setup_two_byte, setup_rest;
+  __ test(edi, Operand(edi));
+  __ mov(edi, FieldOperand(eax, String::kLengthOffset));
+  __ j(zero, &setup_two_byte);
+  __ SmiUntag(edi);
+  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
+  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
+  __ jmp(&setup_rest);
+
+  __ bind(&setup_two_byte);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);  // edi is smi (powered by 2).
+  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
+  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
+  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
+
+  __ bind(&setup_rest);
+
+  // Argument 2: Previous index.
+  __ mov(Operand(esp, 1 * kPointerSize), ebx);
+
+  // Argument 1: Subject string.
+  __ mov(Operand(esp, 0 * kPointerSize), eax);
+
+  // Locate the code entry and call it.
+  __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ CallCFunction(edx, kRegExpExecuteArguments);
+
+  // Check the result.
+  Label success;
+  __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
+  __ j(equal, &success, taken);
+  Label failure;
+  __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
+  __ j(equal, &failure, taken);
+  __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
+  // If not exception it can only be retry. Handle that in the runtime system.
+  __ j(not_equal, &runtime);
+  // Result must now be exception. If there is no pending exception already a
+  // stack overflow (on the backtrack stack) was detected in RegExp code but
+  // the exception has not been created yet. Handle that in the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+  ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ mov(eax,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+  __ cmp(eax, Operand::StaticVariable(pending_exception));
+  __ j(equal, &runtime);
+  __ bind(&failure);
+  // For failure and exception return null.
+  __ mov(Operand(eax), Factory::null_value());
+  __ ret(4 * kPointerSize);
+
+  // Load RegExp data.
+  __ bind(&success);
+  __ mov(eax, Operand(esp, kJSRegExpOffset));
+  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2.
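+  // edx still holds the capture count as a smi, i.e. already doubled, so
+  // adding 2 below yields (number_of_captures + 1) * 2 untagged.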
+ STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(Operand(edx), Immediate(2)); // edx was a smi. + + // edx: Number of capture registers + // Load last_match_info which is still known to be a fast case JSArray. + __ mov(eax, Operand(esp, kLastMatchInfoOffset)); + __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); + + // ebx: last_match_info backing store (FixedArray) + // edx: number of capture registers + // Store the capture count. + __ SmiTag(edx); // Number of capture registers to smi. + __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx); + __ SmiUntag(edx); // Number of capture registers back from smi. + // Store last subject and last input. + __ mov(eax, Operand(esp, kSubjectOffset)); + __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); + __ mov(ecx, ebx); + __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi); + __ mov(eax, Operand(esp, kSubjectOffset)); + __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); + __ mov(ecx, ebx); + __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi); + + // Get the static offsets vector filled by the native regexp code. + ExternalReference address_of_static_offsets_vector = + ExternalReference::address_of_static_offsets_vector(); + __ mov(ecx, Immediate(address_of_static_offsets_vector)); + + // ebx: last_match_info backing store (FixedArray) + // ecx: offsets vector + // edx: number of capture registers + Label next_capture, done; + // Capture register counter starts from number of capture registers and + // counts down until wraping after zero. + __ bind(&next_capture); + __ sub(Operand(edx), Immediate(1)); + __ j(negative, &done); + // Read the value from the static offsets vector buffer. + __ mov(edi, Operand(ecx, edx, times_int_size, 0)); + __ SmiTag(edi); + // Store the smi value in the last match info. + __ mov(FieldOperand(ebx, + edx, + times_pointer_size, + RegExpImpl::kFirstCaptureOffset), + edi); + __ jmp(&next_capture); + __ bind(&done); + + // Return last match info. + __ mov(eax, Operand(esp, kLastMatchInfoOffset)); + __ ret(4 * kPointerSize); + + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); +#endif // V8_INTERPRETED_REGEXP +} + + +void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + bool object_is_smi, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch1; + Register scratch = scratch2; + + // Load the number string cache. + ExternalReference roots_address = ExternalReference::roots_address(); + __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex)); + __ mov(number_string_cache, + Operand::StaticArray(scratch, times_pointer_size, roots_address)); + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); + __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. + __ sub(Operand(mask), Immediate(1)); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. 
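+  // In other words, the cache index for a heap number is
+  // (high_word ^ low_word) & mask, where mask is half the cache length minus
+  // one, as computed above.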
+  Label smi_hash_calculated;
+  Label load_result_from_cache;
+  if (object_is_smi) {
+    __ mov(scratch, object);
+    __ SmiUntag(scratch);
+  } else {
+    Label not_smi, hash_calculated;
+    STATIC_ASSERT(kSmiTag == 0);
+    __ test(object, Immediate(kSmiTagMask));
+    __ j(not_zero, &not_smi);
+    __ mov(scratch, object);
+    __ SmiUntag(scratch);
+    __ jmp(&smi_hash_calculated);
+    __ bind(&not_smi);
+    __ cmp(FieldOperand(object, HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    __ j(not_equal, not_found);
+    STATIC_ASSERT(8 == kDoubleSize);
+    __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+    __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+    // Object is heap number and hash is now in scratch. Calculate cache index.
+    __ and_(scratch, Operand(mask));
+    Register index = scratch;
+    Register probe = mask;
+    __ mov(probe,
+           FieldOperand(number_string_cache,
+                        index,
+                        times_twice_pointer_size,
+                        FixedArray::kHeaderSize));
+    __ test(probe, Immediate(kSmiTagMask));
+    __ j(zero, not_found);
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope fscope(SSE2);
+      __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+      __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+      __ ucomisd(xmm0, xmm1);
+    } else {
+      __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
+      __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
+      __ FCmp();
+    }
+    __ j(parity_even, not_found);  // Bail out if NaN is involved.
+    __ j(not_equal, not_found);  // The cache did not contain this value.
+    __ jmp(&load_result_from_cache);
+  }
+
+  __ bind(&smi_hash_calculated);
+  // Object is smi and hash is now in scratch. Calculate cache index.
+  __ and_(scratch, Operand(mask));
+  Register index = scratch;
+  // Check if the entry is the smi we are looking for.
+  __ cmp(object,
+         FieldOperand(number_string_cache,
+                      index,
+                      times_twice_pointer_size,
+                      FixedArray::kHeaderSize));
+  __ j(not_equal, not_found);
+
+  // Get the result from the cache.
+  __ bind(&load_result_from_cache);
+  __ mov(result,
+         FieldOperand(number_string_cache,
+                      index,
+                      times_twice_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize));
+  __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  __ mov(ebx, Operand(esp, kPointerSize));
+
+  // Generate code to lookup number in the number string cache.
+  GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+  __ ret(1 * kPointerSize);
+
+  __ bind(&runtime);
+  // Handle number to string in the runtime system if not found in the cache.
+  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
+}
+
+
+static int NegativeComparisonResult(Condition cc) {
+  ASSERT(cc != equal);
+  ASSERT((cc == less) || (cc == less_equal)
+      || (cc == greater) || (cc == greater_equal));
+  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
+void CompareStub::Generate(MacroAssembler* masm) {
+  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+
+  Label check_unequal_objects, done;
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  // Identical objects can be compared fast, but there are some tricky cases
+  // for NaN and undefined.
+  {
+    Label not_identical;
+    __ cmp(eax, Operand(edx));
+    __ j(not_equal, &not_identical);
+
+    if (cc_ != equal) {
+      // Check for undefined. undefined OP undefined is false even though
+      // undefined == undefined.
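+      // For example, (undefined <= undefined) is false in JavaScript, so
+      // relational comparisons of identical undefined values must produce the
+      // same result as an unordered (NaN) comparison.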
+      Label check_for_nan;
+      __ cmp(edx, Factory::undefined_value());
+      __ j(not_equal, &check_for_nan);
+      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+      __ ret(0);
+      __ bind(&check_for_nan);
+    }
+
+    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+    // so we do the second best thing - test it ourselves.
+    // Note: if cc_ != equal, never_nan_nan_ is not used.
+    if (never_nan_nan_ && (cc_ == equal)) {
+      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+      __ ret(0);
+    } else {
+      Label heap_number;
+      __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+             Immediate(Factory::heap_number_map()));
+      __ j(equal, &heap_number);
+      if (cc_ != equal) {
+        // Call runtime on identical JSObjects. Otherwise return equal.
+        __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+        __ j(above_equal, &not_identical);
+      }
+      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+      __ ret(0);
+
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if
+      // it's not NaN.
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
+      // We only accept QNaNs, which have bit 51 set.
+      // Read top bits of double representation (second word of value).
+
+      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+      // all bits in the mask are set. We only need to check the word
+      // that contains the exponent and high bit of the mantissa.
+      STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+      __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+      __ xor_(eax, Operand(eax));
+      // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+      // bits.
+      __ add(edx, Operand(edx));
+      __ cmp(edx, kQuietNaNHighBitsMask << 1);
+      if (cc_ == equal) {
+        STATIC_ASSERT(EQUAL != 1);
+        __ setcc(above_equal, eax);
+        __ ret(0);
+      } else {
+        Label nan;
+        __ j(above_equal, &nan);
+        __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+        __ ret(0);
+        __ bind(&nan);
+        __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+        __ ret(0);
+      }
+    }
+
+    __ bind(&not_identical);
+  }
+
+  // Strict equality can quickly decide whether objects are equal.
+  // Non-strict object equality is slower, so it is handled later in the stub.
+  if (cc_ == equal && strict_) {
+    Label slow;  // Fallthrough label.
+    Label not_smis;
+    // If we're doing a strict equality comparison, we don't have to do
+    // type conversion, so we generate code to do fast comparison for objects
+    // and oddballs. Non-smi numbers and strings still go through the usual
+    // slow-case code.
+    // If either is a Smi (we know that not both are), then they can only
+    // be equal if the other is a HeapNumber. If so, use the slow case.
+    STATIC_ASSERT(kSmiTag == 0);
+    ASSERT_EQ(0, Smi::FromInt(0));
+    __ mov(ecx, Immediate(kSmiTagMask));
+    __ and_(ecx, Operand(eax));
+    __ test(ecx, Operand(edx));
+    __ j(not_zero, &not_smis);
+    // One operand is a smi.
+
+    // Check whether the non-smi is a heap number.
+    STATIC_ASSERT(kSmiTagMask == 1);
+    // ecx still holds eax & kSmiTag, which is either zero or one.
+    __ sub(Operand(ecx), Immediate(0x01));
+    __ mov(ebx, edx);
+    __ xor_(ebx, Operand(eax));
+    __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
+    __ xor_(ebx, Operand(eax));
+    // if eax was smi, ebx is now edx, else eax.
+
+    // Check if the non-smi operand is a heap number.
+    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+           Immediate(Factory::heap_number_map()));
+    // If heap number, handle it in the slow case.
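+    // Otherwise a smi can never be equal to a non-number heap object, and the
+    // non-zero pointer left in ebx by the xor/and/xor sequence above is
+    // returned below as the not-equal result.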
+    __ j(equal, &slow);
+    // Return non-equal (ebx is not zero)
+    __ mov(eax, ebx);
+    __ ret(0);
+
+    __ bind(&not_smis);
+    // If either operand is a JSObject or an oddball value, then they are not
+    // equal since their pointers are different
+    // There is no test for undetectability in strict equality.
+
+    // Get the type of the first operand.
+    // If the first object is a JS object, we have done pointer comparison.
+    Label first_non_object;
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+    __ j(below, &first_non_object);
+
+    // Return non-zero (eax is not zero)
+    Label return_not_equal;
+    STATIC_ASSERT(kHeapObjectTag != 0);
+    __ bind(&return_not_equal);
+    __ ret(0);
+
+    __ bind(&first_non_object);
+    // Check for oddballs: true, false, null, undefined.
+    __ CmpInstanceType(ecx, ODDBALL_TYPE);
+    __ j(equal, &return_not_equal);
+
+    __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+    __ j(above_equal, &return_not_equal);
+
+    // Check for oddballs: true, false, null, undefined.
+    __ CmpInstanceType(ecx, ODDBALL_TYPE);
+    __ j(equal, &return_not_equal);
+
+    // Fall through to the general case.
+    __ bind(&slow);
+  }
+
+  // Generate the number comparison code.
+  if (include_number_compare_) {
+    Label non_number_comparison;
+    Label unordered;
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatures::Scope use_sse2(SSE2);
+      CpuFeatures::Scope use_cmov(CMOV);
+
+      FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+      __ ucomisd(xmm0, xmm1);
+
+      // Don't base result on EFLAGS when a NaN is involved.
+      __ j(parity_even, &unordered, not_taken);
+      // Return a result of -1, 0, or 1, based on EFLAGS.
+      __ mov(eax, 0);  // equal
+      __ mov(ecx, Immediate(Smi::FromInt(1)));
+      __ cmov(above, eax, Operand(ecx));
+      __ mov(ecx, Immediate(Smi::FromInt(-1)));
+      __ cmov(below, eax, Operand(ecx));
+      __ ret(0);
+    } else {
+      FloatingPointHelper::CheckFloatOperands(
+          masm, &non_number_comparison, ebx);
+      FloatingPointHelper::LoadFloatOperand(masm, eax);
+      FloatingPointHelper::LoadFloatOperand(masm, edx);
+      __ FCmp();
+
+      // Don't base result on EFLAGS when a NaN is involved.
+      __ j(parity_even, &unordered, not_taken);
+
+      Label below_label, above_label;
+      // Return a result of -1, 0, or 1, based on EFLAGS.
+      __ j(below, &below_label, not_taken);
+      __ j(above, &above_label, not_taken);
+
+      __ xor_(eax, Operand(eax));
+      __ ret(0);
+
+      __ bind(&below_label);
+      __ mov(eax, Immediate(Smi::FromInt(-1)));
+      __ ret(0);
+
+      __ bind(&above_label);
+      __ mov(eax, Immediate(Smi::FromInt(1)));
+      __ ret(0);
+    }
+
+    // If one of the numbers was NaN, then the result is always false.
+    // The cc is never not-equal.
+    __ bind(&unordered);
+    ASSERT(cc_ != not_equal);
+    if (cc_ == less || cc_ == less_equal) {
+      __ mov(eax, Immediate(Smi::FromInt(1)));
+    } else {
+      __ mov(eax, Immediate(Smi::FromInt(-1)));
+    }
+    __ ret(0);
+
+    // The number comparison code did not provide a valid result.
+    __ bind(&non_number_comparison);
+  }
+
+  // Fast negative check for symbol-to-symbol equality.
+  Label check_for_strings;
+  if (cc_ == equal) {
+    BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
+    BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
+
+    // We've already checked for object identity, so if both operands
+    // are symbols they aren't equal. Register eax already holds a
+    // non-zero value, which indicates not equal, so just return.
+    __ ret(0);
+  }
+
+  __ bind(&check_for_strings);
+
+  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+                                         &check_unequal_objects);
+
+  // Inline comparison of ascii strings.
+  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                     edx,
+                                                     eax,
+                                                     ecx,
+                                                     ebx,
+                                                     edi);
+#ifdef DEBUG
+  __ Abort("Unexpected fall-through from string comparison");
+#endif
+
+  __ bind(&check_unequal_objects);
+  if (cc_ == equal && !strict_) {
+    // Non-strict equality. Objects are unequal if
+    // they are both JSObjects and not undetectable,
+    // and their pointers are different.
+    Label not_both_objects;
+    Label return_unequal;
+    // At most one is a smi, so we can test for smi by adding the two.
+    // A smi plus a heap object has the low bit set, a heap object plus
+    // a heap object has the low bit clear.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagMask == 1);
+    __ lea(ecx, Operand(eax, edx, times_1, 0));
+    __ test(ecx, Immediate(kSmiTagMask));
+    __ j(not_zero, &not_both_objects);
+    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+    __ j(below, &not_both_objects);
+    __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+    __ j(below, &not_both_objects);
+    // We do not bail out after this point. Both are JSObjects, and
+    // they are equal if and only if both are undetectable.
+    // The and of the undetectable flags is 1 if and only if they are equal.
+    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(zero, &return_unequal);
+    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(zero, &return_unequal);
+    // The objects are both undetectable, so they both compare as the value
+    // undefined, and are equal.
+    __ Set(eax, Immediate(EQUAL));
+    __ bind(&return_unequal);
+    // Return non-equal by returning the non-zero object pointer in eax,
+    // or return equal if we fell through to here.
+    __ ret(0);  // rax, rdx were pushed
+    __ bind(&not_both_objects);
+  }
+
+  // Push arguments below the return address.
+  __ pop(ecx);
+  __ push(edx);
+  __ push(eax);
+
+  // Figure out which native to call and setup the arguments.
+  Builtins::JavaScript builtin;
+  if (cc_ == equal) {
+    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    builtin = Builtins::COMPARE;
+    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+  }
+
+  // Restore return address on the stack.
+  __ push(ecx);
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+                                    Label* label,
+                                    Register object,
+                                    Register scratch) {
+  __ test(object, Immediate(kSmiTagMask));
+  __ j(zero, label);
+  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+  __ cmp(scratch, kSymbolTag | kStringTag);
+  __ j(not_equal, label);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+  // Because builtins always remove the receiver from the stack, we
+  // have to fake one to avoid underflowing the stack. The receiver
+  // must be inserted below the return address on the stack so we
+  // temporarily store that in a register.
+  __ pop(eax);
+  __ push(Immediate(Smi::FromInt(0)));
+  __ push(eax);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+  Label slow;
+
+  // If the receiver might be a value (string, number or boolean) check for this
+  // and box it if it is.
+  if (ReceiverMightBeValue()) {
+    // Get the receiver from the stack.
+    // +1 ~ return address
+    Label receiver_is_value, receiver_is_js_object;
+    __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
+
+    // Check if receiver is a smi (which is a number value).
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(zero, &receiver_is_value, not_taken);
+
+    // Check if the receiver is a valid JS object.
+    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
+    __ j(above_equal, &receiver_is_js_object);
+
+    // Call the runtime to box the value.
+    __ bind(&receiver_is_value);
+    __ EnterInternalFrame();
+    __ push(eax);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ LeaveInternalFrame();
+    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
+
+    __ bind(&receiver_is_js_object);
+  }
+
+  // Get the function to call from the stack.
+  // +2 ~ receiver, return address
+  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
+
+  // Check that the function really is a JavaScript function.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+  // Go to the slow case if we do not have a function.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &slow, not_taken);
+
+  // Fast-case: Just invoke the function.
+  ParameterCount actual(argc_);
+  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+  // Slow-case: Non-function called.
+  __ bind(&slow);
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
+  __ Set(eax, Immediate(argc_));
+  __ Set(ebx, Immediate(0));
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+  Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+  __ jmp(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  // eax holds the exception.
+
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop the sp to the top of the handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ mov(esp, Operand::StaticVariable(handler_address));
+
+  // Restore next handler and frame pointer, discard handler state.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(Operand::StaticVariable(handler_address));
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+  __ pop(ebp);
+  __ pop(edx);  // Remove state.
+
+  // Before returning we restore the context from the frame pointer if
+  // not NULL. The frame pointer is NULL in the exception handler of
+  // a JS entry frame.
+  __ xor_(esi, Operand(esi));  // Tentatively set context pointer to NULL.
+  Label skip;
+  __ cmp(ebp, 0);
+  __ j(equal, &skip, not_taken);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ bind(&skip);
+
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  __ ret(0);
+}
+
+
+// If true, a Handle passed by value is passed and returned by
+// using the location_ field directly. If false, it is passed and
+// returned as a pointer to a handle.
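+// When handles are not passed directly, the stub below reserves an extra
+// stack slot for the returned handle and dereferences it after the call.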
+#ifdef USING_BSD_ABI +static const bool kPassHandlesDirectly = true; +#else +static const bool kPassHandlesDirectly = false; +#endif + + +void ApiGetterEntryStub::Generate(MacroAssembler* masm) { + Label empty_handle; + Label prologue; + Label promote_scheduled_exception; + __ EnterApiExitFrame(kStackSpace, kArgc); + STATIC_ASSERT(kArgc == 4); + if (kPassHandlesDirectly) { + // When handles as passed directly we don't have to allocate extra + // space for and pass an out parameter. + __ mov(Operand(esp, 0 * kPointerSize), ebx); // name. + __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer. + } else { + // The function expects three arguments to be passed but we allocate + // four to get space for the output cell. The argument slots are filled + // as follows: + // + // 3: output cell + // 2: arguments pointer + // 1: name + // 0: pointer to the output cell + // + // Note that this is one more "argument" than the function expects + // so the out cell will have to be popped explicitly after returning + // from the function. + __ mov(Operand(esp, 1 * kPointerSize), ebx); // name. + __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer. + __ mov(ebx, esp); + __ add(Operand(ebx), Immediate(3 * kPointerSize)); + __ mov(Operand(esp, 0 * kPointerSize), ebx); // output + __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell. + } + // Call the api function! + __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY); + // Check if the function scheduled an exception. + ExternalReference scheduled_exception_address = + ExternalReference::scheduled_exception_address(); + __ cmp(Operand::StaticVariable(scheduled_exception_address), + Immediate(Factory::the_hole_value())); + __ j(not_equal, &promote_scheduled_exception, not_taken); + if (!kPassHandlesDirectly) { + // The returned value is a pointer to the handle holding the result. + // Dereference this to get to the location. + __ mov(eax, Operand(eax, 0)); + } + // Check if the result handle holds 0. + __ test(eax, Operand(eax)); + __ j(zero, &empty_handle, not_taken); + // It was non-zero. Dereference to get the result value. + __ mov(eax, Operand(eax, 0)); + __ bind(&prologue); + __ LeaveExitFrame(); + __ ret(0); + __ bind(&promote_scheduled_exception); + __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1); + __ bind(&empty_handle); + // It was zero; the result is undefined. + __ mov(eax, Factory::undefined_value()); + __ jmp(&prologue); +} + + +void CEntryStub::GenerateCore(MacroAssembler* masm, + Label* throw_normal_exception, + Label* throw_termination_exception, + Label* throw_out_of_memory_exception, + bool do_gc, + bool always_allocate_scope, + int /* alignment_skew */) { + // eax: result parameter for PerformGC, if any + // ebx: pointer to C function (C callee-saved) + // ebp: frame pointer (restored after C call) + // esp: stack pointer (restored after C call) + // edi: number of arguments including receiver (C callee-saved) + // esi: pointer to the first argument (C callee-saved) + + // Result returned in eax, or eax+edx if result_size_ is 2. + + // Check stack alignment. + if (FLAG_debug_code) { + __ CheckStackAlignment(); + } + + if (do_gc) { + // Pass failure code returned from last attempt as first argument to + // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the + // stack alignment is known to be correct. This function takes one argument + // which is passed on the stack, and we know that the stack has been + // prepared to pass at least one argument. 
+ __ mov(Operand(esp, 0 * kPointerSize), eax); // Result. + __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY); + } + + ExternalReference scope_depth = + ExternalReference::heap_always_allocate_scope_depth(); + if (always_allocate_scope) { + __ inc(Operand::StaticVariable(scope_depth)); + } + + // Call C function. + __ mov(Operand(esp, 0 * kPointerSize), edi); // argc. + __ mov(Operand(esp, 1 * kPointerSize), esi); // argv. + __ call(Operand(ebx)); + // Result is in eax or edx:eax - do not destroy these registers! + + if (always_allocate_scope) { + __ dec(Operand::StaticVariable(scope_depth)); + } + + // Make sure we're not trying to return 'the hole' from the runtime + // call as this may lead to crashes in the IC code later. + if (FLAG_debug_code) { + Label okay; + __ cmp(eax, Factory::the_hole_value()); + __ j(not_equal, &okay); + __ int3(); + __ bind(&okay); + } + + // Check for failure result. + Label failure_returned; + STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); + __ lea(ecx, Operand(eax, 1)); + // Lower 2 bits of ecx are 0 iff eax has failure tag. + __ test(ecx, Immediate(kFailureTagMask)); + __ j(zero, &failure_returned, not_taken); + + // Exit the JavaScript to C++ exit frame. + __ LeaveExitFrame(); + __ ret(0); + + // Handling of failure. + __ bind(&failure_returned); + + Label retry; + // If the returned exception is RETRY_AFTER_GC continue at retry label + STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); + __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); + __ j(zero, &retry, taken); + + // Special handling of out of memory exceptions. + __ cmp(eax, reinterpret_cast(Failure::OutOfMemoryException())); + __ j(equal, throw_out_of_memory_exception); + + // Retrieve the pending exception and clear the variable. + ExternalReference pending_exception_address(Top::k_pending_exception_address); + __ mov(eax, Operand::StaticVariable(pending_exception_address)); + __ mov(edx, + Operand::StaticVariable(ExternalReference::the_hole_value_location())); + __ mov(Operand::StaticVariable(pending_exception_address), edx); + + // Special handling of termination exceptions which are uncatchable + // by javascript code. + __ cmp(eax, Factory::termination_exception()); + __ j(equal, throw_termination_exception); + + // Handle normal exception. + __ jmp(throw_normal_exception); + + // Retry. + __ bind(&retry); +} + + +void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, + UncatchableExceptionType type) { + // Adjust this code if not the case. + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + + // Drop sp to the top stack handler. + ExternalReference handler_address(Top::k_handler_address); + __ mov(esp, Operand::StaticVariable(handler_address)); + + // Unwind the handlers until the ENTRY handler is found. + Label loop, done; + __ bind(&loop); + // Load the type of the current stack handler. + const int kStateOffset = StackHandlerConstants::kStateOffset; + __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY)); + __ j(equal, &done); + // Fetch the next handler in the list. + const int kNextOffset = StackHandlerConstants::kNextOffset; + __ mov(esp, Operand(esp, kNextOffset)); + __ jmp(&loop); + __ bind(&done); + + // Set the top handler address to next handler past the current ENTRY handler. + STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); + __ pop(Operand::StaticVariable(handler_address)); + + if (type == OUT_OF_MEMORY) { + // Set external caught exception to false. 
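
A note on the failure check in GenerateCore above: the STATIC_ASSERT guarantees that adding one to a failure-tagged word clears the tag bits, which is exactly what the lea/test pair relies on. A minimal C++ sketch of that test follows; the concrete tag values are assumptions restated only for illustration (the real constants live in objects.h and are not part of this patch).

#include <cstdint>

// Assumed values consistent with ((kFailureTag + 1) & kFailureTagMask) == 0.
const uint32_t kAssumedFailureTag = 3;      // low two bits are 0b11
const uint32_t kAssumedFailureTagMask = 3;  // (1 << kFailureTagSize) - 1

inline bool IsFailureTagged(uint32_t word) {
  // Adding one carries out of the tag bits exactly when both are set,
  // mirroring the `lea ecx, [eax + 1]` / `test ecx, mask` sequence.
  return ((word + 1) & kAssumedFailureTagMask) == 0;
}
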
+ ExternalReference external_caught(Top::k_external_caught_exception_address); + __ mov(eax, false); + __ mov(Operand::StaticVariable(external_caught), eax); + + // Set pending exception and eax to out of memory exception. + ExternalReference pending_exception(Top::k_pending_exception_address); + __ mov(eax, reinterpret_cast(Failure::OutOfMemoryException())); + __ mov(Operand::StaticVariable(pending_exception), eax); + } + + // Clear the context pointer. + __ xor_(esi, Operand(esi)); + + // Restore fp from handler and discard handler state. + STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); + __ pop(ebp); + __ pop(edx); // State. + + STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); + __ ret(0); +} + + +void CEntryStub::Generate(MacroAssembler* masm) { + // eax: number of arguments including receiver + // ebx: pointer to C function (C callee-saved) + // ebp: frame pointer (restored after C call) + // esp: stack pointer (restored after C call) + // esi: current context (C callee-saved) + // edi: JS function of the caller (C callee-saved) + + // NOTE: Invocations of builtins may return failure objects instead + // of a proper result. The builtin entry handles this by performing + // a garbage collection and retrying the builtin (twice). + + // Enter the exit frame that transitions from JavaScript to C++. + __ EnterExitFrame(); + + // eax: result parameter for PerformGC, if any (setup below) + // ebx: pointer to builtin function (C callee-saved) + // ebp: frame pointer (restored after C call) + // esp: stack pointer (restored after C call) + // edi: number of arguments including receiver (C callee-saved) + // esi: argv pointer (C callee-saved) + + Label throw_normal_exception; + Label throw_termination_exception; + Label throw_out_of_memory_exception; + + // Call into the runtime system. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + false, + false); + + // Do space-specific GC and retry runtime call. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + false); + + // Do full GC and retry runtime call one final time. + Failure* failure = Failure::InternalError(); + __ mov(eax, Immediate(reinterpret_cast(failure))); + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + true); + + __ bind(&throw_out_of_memory_exception); + GenerateThrowUncatchable(masm, OUT_OF_MEMORY); + + __ bind(&throw_termination_exception); + GenerateThrowUncatchable(masm, TERMINATION); + + __ bind(&throw_normal_exception); + GenerateThrowTOS(masm); +} + + +void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { + Label invoke, exit; +#ifdef ENABLE_LOGGING_AND_PROFILING + Label not_outermost_js, not_outermost_js_2; +#endif + + // Setup frame. + __ push(ebp); + __ mov(ebp, Operand(esp)); + + // Push marker in two places. + int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; + __ push(Immediate(Smi::FromInt(marker))); // context slot + __ push(Immediate(Smi::FromInt(marker))); // function slot + // Save callee-saved registers (C calling conventions). + __ push(edi); + __ push(esi); + __ push(ebx); + + // Save copies of the top frame descriptor on the stack. 
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ push(Operand::StaticVariable(c_entry_fp));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ j(not_equal, &not_outermost_js);
+ __ mov(Operand::StaticVariable(js_entry_sp), ebp);
+ __ bind(&not_outermost_js);
+#endif
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(Operand::StaticVariable(pending_exception), eax);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+
+ // Clear any pending exceptions.
+ __ mov(edx,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ mov(Operand::StaticVariable(pending_exception), edx);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline
+ // builtin and pop the faked function when we return. Notice that we
+ // cannot store a reference to the trampoline code directly in this
+ // stub, because the builtin stubs may not have been generated yet.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ mov(edx, Immediate(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ mov(edx, Immediate(entry));
+ }
+ __ mov(edx, Operand(edx, 0)); // deref address
+ __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+ __ call(Operand(edx));
+
+ // Unlink this frame from the handler chain.
+ __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+ // Pop next_sp.
+ __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If current EBP value is the same as js_entry_sp value, it means that
+ // the current function is the outermost.
+ __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
+ __ j(not_equal, &not_outermost_js_2);
+ __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
+
+ // Restore callee-saved registers (C calling conventions).
+ __ pop(ebx);
+ __ pop(esi);
+ __ pop(edi);
+ __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(ebp);
+ __ ret(0);
+}
+
+
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+
+ // Check that the left hand is a JS object.
+ __ IsObjectJSObjectType(eax, eax, edx, &slow);
+
+ // Get the prototype of the function.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
+ // edx is function, eax is map.
+
+ // Look up the function and the map in the instanceof cache.
+ Label miss;
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ j(not_equal, &miss);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+ __ ret(2 * kPointerSize);
+
+ __ bind(&miss);
+ __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
+
+ // Register mapping:
+ // eax is object map.
+ // edx is function.
+ // ebx is function prototype.
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
+
+ __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ bind(&loop);
+ __ cmp(ecx, Operand(ebx));
+ __ j(equal, &is_instance);
+ __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ __ j(equal, &is_not_instance);
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ Set(eax, Immediate(0));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+ __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the three parameters in a unique 16 bit value. To avoid duplicate
+ // stubs the never NaN NaN condition is only taken into account if the
+ // condition is equals.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
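
CompareStub::MinorKey() above packs the stub parameters into one small integer by OR-ing together bit-field encodings. The sketch below shows the same packing idea with made-up bit positions; the real ConditionField, RegisterField, StrictField, NeverNanNanField and IncludeNumberCompareField helpers are declared elsewhere and their exact layout is not part of this patch.

#include <cassert>

// Illustrative layout only; the field offsets are assumptions.
inline int EncodeCompareMinorKeySketch(unsigned condition, bool strict,
                                       bool never_nan_nan,
                                       bool include_number_compare) {
  assert(condition < (1u << 12));
  return static_cast<int>(condition)              // condition in bits 0..11
      | (0 << 12)                                 // register flag, false here
      | ((strict ? 1 : 0) << 13)
      | ((never_nan_nan ? 1 : 0) << 14)
      | ((include_number_compare ? 1 : 0) << 15);
}
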
+const char* CompareStub::GetName() { + ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); + + if (name_ != NULL) return name_; + const int kMaxNameLength = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); + if (name_ == NULL) return "OOM"; + + const char* cc_name; + switch (cc_) { + case less: cc_name = "LT"; break; + case greater: cc_name = "GT"; break; + case less_equal: cc_name = "LE"; break; + case greater_equal: cc_name = "GE"; break; + case equal: cc_name = "EQ"; break; + case not_equal: cc_name = "NE"; break; + default: cc_name = "UnknownCondition"; break; + } + + const char* strict_name = ""; + if (strict_ && (cc_ == equal || cc_ == not_equal)) { + strict_name = "_STRICT"; + } + + const char* never_nan_nan_name = ""; + if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { + never_nan_nan_name = "_NO_NAN"; + } + + const char* include_number_compare_name = ""; + if (!include_number_compare_) { + include_number_compare_name = "_NO_NUMBER"; + } + + OS::SNPrintF(Vector(name_, kMaxNameLength), + "CompareStub_%s%s%s%s", + cc_name, + strict_name, + never_nan_nan_name, + include_number_compare_name); + return name_; +} + + +// ------------------------------------------------------------------------- +// StringCharCodeAtGenerator + +void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { + Label flat_string; + Label ascii_string; + Label got_char_code; + + // If the receiver is a smi trigger the non-string case. + STATIC_ASSERT(kSmiTag == 0); + __ test(object_, Immediate(kSmiTagMask)); + __ j(zero, receiver_not_string_); + + // Fetch the instance type of the receiver into result register. + __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); + // If the receiver is not a string trigger the non-string case. + __ test(result_, Immediate(kIsNotStringMask)); + __ j(not_zero, receiver_not_string_); + + // If the index is non-smi trigger the non-smi case. + STATIC_ASSERT(kSmiTag == 0); + __ test(index_, Immediate(kSmiTagMask)); + __ j(not_zero, &index_not_smi_); + + // Put smi-tagged index into scratch register. + __ mov(scratch_, index_); + __ bind(&got_smi_index_); + + // Check for index out of range. + __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset)); + __ j(above_equal, index_out_of_range_); + + // We need special handling for non-flat strings. + STATIC_ASSERT(kSeqStringTag == 0); + __ test(result_, Immediate(kStringRepresentationMask)); + __ j(zero, &flat_string); + + // Handle non-flat strings. + __ test(result_, Immediate(kIsConsStringMask)); + __ j(zero, &call_runtime_); + + // ConsString. + // Check whether the right hand side is the empty string (i.e. if + // this is really a flat string in a cons string). If that is not + // the case we would rather go to the runtime system now to flatten + // the string. + __ cmp(FieldOperand(object_, ConsString::kSecondOffset), + Immediate(Factory::empty_string())); + __ j(not_equal, &call_runtime_); + // Get the first of the two strings and load its instance type. + __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset)); + __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); + // If the first cons component is also non-flat, then go to runtime. + STATIC_ASSERT(kSeqStringTag == 0); + __ test(result_, Immediate(kStringRepresentationMask)); + __ j(not_zero, &call_runtime_); + + // Check for 1-byte or 2-byte string. 
+ __ bind(&flat_string); + STATIC_ASSERT(kAsciiStringTag != 0); + __ test(result_, Immediate(kStringEncodingMask)); + __ j(not_zero, &ascii_string); + + // 2-byte string. + // Load the 2-byte character code into the result register. + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); + __ movzx_w(result_, FieldOperand(object_, + scratch_, times_1, // Scratch is smi-tagged. + SeqTwoByteString::kHeaderSize)); + __ jmp(&got_char_code); + + // ASCII string. + // Load the byte into the result register. + __ bind(&ascii_string); + __ SmiUntag(scratch_); + __ movzx_b(result_, FieldOperand(object_, + scratch_, times_1, + SeqAsciiString::kHeaderSize)); + __ bind(&got_char_code); + __ SmiTag(result_); + __ bind(&exit_); +} + + +void StringCharCodeAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharCodeAt slow case"); + + // Index is not a smi. + __ bind(&index_not_smi_); + // If index is a heap number, try converting it to an integer. + __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true); + call_helper.BeforeCall(masm); + __ push(object_); + __ push(index_); + __ push(index_); // Consumed by runtime conversion function. + if (index_flags_ == STRING_INDEX_IS_NUMBER) { + __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); + } else { + ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); + // NumberToSmi discards numbers that are not exact integers. + __ CallRuntime(Runtime::kNumberToSmi, 1); + } + if (!scratch_.is(eax)) { + // Save the conversion result before the pop instructions below + // have a chance to overwrite it. + __ mov(scratch_, eax); + } + __ pop(index_); + __ pop(object_); + // Reload the instance type. + __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); + __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); + call_helper.AfterCall(masm); + // If index is still not a smi, it must be out of range. + STATIC_ASSERT(kSmiTag == 0); + __ test(scratch_, Immediate(kSmiTagMask)); + __ j(not_zero, index_out_of_range_); + // Otherwise, return to the fast path. + __ jmp(&got_smi_index_); + + // Call runtime. We get here when the receiver is a string and the + // index is a number, but the code of getting the actual character + // is too complex (e.g., when the string needs to be flattened). + __ bind(&call_runtime_); + call_helper.BeforeCall(masm); + __ push(object_); + __ push(index_); + __ CallRuntime(Runtime::kStringCharCodeAt, 2); + if (!result_.is(eax)) { + __ mov(result_, eax); + } + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharCodeAt slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharFromCodeGenerator + +void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { + // Fast case of Heap::LookupSingleCharacterStringFromCode. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiShiftSize == 0); + ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + __ test(code_, + Immediate(kSmiTagMask | + ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + __ j(not_zero, &slow_case_, not_taken); + + __ Set(result_, Immediate(Factory::single_character_string_cache())); + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiShiftSize == 0); + // At this point code register contains smi tagged ascii char code. 
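
A short note on the index handling above: the STATIC_ASSERTs pin down kSmiTag == 0 and kSmiTagSize == 1, so a smi-tagged integer is simply the value shifted left by one. That is why the two-byte load can use the tagged index directly (it is already scaled by the two-byte character size), while the ASCII path untags first. A tiny sketch stating only what those asserts imply:

#include <cstdint>

inline int32_t SmiTag(int32_t value) { return value << 1; }  // kSmiTagSize == 1
inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }

// Two-byte string: byte offset of character i is SmiTag(i), i.e. i * 2.
// One-byte string: byte offset of character i is SmiUntag(SmiTag(i)), i.e. i.
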
+ __ mov(result_, FieldOperand(result_, + code_, times_half_pointer_size, + FixedArray::kHeaderSize)); + __ cmp(result_, Factory::undefined_value()); + __ j(equal, &slow_case_, not_taken); + __ bind(&exit_); +} + + +void StringCharFromCodeGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + __ Abort("Unexpected fallthrough to CharFromCode slow case"); + + __ bind(&slow_case_); + call_helper.BeforeCall(masm); + __ push(code_); + __ CallRuntime(Runtime::kCharFromCode, 1); + if (!result_.is(eax)) { + __ mov(result_, eax); + } + call_helper.AfterCall(masm); + __ jmp(&exit_); + + __ Abort("Unexpected fallthrough from CharFromCode slow case"); +} + + +// ------------------------------------------------------------------------- +// StringCharAtGenerator + +void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { + char_code_at_generator_.GenerateFast(masm); + char_from_code_generator_.GenerateFast(masm); +} + + +void StringCharAtGenerator::GenerateSlow( + MacroAssembler* masm, const RuntimeCallHelper& call_helper) { + char_code_at_generator_.GenerateSlow(masm, call_helper); + char_from_code_generator_.GenerateSlow(masm, call_helper); +} + + +void StringAddStub::Generate(MacroAssembler* masm) { + Label string_add_runtime, call_builtin; + Builtins::JavaScript builtin_id = Builtins::ADD; + + // Load the two arguments. + __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. + __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. + + // Make sure that both arguments are strings if not known in advance. + if (flags_ == NO_STRING_ADD_FLAGS) { + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &string_add_runtime); + __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx); + __ j(above_equal, &string_add_runtime); + + // First argument is a a string, test second. + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &string_add_runtime); + __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx); + __ j(above_equal, &string_add_runtime); + } else { + // Here at least one of the arguments is definitely a string. + // We convert the one that is not known to be a string. + if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); + GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi, + &call_builtin); + builtin_id = Builtins::STRING_ADD_RIGHT; + } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); + GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi, + &call_builtin); + builtin_id = Builtins::STRING_ADD_LEFT; + } + } + + // Both arguments are strings. + // eax: first string + // edx: second string + // Check if either of the strings are empty. In that case return the other. + Label second_not_zero_length, both_not_zero_length; + __ mov(ecx, FieldOperand(edx, String::kLengthOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ test(ecx, Operand(ecx)); + __ j(not_zero, &second_not_zero_length); + // Second string is empty, result is first string which is already in eax. + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + __ bind(&second_not_zero_length); + __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); + STATIC_ASSERT(kSmiTag == 0); + __ test(ebx, Operand(ebx)); + __ j(not_zero, &both_not_zero_length); + // First string is empty, result is second string which is in edx. 
+ __ mov(eax, edx); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + // Both strings are non-empty. + // eax: first string + // ebx: length of first string as a smi + // ecx: length of second string as a smi + // edx: second string + // Look at the length of the result of adding the two strings. + Label string_add_flat_result, longer_than_two; + __ bind(&both_not_zero_length); + __ add(ebx, Operand(ecx)); + STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); + // Handle exceptionally long strings in the runtime system. + __ j(overflow, &string_add_runtime); + // Use the runtime system when adding two one character strings, as it + // contains optimizations for this specific case using the symbol table. + __ cmp(Operand(ebx), Immediate(Smi::FromInt(2))); + __ j(not_equal, &longer_than_two); + + // Check that both strings are non-external ascii strings. + __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, + &string_add_runtime); + + // Get the two characters forming the new string. + __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize)); + __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize)); + + // Try to lookup two character string in symbol table. If it is not found + // just allocate a new one. + Label make_two_character_string, make_two_character_string_no_reload; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, ebx, ecx, eax, edx, edi, + &make_two_character_string_no_reload, &make_two_character_string); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + // Allocate a two character string. + __ bind(&make_two_character_string); + // Reload the arguments. + __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. + __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. + // Get the two characters forming the new string. + __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize)); + __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize)); + __ bind(&make_two_character_string_no_reload); + __ IncrementCounter(&Counters::string_add_make_two_char, 1); + __ AllocateAsciiString(eax, // Result. + 2, // Length. + edi, // Scratch 1. + edx, // Scratch 2. + &string_add_runtime); + // Pack both characters in ebx. + __ shl(ecx, kBitsPerByte); + __ or_(ebx, Operand(ecx)); + // Set the characters in the new string. + __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + + __ bind(&longer_than_two); + // Check if resulting string will be flat. + __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength))); + __ j(below, &string_add_flat_result); + + // If result is not supposed to be flat allocate a cons string object. If both + // strings are ascii the result is an ascii cons string. + Label non_ascii, allocated, ascii_data; + __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); + __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); + __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); + __ and_(ecx, Operand(edi)); + STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); + __ test(ecx, Immediate(kAsciiStringTag)); + __ j(zero, &non_ascii); + __ bind(&ascii_data); + // Allocate an acsii cons string. + __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime); + __ bind(&allocated); + // Fill the fields of the cons string. 
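
The two-character fast path above packs both characters into one register before the single mov_w store. A sketch of that packing, assuming kBitsPerByte == 8:

#include <cstdint>

inline uint16_t PackTwoAsciiChars(uint8_t first, uint8_t second) {
  // The second character goes into the high byte, matching the stub's
  // shl(ecx, kBitsPerByte) followed by or_(ebx, ecx).
  return static_cast<uint16_t>(first | (second << 8));
}
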
+ if (FLAG_debug_code) __ AbortIfNotSmi(ebx); + __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx); + __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset), + Immediate(String::kEmptyHashField)); + __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax); + __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx); + __ mov(eax, ecx); + __ IncrementCounter(&Counters::string_add_native, 1); + __ ret(2 * kPointerSize); + __ bind(&non_ascii); + // At least one of the strings is two-byte. Check whether it happens + // to contain only ascii characters. + // ecx: first instance type AND second instance type. + // edi: second instance type. + __ test(ecx, Immediate(kAsciiDataHintMask)); + __ j(not_zero, &ascii_data); + __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); + __ xor_(edi, Operand(ecx)); + STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); + __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); + __ j(equal, &ascii_data); + // Allocate a two byte cons string. + __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime); + __ jmp(&allocated); + + // Handle creating a flat result. First check that both strings are not + // external strings. + // eax: first string + // ebx: length of resulting flat string as a smi + // edx: second string + __ bind(&string_add_flat_result); + __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); + __ and_(ecx, kStringRepresentationMask); + __ cmp(ecx, kExternalStringTag); + __ j(equal, &string_add_runtime); + __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); + __ and_(ecx, kStringRepresentationMask); + __ cmp(ecx, kExternalStringTag); + __ j(equal, &string_add_runtime); + // Now check if both strings are ascii strings. + // eax: first string + // ebx: length of resulting flat string as a smi + // edx: second string + Label non_ascii_string_add_flat_result; + STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); + __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); + __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); + __ j(zero, &non_ascii_string_add_flat_result); + __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); + __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); + __ j(zero, &string_add_runtime); + + // Both strings are ascii strings. As they are short they are both flat. + // ebx: length of resulting flat string as a smi + __ SmiUntag(ebx); + __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime); + // eax: result string + __ mov(ecx, eax); + // Locate first character of result. + __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // Load first argument and locate first character. + __ mov(edx, Operand(esp, 2 * kPointerSize)); + __ mov(edi, FieldOperand(edx, String::kLengthOffset)); + __ SmiUntag(edi); + __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // eax: result string + // ecx: first character of result + // edx: first char of first argument + // edi: length of first argument + StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true); + // Load second argument and locate first character. 
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Handle creating a flat two byte result.
+ // eax: first string - known to be two byte
+ // ebx: length of resulting flat string as a smi
+ // edx: second string
+ __ bind(&non_ascii_string_add_flat_result);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
+ __ j(not_zero, &string_add_runtime);
+ // Both strings are two byte strings. As they are short they are both
+ // flat.
+ __ SmiUntag(ebx);
+ __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
+ // eax: result string
+ __ mov(ecx, eax);
+ // Locate first character of result.
+ __ add(Operand(ecx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load first argument and locate first character.
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: first character of result
+ // edx: first char of first argument
+ // edi: length of first argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ // Load second argument and locate first character.
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ mov(edi, FieldOperand(edx, String::kLengthOffset));
+ __ SmiUntag(edi);
+ __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // eax: result string
+ // ecx: next character of result
+ // edx: first char of second argument
+ // edi: length of second argument
+ StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
+ __ IncrementCounter(&Counters::string_add_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Just jump to runtime to add the two strings.
+ __ bind(&string_add_runtime);
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+ if (call_builtin.is_linked()) {
+ __ bind(&call_builtin);
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+ }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+ int stack_offset,
+ Register arg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* slow) {
+ // First check if the argument is already a string.
+ Label not_string, done;
+ __ test(arg, Immediate(kSmiTagMask));
+ __ j(zero, &not_string);
+ __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
+ __ j(below, &done);
+
+ // Check the number to string cache.
+ Label not_cached;
+ __ bind(&not_string);
+ // Puts the cached result into scratch1.
+ NumberToStringStub::GenerateLookupNumberStringCache(masm,
+ arg,
+ scratch1,
+ scratch2,
+ scratch3,
+ false,
+ &not_cached);
+ __ mov(arg, scratch1);
+ __ mov(Operand(esp, stack_offset), arg);
+ __ jmp(&done);
+
+ // Check if the argument is a safe string wrapper.
+ __ bind(&not_cached);
+ __ test(arg, Immediate(kSmiTagMask));
+ __ j(zero, slow);
+ __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
+ __ j(not_equal, slow);
+ __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
+ 1 << Map::kStringWrapperSafeForDefaultValueOf);
+ __ j(zero, slow);
+ __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
+ __ mov(Operand(esp, stack_offset), arg);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ Label loop;
+ __ bind(&loop);
+ // This loop just copies one character at a time, as it is only used for very
+ // short strings.
+ if (ascii) {
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ } else {
+ __ mov_w(scratch, Operand(src, 0));
+ __ mov_w(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(2));
+ __ add(Operand(dest), Immediate(2));
+ }
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+}
+
+
+void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ // Copy characters using rep movs of doublewords.
+ // The destination is aligned on a 4 byte boundary because we are
+ // copying to the beginning of a newly allocated string.
+ ASSERT(dest.is(edi)); // rep movs destination
+ ASSERT(src.is(esi)); // rep movs source
+ ASSERT(count.is(ecx)); // rep movs count
+ ASSERT(!scratch.is(dest));
+ ASSERT(!scratch.is(src));
+ ASSERT(!scratch.is(count));
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ __ shl(count, 1);
+ }
+
+ // Don't enter the rep movs if there are less than 4 bytes to copy.
+ Label last_bytes;
+ __ test(count, Immediate(~3));
+ __ j(zero, &last_bytes);
+
+ // Copy from edi to esi using rep movs instruction.
+ __ mov(scratch, count);
+ __ sar(count, 2); // Number of doublewords to copy.
+ __ cld();
+ __ rep_movs();
+
+ // Find number of bytes left.
+ __ mov(count, scratch);
+ __ and_(count, 3);
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* not_probed,
+ Label* not_found) {
+ // Register scratch3 is the general scratch register in this function.
+ Register scratch = scratch3;
+
+ // Make sure that both characters are not digits as such strings has a
+ // different hash algorithm. Don't try to look for these in the symbol table.
+ Label not_array_index;
+ __ mov(scratch, c1);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(above, &not_array_index);
+ __ mov(scratch, c2);
+ __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+ __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ j(below_equal, not_probed);
+
+ __ bind(&not_array_index);
+ // Calculate the two character string hash.
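
For reference, GenerateCopyCharactersREP above amounts to the following C-level copy strategy: double the byte count for two-byte strings, bulk-copy whole doublewords with rep movs, then finish the remaining zero to three bytes one at a time. A sketch under those assumptions (non-overlapping buffers, as the helper requires):

#include <cstdint>
#include <cstring>

void CopyCharactersSketch(uint8_t* dest, const uint8_t* src,
                          int char_count, bool ascii) {
  int bytes = ascii ? char_count : char_count * 2;
  int dwords = bytes >> 2;                    // the rep movsd part
  std::memcpy(dest, src, dwords * 4);
  for (int i = dwords * 4; i < bytes; i++) {  // remaining 0-3 bytes
    dest[i] = src[i];
  }
}
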
+ Register hash = scratch1; + GenerateHashInit(masm, hash, c1, scratch); + GenerateHashAddCharacter(masm, hash, c2, scratch); + GenerateHashGetHash(masm, hash, scratch); + + // Collect the two characters in a register. + Register chars = c1; + __ shl(c2, kBitsPerByte); + __ or_(chars, Operand(c2)); + + // chars: two character string, char 1 in byte 0 and char 2 in byte 1. + // hash: hash of two character string. + + // Load the symbol table. + Register symbol_table = c2; + ExternalReference roots_address = ExternalReference::roots_address(); + __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex)); + __ mov(symbol_table, + Operand::StaticArray(scratch, times_pointer_size, roots_address)); + + // Calculate capacity mask from the symbol table capacity. + Register mask = scratch2; + __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); + __ SmiUntag(mask); + __ sub(Operand(mask), Immediate(1)); + + // Registers + // chars: two character string, char 1 in byte 0 and char 2 in byte 1. + // hash: hash of two character string + // symbol_table: symbol table + // mask: capacity mask + // scratch: - + + // Perform a number of probes in the symbol table. + static const int kProbes = 4; + Label found_in_symbol_table; + Label next_probe[kProbes], next_probe_pop_mask[kProbes]; + for (int i = 0; i < kProbes; i++) { + // Calculate entry in symbol table. + __ mov(scratch, hash); + if (i > 0) { + __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i))); + } + __ and_(scratch, Operand(mask)); + + // Load the entry from the symbol table. + Register candidate = scratch; // Scratch register contains candidate. + STATIC_ASSERT(SymbolTable::kEntrySize == 1); + __ mov(candidate, + FieldOperand(symbol_table, + scratch, + times_pointer_size, + SymbolTable::kElementsStartOffset)); + + // If entry is undefined no string with this hash can be found. + __ cmp(candidate, Factory::undefined_value()); + __ j(equal, not_found); + + // If length is not 2 the string is not a candidate. + __ cmp(FieldOperand(candidate, String::kLengthOffset), + Immediate(Smi::FromInt(2))); + __ j(not_equal, &next_probe[i]); + + // As we are out of registers save the mask on the stack and use that + // register as a temporary. + __ push(mask); + Register temp = mask; + + // Check that the candidate is a non-external ascii string. + __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset)); + __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii( + temp, temp, &next_probe_pop_mask[i]); + + // Check if the two characters match. + __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); + __ and_(temp, 0x0000ffff); + __ cmp(chars, Operand(temp)); + __ j(equal, &found_in_symbol_table); + __ bind(&next_probe_pop_mask[i]); + __ pop(mask); + __ bind(&next_probe[i]); + } + + // No matching 2 character string found by probing. + __ jmp(not_found); + + // Scratch register contains result when we fall through to here. + Register result = scratch; + __ bind(&found_in_symbol_table); + __ pop(mask); // Pop saved mask from the stack. 
+ if (!result.is(eax)) { + __ mov(eax, result); + } +} + + +void StringHelper::GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash = character + (character << 10); + __ mov(hash, character); + __ shl(hash, 10); + __ add(hash, Operand(character)); + // hash ^= hash >> 6; + __ mov(scratch, hash); + __ sar(scratch, 6); + __ xor_(hash, Operand(scratch)); +} + + +void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch) { + // hash += character; + __ add(hash, Operand(character)); + // hash += hash << 10; + __ mov(scratch, hash); + __ shl(scratch, 10); + __ add(hash, Operand(scratch)); + // hash ^= hash >> 6; + __ mov(scratch, hash); + __ sar(scratch, 6); + __ xor_(hash, Operand(scratch)); +} + + +void StringHelper::GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch) { + // hash += hash << 3; + __ mov(scratch, hash); + __ shl(scratch, 3); + __ add(hash, Operand(scratch)); + // hash ^= hash >> 11; + __ mov(scratch, hash); + __ sar(scratch, 11); + __ xor_(hash, Operand(scratch)); + // hash += hash << 15; + __ mov(scratch, hash); + __ shl(scratch, 15); + __ add(hash, Operand(scratch)); + + // if (hash == 0) hash = 27; + Label hash_not_zero; + __ test(hash, Operand(hash)); + __ j(not_zero, &hash_not_zero); + __ mov(hash, Immediate(27)); + __ bind(&hash_not_zero); +} + + +void SubStringStub::Generate(MacroAssembler* masm) { + Label runtime; + + // Stack frame on entry. + // esp[0]: return address + // esp[4]: to + // esp[8]: from + // esp[12]: string + + // Make sure first argument is a string. + __ mov(eax, Operand(esp, 3 * kPointerSize)); + STATIC_ASSERT(kSmiTag == 0); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &runtime); + Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); + __ j(NegateCondition(is_string), &runtime); + + // eax: string + // ebx: instance type + + // Calculate length of sub string using the smi values. + Label result_longer_than_two; + __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index. + __ test(ecx, Immediate(kSmiTagMask)); + __ j(not_zero, &runtime); + __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. + __ test(edx, Immediate(kSmiTagMask)); + __ j(not_zero, &runtime); + __ sub(ecx, Operand(edx)); + __ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); + Label return_eax; + __ j(equal, &return_eax); + // Special handling of sub-strings of length 1 and 2. One character strings + // are handled in the runtime system (looked up in the single character + // cache). Two character strings are looked for in the symbol cache. + __ SmiUntag(ecx); // Result length is no longer smi. + __ cmp(ecx, 2); + __ j(greater, &result_longer_than_two); + __ j(less, &runtime); + + // Sub string of length 2 requested. + // eax: string + // ebx: instance type + // ecx: sub string length (value is 2) + // edx: from index (smi) + __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime); + + // Get the two characters forming the sub string. + __ SmiUntag(edx); // From index is no longer smi. + __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize)); + __ movzx_b(ecx, + FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1)); + + // Try to lookup two character string in symbol table. 
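
The three hash helpers above (GenerateHashInit, GenerateHashAddCharacter, GenerateHashGetHash) implement a running one-at-a-time style hash. A scalar rendition follows; note the stub uses sar on 32-bit registers whereas this sketch uses logical shifts on uint32_t, which only differs once the top bit is set.

#include <cstdint>

uint32_t StringHashSketch(const uint8_t* chars, int length) {
  uint32_t hash = 0;
  for (int i = 0; i < length; i++) {
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hash == 0) hash = 27;  // same fallback as the generated code
  return hash;
}
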
+ Label make_two_character_string; + StringHelper::GenerateTwoCharacterSymbolTableProbe( + masm, ebx, ecx, eax, edx, edi, + &make_two_character_string, &make_two_character_string); + __ ret(3 * kPointerSize); + + __ bind(&make_two_character_string); + // Setup registers for allocating the two character string. + __ mov(eax, Operand(esp, 3 * kPointerSize)); + __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + __ Set(ecx, Immediate(2)); + + __ bind(&result_longer_than_two); + // eax: string + // ebx: instance type + // ecx: result string length + // Check for flat ascii string + Label non_ascii_flat; + __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat); + + // Allocate the result. + __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime); + + // eax: result string + // ecx: result string length + __ mov(edx, esi); // esi used by following code. + // Locate first character of result. + __ mov(edi, eax); + __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + // Load string argument and locate character of sub string start. + __ mov(esi, Operand(esp, 3 * kPointerSize)); + __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from + __ SmiUntag(ebx); + __ add(esi, Operand(ebx)); + + // eax: result string + // ecx: result length + // edx: original value of esi + // edi: first character of result + // esi: character of sub string start + StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true); + __ mov(esi, edx); // Restore esi. + __ IncrementCounter(&Counters::sub_string_native, 1); + __ ret(3 * kPointerSize); + + __ bind(&non_ascii_flat); + // eax: string + // ebx: instance type & kStringRepresentationMask | kStringEncodingMask + // ecx: result string length + // Check for flat two byte string + __ cmp(ebx, kSeqStringTag | kTwoByteStringTag); + __ j(not_equal, &runtime); + + // Allocate the result. + __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime); + + // eax: result string + // ecx: result string length + __ mov(edx, esi); // esi used by following code. + // Locate first character of result. + __ mov(edi, eax); + __ add(Operand(edi), + Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + // Load string argument and locate character of sub string start. + __ mov(esi, Operand(esp, 3 * kPointerSize)); + __ add(Operand(esi), + Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from + // As from is a smi it is 2 times the value which matches the size of a two + // byte character. + STATIC_ASSERT(kSmiTag == 0); + STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); + __ add(esi, Operand(ebx)); + + // eax: result string + // ecx: result length + // edx: original value of esi + // edi: first character of result + // esi: character of sub string start + StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false); + __ mov(esi, edx); // Restore esi. + + __ bind(&return_eax); + __ IncrementCounter(&Counters::sub_string_native, 1); + __ ret(3 * kPointerSize); + + // Just jump to runtime to create the sub string. 
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kSubString, 3, 1);
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Label result_not_equal;
+ Label result_greater;
+ Label compare_lengths;
+
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+
+ // Find minimum length.
+ Label left_shorter;
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ mov(scratch3, scratch1);
+ __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
+
+ Register length_delta = scratch3;
+
+ __ j(less_equal, &left_shorter);
+ // Right string is shorter. Change scratch1 to be length of right string.
+ __ sub(scratch1, Operand(length_delta));
+ __ bind(&left_shorter);
+
+ Register min_length = scratch1;
+
+ // If either length is zero, just compare lengths.
+ __ test(min_length, Operand(min_length));
+ __ j(zero, &compare_lengths);
+
+ // Change index to run from -min_length to -1 by adding min_length
+ // to string start. This means that loop ends when index reaches zero,
+ // which doesn't need an additional compare.
+ __ SmiUntag(min_length);
+ __ lea(left,
+ FieldOperand(left,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ lea(right,
+ FieldOperand(right,
+ min_length, times_1,
+ SeqAsciiString::kHeaderSize));
+ __ neg(min_length);
+
+ Register index = min_length; // index = -min_length;
+
+ {
+ // Compare loop.
+ Label loop;
+ __ bind(&loop);
+ // Compare characters.
+ __ mov_b(scratch2, Operand(left, index, times_1, 0));
+ __ cmpb(scratch2, Operand(right, index, times_1, 0));
+ __ j(not_equal, &result_not_equal);
+ __ add(Operand(index), Immediate(1));
+ __ j(not_zero, &loop);
+ }
+
+ // Compare lengths - strings up to min-length are equal.
+ __ bind(&compare_lengths);
+ __ test(length_delta, Operand(length_delta));
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&result_not_equal);
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ Set(eax, Immediate(Smi::FromInt(LESS)));
+ __ ret(0);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ Set(eax, Immediate(Smi::FromInt(GREATER)));
+ __ ret(0);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: right string
+ // esp[8]: left string
+
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, &not_same);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both objects are sequential ascii strings.
+ __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+
+ // Compare flat ascii strings.
+ // Drop arguments from the stack.
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ push(ecx);
+ GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
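
GenerateCompareFlatAsciiStrings above walks the common prefix with a negative index that counts up to zero, so the loop branch doubles as the bounds check. An equivalent C++ sketch (ignoring smi untagging and the fixed register assignments):

int CompareFlatAsciiStringsSketch(const char* left, int left_length,
                                  const char* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  const char* left_end = left + min_length;
  const char* right_end = right + min_length;
  for (int index = -min_length; index != 0; index++) {
    char l = left_end[index];
    char r = right_end[index];
    if (l != r) return l < r ? -1 : 1;          // LESS / GREATER
  }
  if (left_length == right_length) return 0;    // EQUAL
  return left_length < right_length ? -1 : 1;   // shorter string is "less"
}
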
+ __ bind(&runtime); + __ TailCallRuntime(Runtime::kStringCompare, 2, 1); +} + +#undef __ + +} } // namespace v8::internal + +#endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h new file mode 100644 index 0000000000..351636faf7 --- /dev/null +++ b/deps/v8/src/ia32/code-stubs-ia32.h @@ -0,0 +1,376 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_IA32_CODE_STUBS_IA32_H_ +#define V8_IA32_CODE_STUBS_IA32_H_ + +#include "macro-assembler.h" +#include "code-stubs.h" +#include "ic-inl.h" + +namespace v8 { +namespace internal { + + +// Compute a transcendental math function natively, or call the +// TranscendentalCache runtime function. +class TranscendentalCacheStub: public CodeStub { + public: + explicit TranscendentalCacheStub(TranscendentalCache::Type type) + : type_(type) {} + void Generate(MacroAssembler* masm); + private: + TranscendentalCache::Type type_; + Major MajorKey() { return TranscendentalCache; } + int MinorKey() { return type_; } + Runtime::FunctionId RuntimeFunction(); + void GenerateOperation(MacroAssembler* masm); +}; + + +class ToBooleanStub: public CodeStub { + public: + ToBooleanStub() { } + + void Generate(MacroAssembler* masm); + + private: + Major MajorKey() { return ToBoolean; } + int MinorKey() { return 0; } +}; + + +// Flag that indicates how to generate code for the stub GenericBinaryOpStub. +enum GenericBinaryFlags { + NO_GENERIC_BINARY_FLAGS = 0, + NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub. 
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags,
+ TypeInfo operands_type)
+ : op_(op),
+ mode_(mode),
+ flags_(flags),
+ args_in_registers_(false),
+ args_reversed_(false),
+ static_operands_type_(operands_type),
+ runtime_operands_type_(BinaryOpIC::DEFAULT),
+ name_(NULL) {
+ if (static_operands_type_.IsSmi()) {
+ mode_ = NO_OVERWRITE;
+ }
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ flags_(FlagBits::decode(key)),
+ args_in_registers_(ArgsInRegistersBits::decode(key)),
+ args_reversed_(ArgsReversedBits::decode(key)),
+ use_sse3_(SSE3Bits::decode(key)),
+ static_operands_type_(TypeInfo::ExpandedRepresentation(
+ StaticTypeInfoBits::decode(key))),
+ runtime_operands_type_(runtime_operands_type),
+ name_(NULL) {
+ }
+
+ // Generate code to call the stub with the supplied arguments. This will add
+ // code at the call site to prepare arguments either in registers or on the
+ // stack together with the actual call.
+ void GenerateCall(MacroAssembler* masm, Register left, Register right);
+ void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+ void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+
+ bool ArgsInRegistersSupported() {
+ return op_ == Token::ADD || op_ == Token::SUB
+ || op_ == Token::MUL || op_ == Token::DIV;
+ }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+ bool args_in_registers_; // Arguments passed in registers not on the stack.
+ bool args_reversed_; // Left and right argument are swapped.
+ bool use_sse3_;
+
+ // Number type information of operands, determined by code generator.
+ TypeInfo static_operands_type_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub %d (op %s), "
+ "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_),
+ static_cast<int>(args_in_registers_),
+ static_cast<int>(args_reversed_),
+ static_operands_type_.ToString());
+ }
+#endif
+
+ // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class SSE3Bits: public BitField<bool, 9, 1> {};
+ class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
+ class ArgsReversedBits: public BitField<bool, 11, 1> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
+ class StaticTypeInfoBits: public BitField<int, 13, 3> {};
+ class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 18 bit value.
+ return OpBits::encode(op_) + | ModeBits::encode(mode_) + | FlagBits::encode(flags_) + | SSE3Bits::encode(use_sse3_) + | ArgsInRegistersBits::encode(args_in_registers_) + | ArgsReversedBits::encode(args_reversed_) + | StaticTypeInfoBits::encode( + static_operands_type_.ThreeBitRepresentation()) + | RuntimeTypeInfoBits::encode(runtime_operands_type_); + } + + void Generate(MacroAssembler* masm); + void GenerateSmiCode(MacroAssembler* masm, Label* slow); + void GenerateLoadArguments(MacroAssembler* masm); + void GenerateReturn(MacroAssembler* masm); + void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure); + void GenerateRegisterArgsPush(MacroAssembler* masm); + void GenerateTypeTransition(MacroAssembler* masm); + + bool IsOperationCommutative() { + return (op_ == Token::ADD) || (op_ == Token::MUL); + } + + void SetArgsInRegisters() { args_in_registers_ = true; } + void SetArgsReversed() { args_reversed_ = true; } + bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; } + bool HasArgsInRegisters() { return args_in_registers_; } + bool HasArgsReversed() { return args_reversed_; } + + bool ShouldGenerateSmiCode() { + return HasSmiCodeInStub() && + runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && + runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + bool ShouldGenerateFPCode() { + return runtime_operands_type_ != BinaryOpIC::STRINGS; + } + + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(runtime_operands_type_); + } + + friend class CodeGenerator; +}; + + +class StringHelper : public AllStatic { + public: + // Generate code for copying characters using a simple loop. This should only + // be used in places where the number of characters is small and the + // additional setup and checking in GenerateCopyCharactersREP adds too much + // overhead. Copying of overlapping regions is not supported. + static void GenerateCopyCharacters(MacroAssembler* masm, + Register dest, + Register src, + Register count, + Register scratch, + bool ascii); + + // Generate code for copying characters using the rep movs instruction. + // Copies ecx characters from esi to edi. Copying of overlapping regions is + // not supported. + static void GenerateCopyCharactersREP(MacroAssembler* masm, + Register dest, // Must be edi. + Register src, // Must be esi. + Register count, // Must be ecx. + Register scratch, // Neither of above. + bool ascii); + + // Probe the symbol table for a two character string. If the string + // requires non-standard hashing a jump to the label not_probed is + // performed and registers c1 and c2 are preserved. In all other + // cases they are clobbered. If the string is not found by probing a + // jump to the label not_found is performed. This jump does not + // guarantee that the string is not in the symbol table. If the + // string is found the code falls through with the string in + // register eax. + static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + Register c1, + Register c2, + Register scratch1, + Register scratch2, + Register scratch3, + Label* not_probed, + Label* not_found); + + // Generate string hash. 
+ static void GenerateHashInit(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + static void GenerateHashAddCharacter(MacroAssembler* masm, + Register hash, + Register character, + Register scratch); + static void GenerateHashGetHash(MacroAssembler* masm, + Register hash, + Register scratch); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); +}; + + +// Flag that indicates how to generate code for the stub StringAddStub. +enum StringAddFlags { + NO_STRING_ADD_FLAGS = 0, + // Omit left string check in stub (left is definitely a string). + NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0, + // Omit right string check in stub (right is definitely a string). + NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1, + // Omit both string checks in stub. + NO_STRING_CHECK_IN_STUB = + NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB +}; + + +class StringAddStub: public CodeStub { + public: + explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} + + private: + Major MajorKey() { return StringAdd; } + int MinorKey() { return flags_; } + + void Generate(MacroAssembler* masm); + + void GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Label* slow); + + const StringAddFlags flags_; +}; + + +class SubStringStub: public CodeStub { + public: + SubStringStub() {} + + private: + Major MajorKey() { return SubString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + +class StringCompareStub: public CodeStub { + public: + explicit StringCompareStub() { + } + + // Compare two flat ascii strings and returns result in eax after popping two + // arguments from the stack. + static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, + Register left, + Register right, + Register scratch1, + Register scratch2, + Register scratch3); + + private: + Major MajorKey() { return StringCompare; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); +}; + + +class NumberToStringStub: public CodeStub { + public: + NumberToStringStub() { } + + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. 
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm, + Register object, + Register result, + Register scratch1, + Register scratch2, + bool object_is_smi, + Label* not_found); + + private: + Major MajorKey() { return NumberToString; } + int MinorKey() { return 0; } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "NumberToStringStub"; } + +#ifdef DEBUG + void Print() { + PrintF("NumberToStringStub\n"); + } +#endif +}; + + +} } // namespace v8::internal + +#endif // V8_IA32_CODE_STUBS_IA32_H_ diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index cc89cc7db5..854052a65b 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -29,8 +29,9 @@ #if defined(V8_TARGET_ARCH_IA32) -#include "bootstrapper.h" #include "codegen-inl.h" +#include "bootstrapper.h" +#include "code-stubs.h" #include "compiler.h" #include "debug.h" #include "ic-inl.h" @@ -934,97 +935,6 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) { } -class FloatingPointHelper : public AllStatic { - public: - - enum ArgLocation { - ARGS_ON_STACK, - ARGS_IN_REGISTERS - }; - - // Code pattern for loading a floating point value. Input value must - // be either a smi or a heap number object (fp value). Requirements: - // operand in register number. Returns operand as floating point number - // on FPU stack. - static void LoadFloatOperand(MacroAssembler* masm, Register number); - - // Code pattern for loading floating point values. Input values must - // be either smi or heap number objects (fp values). Requirements: - // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax. - // Returns operands as floating point numbers on FPU stack. - static void LoadFloatOperands(MacroAssembler* masm, - Register scratch, - ArgLocation arg_location = ARGS_ON_STACK); - - // Similar to LoadFloatOperand but assumes that both operands are smis. - // Expects operands in edx, eax. - static void LoadFloatSmis(MacroAssembler* masm, Register scratch); - - // Test if operands are smi or number objects (fp). Requirements: - // operand_1 in eax, operand_2 in edx; falls through on float - // operands, jumps to the non_float label otherwise. - static void CheckFloatOperands(MacroAssembler* masm, - Label* non_float, - Register scratch); - - // Takes the operands in edx and eax and loads them as integers in eax - // and ecx. - static void LoadAsIntegers(MacroAssembler* masm, - TypeInfo type_info, - bool use_sse3, - Label* operand_conversion_failure); - static void LoadNumbersAsIntegers(MacroAssembler* masm, - TypeInfo type_info, - bool use_sse3, - Label* operand_conversion_failure); - static void LoadUnknownsAsIntegers(MacroAssembler* masm, - bool use_sse3, - Label* operand_conversion_failure); - - // Test if operands are smis or heap numbers and load them - // into xmm0 and xmm1 if they are. Operands are in edx and eax. - // Leaves operands unchanged. - static void LoadSSE2Operands(MacroAssembler* masm); - - // Test if operands are numbers (smi or HeapNumber objects), and load - // them into xmm0 and xmm1 if they are. Jump to label not_numbers if - // either operand is not a number. Operands are in edx and eax. - // Leaves operands unchanged. - static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); - - // Similar to LoadSSE2Operands but assumes that both operands are smis. - // Expects operands in edx, eax. 
- static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); -}; - - -const char* GenericBinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); - if (name_ == NULL) return "OOM"; - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - - OS::SNPrintF(Vector(name_, kMaxNameLength), - "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", - op_name, - overwrite_name, - (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", - args_in_registers_ ? "RegArgs" : "StackArgs", - args_reversed_ ? "_R" : "", - static_operands_type_.ToString(), - BinaryOpIC::GetName(runtime_operands_type_)); - return name_; -} - - // Perform or call the specialized stub for a binary operation. Requires the // three registers left, right and dst to be distinct and spilled. This // deferred operation has up to three entry points: The main one calls the @@ -1501,12 +1411,12 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, StringAddStub stub(NO_STRING_CHECK_IN_STUB); answer = frame_->CallStub(&stub, 2); } else { - answer = - frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2); + StringAddStub stub(NO_STRING_CHECK_LEFT_IN_STUB); + answer = frame_->CallStub(&stub, 2); } } else if (right_is_string) { - answer = - frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2); + StringAddStub stub(NO_STRING_CHECK_RIGHT_IN_STUB); + answer = frame_->CallStub(&stub, 2); } answer.set_type_info(TypeInfo::String()); frame_->Push(&answer); @@ -1541,7 +1451,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, overwrite_mode, NO_SMI_CODE_IN_STUB, operands_type); - answer = stub.GenerateCall(masm_, frame_, &left, &right); + answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right); } else if (right_is_smi_constant) { answer = ConstantSmiBinaryOperation(expr, &left, right.handle(), false, overwrite_mode); @@ -1564,7 +1474,7 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, overwrite_mode, NO_GENERIC_BINARY_FLAGS, operands_type); - answer = stub.GenerateCall(masm_, frame_, &left, &right); + answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right); } } @@ -1573,6 +1483,20 @@ void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, } +Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub, + Result* left, + Result* right) { + if (stub->ArgsInRegistersSupported()) { + stub->SetArgsInRegisters(); + return frame_->CallStub(stub, left, right); + } else { + frame_->Push(left); + frame_->Push(right); + return frame_->CallStub(stub, 2); + } +} + + bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { Object* answer_object = Heap::undefined_value(); switch (op) { @@ -2772,41 +2696,6 @@ void CodeGenerator::Comparison(AstNode* node, ConstantSmiComparison(cc, strict, dest, &left_side, &right_side, left_side_constant_smi, right_side_constant_smi, is_loop_condition); - } else if (cc == equal && - (left_side_constant_null || right_side_constant_null)) { - // To make null checks efficient, we check if either the left side or - // the right side is the constant 'null'. 
-    // If so, we optimize the code by inlining a null check instead of
-    // calling the (very) general runtime routine for checking equality.
-    Result operand = left_side_constant_null ? right_side : left_side;
-    right_side.Unuse();
-    left_side.Unuse();
-    operand.ToRegister();
-    __ cmp(operand.reg(), Factory::null_value());
-    if (strict) {
-      operand.Unuse();
-      dest->Split(equal);
-    } else {
-      // The 'null' value is only equal to 'undefined' if using non-strict
-      // comparisons.
-      dest->true_target()->Branch(equal);
-      __ cmp(operand.reg(), Factory::undefined_value());
-      dest->true_target()->Branch(equal);
-      __ test(operand.reg(), Immediate(kSmiTagMask));
-      dest->false_target()->Branch(equal);
-
-      // It can be an undetectable object.
-      // Use a scratch register in preference to spilling operand.reg().
-      Result temp = allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ mov(temp.reg(),
-             FieldOperand(operand.reg(), HeapObject::kMapOffset));
-      __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-                1 << Map::kIsUndetectable);
-      temp.Unuse();
-      operand.Unuse();
-      dest->Split(not_zero);
-    }
   } else if (left_side_constant_1_char_string ||
              right_side_constant_1_char_string) {
     if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
@@ -3423,8 +3312,10 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
     __ j(zero, &build_args);
     __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
     __ j(not_equal, &build_args);
+    __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
+    __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
     Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-    __ cmp(FieldOperand(eax, JSFunction::kCodeOffset), Immediate(apply_code));
+    __ cmp(Operand(ecx), Immediate(apply_code));
     __ j(not_equal, &build_args);
 
     // Check that applicand is a function.
@@ -5520,9 +5411,12 @@ void DeferredRegExpLiteral::Generate() { class DeferredAllocateInNewSpace: public DeferredCode { public: - DeferredAllocateInNewSpace(int size, Register target) - : size_(size), target_(target) { + DeferredAllocateInNewSpace(int size, + Register target, + int registers_to_save = 0) + : size_(size), target_(target), registers_to_save_(registers_to_save) { ASSERT(size >= kPointerSize && size <= Heap::MaxObjectSizeInNewSpace()); + ASSERT_EQ(0, registers_to_save & target.bit()); set_comment("[ DeferredAllocateInNewSpace"); } void Generate(); @@ -5530,15 +5424,28 @@ class DeferredAllocateInNewSpace: public DeferredCode { private: int size_; Register target_; + int registers_to_save_; }; void DeferredAllocateInNewSpace::Generate() { + for (int i = 0; i < kNumRegs; i++) { + if (registers_to_save_ & (1 << i)) { + Register save_register = { i }; + __ push(save_register); + } + } __ push(Immediate(Smi::FromInt(size_))); __ CallRuntime(Runtime::kAllocateInNewSpace, 1); if (!target_.is(eax)) { __ mov(target_, eax); } + for (int i = kNumRegs - 1; i >= 0; i--) { + if (registers_to_save_ & (1 << i)) { + Register save_register = { i }; + __ pop(save_register); + } + } } @@ -5712,12 +5619,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { frame_->Push(node->constant_elements()); int length = node->values()->length(); Result clone; - if (node->depth() > 1) { + if (node->constant_elements()->map() == Heap::fixed_cow_array_map()) { + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); + clone = frame_->CallStub(&stub, 3); + __ IncrementCounter(&Counters::cow_arrays_created_stub, 1); + } else if (node->depth() > 1) { clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3); - } else if (length > FastCloneShallowArrayStub::kMaximumLength) { + } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - FastCloneShallowArrayStub stub(length); + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::CLONE_ELEMENTS, length); clone = frame_->CallStub(&stub, 3); } frame_->Push(&clone); @@ -5727,12 +5640,9 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { for (int i = 0; i < length; i++) { Expression* value = node->values()->at(i); - // If value is a literal the property value is already set in the - // boilerplate object. - if (value->AsLiteral() != NULL) continue; - // If value is a materialized literal the property value is already set - // in the boilerplate object if it is simple. - if (CompileTimeValue::IsCompileTimeValue(value)) continue; + if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) { + continue; + } // The property must be set by generated code. Load(value); @@ -5796,12 +5706,9 @@ void CodeGenerator::EmitSlotAssignment(Assignment* node) { Load(node->value()); // Perform the binary operation. - bool overwrite_value = - (node->value()->AsBinaryOperation() != NULL && - node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool overwrite_value = node->value()->ResultOverwriteAllowed(); // Construct the implicit binary operation. - BinaryOperation expr(node, node->binary_op(), node->target(), - node->value()); + BinaryOperation expr(node); GenericBinaryOperation(&expr, overwrite_value ? 
OVERWRITE_RIGHT : NO_OVERWRITE); } else { @@ -5888,12 +5795,9 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) { frame()->Push(&value); Load(node->value()); - bool overwrite_value = - (node->value()->AsBinaryOperation() != NULL && - node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool overwrite_value = node->value()->ResultOverwriteAllowed(); // Construct the implicit binary operation. - BinaryOperation expr(node, node->binary_op(), node->target(), - node->value()); + BinaryOperation expr(node); GenericBinaryOperation(&expr, overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); } else { @@ -5991,11 +5895,8 @@ void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) { Load(node->value()); // Perform the binary operation. - bool overwrite_value = - (node->value()->AsBinaryOperation() != NULL && - node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); - BinaryOperation expr(node, node->binary_op(), node->target(), - node->value()); + bool overwrite_value = node->value()->ResultOverwriteAllowed(); + BinaryOperation expr(node); GenericBinaryOperation(&expr, overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); } else { @@ -6407,11 +6308,10 @@ void CodeGenerator::VisitCallNew(CallNew* node) { // actual function to call is resolved after the arguments have been // evaluated. - // Compute function to call and use the global object as the - // receiver. There is no need to use the global proxy here because - // it will always be replaced with a newly allocated object. + // Push constructor on the stack. If it's not a function it's used as + // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is + // ignored. Load(node->expression()); - LoadGlobal(); // Push the arguments ("left-to-right") on the stack. ZoneList* args = node->arguments(); @@ -6424,8 +6324,7 @@ void CodeGenerator::VisitCallNew(CallNew* node) { // constructor invocation. CodeForSourcePosition(node->position()); Result result = frame_->CallConstructor(arg_count); - // Replace the function on the stack with the result. - frame_->SetElementAt(0, &result); + frame_->Push(&result); } @@ -7359,6 +7258,88 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList* args) { } +void CodeGenerator::GenerateRegExpCloneResult(ZoneList* args) { + ASSERT_EQ(1, args->length()); + + Load(args->at(0)); + Result object_result = frame_->Pop(); + object_result.ToRegister(eax); + object_result.Unuse(); + { + VirtualFrame::SpilledScope spilled_scope; + + Label done; + + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &done); + + // Load JSRegExpResult map into edx. + // Arguments to this function should be results of calling RegExp exec, + // which is either an unmodified JSRegExpResult or null. Anything not having + // the unmodified JSRegExpResult map is returned unmodified. + // This also ensures that elements are fast. + __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX)); + __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset)); + __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX)); + __ cmp(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ j(not_equal, &done); + + if (FLAG_debug_code) { + // Check that object really has empty properties array, as the map + // should guarantee. 
+ __ cmp(FieldOperand(eax, JSObject::kPropertiesOffset), + Immediate(Factory::empty_fixed_array())); + __ Check(equal, "JSRegExpResult: default map but non-empty properties."); + } + + DeferredAllocateInNewSpace* allocate_fallback = + new DeferredAllocateInNewSpace(JSRegExpResult::kSize, + ebx, + edx.bit() | eax.bit()); + + // All set, copy the contents to a new object. + __ AllocateInNewSpace(JSRegExpResult::kSize, + ebx, + ecx, + no_reg, + allocate_fallback->entry_label(), + TAG_OBJECT); + __ bind(allocate_fallback->exit_label()); + + // Copy all fields from eax to ebx. + STATIC_ASSERT(JSRegExpResult::kSize % (2 * kPointerSize) == 0); + // There is an even number of fields, so unroll the loop once + // for efficiency. + for (int i = 0; i < JSRegExpResult::kSize; i += 2 * kPointerSize) { + STATIC_ASSERT(JSObject::kMapOffset % (2 * kPointerSize) == 0); + if (i != JSObject::kMapOffset) { + // The map was already loaded into edx. + __ mov(edx, FieldOperand(eax, i)); + } + __ mov(ecx, FieldOperand(eax, i + kPointerSize)); + + STATIC_ASSERT(JSObject::kElementsOffset % (2 * kPointerSize) == 0); + if (i == JSObject::kElementsOffset) { + // If the elements array isn't empty, make it copy-on-write + // before copying it. + Label empty; + __ cmp(Operand(edx), Immediate(Factory::empty_fixed_array())); + __ j(equal, &empty); + __ mov(FieldOperand(edx, HeapObject::kMapOffset), + Immediate(Factory::fixed_cow_array_map())); + __ bind(&empty); + } + __ mov(FieldOperand(ebx, i), edx); + __ mov(FieldOperand(ebx, i + kPointerSize), ecx); + } + __ mov(eax, ebx); + + __ bind(&done); + } + frame_->Push(eax); +} + + class DeferredSearchCache: public DeferredCode { public: DeferredSearchCache(Register dst, Register cache, Register key) @@ -7601,7 +7582,7 @@ void CodeGenerator::GenerateSwapElements(ZoneList* args) { KeyedLoadIC::kSlowCaseBitFieldMask); deferred->Branch(not_zero); - // Check the object's elements are in fast case. + // Check the object's elements are in fast case and writable. 
__ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset)); __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset), Immediate(Factory::fixed_array_map())); @@ -7951,6 +7932,42 @@ void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList* args) { } +void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Result value = frame_->Pop(); + value.ToRegister(); + ASSERT(value.is_valid()); + if (FLAG_debug_code) { + __ AbortIfNotString(value.reg()); + } + + __ test(FieldOperand(value.reg(), String::kHashFieldOffset), + Immediate(String::kContainsCachedArrayIndexMask)); + + value.Unuse(); + destination()->Split(zero); +} + + +void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList* args) { + ASSERT(args->length() == 1); + Load(args->at(0)); + Result string = frame_->Pop(); + string.ToRegister(); + if (FLAG_debug_code) { + __ AbortIfNotString(string.reg()); + } + + Result number = allocator()->Allocate(); + ASSERT(number.is_valid()); + __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset)); + __ IndexFromHash(number.reg(), number.reg()); + string.Unuse(); + frame_->Push(&number); +} + + void CodeGenerator::VisitCallRuntime(CallRuntime* node) { ASSERT(!in_safe_int32_mode()); if (CheckForInlineRuntimeCall(node)) { @@ -8114,9 +8131,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { frame_->Push(&value); } else { Load(node->expression()); - bool can_overwrite = - (node->expression()->AsBinaryOperation() != NULL && - node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool can_overwrite = node->expression()->ResultOverwriteAllowed(); UnaryOverwriteMode overwrite = can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; bool no_negative_zero = node->expression()->no_negative_zero(); @@ -8821,11 +8836,9 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { // NOTE: The code below assumes that the slow cases (calls to runtime) // never return a constant/immutable object. OverwriteMode overwrite_mode = NO_OVERWRITE; - if (node->left()->AsBinaryOperation() != NULL && - node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) { + if (node->left()->ResultOverwriteAllowed()) { overwrite_mode = OVERWRITE_LEFT; - } else if (node->right()->AsBinaryOperation() != NULL && - node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) { + } else if (node->right()->ResultOverwriteAllowed()) { overwrite_mode = OVERWRITE_RIGHT; } @@ -9057,6 +9070,41 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { } +void CodeGenerator::VisitCompareToNull(CompareToNull* node) { + ASSERT(!in_safe_int32_mode()); + Comment cmnt(masm_, "[ CompareToNull"); + + Load(node->expression()); + Result operand = frame_->Pop(); + operand.ToRegister(); + __ cmp(operand.reg(), Factory::null_value()); + if (node->is_strict()) { + operand.Unuse(); + destination()->Split(equal); + } else { + // The 'null' value is only equal to 'undefined' if using non-strict + // comparisons. + destination()->true_target()->Branch(equal); + __ cmp(operand.reg(), Factory::undefined_value()); + destination()->true_target()->Branch(equal); + __ test(operand.reg(), Immediate(kSmiTagMask)); + destination()->false_target()->Branch(equal); + + // It can be an undetectable object. + // Use a scratch register in preference to spilling operand.reg(). 
+ Result temp = allocator()->Allocate(); + ASSERT(temp.is_valid()); + __ mov(temp.reg(), + FieldOperand(operand.reg(), HeapObject::kMapOffset)); + __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset), + 1 << Map::kIsUndetectable); + temp.Unuse(); + operand.Unuse(); + destination()->Split(not_zero); + } +} + + #ifdef DEBUG bool CodeGenerator::HasValidEntryRegisters() { return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0)) @@ -9496,15 +9544,10 @@ Result CodeGenerator::EmitKeyedLoad() { if (FLAG_debug_code) __ AbortIfNotSmi(key.reg()); } - // Get the elements array from the receiver and check that it - // is not a dictionary. + // Get the elements array from the receiver. __ mov(elements.reg(), FieldOperand(receiver.reg(), JSObject::kElementsOffset)); - if (FLAG_debug_code) { - __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), - Immediate(Factory::fixed_array_map())); - __ Assert(equal, "JSObject with fast elements map has slow elements"); - } + __ AssertFastElements(elements.reg()); // Check that the key is within bounds. __ cmp(key.reg(), @@ -9787,4406 +9830,6 @@ void Reference::SetValue(InitState init_state) { } -void FastNewClosureStub::Generate(MacroAssembler* masm) { - // Create a new closure from the given function info in new - // space. Set the context to the current context in esi. - Label gc; - __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT); - - // Get the function info from the stack. - __ mov(edx, Operand(esp, 1 * kPointerSize)); - - // Compute the function map in the current global context and set that - // as the map of the allocated object. - __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset)); - __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); - __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx); - - // Initialize the rest of the function. We don't have to update the - // write barrier because the allocated object is in new space. - __ mov(ebx, Immediate(Factory::empty_fixed_array())); - __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx); - __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx); - __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset), - Immediate(Factory::the_hole_value())); - __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx); - __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi); - __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx); - - // Initialize the code pointer in the function to be the one - // found in the shared function info object. - __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset)); - __ mov(FieldOperand(eax, JSFunction::kCodeOffset), edx); - - // Return and remove the on-stack parameter. - __ ret(1 * kPointerSize); - - // Create a new closure through the slower runtime call. - __ bind(&gc); - __ pop(ecx); // Temporarily remove return address. - __ pop(edx); - __ push(esi); - __ push(edx); - __ push(ecx); // Restore return address. - __ TailCallRuntime(Runtime::kNewClosure, 2, 1); -} - - -void FastNewContextStub::Generate(MacroAssembler* masm) { - // Try to allocate the context in new space. - Label gc; - int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, - eax, ebx, ecx, &gc, TAG_OBJECT); - - // Get the function from the stack. - __ mov(ecx, Operand(esp, 1 * kPointerSize)); - - // Setup the object header. 
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map()); - __ mov(FieldOperand(eax, Context::kLengthOffset), - Immediate(Smi::FromInt(length))); - - // Setup the fixed slots. - __ xor_(ebx, Operand(ebx)); // Set to NULL. - __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx); - __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax); - __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx); - __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx); - - // Copy the global object from the surrounding context. We go through the - // context in the function (ecx) to match the allocation behavior we have - // in the runtime system (see Heap::AllocateFunctionContext). - __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset)); - __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx); - - // Initialize the rest of the slots to undefined. - __ mov(ebx, Factory::undefined_value()); - for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { - __ mov(Operand(eax, Context::SlotOffset(i)), ebx); - } - - // Return and remove the on-stack parameter. - __ mov(esi, Operand(eax)); - __ ret(1 * kPointerSize); - - // Need to collect. Call into runtime system. - __ bind(&gc); - __ TailCallRuntime(Runtime::kNewContext, 1, 1); -} - - -void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [esp + kPointerSize]: constant elements. - // [esp + (2 * kPointerSize)]: literal index. - // [esp + (3 * kPointerSize)]: literals array. - - // All sizes here are multiples of kPointerSize. - int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0; - int size = JSArray::kSize + elements_size; - - // Load boilerplate object into ecx and check if we need to create a - // boilerplate. - Label slow_case; - __ mov(ecx, Operand(esp, 3 * kPointerSize)); - __ mov(eax, Operand(esp, 2 * kPointerSize)); - STATIC_ASSERT(kPointerSize == 4); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax)); - __ cmp(ecx, Factory::undefined_value()); - __ j(equal, &slow_case); - - // Allocate both the JS array and the elements array in one big - // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT); - - // Copy the JS array part. - for (int i = 0; i < JSArray::kSize; i += kPointerSize) { - if ((i != JSArray::kElementsOffset) || (length_ == 0)) { - __ mov(ebx, FieldOperand(ecx, i)); - __ mov(FieldOperand(eax, i), ebx); - } - } - - if (length_ > 0) { - // Get hold of the elements array of the boilerplate and setup the - // elements pointer in the resulting object. - __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); - __ lea(edx, Operand(eax, JSArray::kSize)); - __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx); - - // Copy the elements array. - for (int i = 0; i < elements_size; i += kPointerSize) { - __ mov(ebx, FieldOperand(ecx, i)); - __ mov(FieldOperand(edx, i), ebx); - } - } - - // Return and remove the on-stack parameters. - __ ret(3 * kPointerSize); - - __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); -} - - -// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined). 
-void ToBooleanStub::Generate(MacroAssembler* masm) { - Label false_result, true_result, not_string; - __ mov(eax, Operand(esp, 1 * kPointerSize)); - - // 'null' => false. - __ cmp(eax, Factory::null_value()); - __ j(equal, &false_result); - - // Get the map and type of the heap object. - __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset)); - - // Undetectable => false. - __ test_b(FieldOperand(edx, Map::kBitFieldOffset), - 1 << Map::kIsUndetectable); - __ j(not_zero, &false_result); - - // JavaScript object => true. - __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE); - __ j(above_equal, &true_result); - - // String value => false iff empty. - __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE); - __ j(above_equal, ¬_string); - STATIC_ASSERT(kSmiTag == 0); - __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0)); - __ j(zero, &false_result); - __ jmp(&true_result); - - __ bind(¬_string); - // HeapNumber => false iff +0, -0, or NaN. - __ cmp(edx, Factory::heap_number_map()); - __ j(not_equal, &true_result); - __ fldz(); - __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ FCmp(); - __ j(zero, &false_result); - // Fall through to |true_result|. - - // Return 1/0 for true/false in eax. - __ bind(&true_result); - __ mov(eax, 1); - __ ret(1 * kPointerSize); - __ bind(&false_result); - __ mov(eax, 0); - __ ret(1 * kPointerSize); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Register left, - Register right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ push(left); - __ push(right); - } else { - // The calling convention with registers is left in edx and right in eax. - Register left_arg = edx; - Register right_arg = eax; - if (!(left.is(left_arg) && right.is(right_arg))) { - if (left.is(right_arg) && right.is(left_arg)) { - if (IsOperationCommutative()) { - SetArgsReversed(); - } else { - __ xchg(left, right); - } - } else if (left.is(left_arg)) { - __ mov(right_arg, right); - } else if (right.is(right_arg)) { - __ mov(left_arg, left); - } else if (left.is(right_arg)) { - if (IsOperationCommutative()) { - __ mov(left_arg, right); - SetArgsReversed(); - } else { - // Order of moves important to avoid destroying left argument. - __ mov(left_arg, left); - __ mov(right_arg, right); - } - } else if (right.is(left_arg)) { - if (IsOperationCommutative()) { - __ mov(right_arg, left); - SetArgsReversed(); - } else { - // Order of moves important to avoid destroying right argument. - __ mov(right_arg, right); - __ mov(left_arg, left); - } - } else { - // Order of moves is not important. - __ mov(left_arg, left); - __ mov(right_arg, right); - } - } - - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Register left, - Smi* right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ push(left); - __ push(Immediate(right)); - } else { - // The calling convention with registers is left in edx and right in eax. 
- Register left_arg = edx; - Register right_arg = eax; - if (left.is(left_arg)) { - __ mov(right_arg, Immediate(right)); - } else if (left.is(right_arg) && IsOperationCommutative()) { - __ mov(left_arg, Immediate(right)); - SetArgsReversed(); - } else { - // For non-commutative operations, left and right_arg might be - // the same register. Therefore, the order of the moves is - // important here in order to not overwrite left before moving - // it to left_arg. - __ mov(left_arg, left); - __ mov(right_arg, Immediate(right)); - } - - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - -void GenericBinaryOpStub::GenerateCall( - MacroAssembler* masm, - Smi* left, - Register right) { - if (!ArgsInRegistersSupported()) { - // Pass arguments on the stack. - __ push(Immediate(left)); - __ push(right); - } else { - // The calling convention with registers is left in edx and right in eax. - Register left_arg = edx; - Register right_arg = eax; - if (right.is(right_arg)) { - __ mov(left_arg, Immediate(left)); - } else if (right.is(left_arg) && IsOperationCommutative()) { - __ mov(right_arg, Immediate(left)); - SetArgsReversed(); - } else { - // For non-commutative operations, right and left_arg might be - // the same register. Therefore, the order of the moves is - // important here in order to not overwrite right before moving - // it to right_arg. - __ mov(right_arg, right); - __ mov(left_arg, Immediate(left)); - } - // Update flags to indicate that arguments are in registers. - SetArgsInRegisters(); - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1); - } - - // Call the stub. - __ CallStub(this); -} - - -Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm, - VirtualFrame* frame, - Result* left, - Result* right) { - if (ArgsInRegistersSupported()) { - SetArgsInRegisters(); - return frame->CallStub(this, left, right); - } else { - frame->Push(left); - frame->Push(right); - return frame->CallStub(this, 2); - } -} - - -void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { - // 1. Move arguments into edx, eax except for DIV and MOD, which need the - // dividend in eax and edx free for the division. Use eax, ebx for those. - Comment load_comment(masm, "-- Load arguments"); - Register left = edx; - Register right = eax; - if (op_ == Token::DIV || op_ == Token::MOD) { - left = eax; - right = ebx; - if (HasArgsInRegisters()) { - __ mov(ebx, eax); - __ mov(eax, edx); - } - } - if (!HasArgsInRegisters()) { - __ mov(right, Operand(esp, 1 * kPointerSize)); - __ mov(left, Operand(esp, 2 * kPointerSize)); - } - - if (static_operands_type_.IsSmi()) { - if (FLAG_debug_code) { - __ AbortIfNotSmi(left); - __ AbortIfNotSmi(right); - } - if (op_ == Token::BIT_OR) { - __ or_(right, Operand(left)); - GenerateReturn(masm); - return; - } else if (op_ == Token::BIT_AND) { - __ and_(right, Operand(left)); - GenerateReturn(masm); - return; - } else if (op_ == Token::BIT_XOR) { - __ xor_(right, Operand(left)); - GenerateReturn(masm); - return; - } - } - - // 2. Prepare the smi check of both operands by oring them together. - Comment smi_check_comment(masm, "-- Smi check arguments"); - Label not_smis; - Register combined = ecx; - ASSERT(!left.is(combined) && !right.is(combined)); - switch (op_) { - case Token::BIT_OR: - // Perform the operation into eax and smi check the result. 
Preserve - // eax in case the result is not a smi. - ASSERT(!left.is(ecx) && !right.is(ecx)); - __ mov(ecx, right); - __ or_(right, Operand(left)); // Bitwise or is commutative. - combined = right; - break; - - case Token::BIT_XOR: - case Token::BIT_AND: - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: - __ mov(combined, right); - __ or_(combined, Operand(left)); - break; - - case Token::SHL: - case Token::SAR: - case Token::SHR: - // Move the right operand into ecx for the shift operation, use eax - // for the smi check register. - ASSERT(!left.is(ecx) && !right.is(ecx)); - __ mov(ecx, right); - __ or_(right, Operand(left)); - combined = right; - break; - - default: - break; - } - - // 3. Perform the smi check of the operands. - STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case. - __ test(combined, Immediate(kSmiTagMask)); - __ j(not_zero, ¬_smis, not_taken); - - // 4. Operands are both smis, perform the operation leaving the result in - // eax and check the result if necessary. - Comment perform_smi(masm, "-- Perform smi operation"); - Label use_fp_on_smis; - switch (op_) { - case Token::BIT_OR: - // Nothing to do. - break; - - case Token::BIT_XOR: - ASSERT(right.is(eax)); - __ xor_(right, Operand(left)); // Bitwise xor is commutative. - break; - - case Token::BIT_AND: - ASSERT(right.is(eax)); - __ and_(right, Operand(left)); // Bitwise and is commutative. - break; - - case Token::SHL: - // Remove tags from operands (but keep sign). - __ SmiUntag(left); - __ SmiUntag(ecx); - // Perform the operation. - __ shl_cl(left); - // Check that the *signed* result fits in a smi. - __ cmp(left, 0xc0000000); - __ j(sign, &use_fp_on_smis, not_taken); - // Tag the result and store it in register eax. - __ SmiTag(left); - __ mov(eax, left); - break; - - case Token::SAR: - // Remove tags from operands (but keep sign). - __ SmiUntag(left); - __ SmiUntag(ecx); - // Perform the operation. - __ sar_cl(left); - // Tag the result and store it in register eax. - __ SmiTag(left); - __ mov(eax, left); - break; - - case Token::SHR: - // Remove tags from operands (but keep sign). - __ SmiUntag(left); - __ SmiUntag(ecx); - // Perform the operation. - __ shr_cl(left); - // Check that the *unsigned* result fits in a smi. - // Neither of the two high-order bits can be set: - // - 0x80000000: high bit would be lost when smi tagging. - // - 0x40000000: this number would convert to negative when - // Smi tagging these two cases can only happen with shifts - // by 0 or 1 when handed a valid smi. - __ test(left, Immediate(0xc0000000)); - __ j(not_zero, slow, not_taken); - // Tag the result and store it in register eax. - __ SmiTag(left); - __ mov(eax, left); - break; - - case Token::ADD: - ASSERT(right.is(eax)); - __ add(right, Operand(left)); // Addition is commutative. - __ j(overflow, &use_fp_on_smis, not_taken); - break; - - case Token::SUB: - __ sub(left, Operand(right)); - __ j(overflow, &use_fp_on_smis, not_taken); - __ mov(eax, left); - break; - - case Token::MUL: - // If the smi tag is 0 we can just leave the tag on one operand. - STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case. - // We can't revert the multiplication if the result is not a smi - // so save the right operand. - __ mov(ebx, right); - // Remove tag from one of the operands (but keep sign). - __ SmiUntag(right); - // Do multiplication. - __ imul(right, Operand(left)); // Multiplication is commutative. 
- __ j(overflow, &use_fp_on_smis, not_taken); - // Check for negative zero result. Use combined = left | right. - __ NegativeZeroTest(right, combined, &use_fp_on_smis); - break; - - case Token::DIV: - // We can't revert the division if the result is not a smi so - // save the left operand. - __ mov(edi, left); - // Check for 0 divisor. - __ test(right, Operand(right)); - __ j(zero, &use_fp_on_smis, not_taken); - // Sign extend left into edx:eax. - ASSERT(left.is(eax)); - __ cdq(); - // Divide edx:eax by right. - __ idiv(right); - // Check for the corner case of dividing the most negative smi by - // -1. We cannot use the overflow flag, since it is not set by idiv - // instruction. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); - __ cmp(eax, 0x40000000); - __ j(equal, &use_fp_on_smis); - // Check for negative zero result. Use combined = left | right. - __ NegativeZeroTest(eax, combined, &use_fp_on_smis); - // Check that the remainder is zero. - __ test(edx, Operand(edx)); - __ j(not_zero, &use_fp_on_smis); - // Tag the result and store it in register eax. - __ SmiTag(eax); - break; - - case Token::MOD: - // Check for 0 divisor. - __ test(right, Operand(right)); - __ j(zero, ¬_smis, not_taken); - - // Sign extend left into edx:eax. - ASSERT(left.is(eax)); - __ cdq(); - // Divide edx:eax by right. - __ idiv(right); - // Check for negative zero result. Use combined = left | right. - __ NegativeZeroTest(edx, combined, slow); - // Move remainder to register eax. - __ mov(eax, edx); - break; - - default: - UNREACHABLE(); - } - - // 5. Emit return of result in eax. - GenerateReturn(masm); - - // 6. For some operations emit inline code to perform floating point - // operations on known smis (e.g., if the result of the operation - // overflowed the smi range). - switch (op_) { - case Token::SHL: { - Comment perform_float(masm, "-- Perform float operation on smis"); - __ bind(&use_fp_on_smis); - // Result we want is in left == edx, so we can put the allocated heap - // number in eax. - __ AllocateHeapNumber(eax, ecx, ebx, slow); - // Store the result in the HeapNumber and return. - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(left)); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - // It's OK to overwrite the right argument on the stack because we - // are about to return. - __ mov(Operand(esp, 1 * kPointerSize), left); - __ fild_s(Operand(esp, 1 * kPointerSize)); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - GenerateReturn(masm); - break; - } - - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - Comment perform_float(masm, "-- Perform float operation on smis"); - __ bind(&use_fp_on_smis); - // Restore arguments to edx, eax. - switch (op_) { - case Token::ADD: - // Revert right = right + left. - __ sub(right, Operand(left)); - break; - case Token::SUB: - // Revert left = left - right. - __ add(left, Operand(right)); - break; - case Token::MUL: - // Right was clobbered but a copy is in ebx. - __ mov(right, ebx); - break; - case Token::DIV: - // Left was clobbered but a copy is in edi. Right is in ebx for - // division. 
- __ mov(edx, edi); - __ mov(eax, right); - break; - default: UNREACHABLE(); - break; - } - __ AllocateHeapNumber(ecx, ebx, no_reg, slow); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - FloatingPointHelper::LoadSSE2Smis(masm, ebx); - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); - } else { // SSE2 not available, use FPU. - FloatingPointHelper::LoadFloatSmis(masm, ebx); - switch (op_) { - case Token::ADD: __ faddp(1); break; - case Token::SUB: __ fsubp(1); break; - case Token::MUL: __ fmulp(1); break; - case Token::DIV: __ fdivp(1); break; - default: UNREACHABLE(); - } - __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset)); - } - __ mov(eax, ecx); - GenerateReturn(masm); - break; - } - - default: - break; - } - - // 7. Non-smi operands, fall out to the non-smi code with the operands in - // edx and eax. - Comment done_comment(masm, "-- Enter non-smi code"); - __ bind(¬_smis); - switch (op_) { - case Token::BIT_OR: - case Token::SHL: - case Token::SAR: - case Token::SHR: - // Right operand is saved in ecx and eax was destroyed by the smi - // check. - __ mov(eax, ecx); - break; - - case Token::DIV: - case Token::MOD: - // Operands are in eax, ebx at this point. - __ mov(edx, eax); - __ mov(eax, ebx); - break; - - default: - break; - } -} - - -void GenericBinaryOpStub::Generate(MacroAssembler* masm) { - Label call_runtime; - - __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); - - // Generate fast case smi code if requested. This flag is set when the fast - // case smi code is not generated by the caller. Generating it here will speed - // up common operations. - if (ShouldGenerateSmiCode()) { - GenerateSmiCode(masm, &call_runtime); - } else if (op_ != Token::MOD) { // MOD goes straight to runtime. - if (!HasArgsInRegisters()) { - GenerateLoadArguments(masm); - } - } - - // Floating point case. - if (ShouldGenerateFPCode()) { - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - if (runtime_operands_type_ == BinaryOpIC::DEFAULT && - HasSmiCodeInStub()) { - // Execution reaches this point when the first non-smi argument occurs - // (and only if smi code is generated). This is the right moment to - // patch to HEAP_NUMBERS state. The transition is attempted only for - // the four basic operations. The stub stays in the DEFAULT state - // forever for all other operations (also if smi code is skipped). - GenerateTypeTransition(masm); - break; - } - - Label not_floats; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - if (static_operands_type_.IsNumber()) { - if (FLAG_debug_code) { - // Assert at runtime that inputs are only numbers. 
- __ AbortIfNotNumber(edx); - __ AbortIfNotNumber(eax); - } - if (static_operands_type_.IsSmi()) { - if (FLAG_debug_code) { - __ AbortIfNotSmi(edx); - __ AbortIfNotSmi(eax); - } - FloatingPointHelper::LoadSSE2Smis(masm, ecx); - } else { - FloatingPointHelper::LoadSSE2Operands(masm); - } - } else { - FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime); - } - - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - GenerateHeapResultAllocation(masm, &call_runtime); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - GenerateReturn(masm); - } else { // SSE2 not available, use FPU. - if (static_operands_type_.IsNumber()) { - if (FLAG_debug_code) { - // Assert at runtime that inputs are only numbers. - __ AbortIfNotNumber(edx); - __ AbortIfNotNumber(eax); - } - } else { - FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); - } - FloatingPointHelper::LoadFloatOperands( - masm, - ecx, - FloatingPointHelper::ARGS_IN_REGISTERS); - switch (op_) { - case Token::ADD: __ faddp(1); break; - case Token::SUB: __ fsubp(1); break; - case Token::MUL: __ fmulp(1); break; - case Token::DIV: __ fdivp(1); break; - default: UNREACHABLE(); - } - Label after_alloc_failure; - GenerateHeapResultAllocation(masm, &after_alloc_failure); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - GenerateReturn(masm); - __ bind(&after_alloc_failure); - __ ffree(); - __ jmp(&call_runtime); - } - __ bind(¬_floats); - if (runtime_operands_type_ == BinaryOpIC::DEFAULT && - !HasSmiCodeInStub()) { - // Execution reaches this point when the first non-number argument - // occurs (and only if smi code is skipped from the stub, otherwise - // the patching has already been done earlier in this case branch). - // Try patching to STRINGS for ADD operation. - if (op_ == Token::ADD) { - GenerateTypeTransition(masm); - } - } - break; - } - case Token::MOD: { - // For MOD we go directly to runtime in the non-smi case. - break; - } - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: { - Label non_smi_result; - FloatingPointHelper::LoadAsIntegers(masm, - static_operands_type_, - use_sse3_, - &call_runtime); - switch (op_) { - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; - case Token::SAR: __ sar_cl(eax); break; - case Token::SHL: __ shl_cl(eax); break; - case Token::SHR: __ shr_cl(eax); break; - default: UNREACHABLE(); - } - if (op_ == Token::SHR) { - // Check if result is non-negative and fits in a smi. - __ test(eax, Immediate(0xc0000000)); - __ j(not_zero, &call_runtime); - } else { - // Check if result fits in a smi. - __ cmp(eax, 0xc0000000); - __ j(negative, &non_smi_result); - } - // Tag smi result and return. - __ SmiTag(eax); - GenerateReturn(masm); - - // All ops except SHR return a signed int32 that we load in - // a HeapNumber. - if (op_ != Token::SHR) { - __ bind(&non_smi_result); - // Allocate a heap number if needed. - __ mov(ebx, Operand(eax)); // ebx: result - Label skip_allocation; - switch (mode_) { - case OVERWRITE_LEFT: - case OVERWRITE_RIGHT: - // If the operand was an object, we skip the - // allocation of a heap number. - __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? 
-                              1 * kPointerSize : 2 * kPointerSize));
-            __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
-            // Fall through!
-          case NO_OVERWRITE:
-            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
-            __ bind(&skip_allocation);
-            break;
-          default: UNREACHABLE();
-        }
-        // Store the result in the HeapNumber and return.
-        if (CpuFeatures::IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
-          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        } else {
-          __ mov(Operand(esp, 1 * kPointerSize), ebx);
-          __ fild_s(Operand(esp, 1 * kPointerSize));
-          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        }
-        GenerateReturn(masm);
-      }
-      break;
-    }
-    default: UNREACHABLE(); break;
-  }
-  }
-
-  // If all else fails, use the runtime system to get the correct
-  // result. If arguments was passed in registers now place them on the
-  // stack in the correct order below the return address.
-  __ bind(&call_runtime);
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  switch (op_) {
-    case Token::ADD: {
-      // Test for string arguments before calling runtime.
-      Label not_strings, not_string1, string1, string1_smi2;
-
-      // If this stub has already generated FP-specific code then the arguments
-      // are already in edx, eax
-      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
-        GenerateLoadArguments(masm);
-      }
-
-      // Registers containing left and right operands respectively.
-      Register lhs, rhs;
-      if (HasArgsReversed()) {
-        lhs = eax;
-        rhs = edx;
-      } else {
-        lhs = edx;
-        rhs = eax;
-      }
-
-      // Test if first argument is a string.
-      __ test(lhs, Immediate(kSmiTagMask));
-      __ j(zero, &not_string1);
-      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &not_string1);
-
-      // First argument is a string, test second.
-      __ test(rhs, Immediate(kSmiTagMask));
-      __ j(zero, &string1_smi2);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &string1);
-
-      // First and second argument are strings. Jump to the string add stub.
-      StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-      __ TailCallStub(&string_add_stub);
-
-      __ bind(&string1_smi2);
-      // First argument is a string, second is a smi. Try to lookup the number
-      // string for the smi in the number string cache.
-      NumberToStringStub::GenerateLookupNumberStringCache(
-          masm, rhs, edi, ebx, ecx, true, &string1);
-
-      // Replace second argument on stack and tailcall string add stub to make
-      // the result.
-      __ mov(Operand(esp, 1 * kPointerSize), edi);
-      __ TailCallStub(&string_add_stub);
-
-      // Only first argument is a string.
-      __ bind(&string1);
-      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
-      // First argument was not a string, test second.
-      __ bind(&not_string1);
-      __ test(rhs, Immediate(kSmiTagMask));
-      __ j(zero, &not_strings);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &not_strings);
-
-      // Only second argument is a string.
-      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
-      __ bind(&not_strings);
-      // Neither argument is a string.
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; - } - case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; - case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; - case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); - break; - case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - break; - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - -void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, - Label* alloc_failure) { - Label skip_allocation; - OverwriteMode mode = mode_; - if (HasArgsReversed()) { - if (mode == OVERWRITE_RIGHT) { - mode = OVERWRITE_LEFT; - } else if (mode == OVERWRITE_LEFT) { - mode = OVERWRITE_RIGHT; - } - } - switch (mode) { - case OVERWRITE_LEFT: { - // If the argument in edx is already an object, we skip the - // allocation of a heap number. - __ test(edx, Immediate(kSmiTagMask)); - __ j(not_zero, &skip_allocation, not_taken); - // Allocate a heap number for the result. Keep eax and edx intact - // for the possible runtime call. - __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); - // Now edx can be overwritten losing one of the arguments as we are - // now done and will not need it any more. - __ mov(edx, Operand(ebx)); - __ bind(&skip_allocation); - // Use object in edx as a result holder - __ mov(eax, Operand(edx)); - break; - } - case OVERWRITE_RIGHT: - // If the argument in eax is already an object, we skip the - // allocation of a heap number. - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &skip_allocation, not_taken); - // Fall through! - case NO_OVERWRITE: - // Allocate a heap number for the result. Keep eax and edx intact - // for the possible runtime call. - __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); - // Now eax can be overwritten losing one of the arguments as we are - // now done and will not need it any more. - __ mov(eax, ebx); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } -} - - -void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { - // If arguments are not passed in registers read them from the stack. - ASSERT(!HasArgsInRegisters()); - __ mov(eax, Operand(esp, 1 * kPointerSize)); - __ mov(edx, Operand(esp, 2 * kPointerSize)); -} - - -void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { - // If arguments are not passed in registers remove them from the stack before - // returning. - if (!HasArgsInRegisters()) { - __ ret(2 * kPointerSize); // Remove both operands - } else { - __ ret(0); - } -} - - -void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { - ASSERT(HasArgsInRegisters()); - __ pop(ecx); - if (HasArgsReversed()) { - __ push(eax); - __ push(edx); - } else { - __ push(edx); - __ push(eax); - } - __ push(ecx); -} - - -void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - // Ensure the operands are on the stack. - if (HasArgsInRegisters()) { - GenerateRegisterArgsPush(masm); - } - - __ pop(ecx); // Save return address. 
- - // Left and right arguments are now on top. - // Push this stub's key. Although the operation and the type info are - // encoded into the key, the encoding is opaque, so push them too. - __ push(Immediate(Smi::FromInt(MinorKey()))); - __ push(Immediate(Smi::FromInt(op_))); - __ push(Immediate(Smi::FromInt(runtime_operands_type_))); - - __ push(ecx); // Push return address. - - // Patch the caller to an appropriate specialized stub and return the - // operation result to the caller of the stub. - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), - 5, - 1); -} - - -Handle GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { - GenericBinaryOpStub stub(key, type_info); - return stub.GetCode(); -} - - -void TranscendentalCacheStub::Generate(MacroAssembler* masm) { - // Input on stack: - // esp[4]: argument (should be number). - // esp[0]: return address. - // Test that eax is a number. - Label runtime_call; - Label runtime_call_clear_stack; - Label input_not_smi; - Label loaded; - __ mov(eax, Operand(esp, kPointerSize)); - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &input_not_smi); - // Input is a smi. Untag and load it onto the FPU stack. - // Then load the low and high words of the double into ebx, edx. - STATIC_ASSERT(kSmiTagSize == 1); - __ sar(eax, 1); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); - __ mov(Operand(esp, 0), eax); - __ fild_s(Operand(esp, 0)); - __ fst_d(Operand(esp, 0)); - __ pop(edx); - __ pop(ebx); - __ jmp(&loaded); - __ bind(&input_not_smi); - // Check if input is a HeapNumber. - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(Operand(ebx), Immediate(Factory::heap_number_map())); - __ j(not_equal, &runtime_call); - // Input is a HeapNumber. Push it on the FPU stack and load its - // low and high words into ebx, edx. - __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); - __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset)); - - __ bind(&loaded); - // ST[0] == double value - // ebx = low 32 bits of double value - // edx = high 32 bits of double value - // Compute hash (the shifts are arithmetic): - // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); - __ mov(ecx, ebx); - __ xor_(ecx, Operand(edx)); - __ mov(eax, ecx); - __ sar(eax, 16); - __ xor_(ecx, Operand(eax)); - __ mov(eax, ecx); - __ sar(eax, 8); - __ xor_(ecx, Operand(eax)); - ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); - __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1)); - - // ST[0] == double value. - // ebx = low 32 bits of double value. - // edx = high 32 bits of double value. - // ecx = TranscendentalCache::hash(double value). - __ mov(eax, - Immediate(ExternalReference::transcendental_cache_array_address())); - // Eax points to cache array. - __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0]))); - // Eax points to the cache for the type type_. - // If NULL, the cache hasn't been initialized yet, so go through runtime. - __ test(eax, Operand(eax)); - __ j(zero, &runtime_call_clear_stack); -#ifdef DEBUG - // Check that the layout of cache elements match expectations. 
- { TranscendentalCache::Element test_elem[2]; - char* elem_start = reinterpret_cast(&test_elem[0]); - char* elem2_start = reinterpret_cast(&test_elem[1]); - char* elem_in0 = reinterpret_cast(&(test_elem[0].in[0])); - char* elem_in1 = reinterpret_cast(&(test_elem[0].in[1])); - char* elem_out = reinterpret_cast(&(test_elem[0].output)); - CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. - CHECK_EQ(0, elem_in0 - elem_start); - CHECK_EQ(kIntSize, elem_in1 - elem_start); - CHECK_EQ(2 * kIntSize, elem_out - elem_start); - } -#endif - // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12]. - __ lea(ecx, Operand(ecx, ecx, times_2, 0)); - __ lea(ecx, Operand(eax, ecx, times_4, 0)); - // Check if cache matches: Double value is stored in uint32_t[2] array. - Label cache_miss; - __ cmp(ebx, Operand(ecx, 0)); - __ j(not_equal, &cache_miss); - __ cmp(edx, Operand(ecx, kIntSize)); - __ j(not_equal, &cache_miss); - // Cache hit! - __ mov(eax, Operand(ecx, 2 * kIntSize)); - __ fstp(0); - __ ret(kPointerSize); - - __ bind(&cache_miss); - // Update cache with new value. - // We are short on registers, so use no_reg as scratch. - // This gives slightly larger code. - __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); - GenerateOperation(masm); - __ mov(Operand(ecx, 0), ebx); - __ mov(Operand(ecx, kIntSize), edx); - __ mov(Operand(ecx, 2 * kIntSize), eax); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ ret(kPointerSize); - - __ bind(&runtime_call_clear_stack); - __ fstp(0); - __ bind(&runtime_call); - __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); -} - - -Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { - switch (type_) { - // Add more cases when necessary. - case TranscendentalCache::SIN: return Runtime::kMath_sin; - case TranscendentalCache::COS: return Runtime::kMath_cos; - default: - UNIMPLEMENTED(); - return Runtime::kAbort; - } -} - - -void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { - // Only free register is edi. - Label done; - ASSERT(type_ == TranscendentalCache::SIN || - type_ == TranscendentalCache::COS); - // More transcendental types can be added later. - - // Both fsin and fcos require arguments in the range +/-2^63 and - // return NaN for infinities and NaN. They can share all code except - // the actual fsin/fcos operation. - Label in_range; - // If argument is outside the range -2^63..2^63, fsin/cos doesn't - // work. We must reduce it to the appropriate range. - __ mov(edi, edx); - __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only. - int supported_exponent_limit = - (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift; - __ cmp(Operand(edi), Immediate(supported_exponent_limit)); - __ j(below, &in_range, taken); - // Check for infinity and NaN. Both return NaN for sin. - __ cmp(Operand(edi), Immediate(0x7ff00000)); - Label non_nan_result; - __ j(not_equal, &non_nan_result, taken); - // Input is +/-Infinity or NaN. Result is NaN. - __ fstp(0); - // NaN is represented by 0x7ff8000000000000. - __ push(Immediate(0x7ff80000)); - __ push(Immediate(0)); - __ fld_d(Operand(esp, 0)); - __ add(Operand(esp), Immediate(2 * kPointerSize)); - __ jmp(&done); - - __ bind(&non_nan_result); - - // Use fpmod to restrict argument to the range +/-2*PI. - __ mov(edi, eax); // Save eax before using fnstsw_ax. - __ fldpi(); - __ fadd(0); - __ fld(1); - // FPU Stack: input, 2*pi, input. 
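// [Editor's sketch, not part of the diff] A minimal illustration of the range
// check performed above: fsin/fcos only handle |x| < 2^63, so the stub looks
// at the biased exponent field in the high word of the double (bias 1023,
// starting at bit 20). Hedged sketch; the name is illustrative.
#include <cstdint>
static bool FitsFsinFcosRange(uint32_t high_word) {
  const uint32_t kExponentFieldMask = 0x7FF00000u;
  const uint32_t kLimit = (63u + 1023u) << 20;       // exponent field of 2^63
  return (high_word & kExponentFieldMask) < kLimit;  // also rejects Inf and NaN
}
// Out-of-range finite inputs are reduced modulo 2*pi with the fprem1 loop that
// follows; Infinity and NaN simply yield NaN.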
- { - Label no_exceptions; - __ fwait(); - __ fnstsw_ax(); - // Clear if Illegal Operand or Zero Division exceptions are set. - __ test(Operand(eax), Immediate(5)); - __ j(zero, &no_exceptions); - __ fnclex(); - __ bind(&no_exceptions); - } - - // Compute st(0) % st(1) - { - Label partial_remainder_loop; - __ bind(&partial_remainder_loop); - __ fprem1(); - __ fwait(); - __ fnstsw_ax(); - __ test(Operand(eax), Immediate(0x400 /* C2 */)); - // If C2 is set, computation only has partial result. Loop to - // continue computation. - __ j(not_zero, &partial_remainder_loop); - } - // FPU Stack: input, 2*pi, input % 2*pi - __ fstp(2); - __ fstp(0); - __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer). - - // FPU Stack: input % 2*pi - __ bind(&in_range); - switch (type_) { - case TranscendentalCache::SIN: - __ fsin(); - break; - case TranscendentalCache::COS: - __ fcos(); - break; - default: - UNREACHABLE(); - } - __ bind(&done); -} - - -// Get the integer part of a heap number. Surprisingly, all this bit twiddling -// is faster than using the built-in instructions on floating point registers. -// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the -// trashed registers. -void IntegerConvert(MacroAssembler* masm, - Register source, - TypeInfo type_info, - bool use_sse3, - Label* conversion_failure) { - ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); - Label done, right_exponent, normal_exponent; - Register scratch = ebx; - Register scratch2 = edi; - if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) { - CpuFeatures::Scope scope(SSE2); - __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset)); - return; - } - if (!type_info.IsInteger32() || !use_sse3) { - // Get exponent word. - __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); - // Get exponent alone in scratch2. - __ mov(scratch2, scratch); - __ and_(scratch2, HeapNumber::kExponentMask); - } - if (use_sse3) { - CpuFeatures::Scope scope(SSE3); - if (!type_info.IsInteger32()) { - // Check whether the exponent is too big for a 64 bit signed integer. - static const uint32_t kTooBigExponent = - (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(kTooBigExponent)); - __ j(greater_equal, conversion_failure); - } - // Load x87 register with heap number. - __ fld_d(FieldOperand(source, HeapNumber::kValueOffset)); - // Reserve space for 64 bit answer. - __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. - // Do conversion, which cannot fail because we checked the exponent. - __ fisttp_d(Operand(esp, 0)); - __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. - __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint. - } else { - // Load ecx with zero. We use this either for the final shift or - // for the answer. - __ xor_(ecx, Operand(ecx)); - // Check whether the exponent matches a 32 bit signed int that cannot be - // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the - // exponent is 30 (biased). This is the exponent that we are fastest at and - // also the highest exponent we can handle here. - const uint32_t non_smi_exponent = - (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(non_smi_exponent)); - // If we have a match of the int32-but-not-Smi exponent then skip some - // logic. - __ j(equal, &right_exponent); - // If the exponent is higher than that then go to slow case. 
This catches - // numbers that don't fit in a signed int32, infinities and NaNs. - __ j(less, &normal_exponent); - - { - // Handle a big exponent. The only reason we have this code is that the - // >>> operator has a tendency to generate numbers with an exponent of 31. - const uint32_t big_non_smi_exponent = - (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; - __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent)); - __ j(not_equal, conversion_failure); - // We have the big exponent, typically from >>>. This means the number is - // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa. - __ mov(scratch2, scratch); - __ and_(scratch2, HeapNumber::kMantissaMask); - // Put back the implicit 1. - __ or_(scratch2, 1 << HeapNumber::kExponentShift); - // Shift up the mantissa bits to take up the space the exponent used to - // take. We just orred in the implicit bit so that took care of one and - // we want to use the full unsigned range so we subtract 1 bit from the - // shift distance. - const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1; - __ shl(scratch2, big_shift_distance); - // Get the second half of the double. - __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset)); - // Shift down 21 bits to get the most significant 11 bits or the low - // mantissa word. - __ shr(ecx, 32 - big_shift_distance); - __ or_(ecx, Operand(scratch2)); - // We have the answer in ecx, but we may need to negate it. - __ test(scratch, Operand(scratch)); - __ j(positive, &done); - __ neg(ecx); - __ jmp(&done); - } - - __ bind(&normal_exponent); - // Exponent word in scratch, exponent part of exponent word in scratch2. - // Zero in ecx. - // We know the exponent is smaller than 30 (biased). If it is less than - // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie - // it rounds to zero. - const uint32_t zero_exponent = - (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; - __ sub(Operand(scratch2), Immediate(zero_exponent)); - // ecx already has a Smi zero. - __ j(less, &done); - - // We have a shifted exponent between 0 and 30 in scratch2. - __ shr(scratch2, HeapNumber::kExponentShift); - __ mov(ecx, Immediate(30)); - __ sub(ecx, Operand(scratch2)); - - __ bind(&right_exponent); - // Here ecx is the shift, scratch is the exponent word. - // Get the top bits of the mantissa. - __ and_(scratch, HeapNumber::kMantissaMask); - // Put back the implicit 1. - __ or_(scratch, 1 << HeapNumber::kExponentShift); - // Shift up the mantissa bits to take up the space the exponent used to - // take. We have kExponentShift + 1 significant bits int he low end of the - // word. Shift them to the top bits. - const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; - __ shl(scratch, shift_distance); - // Get the second half of the double. For some exponents we don't - // actually need this because the bits get shifted out again, but - // it's probably slower to test than just to do it. - __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset)); - // Shift down 22 bits to get the most significant 10 bits or the low - // mantissa word. - __ shr(scratch2, 32 - shift_distance); - __ or_(scratch2, Operand(scratch)); - // Move down according to the exponent. - __ shr_cl(scratch2); - // Now the unsigned answer is in scratch2. We need to move it to ecx and - // we may need to fix the sign. 
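// [Editor's sketch, not part of the diff] For reference, the net effect of the
// bit twiddling above for the exponents this path accepts (|value| < 2^32):
// split the double into sign, biased exponent and mantissa, restore the
// implicit leading 1, shift the mantissa into place and keep the low 32 bits
// (ECMA-262 ToInt32-style truncation). The sign fix follows below. Hedged
// sketch with illustrative names only.
#include <cstdint>
#include <cstring>
static bool TruncateDoubleToInt32(double value, int32_t* result) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // unbias
  if (exponent < 0) { *result = 0; return true; }   // |value| < 1 rounds to zero
  if (exponent > 31) return false;                  // the stub bails to the runtime
  uint64_t mantissa = (bits & ((uint64_t(1) << 52) - 1)) | (uint64_t(1) << 52);
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  uint32_t low32 = (bits >> 63) ? (0u - magnitude) : magnitude;  // sign, mod 2^32
  *result = static_cast<int32_t>(low32);
  return true;
}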
- Label negative; - __ xor_(ecx, Operand(ecx)); - __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset)); - __ j(greater, &negative); - __ mov(ecx, scratch2); - __ jmp(&done); - __ bind(&negative); - __ sub(ecx, Operand(scratch2)); - __ bind(&done); - } -} - - -// Input: edx, eax are the left and right objects of a bit op. -// Output: eax, ecx are left and right integers for a bit op. -void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, - TypeInfo type_info, - bool use_sse3, - Label* conversion_failure) { - // Check float operands. - Label arg1_is_object, check_undefined_arg1; - Label arg2_is_object, check_undefined_arg2; - Label load_arg2, done; - - if (!type_info.IsDouble()) { - if (!type_info.IsSmi()) { - __ test(edx, Immediate(kSmiTagMask)); - __ j(not_zero, &arg1_is_object); - } else { - if (FLAG_debug_code) __ AbortIfNotSmi(edx); - } - __ SmiUntag(edx); - __ jmp(&load_arg2); - } - - __ bind(&arg1_is_object); - - // Get the untagged integer version of the edx heap number in ecx. - IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure); - __ mov(edx, ecx); - - // Here edx has the untagged integer, eax has a Smi or a heap number. - __ bind(&load_arg2); - if (!type_info.IsDouble()) { - // Test if arg2 is a Smi. - if (!type_info.IsSmi()) { - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &arg2_is_object); - } else { - if (FLAG_debug_code) __ AbortIfNotSmi(eax); - } - __ SmiUntag(eax); - __ mov(ecx, eax); - __ jmp(&done); - } - - __ bind(&arg2_is_object); - - // Get the untagged integer version of the eax heap number in ecx. - IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure); - __ bind(&done); - __ mov(eax, edx); -} - - -// Input: edx, eax are the left and right objects of a bit op. -// Output: eax, ecx are left and right integers for a bit op. -void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, - bool use_sse3, - Label* conversion_failure) { - // Check float operands. - Label arg1_is_object, check_undefined_arg1; - Label arg2_is_object, check_undefined_arg2; - Label load_arg2, done; - - // Test if arg1 is a Smi. - __ test(edx, Immediate(kSmiTagMask)); - __ j(not_zero, &arg1_is_object); - - __ SmiUntag(edx); - __ jmp(&load_arg2); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg1); - __ cmp(edx, Factory::undefined_value()); - __ j(not_equal, conversion_failure); - __ mov(edx, Immediate(0)); - __ jmp(&load_arg2); - - __ bind(&arg1_is_object); - __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); - __ cmp(ebx, Factory::heap_number_map()); - __ j(not_equal, &check_undefined_arg1); - - // Get the untagged integer version of the edx heap number in ecx. - IntegerConvert(masm, - edx, - TypeInfo::Unknown(), - use_sse3, - conversion_failure); - __ mov(edx, ecx); - - // Here edx has the untagged integer, eax has a Smi or a heap number. - __ bind(&load_arg2); - - // Test if arg2 is a Smi. - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &arg2_is_object); - - __ SmiUntag(eax); - __ mov(ecx, eax); - __ jmp(&done); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). 
- __ bind(&check_undefined_arg2); - __ cmp(eax, Factory::undefined_value()); - __ j(not_equal, conversion_failure); - __ mov(ecx, Immediate(0)); - __ jmp(&done); - - __ bind(&arg2_is_object); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(ebx, Factory::heap_number_map()); - __ j(not_equal, &check_undefined_arg2); - - // Get the untagged integer version of the eax heap number in ecx. - IntegerConvert(masm, - eax, - TypeInfo::Unknown(), - use_sse3, - conversion_failure); - __ bind(&done); - __ mov(eax, edx); -} - - -void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, - TypeInfo type_info, - bool use_sse3, - Label* conversion_failure) { - if (type_info.IsNumber()) { - LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure); - } else { - LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure); - } -} - - -void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, - Register number) { - Label load_smi, done; - - __ test(number, Immediate(kSmiTagMask)); - __ j(zero, &load_smi, not_taken); - __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi); - __ SmiUntag(number); - __ push(number); - __ fild_s(Operand(esp, 0)); - __ pop(number); - - __ bind(&done); -} - - -void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) { - Label load_smi_edx, load_eax, load_smi_eax, done; - // Load operand in edx into xmm0. - __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. - __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); - - __ bind(&load_eax); - // Load operand in eax into xmm1. - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi_edx); - __ SmiUntag(edx); // Untag smi before converting to float. - __ cvtsi2sd(xmm0, Operand(edx)); - __ SmiTag(edx); // Retag smi for heap number overwriting test. - __ jmp(&load_eax); - - __ bind(&load_smi_eax); - __ SmiUntag(eax); // Untag smi before converting to float. - __ cvtsi2sd(xmm1, Operand(eax)); - __ SmiTag(eax); // Retag smi for heap number overwriting test. - - __ bind(&done); -} - - -void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, - Label* not_numbers) { - Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done; - // Load operand in edx into xmm0, or branch to not_numbers. - __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi. - __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map()); - __ j(not_equal, not_numbers); // Argument in edx is not a number. - __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); - __ bind(&load_eax); - // Load operand in eax into xmm1, or branch to not_numbers. - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi. - __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map()); - __ j(equal, &load_float_eax); - __ jmp(not_numbers); // Argument in eax is not a number. - __ bind(&load_smi_edx); - __ SmiUntag(edx); // Untag smi before converting to float. - __ cvtsi2sd(xmm0, Operand(edx)); - __ SmiTag(edx); // Retag smi for heap number overwriting test. - __ jmp(&load_eax); - __ bind(&load_smi_eax); - __ SmiUntag(eax); // Untag smi before converting to float. 
- __ cvtsi2sd(xmm1, Operand(eax)); - __ SmiTag(eax); // Retag smi for heap number overwriting test. - __ jmp(&done); - __ bind(&load_float_eax); - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); - __ bind(&done); -} - - -void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, - Register scratch) { - const Register left = edx; - const Register right = eax; - __ mov(scratch, left); - ASSERT(!scratch.is(right)); // We're about to clobber scratch. - __ SmiUntag(scratch); - __ cvtsi2sd(xmm0, Operand(scratch)); - - __ mov(scratch, right); - __ SmiUntag(scratch); - __ cvtsi2sd(xmm1, Operand(scratch)); -} - - -void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, - Register scratch, - ArgLocation arg_location) { - Label load_smi_1, load_smi_2, done_load_1, done; - if (arg_location == ARGS_IN_REGISTERS) { - __ mov(scratch, edx); - } else { - __ mov(scratch, Operand(esp, 2 * kPointerSize)); - } - __ test(scratch, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_1, not_taken); - __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); - __ bind(&done_load_1); - - if (arg_location == ARGS_IN_REGISTERS) { - __ mov(scratch, eax); - } else { - __ mov(scratch, Operand(esp, 1 * kPointerSize)); - } - __ test(scratch, Immediate(kSmiTagMask)); - __ j(zero, &load_smi_2, not_taken); - __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); - __ jmp(&done); - - __ bind(&load_smi_1); - __ SmiUntag(scratch); - __ push(scratch); - __ fild_s(Operand(esp, 0)); - __ pop(scratch); - __ jmp(&done_load_1); - - __ bind(&load_smi_2); - __ SmiUntag(scratch); - __ push(scratch); - __ fild_s(Operand(esp, 0)); - __ pop(scratch); - - __ bind(&done); -} - - -void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, - Register scratch) { - const Register left = edx; - const Register right = eax; - __ mov(scratch, left); - ASSERT(!scratch.is(right)); // We're about to clobber scratch. - __ SmiUntag(scratch); - __ push(scratch); - __ fild_s(Operand(esp, 0)); - - __ mov(scratch, right); - __ SmiUntag(scratch); - __ mov(Operand(esp, 0), scratch); - __ fild_s(Operand(esp, 0)); - __ pop(scratch); -} - - -void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, - Label* non_float, - Register scratch) { - Label test_other, done; - // Test if both operands are floats or smi -> scratch=k_is_float; - // Otherwise scratch = k_not_float. - __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &test_other, not_taken); // argument in edx is OK - __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset)); - __ cmp(scratch, Factory::heap_number_map()); - __ j(not_equal, non_float); // argument in edx is not a number -> NaN - - __ bind(&test_other); - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &done); // argument in eax is OK - __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(scratch, Factory::heap_number_map()); - __ j(not_equal, non_float); // argument in eax is not a number -> NaN - - // Fall-through: Both operands are numbers. - __ bind(&done); -} - - -void GenericUnaryOpStub::Generate(MacroAssembler* masm) { - Label slow, done; - - if (op_ == Token::SUB) { - // Check whether the value is a smi. - Label try_float; - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &try_float, not_taken); - - if (negative_zero_ == kStrictNegativeZero) { - // Go slow case if the value of the expression is zero - // to make sure that we switch between 0 and -0. 
- __ test(eax, Operand(eax)); - __ j(zero, &slow, not_taken); - } - - // The value of the expression is a smi that is not zero. Try - // optimistic subtraction '0 - value'. - Label undo; - __ mov(edx, Operand(eax)); - __ Set(eax, Immediate(0)); - __ sub(eax, Operand(edx)); - __ j(no_overflow, &done, taken); - - // Restore eax and go slow case. - __ bind(&undo); - __ mov(eax, Operand(edx)); - __ jmp(&slow); - - // Try floating point case. - __ bind(&try_float); - __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(edx, Factory::heap_number_map()); - __ j(not_equal, &slow); - if (overwrite_ == UNARY_OVERWRITE) { - __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset)); - __ xor_(edx, HeapNumber::kSignMask); // Flip sign. - __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx); - } else { - __ mov(edx, Operand(eax)); - // edx: operand - __ AllocateHeapNumber(eax, ebx, ecx, &undo); - // eax: allocated 'empty' number - __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset)); - __ xor_(ecx, HeapNumber::kSignMask); // Flip sign. - __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx); - __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset)); - __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx); - } - } else if (op_ == Token::BIT_NOT) { - // Check if the operand is a heap number. - __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(edx, Factory::heap_number_map()); - __ j(not_equal, &slow, not_taken); - - // Convert the heap number in eax to an untagged integer in ecx. - IntegerConvert(masm, - eax, - TypeInfo::Unknown(), - CpuFeatures::IsSupported(SSE3), - &slow); - - // Do the bitwise operation and check if the result fits in a smi. - Label try_float; - __ not_(ecx); - __ cmp(ecx, 0xc0000000); - __ j(sign, &try_float, not_taken); - - // Tag the result as a smi and we're done. - STATIC_ASSERT(kSmiTagSize == 1); - __ lea(eax, Operand(ecx, times_2, kSmiTag)); - __ jmp(&done); - - // Try to store the result in a heap number. - __ bind(&try_float); - if (overwrite_ == UNARY_NO_OVERWRITE) { - // Allocate a fresh heap number, but don't overwrite eax until - // we're sure we can do it without going through the slow case - // that needs the value in eax. - __ AllocateHeapNumber(ebx, edx, edi, &slow); - __ mov(eax, Operand(ebx)); - } - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ cvtsi2sd(xmm0, Operand(ecx)); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - __ push(ecx); - __ fild_s(Operand(esp, 0)); - __ pop(ecx); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - } else { - UNIMPLEMENTED(); - } - - // Return from the stub. - __ bind(&done); - __ StubReturn(1); - - // Handle the slow case by jumping to the JavaScript builtin. - __ bind(&slow); - __ pop(ecx); // pop return address. - __ push(eax); - __ push(ecx); // push return address - switch (op_) { - case Token::SUB: - __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); - break; - case Token::BIT_NOT: - __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - -void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { - // The key is in edx and the parameter count is in eax. - - // The displacement is used for skipping the frame pointer on the - // stack. It is the offset of the last parameter (if any) relative - // to the frame pointer. - static const int kDisplacement = 1 * kPointerSize; - - // Check that the key is a smi. 
- Label slow; - __ test(edx, Immediate(kSmiTagMask)); - __ j(not_zero, &slow, not_taken); - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor; - __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(equal, &adaptor); - - // Check index against formal parameters count limit passed in - // through register eax. Use unsigned comparison to get negative - // check for free. - __ cmp(edx, Operand(eax)); - __ j(above_equal, &slow, not_taken); - - // Read the argument from the stack and return it. - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. - __ lea(ebx, Operand(ebp, eax, times_2, 0)); - __ neg(edx); - __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); - __ ret(0); - - // Arguments adaptor case: Check index against actual arguments - // limit found in the arguments adaptor frame. Use unsigned - // comparison to get negative check for free. - __ bind(&adaptor); - __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ cmp(edx, Operand(ecx)); - __ j(above_equal, &slow, not_taken); - - // Read the argument from the stack and return it. - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these. - __ lea(ebx, Operand(ebx, ecx, times_2, 0)); - __ neg(edx); - __ mov(eax, Operand(ebx, edx, times_2, kDisplacement)); - __ ret(0); - - // Slow-case: Handle non-smi or out-of-bounds access to arguments - // by calling the runtime system. - __ bind(&slow); - __ pop(ebx); // Return address. - __ push(edx); - __ push(ebx); - __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); -} - - -void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { - // esp[0] : return address - // esp[4] : number of parameters - // esp[8] : receiver displacement - // esp[16] : function - - // The displacement is used for skipping the return address and the - // frame pointer on the stack. It is the offset of the last - // parameter (if any) relative to the frame pointer. - static const int kDisplacement = 2 * kPointerSize; - - // Check if the calling frame is an arguments adaptor frame. - Label adaptor_frame, try_allocate, runtime; - __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); - __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset)); - __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); - __ j(equal, &adaptor_frame); - - // Get the length from the frame. - __ mov(ecx, Operand(esp, 1 * kPointerSize)); - __ jmp(&try_allocate); - - // Patch the arguments.length and the parameters pointer. - __ bind(&adaptor_frame); - __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ mov(Operand(esp, 1 * kPointerSize), ecx); - __ lea(edx, Operand(edx, ecx, times_2, kDisplacement)); - __ mov(Operand(esp, 2 * kPointerSize), edx); - - // Try the new space allocation. Start out with computing the size of - // the arguments object and the elements array. - Label add_arguments_object; - __ bind(&try_allocate); - __ test(ecx, Operand(ecx)); - __ j(zero, &add_arguments_object); - __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize)); - __ bind(&add_arguments_object); - __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize)); - - // Do the allocation of both objects in one go. 
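// [Editor's sketch, not part of the diff] The allocation size computed just
// above, written out. The parameters stand for Heap::kArgumentsObjectSize,
// FixedArray::kHeaderSize and kPointerSize (4 on ia32); argc is the untagged
// argument count (the smi in ecx is 2 * argc, so the extra times_2 scaling
// yields argc * kPointerSize). Hedged illustration only.
static int ArgumentsAllocationSize(int argc,
                                   int arguments_object_size,
                                   int fixed_array_header_size,
                                   int pointer_size) {
  if (argc == 0) return arguments_object_size;       // no elements array needed
  return arguments_object_size + fixed_array_header_size + argc * pointer_size;
}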
- __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT); - - // Get the arguments boilerplate from the current (global) context. - int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); - __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); - __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset)); - __ mov(edi, Operand(edi, offset)); - - // Copy the JS object part. - for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { - __ mov(ebx, FieldOperand(edi, i)); - __ mov(FieldOperand(eax, i), ebx); - } - - // Setup the callee in-object property. - STATIC_ASSERT(Heap::arguments_callee_index == 0); - __ mov(ebx, Operand(esp, 3 * kPointerSize)); - __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx); - - // Get the length (smi tagged) and set that as an in-object property too. - STATIC_ASSERT(Heap::arguments_length_index == 1); - __ mov(ecx, Operand(esp, 1 * kPointerSize)); - __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx); - - // If there are no actual arguments, we're done. - Label done; - __ test(ecx, Operand(ecx)); - __ j(zero, &done); - - // Get the parameters pointer from the stack. - __ mov(edx, Operand(esp, 2 * kPointerSize)); - - // Setup the elements pointer in the allocated arguments object and - // initialize the header in the elements fixed array. - __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize)); - __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi); - __ mov(FieldOperand(edi, FixedArray::kMapOffset), - Immediate(Factory::fixed_array_map())); - __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx); - // Untag the length for the loop below. - __ SmiUntag(ecx); - - // Copy the fixed array slots. - Label loop; - __ bind(&loop); - __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver. - __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx); - __ add(Operand(edi), Immediate(kPointerSize)); - __ sub(Operand(edx), Immediate(kPointerSize)); - __ dec(ecx); - __ j(not_zero, &loop); - - // Return and remove the on-stack parameters. - __ bind(&done); - __ ret(3 * kPointerSize); - - // Do the runtime call to allocate the arguments object. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); -} - - -void RegExpExecStub::Generate(MacroAssembler* masm) { - // Just jump directly to runtime if native RegExp is not selected at compile - // time or if regexp entry in generated code is turned off runtime switch or - // at compilation. -#ifdef V8_INTERPRETED_REGEXP - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); -#else // V8_INTERPRETED_REGEXP - if (!FLAG_regexp_entry_native) { - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); - return; - } - - // Stack frame on entry. - // esp[0]: return address - // esp[4]: last_match_info (expected JSArray) - // esp[8]: previous index - // esp[12]: subject string - // esp[16]: JSRegExp object - - static const int kLastMatchInfoOffset = 1 * kPointerSize; - static const int kPreviousIndexOffset = 2 * kPointerSize; - static const int kSubjectOffset = 3 * kPointerSize; - static const int kJSRegExpOffset = 4 * kPointerSize; - - Label runtime, invoke_regexp; - - // Ensure that a RegExp stack is allocated. 
- ExternalReference address_of_regexp_stack_memory_address = - ExternalReference::address_of_regexp_stack_memory_address(); - ExternalReference address_of_regexp_stack_memory_size = - ExternalReference::address_of_regexp_stack_memory_size(); - __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); - __ test(ebx, Operand(ebx)); - __ j(zero, &runtime, not_taken); - - // Check that the first argument is a JSRegExp object. - __ mov(eax, Operand(esp, kJSRegExpOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &runtime); - __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx); - __ j(not_equal, &runtime); - // Check that the RegExp has been compiled (data contains a fixed array). - __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); - if (FLAG_debug_code) { - __ test(ecx, Immediate(kSmiTagMask)); - __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected"); - __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx); - __ Check(equal, "Unexpected type for RegExp data, FixedArray expected"); - } - - // ecx: RegExp data (FixedArray) - // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. - __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset)); - __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP))); - __ j(not_equal, &runtime); - - // ecx: RegExp data (FixedArray) - // Check that the number of captures fit in the static offsets vector buffer. - __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. This - // uses the asumption that smis are 2 * their untagged value. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(Operand(edx), Immediate(2)); // edx was a smi. - // Check that the static offsets vector buffer is large enough. - __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize); - __ j(above, &runtime); - - // ecx: RegExp data (FixedArray) - // edx: Number of capture registers - // Check that the second argument is a string. - __ mov(eax, Operand(esp, kSubjectOffset)); - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &runtime); - Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); - __ j(NegateCondition(is_string), &runtime); - // Get the length of the string to ebx. - __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); - - // ebx: Length of subject string as a smi - // ecx: RegExp data (FixedArray) - // edx: Number of capture registers - // Check that the third argument is a positive smi less than the subject - // string length. A negative value will be greater (unsigned comparison). - __ mov(eax, Operand(esp, kPreviousIndexOffset)); - __ test(eax, Immediate(kSmiTagMask)); - __ j(not_zero, &runtime); - __ cmp(eax, Operand(ebx)); - __ j(above_equal, &runtime); - - // ecx: RegExp data (FixedArray) - // edx: Number of capture registers - // Check that the fourth object is a JSArray object. - __ mov(eax, Operand(esp, kLastMatchInfoOffset)); - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &runtime); - __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); - __ j(not_equal, &runtime); - // Check that the JSArray is in fast case. - __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); - __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset)); - __ cmp(eax, Factory::fixed_array_map()); - __ j(not_equal, &runtime); - // Check that the last match info has space for the capture registers and the - // additional information. 
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); - __ SmiUntag(eax); - __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead)); - __ cmp(edx, Operand(eax)); - __ j(greater, &runtime); - - // ecx: RegExp data (FixedArray) - // Check the representation and encoding of the subject string. - Label seq_ascii_string, seq_two_byte_string, check_code; - __ mov(eax, Operand(esp, kSubjectOffset)); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); - // First check for flat two byte string. - __ and_(ebx, - kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); - STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string); - // Any other flat string must be a flat ascii string. - __ test(Operand(ebx), - Immediate(kIsNotStringMask | kStringRepresentationMask)); - __ j(zero, &seq_ascii_string); - - // Check for flat cons string. - // A flat cons string is a cons string where the second part is the empty - // string. In that case the subject string is just the first part of the cons - // string. Also in this case the first part of the cons string is known to be - // a sequential string or an external string. - STATIC_ASSERT(kExternalStringTag != 0); - STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); - __ test(Operand(ebx), - Immediate(kIsNotStringMask | kExternalStringTag)); - __ j(not_zero, &runtime); - // String is a cons string. - __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset)); - __ cmp(Operand(edx), Factory::empty_string()); - __ j(not_equal, &runtime); - __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset)); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - // String is a cons string with empty second part. - // eax: first part of cons string. - // ebx: map of first part of cons string. - // Is first part a flat two byte string? - __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), - kStringRepresentationMask | kStringEncodingMask); - STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string); - // Any other flat string must be ascii. - __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), - kStringRepresentationMask); - __ j(not_zero, &runtime); - - __ bind(&seq_ascii_string); - // eax: subject string (flat ascii) - // ecx: RegExp data (FixedArray) - __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset)); - __ Set(edi, Immediate(1)); // Type is ascii. - __ jmp(&check_code); - - __ bind(&seq_two_byte_string); - // eax: subject string (flat two byte) - // ecx: RegExp data (FixedArray) - __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset)); - __ Set(edi, Immediate(0)); // Type is two byte. - - __ bind(&check_code); - // Check that the irregexp code has been generated for the actual string - // encoding. If it has, the field contains a code object otherwise it contains - // the hole. - __ CmpObjectType(edx, CODE_TYPE, ebx); - __ j(not_equal, &runtime); - - // eax: subject string - // edx: code - // edi: encoding of subject string (1 if ascii, 0 if two_byte); - // Load used arguments before starting to push arguments for call to native - // RegExp code to avoid handling changing stack height. - __ mov(ebx, Operand(esp, kPreviousIndexOffset)); - __ SmiUntag(ebx); // Previous index from smi. - - // eax: subject string - // ebx: previous index - // edx: code - // edi: encoding of subject string (1 if ascii 0 if two_byte); - // All checks done. 
Now push arguments for native regexp code. - __ IncrementCounter(&Counters::regexp_entry_native, 1); - - static const int kRegExpExecuteArguments = 7; - __ PrepareCallCFunction(kRegExpExecuteArguments, ecx); - - // Argument 7: Indicate that this is a direct call from JavaScript. - __ mov(Operand(esp, 6 * kPointerSize), Immediate(1)); - - // Argument 6: Start (high end) of backtracking stack memory area. - __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address)); - __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size)); - __ mov(Operand(esp, 5 * kPointerSize), ecx); - - // Argument 5: static offsets vector buffer. - __ mov(Operand(esp, 4 * kPointerSize), - Immediate(ExternalReference::address_of_static_offsets_vector())); - - // Argument 4: End of string data - // Argument 3: Start of string data - Label setup_two_byte, setup_rest; - __ test(edi, Operand(edi)); - __ mov(edi, FieldOperand(eax, String::kLengthOffset)); - __ j(zero, &setup_two_byte); - __ SmiUntag(edi); - __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize)); - __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. - __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize)); - __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. - __ jmp(&setup_rest); - - __ bind(&setup_two_byte); - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize == 1); // edi is smi (powered by 2). - __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize)); - __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. - __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize)); - __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. - - __ bind(&setup_rest); - - // Argument 2: Previous index. - __ mov(Operand(esp, 1 * kPointerSize), ebx); - - // Argument 1: Subject string. - __ mov(Operand(esp, 0 * kPointerSize), eax); - - // Locate the code entry and call it. - __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ CallCFunction(edx, kRegExpExecuteArguments); - - // Check the result. - Label success; - __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS); - __ j(equal, &success, taken); - Label failure; - __ cmp(eax, NativeRegExpMacroAssembler::FAILURE); - __ j(equal, &failure, taken); - __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION); - // If not exception it can only be retry. Handle that in the runtime system. - __ j(not_equal, &runtime); - // Result must now be exception. If there is no pending exception already a - // stack overflow (on the backtrack stack) was detected in RegExp code but - // haven't created the exception yet. Handle that in the runtime system. - // TODO(592): Rerunning the RegExp to get the stack overflow exception. - ExternalReference pending_exception(Top::k_pending_exception_address); - __ mov(eax, - Operand::StaticVariable(ExternalReference::the_hole_value_location())); - __ cmp(eax, Operand::StaticVariable(pending_exception)); - __ j(equal, &runtime); - __ bind(&failure); - // For failure and exception return null. - __ mov(Operand(eax), Factory::null_value()); - __ ret(4 * kPointerSize); - - // Load RegExp data. - __ bind(&success); - __ mov(eax, Operand(esp, kJSRegExpOffset)); - __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); - __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. 
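// [Editor's sketch, not part of the diff] The smi trick used just below: on
// ia32 a smi n is stored as n << 1, so adding 2 to the tagged capture count
// directly produces the untagged value (number_of_captures + 1) * 2.
static int CaptureRegisterCount(int number_of_captures) {
  int tagged = number_of_captures << 1;   // smi encoding (tag bit is 0)
  return tagged + 2;                      // == (number_of_captures + 1) * 2, untagged
}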
- STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(Operand(edx), Immediate(2)); // edx was a smi. - - // edx: Number of capture registers - // Load last_match_info which is still known to be a fast case JSArray. - __ mov(eax, Operand(esp, kLastMatchInfoOffset)); - __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); - - // ebx: last_match_info backing store (FixedArray) - // edx: number of capture registers - // Store the capture count. - __ SmiTag(edx); // Number of capture registers to smi. - __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx); - __ SmiUntag(edx); // Number of capture registers back from smi. - // Store last subject and last input. - __ mov(eax, Operand(esp, kSubjectOffset)); - __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); - __ mov(ecx, ebx); - __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi); - __ mov(eax, Operand(esp, kSubjectOffset)); - __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); - __ mov(ecx, ebx); - __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi); - - // Get the static offsets vector filled by the native regexp code. - ExternalReference address_of_static_offsets_vector = - ExternalReference::address_of_static_offsets_vector(); - __ mov(ecx, Immediate(address_of_static_offsets_vector)); - - // ebx: last_match_info backing store (FixedArray) - // ecx: offsets vector - // edx: number of capture registers - Label next_capture, done; - // Capture register counter starts from number of capture registers and - // counts down until wraping after zero. - __ bind(&next_capture); - __ sub(Operand(edx), Immediate(1)); - __ j(negative, &done); - // Read the value from the static offsets vector buffer. - __ mov(edi, Operand(ecx, edx, times_int_size, 0)); - __ SmiTag(edi); - // Store the smi value in the last match info. - __ mov(FieldOperand(ebx, - edx, - times_pointer_size, - RegExpImpl::kFirstCaptureOffset), - edi); - __ jmp(&next_capture); - __ bind(&done); - - // Return last match info. - __ mov(eax, Operand(esp, kLastMatchInfoOffset)); - __ ret(4 * kPointerSize); - - // Do the runtime call to execute the regexp. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); -#endif // V8_INTERPRETED_REGEXP -} - - -void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - bool object_is_smi, - Label* not_found) { - // Use of registers. Register result is used as a temporary. - Register number_string_cache = result; - Register mask = scratch1; - Register scratch = scratch2; - - // Load the number string cache. - ExternalReference roots_address = ExternalReference::roots_address(); - __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex)); - __ mov(number_string_cache, - Operand::StaticArray(scratch, times_pointer_size, roots_address)); - // Make the hash mask from the length of the number string cache. It - // contains two elements (number and string) for each cache entry. - __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); - __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. - __ sub(Operand(mask), Immediate(1)); // Make mask. - - // Calculate the entry in the number string cache. The hash value in the - // number string cache for smis is just the smi value, and the hash for - // doubles is the xor of the upper and lower words. See - // Heap::GetNumberStringCache. 
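// [Editor's sketch, not part of the diff] The cache index described above,
// written out for the heap-number case; mask is length/2 - 1 as computed
// above, since each cache entry is a (number, string) pair. Smis instead use
// their untagged integer value as the hash. Hedged illustration only.
#include <cstdint>
#include <cstring>
static uint32_t NumberStringCacheIndex(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  return hash & mask;
}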
- Label smi_hash_calculated; - Label load_result_from_cache; - if (object_is_smi) { - __ mov(scratch, object); - __ SmiUntag(scratch); - } else { - Label not_smi, hash_calculated; - STATIC_ASSERT(kSmiTag == 0); - __ test(object, Immediate(kSmiTagMask)); - __ j(not_zero, ¬_smi); - __ mov(scratch, object); - __ SmiUntag(scratch); - __ jmp(&smi_hash_calculated); - __ bind(¬_smi); - __ cmp(FieldOperand(object, HeapObject::kMapOffset), - Factory::heap_number_map()); - __ j(not_equal, not_found); - STATIC_ASSERT(8 == kDoubleSize); - __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); - __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); - // Object is heap number and hash is now in scratch. Calculate cache index. - __ and_(scratch, Operand(mask)); - Register index = scratch; - Register probe = mask; - __ mov(probe, - FieldOperand(number_string_cache, - index, - times_twice_pointer_size, - FixedArray::kHeaderSize)); - __ test(probe, Immediate(kSmiTagMask)); - __ j(zero, not_found); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); - __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); - __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); - __ ucomisd(xmm0, xmm1); - } else { - __ fld_d(FieldOperand(object, HeapNumber::kValueOffset)); - __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); - __ FCmp(); - } - __ j(parity_even, not_found); // Bail out if NaN is involved. - __ j(not_equal, not_found); // The cache did not contain this value. - __ jmp(&load_result_from_cache); - } - - __ bind(&smi_hash_calculated); - // Object is smi and hash is now in scratch. Calculate cache index. - __ and_(scratch, Operand(mask)); - Register index = scratch; - // Check if the entry is the smi we are looking for. - __ cmp(object, - FieldOperand(number_string_cache, - index, - times_twice_pointer_size, - FixedArray::kHeaderSize)); - __ j(not_equal, not_found); - - // Get the result from the cache. - __ bind(&load_result_from_cache); - __ mov(result, - FieldOperand(number_string_cache, - index, - times_twice_pointer_size, - FixedArray::kHeaderSize + kPointerSize)); - __ IncrementCounter(&Counters::number_to_string_native, 1); -} - - -void NumberToStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - __ mov(ebx, Operand(esp, kPointerSize)); - - // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime); - __ ret(1 * kPointerSize); - - __ bind(&runtime); - // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); -} - - -static int NegativeComparisonResult(Condition cc) { - ASSERT(cc != equal); - ASSERT((cc == less) || (cc == less_equal) - || (cc == greater) || (cc == greater_equal)); - return (cc == greater || cc == greater_equal) ? LESS : GREATER; -} - - -void CompareStub::Generate(MacroAssembler* masm) { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - - Label check_unequal_objects, done; - - // NOTICE! This code is only reached after a smi-fast-case check, so - // it is certain that at least one operand isn't a smi. - - // Identical objects can be compared fast, but there are some tricky cases - // for NaN and undefined. - { - Label not_identical; - __ cmp(eax, Operand(edx)); - __ j(not_equal, ¬_identical); - - if (cc_ != equal) { - // Check for undefined. undefined OP undefined is false even though - // undefined == undefined. 
- Label check_for_nan; - __ cmp(edx, Factory::undefined_value()); - __ j(not_equal, &check_for_nan); - __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); - __ ret(0); - __ bind(&check_for_nan); - } - - // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), - // so we do the second best thing - test it ourselves. - // Note: if cc_ != equal, never_nan_nan_ is not used. - if (never_nan_nan_ && (cc_ == equal)) { - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ ret(0); - } else { - Label heap_number; - __ cmp(FieldOperand(edx, HeapObject::kMapOffset), - Immediate(Factory::heap_number_map())); - __ j(equal, &heap_number); - if (cc_ != equal) { - // Call runtime on identical JSObjects. Otherwise return equal. - __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); - __ j(above_equal, ¬_identical); - } - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ ret(0); - - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if - // it's not NaN. - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // We only accept QNaNs, which have bit 51 set. - // Read top bits of double representation (second word of value). - - // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., - // all bits in the mask are set. We only need to check the word - // that contains the exponent and high bit of the mantissa. - STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0); - __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); - __ xor_(eax, Operand(eax)); - // Shift value and mask so kQuietNaNHighBitsMask applies to topmost - // bits. - __ add(edx, Operand(edx)); - __ cmp(edx, kQuietNaNHighBitsMask << 1); - if (cc_ == equal) { - STATIC_ASSERT(EQUAL != 1); - __ setcc(above_equal, eax); - __ ret(0); - } else { - Label nan; - __ j(above_equal, &nan); - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ ret(0); - __ bind(&nan); - __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); - __ ret(0); - } - } - - __ bind(¬_identical); - } - - // Strict equality can quickly decide whether objects are equal. - // Non-strict object equality is slower, so it is handled later in the stub. - if (cc_ == equal && strict_) { - Label slow; // Fallthrough label. - Label not_smis; - // If we're doing a strict equality comparison, we don't have to do - // type conversion, so we generate code to do fast comparison for objects - // and oddballs. Non-smi numbers and strings still go through the usual - // slow-case code. - // If either is a Smi (we know that not both are), then they can only - // be equal if the other is a HeapNumber. If so, use the slow case. - STATIC_ASSERT(kSmiTag == 0); - ASSERT_EQ(0, Smi::FromInt(0)); - __ mov(ecx, Immediate(kSmiTagMask)); - __ and_(ecx, Operand(eax)); - __ test(ecx, Operand(edx)); - __ j(not_zero, ¬_smis); - // One operand is a smi. - - // Check whether the non-smi is a heap number. - STATIC_ASSERT(kSmiTagMask == 1); - // ecx still holds eax & kSmiTag, which is either zero or one. - __ sub(Operand(ecx), Immediate(0x01)); - __ mov(ebx, edx); - __ xor_(ebx, Operand(eax)); - __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx. - __ xor_(ebx, Operand(eax)); - // if eax was smi, ebx is now edx, else eax. - - // Check if the non-smi operand is a heap number. - __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), - Immediate(Factory::heap_number_map())); - // If heap number, handle it in the slow case. 
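// [Editor's sketch, not part of the diff] The branchless select performed just
// above: exactly one of the two operands is a smi (tag bit 0), and the code
// picks the other one without a branch. Hedged illustration, names invented.
#include <cstdint>
static uint32_t SelectNonSmi(uint32_t a, uint32_t b) {
  uint32_t mask = (a & 1) - 1;      // 0 if a is a heap object, ~0u if a is a smi
  return ((a ^ b) & mask) ^ a;      // a if a is a heap object, otherwise b
}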
- __ j(equal, &slow); - // Return non-equal (ebx is not zero) - __ mov(eax, ebx); - __ ret(0); - - __ bind(¬_smis); - // If either operand is a JSObject or an oddball value, then they are not - // equal since their pointers are different - // There is no test for undetectability in strict equality. - - // Get the type of the first operand. - // If the first object is a JS object, we have done pointer comparison. - Label first_non_object; - STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); - __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); - __ j(below, &first_non_object); - - // Return non-zero (eax is not zero) - Label return_not_equal; - STATIC_ASSERT(kHeapObjectTag != 0); - __ bind(&return_not_equal); - __ ret(0); - - __ bind(&first_non_object); - // Check for oddballs: true, false, null, undefined. - __ CmpInstanceType(ecx, ODDBALL_TYPE); - __ j(equal, &return_not_equal); - - __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx); - __ j(above_equal, &return_not_equal); - - // Check for oddballs: true, false, null, undefined. - __ CmpInstanceType(ecx, ODDBALL_TYPE); - __ j(equal, &return_not_equal); - - // Fall through to the general case. - __ bind(&slow); - } - - // Generate the number comparison code. - if (include_number_compare_) { - Label non_number_comparison; - Label unordered; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - CpuFeatures::Scope use_cmov(CMOV); - - FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); - __ ucomisd(xmm0, xmm1); - - // Don't base result on EFLAGS when a NaN is involved. - __ j(parity_even, &unordered, not_taken); - // Return a result of -1, 0, or 1, based on EFLAGS. - __ mov(eax, 0); // equal - __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, Operand(ecx)); - __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, Operand(ecx)); - __ ret(0); - } else { - FloatingPointHelper::CheckFloatOperands( - masm, &non_number_comparison, ebx); - FloatingPointHelper::LoadFloatOperand(masm, eax); - FloatingPointHelper::LoadFloatOperand(masm, edx); - __ FCmp(); - - // Don't base result on EFLAGS when a NaN is involved. - __ j(parity_even, &unordered, not_taken); - - Label below_label, above_label; - // Return a result of -1, 0, or 1, based on EFLAGS. - __ j(below, &below_label, not_taken); - __ j(above, &above_label, not_taken); - - __ xor_(eax, Operand(eax)); - __ ret(0); - - __ bind(&below_label); - __ mov(eax, Immediate(Smi::FromInt(-1))); - __ ret(0); - - __ bind(&above_label); - __ mov(eax, Immediate(Smi::FromInt(1))); - __ ret(0); - } - - // If one of the numbers was NaN, then the result is always false. - // The cc is never not-equal. - __ bind(&unordered); - ASSERT(cc_ != not_equal); - if (cc_ == less || cc_ == less_equal) { - __ mov(eax, Immediate(Smi::FromInt(1))); - } else { - __ mov(eax, Immediate(Smi::FromInt(-1))); - } - __ ret(0); - - // The number comparison code did not provide a valid result. - __ bind(&non_number_comparison); - } - - // Fast negative check for symbol-to-symbol equality. - Label check_for_strings; - if (cc_ == equal) { - BranchIfNonSymbol(masm, &check_for_strings, eax, ecx); - BranchIfNonSymbol(masm, &check_for_strings, edx, ecx); - - // We've already checked for object identity, so if both operands - // are symbols they aren't equal. Register eax already holds a - // non-zero value, which indicates not equal, so just return. 
- __ ret(0); - } - - __ bind(&check_for_strings); - - __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, - &check_unequal_objects); - - // Inline comparison of ascii strings. - StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - edx, - eax, - ecx, - ebx, - edi); -#ifdef DEBUG - __ Abort("Unexpected fall-through from string comparison"); -#endif - - __ bind(&check_unequal_objects); - if (cc_ == equal && !strict_) { - // Non-strict equality. Objects are unequal if - // they are both JSObjects and not undetectable, - // and their pointers are different. - Label not_both_objects; - Label return_unequal; - // At most one is a smi, so we can test for smi by adding the two. - // A smi plus a heap object has the low bit set, a heap object plus - // a heap object has the low bit clear. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagMask == 1); - __ lea(ecx, Operand(eax, edx, times_1, 0)); - __ test(ecx, Immediate(kSmiTagMask)); - __ j(not_zero, ¬_both_objects); - __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx); - __ j(below, ¬_both_objects); - __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx); - __ j(below, ¬_both_objects); - // We do not bail out after this point. Both are JSObjects, and - // they are equal if and only if both are undetectable. - // The and of the undetectable flags is 1 if and only if they are equal. - __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), - 1 << Map::kIsUndetectable); - __ j(zero, &return_unequal); - __ test_b(FieldOperand(ebx, Map::kBitFieldOffset), - 1 << Map::kIsUndetectable); - __ j(zero, &return_unequal); - // The objects are both undetectable, so they both compare as the value - // undefined, and are equal. - __ Set(eax, Immediate(EQUAL)); - __ bind(&return_unequal); - // Return non-equal by returning the non-zero object pointer in eax, - // or return equal if we fell through to here. - __ ret(0); // rax, rdx were pushed - __ bind(¬_both_objects); - } - - // Push arguments below the return address. - __ pop(ecx); - __ push(edx); - __ push(eax); - - // Figure out which native to call and setup the arguments. - Builtins::JavaScript builtin; - if (cc_ == equal) { - builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; - } else { - builtin = Builtins::COMPARE; - __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); - } - - // Restore return address on the stack. - __ push(ecx); - - // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - __ InvokeBuiltin(builtin, JUMP_FUNCTION); -} - - -void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, - Label* label, - Register object, - Register scratch) { - __ test(object, Immediate(kSmiTagMask)); - __ j(zero, label); - __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset)); - __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); - __ and_(scratch, kIsSymbolMask | kIsNotStringMask); - __ cmp(scratch, kSymbolTag | kStringTag); - __ j(not_equal, label); -} - - -void StackCheckStub::Generate(MacroAssembler* masm) { - // Because builtins always remove the receiver from the stack, we - // have to fake one to avoid underflowing the stack. The receiver - // must be inserted below the return address on the stack so we - // temporarily store that in a register. - __ pop(eax); - __ push(Immediate(Smi::FromInt(0))); - __ push(eax); - - // Do tail-call to runtime routine. 
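// [Editor's sketch, not part of the diff] The tag trick used in the
// unequal-objects check above: with smi tag 0 and heap-object tag 1 in the low
// bit, and at most one smi among the two operands, the low bit of their sum
// distinguishes the two cases. Hedged illustration only.
#include <cstdint>
static bool ExactlyOneIsSmi(uint32_t a, uint32_t b) {
  // Precondition: at most one of a and b is a smi.
  return ((a + b) & 1) != 0;        // set iff exactly one operand is a smi
}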
- __ TailCallRuntime(Runtime::kStackGuard, 1, 1); -} - - -void CallFunctionStub::Generate(MacroAssembler* masm) { - Label slow; - - // If the receiver might be a value (string, number or boolean) check for this - // and box it if it is. - if (ReceiverMightBeValue()) { - // Get the receiver from the stack. - // +1 ~ return address - Label receiver_is_value, receiver_is_js_object; - __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize)); - - // Check if receiver is a smi (which is a number value). - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &receiver_is_value, not_taken); - - // Check if the receiver is a valid JS object. - __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi); - __ j(above_equal, &receiver_is_js_object); - - // Call the runtime to box the value. - __ bind(&receiver_is_value); - __ EnterInternalFrame(); - __ push(eax); - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); - __ LeaveInternalFrame(); - __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax); - - __ bind(&receiver_is_js_object); - } - - // Get the function to call from the stack. - // +2 ~ receiver, return address - __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize)); - - // Check that the function really is a JavaScript function. - __ test(edi, Immediate(kSmiTagMask)); - __ j(zero, &slow, not_taken); - // Goto slow case if we do not have a function. - __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); - __ j(not_equal, &slow, not_taken); - - // Fast-case: Just invoke the function. - ParameterCount actual(argc_); - __ InvokeFunction(edi, actual, JUMP_FUNCTION); - - // Slow-case: Non-function called. - __ bind(&slow); - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead - // of the original receiver from the call site). - __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi); - __ Set(eax, Immediate(argc_)); - __ Set(ebx, Immediate(0)); - __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION); - Handle adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); - __ jmp(adaptor, RelocInfo::CODE_TARGET); -} - - -void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { - // eax holds the exception. - - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop the sp to the top of the handler. - ExternalReference handler_address(Top::k_handler_address); - __ mov(esp, Operand::StaticVariable(handler_address)); - - // Restore next handler and frame pointer, discard handler state. - STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(Operand::StaticVariable(handler_address)); - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); - __ pop(ebp); - __ pop(edx); // Remove state. - - // Before returning we restore the context from the frame pointer if - // not NULL. The frame pointer is NULL in the exception handler of - // a JS entry frame. - __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL. - Label skip; - __ cmp(ebp, 0); - __ j(equal, &skip, not_taken); - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); - __ bind(&skip); - - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ ret(0); -} - - -// If true, a Handle passed by value is passed and returned by -// using the location_ field directly. If false, it is passed and -// returned as a pointer to a handle. 
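GenerateThrowTOS above unwinds a four-word stack handler whose layout is pinned by the STATIC_ASSERTs; a hypothetical struct (not V8's declaration) makes the order of the pops explicit:

    // Hypothetical mirror of the handler frame popped by GenerateThrowTOS; the
    // field order follows the asserted offsets, with the state word discarded.
    struct StackHandlerSketch {
      StackHandlerSketch* next;  // kNextOffset == 0, re-installed as top handler
      void* fp;                  // kFPOffset == 1 * kPointerSize, restored into ebp
      int state;                 // popped into edx and ignored
      void* pc;                  // kPCOffset == 3 * kPointerSize, consumed by ret(0)
    };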
-#ifdef USING_BSD_ABI -static const bool kPassHandlesDirectly = true; -#else -static const bool kPassHandlesDirectly = false; -#endif - - -void ApiGetterEntryStub::Generate(MacroAssembler* masm) { - Label empty_handle; - Label prologue; - Label promote_scheduled_exception; - __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc); - STATIC_ASSERT(kArgc == 4); - if (kPassHandlesDirectly) { - // When handles as passed directly we don't have to allocate extra - // space for and pass an out parameter. - __ mov(Operand(esp, 0 * kPointerSize), ebx); // name. - __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer. - } else { - // The function expects three arguments to be passed but we allocate - // four to get space for the output cell. The argument slots are filled - // as follows: - // - // 3: output cell - // 2: arguments pointer - // 1: name - // 0: pointer to the output cell - // - // Note that this is one more "argument" than the function expects - // so the out cell will have to be popped explicitly after returning - // from the function. - __ mov(Operand(esp, 1 * kPointerSize), ebx); // name. - __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer. - __ mov(ebx, esp); - __ add(Operand(ebx), Immediate(3 * kPointerSize)); - __ mov(Operand(esp, 0 * kPointerSize), ebx); // output - __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell. - } - // Call the api function! - __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY); - // Check if the function scheduled an exception. - ExternalReference scheduled_exception_address = - ExternalReference::scheduled_exception_address(); - __ cmp(Operand::StaticVariable(scheduled_exception_address), - Immediate(Factory::the_hole_value())); - __ j(not_equal, &promote_scheduled_exception, not_taken); - if (!kPassHandlesDirectly) { - // The returned value is a pointer to the handle holding the result. - // Dereference this to get to the location. - __ mov(eax, Operand(eax, 0)); - } - // Check if the result handle holds 0. - __ test(eax, Operand(eax)); - __ j(zero, &empty_handle, not_taken); - // It was non-zero. Dereference to get the result value. - __ mov(eax, Operand(eax, 0)); - __ bind(&prologue); - __ LeaveExitFrame(ExitFrame::MODE_NORMAL); - __ ret(0); - __ bind(&promote_scheduled_exception); - __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1); - __ bind(&empty_handle); - // It was zero; the result is undefined. - __ mov(eax, Factory::undefined_value()); - __ jmp(&prologue); -} - - -void CEntryStub::GenerateCore(MacroAssembler* masm, - Label* throw_normal_exception, - Label* throw_termination_exception, - Label* throw_out_of_memory_exception, - bool do_gc, - bool always_allocate_scope, - int /* alignment_skew */) { - // eax: result parameter for PerformGC, if any - // ebx: pointer to C function (C callee-saved) - // ebp: frame pointer (restored after C call) - // esp: stack pointer (restored after C call) - // edi: number of arguments including receiver (C callee-saved) - // esi: pointer to the first argument (C callee-saved) - - // Result returned in eax, or eax+edx if result_size_ is 2. - - // Check stack alignment. - if (FLAG_debug_code) { - __ CheckStackAlignment(); - } - - if (do_gc) { - // Pass failure code returned from last attempt as first argument to - // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the - // stack alignment is known to be correct. 
This function takes one argument - // which is passed on the stack, and we know that the stack has been - // prepared to pass at least one argument. - __ mov(Operand(esp, 0 * kPointerSize), eax); // Result. - __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY); - } - - ExternalReference scope_depth = - ExternalReference::heap_always_allocate_scope_depth(); - if (always_allocate_scope) { - __ inc(Operand::StaticVariable(scope_depth)); - } - - // Call C function. - __ mov(Operand(esp, 0 * kPointerSize), edi); // argc. - __ mov(Operand(esp, 1 * kPointerSize), esi); // argv. - __ call(Operand(ebx)); - // Result is in eax or edx:eax - do not destroy these registers! - - if (always_allocate_scope) { - __ dec(Operand::StaticVariable(scope_depth)); - } - - // Make sure we're not trying to return 'the hole' from the runtime - // call as this may lead to crashes in the IC code later. - if (FLAG_debug_code) { - Label okay; - __ cmp(eax, Factory::the_hole_value()); - __ j(not_equal, &okay); - __ int3(); - __ bind(&okay); - } - - // Check for failure result. - Label failure_returned; - STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); - __ lea(ecx, Operand(eax, 1)); - // Lower 2 bits of ecx are 0 iff eax has failure tag. - __ test(ecx, Immediate(kFailureTagMask)); - __ j(zero, &failure_returned, not_taken); - - // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(mode_); - __ ret(0); - - // Handling of failure. - __ bind(&failure_returned); - - Label retry; - // If the returned exception is RETRY_AFTER_GC continue at retry label - STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); - __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); - __ j(zero, &retry, taken); - - // Special handling of out of memory exceptions. - __ cmp(eax, reinterpret_cast(Failure::OutOfMemoryException())); - __ j(equal, throw_out_of_memory_exception); - - // Retrieve the pending exception and clear the variable. - ExternalReference pending_exception_address(Top::k_pending_exception_address); - __ mov(eax, Operand::StaticVariable(pending_exception_address)); - __ mov(edx, - Operand::StaticVariable(ExternalReference::the_hole_value_location())); - __ mov(Operand::StaticVariable(pending_exception_address), edx); - - // Special handling of termination exceptions which are uncatchable - // by javascript code. - __ cmp(eax, Factory::termination_exception()); - __ j(equal, throw_termination_exception); - - // Handle normal exception. - __ jmp(throw_normal_exception); - - // Retry. - __ bind(&retry); -} - - -void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, - UncatchableExceptionType type) { - // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); - - // Drop sp to the top stack handler. - ExternalReference handler_address(Top::k_handler_address); - __ mov(esp, Operand::StaticVariable(handler_address)); - - // Unwind the handlers until the ENTRY handler is found. - Label loop, done; - __ bind(&loop); - // Load the type of the current stack handler. - const int kStateOffset = StackHandlerConstants::kStateOffset; - __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY)); - __ j(equal, &done); - // Fetch the next handler in the list. - const int kNextOffset = StackHandlerConstants::kNextOffset; - __ mov(esp, Operand(esp, kNextOffset)); - __ jmp(&loop); - __ bind(&done); - - // Set the top handler address to next handler past the current ENTRY handler. 
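The failure check in GenerateCore above leans on the STATIC_ASSERT that (kFailureTag + 1) & kFailureTagMask == 0, so a single lea/test pair replaces a mask-and-compare. A self-contained sketch, with tag values assumed only to be consistent with that assert:

    #include <stdint.h>

    const intptr_t kFailureTagSketch = 3;      // assumed: low two bits set
    const intptr_t kFailureTagMaskSketch = 3;

    // Mirrors "__ lea(ecx, Operand(eax, 1)); __ test(ecx, kFailureTagMask)":
    // adding 1 clears the low bits exactly when the word carries the failure tag.
    bool HasFailureTagSketch(intptr_t value) {
      return ((value + 1) & kFailureTagMaskSketch) == 0;
    }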
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); - __ pop(Operand::StaticVariable(handler_address)); - - if (type == OUT_OF_MEMORY) { - // Set external caught exception to false. - ExternalReference external_caught(Top::k_external_caught_exception_address); - __ mov(eax, false); - __ mov(Operand::StaticVariable(external_caught), eax); - - // Set pending exception and eax to out of memory exception. - ExternalReference pending_exception(Top::k_pending_exception_address); - __ mov(eax, reinterpret_cast(Failure::OutOfMemoryException())); - __ mov(Operand::StaticVariable(pending_exception), eax); - } - - // Clear the context pointer. - __ xor_(esi, Operand(esi)); - - // Restore fp from handler and discard handler state. - STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize); - __ pop(ebp); - __ pop(edx); // State. - - STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); - __ ret(0); -} - - -void CEntryStub::Generate(MacroAssembler* masm) { - // eax: number of arguments including receiver - // ebx: pointer to C function (C callee-saved) - // ebp: frame pointer (restored after C call) - // esp: stack pointer (restored after C call) - // esi: current context (C callee-saved) - // edi: JS function of the caller (C callee-saved) - - // NOTE: Invocations of builtins may return failure objects instead - // of a proper result. The builtin entry handles this by performing - // a garbage collection and retrying the builtin (twice). - - // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(mode_); - - // eax: result parameter for PerformGC, if any (setup below) - // ebx: pointer to builtin function (C callee-saved) - // ebp: frame pointer (restored after C call) - // esp: stack pointer (restored after C call) - // edi: number of arguments including receiver (C callee-saved) - // esi: argv pointer (C callee-saved) - - Label throw_normal_exception; - Label throw_termination_exception; - Label throw_out_of_memory_exception; - - // Call into the runtime system. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - false, - false); - - // Do space-specific GC and retry runtime call. - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - true, - false); - - // Do full GC and retry runtime call one final time. - Failure* failure = Failure::InternalError(); - __ mov(eax, Immediate(reinterpret_cast(failure))); - GenerateCore(masm, - &throw_normal_exception, - &throw_termination_exception, - &throw_out_of_memory_exception, - true, - true); - - __ bind(&throw_out_of_memory_exception); - GenerateThrowUncatchable(masm, OUT_OF_MEMORY); - - __ bind(&throw_termination_exception); - GenerateThrowUncatchable(masm, TERMINATION); - - __ bind(&throw_normal_exception); - GenerateThrowTOS(masm); -} - - -void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { - Label invoke, exit; -#ifdef ENABLE_LOGGING_AND_PROFILING - Label not_outermost_js, not_outermost_js_2; -#endif - - // Setup frame. - __ push(ebp); - __ mov(ebp, Operand(esp)); - - // Push marker in two places. - int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; - __ push(Immediate(Smi::FromInt(marker))); // context slot - __ push(Immediate(Smi::FromInt(marker))); // function slot - // Save callee-saved registers (C calling conventions). 
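CEntryStub::Generate below strings three GenerateCore invocations together; the control flow amounts to the following retry ladder. The callable/predicate shapes here are purely illustrative, not V8's API:

    #include <functional>

    // Attempt the runtime call plainly, retry after a GC, then retry once more
    // inside an always-allocate scope; only the last failure propagates.
    int CallWithRetriesSketch(
        const std::function<int(bool do_gc, bool always_allocate)>& call,
        const std::function<bool(int result)>& is_retry_after_gc) {
      int result = call(false, false);
      if (!is_retry_after_gc(result)) return result;
      result = call(true, false);
      if (!is_retry_after_gc(result)) return result;
      return call(true, true);
    }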
- __ push(edi);
- __ push(esi);
- __ push(ebx);
-
- // Save copies of the top frame descriptor on the stack.
- ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
- __ push(Operand::StaticVariable(c_entry_fp));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If this is the outermost JS call, set js_entry_sp value.
- ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
- __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(not_equal, &not_outermost_js);
- __ mov(Operand::StaticVariable(js_entry_sp), ebp);
- __ bind(&not_outermost_js);
-#endif
-
- // Call a faked try-block that does the invoke.
- __ call(&invoke);
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- ExternalReference pending_exception(Top::k_pending_exception_address);
- __ mov(Operand::StaticVariable(pending_exception), eax);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
- __ jmp(&exit);
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-
- // Clear any pending exceptions.
- __ mov(edx,
- Operand::StaticVariable(ExternalReference::the_hole_value_location()));
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
-
- // Invoke the function by calling through JS entry trampoline
- // builtin and pop the faked function when we return. Notice that we
- // cannot store a reference to the trampoline code directly in this
- // stub, because the builtin stubs may not have been generated yet.
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ mov(edx, Immediate(construct_entry));
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ mov(edx, Immediate(entry));
- }
- __ mov(edx, Operand(edx, 0)); // deref address
- __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(Operand(edx));
-
- // Unlink this frame from the handler chain.
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
- // Pop next_sp.
- __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
- // If current EBP value is the same as js_entry_sp value, it means that
- // the current function is the outermost.
- __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
- __ j(not_equal, &not_outermost_js_2);
- __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ bind(&not_outermost_js_2);
-#endif
-
- // Restore the top frame descriptor from the stack.
- __ bind(&exit);
- __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
-
- // Restore callee-saved registers (C calling conventions).
- __ pop(ebx);
- __ pop(esi);
- __ pop(edi);
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
-
- // Restore frame pointer and return.
- __ pop(ebp);
- __ ret(0);
-}
-
-
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Get the object - go slow case if it's a smi.
- Label slow;
- __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
-
- // Check that the left hand is a JS object.
- __ IsObjectJSObjectType(eax, eax, edx, &slow);
-
- // Get the prototype of the function.
- __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
- // edx is function, eax is map.
-
- // Look up the function and the map in the instanceof cache.
- Label miss; - ExternalReference roots_address = ExternalReference::roots_address(); - __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); - __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address)); - __ j(not_equal, &miss); - __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); - __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); - __ j(not_equal, &miss); - __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); - __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address)); - __ ret(2 * kPointerSize); - - __ bind(&miss); - __ TryGetFunctionPrototype(edx, ebx, ecx, &slow); - - // Check that the function prototype is a JS object. - __ test(ebx, Immediate(kSmiTagMask)); - __ j(zero, &slow, not_taken); - __ IsObjectJSObjectType(ebx, ecx, ecx, &slow); - - // Register mapping: - // eax is object map. - // edx is function. - // ebx is function prototype. - __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex)); - __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); - __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex)); - __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx); - - __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset)); - - // Loop through the prototype chain looking for the function prototype. - Label loop, is_instance, is_not_instance; - __ bind(&loop); - __ cmp(ecx, Operand(ebx)); - __ j(equal, &is_instance); - __ cmp(Operand(ecx), Immediate(Factory::null_value())); - __ j(equal, &is_not_instance); - __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset)); - __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset)); - __ jmp(&loop); - - __ bind(&is_instance); - __ Set(eax, Immediate(0)); - __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); - __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); - __ ret(2 * kPointerSize); - - __ bind(&is_not_instance); - __ Set(eax, Immediate(Smi::FromInt(1))); - __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex)); - __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax); - __ ret(2 * kPointerSize); - - // Slow-case: Go through the JavaScript implementation. - __ bind(&slow); - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); -} - - -int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. To avoid duplicate - // stubs the never NaN NaN condition is only taken into account if the - // condition is equals. - ASSERT(static_cast(cc_) < (1 << 12)); - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - return ConditionField::encode(static_cast(cc_)) - | RegisterField::encode(false) // lhs_ and rhs_ are not used - | StrictField::encode(strict_) - | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_); -} - - -// Unfortunately you have to run without snapshots to see most of these -// names in the profile since most compare stubs end up in the snapshot. 
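The loop emitted by InstanceofStub above walks the object's prototype chain until it either meets the function's prototype or hits null. A stand-in C++ version of the same walk (the types and names are not V8's):

    // Stand-in object model: every heap object points to a map, every map
    // points to a prototype. The sentinel null_value terminates every chain.
    struct MapSketch;
    struct HeapObjectSketch { MapSketch* map; };
    struct MapSketch { HeapObjectSketch* prototype; };

    bool IsInstanceOfSketch(HeapObjectSketch* object,
                            HeapObjectSketch* function_prototype,
                            HeapObjectSketch* null_value) {
      HeapObjectSketch* current = object->map->prototype;
      while (current != null_value) {
        if (current == function_prototype) return true;  // &is_instance
        current = current->map->prototype;               // follow the chain
      }
      return false;                                      // &is_not_instance
    }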
-const char* CompareStub::GetName() { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); - if (name_ == NULL) return "OOM"; - - const char* cc_name; - switch (cc_) { - case less: cc_name = "LT"; break; - case greater: cc_name = "GT"; break; - case less_equal: cc_name = "LE"; break; - case greater_equal: cc_name = "GE"; break; - case equal: cc_name = "EQ"; break; - case not_equal: cc_name = "NE"; break; - default: cc_name = "UnknownCondition"; break; - } - - const char* strict_name = ""; - if (strict_ && (cc_ == equal || cc_ == not_equal)) { - strict_name = "_STRICT"; - } - - const char* never_nan_nan_name = ""; - if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { - never_nan_nan_name = "_NO_NAN"; - } - - const char* include_number_compare_name = ""; - if (!include_number_compare_) { - include_number_compare_name = "_NO_NUMBER"; - } - - OS::SNPrintF(Vector(name_, kMaxNameLength), - "CompareStub_%s%s%s%s", - cc_name, - strict_name, - never_nan_nan_name, - include_number_compare_name); - return name_; -} - - -// ------------------------------------------------------------------------- -// StringCharCodeAtGenerator - -void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { - Label flat_string; - Label ascii_string; - Label got_char_code; - - // If the receiver is a smi trigger the non-string case. - STATIC_ASSERT(kSmiTag == 0); - __ test(object_, Immediate(kSmiTagMask)); - __ j(zero, receiver_not_string_); - - // Fetch the instance type of the receiver into result register. - __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); - __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); - // If the receiver is not a string trigger the non-string case. - __ test(result_, Immediate(kIsNotStringMask)); - __ j(not_zero, receiver_not_string_); - - // If the index is non-smi trigger the non-smi case. - STATIC_ASSERT(kSmiTag == 0); - __ test(index_, Immediate(kSmiTagMask)); - __ j(not_zero, &index_not_smi_); - - // Put smi-tagged index into scratch register. - __ mov(scratch_, index_); - __ bind(&got_smi_index_); - - // Check for index out of range. - __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset)); - __ j(above_equal, index_out_of_range_); - - // We need special handling for non-flat strings. - STATIC_ASSERT(kSeqStringTag == 0); - __ test(result_, Immediate(kStringRepresentationMask)); - __ j(zero, &flat_string); - - // Handle non-flat strings. - __ test(result_, Immediate(kIsConsStringMask)); - __ j(zero, &call_runtime_); - - // ConsString. - // Check whether the right hand side is the empty string (i.e. if - // this is really a flat string in a cons string). If that is not - // the case we would rather go to the runtime system now to flatten - // the string. - __ cmp(FieldOperand(object_, ConsString::kSecondOffset), - Immediate(Factory::empty_string())); - __ j(not_equal, &call_runtime_); - // Get the first of the two strings and load its instance type. - __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset)); - __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); - __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); - // If the first cons component is also non-flat, then go to runtime. - STATIC_ASSERT(kSeqStringTag == 0); - __ test(result_, Immediate(kStringRepresentationMask)); - __ j(not_zero, &call_runtime_); - - // Check for 1-byte or 2-byte string. 
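The fast path above (which continues just below with the flat-string load) treats a cons string whose second half is the empty string as flat by reading through its first half; anything else goes to the runtime. A hedged sketch with a stand-in string model, not V8's object layout:

    #include <stdint.h>

    struct StringSketch {
      bool is_cons;
      bool is_sequential;
      StringSketch* first;    // cons only
      StringSketch* second;   // cons only
      const uint16_t* chars;  // sequential only
      int length;
    };

    // Returns the character code, or -1 where the stub would bail out to the
    // runtime or to the out-of-range handler.
    int CharCodeAtFastSketch(StringSketch* str, int index) {
      if (index < 0 || index >= str->length) return -1;  // index_out_of_range_
      if (str->is_cons) {
        if (str->second->length != 0) return -1;         // really needs flattening
        str = str->first;                                // read through the cons
        if (!str->is_sequential) return -1;              // first part not flat either
      }
      return str->chars[index];                          // flat string load
    }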
- __ bind(&flat_string); - STATIC_ASSERT(kAsciiStringTag != 0); - __ test(result_, Immediate(kStringEncodingMask)); - __ j(not_zero, &ascii_string); - - // 2-byte string. - // Load the 2-byte character code into the result register. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); - __ movzx_w(result_, FieldOperand(object_, - scratch_, times_1, // Scratch is smi-tagged. - SeqTwoByteString::kHeaderSize)); - __ jmp(&got_char_code); - - // ASCII string. - // Load the byte into the result register. - __ bind(&ascii_string); - __ SmiUntag(scratch_); - __ movzx_b(result_, FieldOperand(object_, - scratch_, times_1, - SeqAsciiString::kHeaderSize)); - __ bind(&got_char_code); - __ SmiTag(result_); - __ bind(&exit_); -} - - -void StringCharCodeAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharCodeAt slow case"); - - // Index is not a smi. - __ bind(&index_not_smi_); - // If index is a heap number, try converting it to an integer. - __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true); - call_helper.BeforeCall(masm); - __ push(object_); - __ push(index_); - __ push(index_); // Consumed by runtime conversion function. - if (index_flags_ == STRING_INDEX_IS_NUMBER) { - __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); - } else { - ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); - // NumberToSmi discards numbers that are not exact integers. - __ CallRuntime(Runtime::kNumberToSmi, 1); - } - if (!scratch_.is(eax)) { - // Save the conversion result before the pop instructions below - // have a chance to overwrite it. - __ mov(scratch_, eax); - } - __ pop(index_); - __ pop(object_); - // Reload the instance type. - __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset)); - __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset)); - call_helper.AfterCall(masm); - // If index is still not a smi, it must be out of range. - STATIC_ASSERT(kSmiTag == 0); - __ test(scratch_, Immediate(kSmiTagMask)); - __ j(not_zero, index_out_of_range_); - // Otherwise, return to the fast path. - __ jmp(&got_smi_index_); - - // Call runtime. We get here when the receiver is a string and the - // index is a number, but the code of getting the actual character - // is too complex (e.g., when the string needs to be flattened). - __ bind(&call_runtime_); - call_helper.BeforeCall(masm); - __ push(object_); - __ push(index_); - __ CallRuntime(Runtime::kStringCharCodeAt, 2); - if (!result_.is(eax)) { - __ mov(result_, eax); - } - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharCodeAt slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharFromCodeGenerator - -void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { - // Fast case of Heap::LookupSingleCharacterStringFromCode. - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); - __ test(code_, - Immediate(kSmiTagMask | - ((~String::kMaxAsciiCharCode) << kSmiTagSize))); - __ j(not_zero, &slow_case_, not_taken); - - __ Set(result_, Immediate(Factory::single_character_string_cache())); - STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiShiftSize == 0); - // At this point code register contains smi tagged ascii char code. 
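The single test instruction in StringCharFromCodeGenerator::GenerateFast above rejects a value that is either not a smi or outside the one-byte character range in one step (the cache lookup continues right below). Expanded into C++, with constants assumed from the ia32 smi layout:

    #include <stdint.h>

    const int kSmiTagSizeSketch = 1;           // smis are stored as value << 1
    const intptr_t kSmiTagMaskSketch = 1;
    const intptr_t kMaxAsciiCharCodeSketch = 0x7f;

    // Non-zero bits survive the mask if the value is not a smi (low bit set)
    // or encodes a char code above kMaxAsciiCharCode.
    bool IsSmiAsciiCharCodeSketch(intptr_t tagged_code) {
      intptr_t mask = kSmiTagMaskSketch |
                      (~kMaxAsciiCharCodeSketch << kSmiTagSizeSketch);
      return (tagged_code & mask) == 0;
    }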
- __ mov(result_, FieldOperand(result_, - code_, times_half_pointer_size, - FixedArray::kHeaderSize)); - __ cmp(result_, Factory::undefined_value()); - __ j(equal, &slow_case_, not_taken); - __ bind(&exit_); -} - - -void StringCharFromCodeGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - __ Abort("Unexpected fallthrough to CharFromCode slow case"); - - __ bind(&slow_case_); - call_helper.BeforeCall(masm); - __ push(code_); - __ CallRuntime(Runtime::kCharFromCode, 1); - if (!result_.is(eax)) { - __ mov(result_, eax); - } - call_helper.AfterCall(masm); - __ jmp(&exit_); - - __ Abort("Unexpected fallthrough from CharFromCode slow case"); -} - - -// ------------------------------------------------------------------------- -// StringCharAtGenerator - -void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { - char_code_at_generator_.GenerateFast(masm); - char_from_code_generator_.GenerateFast(masm); -} - - -void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, const RuntimeCallHelper& call_helper) { - char_code_at_generator_.GenerateSlow(masm, call_helper); - char_from_code_generator_.GenerateSlow(masm, call_helper); -} - - -void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; - - // Load the two arguments. - __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. - __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. - - // Make sure that both arguments are strings if not known in advance. - if (string_check_) { - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &string_add_runtime); - __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx); - __ j(above_equal, &string_add_runtime); - - // First argument is a a string, test second. - __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &string_add_runtime); - __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx); - __ j(above_equal, &string_add_runtime); - } - - // Both arguments are strings. - // eax: first string - // edx: second string - // Check if either of the strings are empty. In that case return the other. - Label second_not_zero_length, both_not_zero_length; - __ mov(ecx, FieldOperand(edx, String::kLengthOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ test(ecx, Operand(ecx)); - __ j(not_zero, &second_not_zero_length); - // Second string is empty, result is first string which is already in eax. - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - __ bind(&second_not_zero_length); - __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); - STATIC_ASSERT(kSmiTag == 0); - __ test(ebx, Operand(ebx)); - __ j(not_zero, &both_not_zero_length); - // First string is empty, result is second string which is in edx. - __ mov(eax, edx); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - // Both strings are non-empty. - // eax: first string - // ebx: length of first string as a smi - // ecx: length of second string as a smi - // edx: second string - // Look at the length of the result of adding the two strings. - Label string_add_flat_result, longer_than_two; - __ bind(&both_not_zero_length); - __ add(ebx, Operand(ecx)); - STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); - // Handle exceptionally long strings in the runtime system. - __ j(overflow, &string_add_runtime); - // Use the runtime system when adding two one character strings, as it - // contains optimizations for this specific case using the symbol table. 
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(2))); - __ j(not_equal, &longer_than_two); - - // Check that both strings are non-external ascii strings. - __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, - &string_add_runtime); - - // Get the two characters forming the sub string. - __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize)); - __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize)); - - // Try to lookup two character string in symbol table. If it is not found - // just allocate a new one. - Label make_two_character_string, make_flat_ascii_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, ebx, ecx, eax, edx, edi, &make_two_character_string); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - __ bind(&make_two_character_string); - __ Set(ebx, Immediate(Smi::FromInt(2))); - __ jmp(&make_flat_ascii_string); - - __ bind(&longer_than_two); - // Check if resulting string will be flat. - __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength))); - __ j(below, &string_add_flat_result); - - // If result is not supposed to be flat allocate a cons string object. If both - // strings are ascii the result is an ascii cons string. - Label non_ascii, allocated, ascii_data; - __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); - __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); - __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); - __ and_(ecx, Operand(edi)); - STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); - __ test(ecx, Immediate(kAsciiStringTag)); - __ j(zero, &non_ascii); - __ bind(&ascii_data); - // Allocate an acsii cons string. - __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime); - __ bind(&allocated); - // Fill the fields of the cons string. - if (FLAG_debug_code) __ AbortIfNotSmi(ebx); - __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx); - __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset), - Immediate(String::kEmptyHashField)); - __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax); - __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx); - __ mov(eax, ecx); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - __ bind(&non_ascii); - // At least one of the strings is two-byte. Check whether it happens - // to contain only ascii characters. - // ecx: first instance type AND second instance type. - // edi: second instance type. - __ test(ecx, Immediate(kAsciiDataHintMask)); - __ j(not_zero, &ascii_data); - __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); - __ xor_(edi, Operand(ecx)); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); - __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); - __ j(equal, &ascii_data); - // Allocate a two byte cons string. - __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime); - __ jmp(&allocated); - - // Handle creating a flat result. First check that both strings are not - // external strings. 
- // eax: first string - // ebx: length of resulting flat string as a smi - // edx: second string - __ bind(&string_add_flat_result); - __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); - __ and_(ecx, kStringRepresentationMask); - __ cmp(ecx, kExternalStringTag); - __ j(equal, &string_add_runtime); - __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); - __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); - __ and_(ecx, kStringRepresentationMask); - __ cmp(ecx, kExternalStringTag); - __ j(equal, &string_add_runtime); - // Now check if both strings are ascii strings. - // eax: first string - // ebx: length of resulting flat string as a smi - // edx: second string - Label non_ascii_string_add_flat_result; - STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag); - __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); - __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); - __ j(zero, &non_ascii_string_add_flat_result); - __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); - __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); - __ j(zero, &string_add_runtime); - - __ bind(&make_flat_ascii_string); - // Both strings are ascii strings. As they are short they are both flat. - // ebx: length of resulting flat string as a smi - __ SmiUntag(ebx); - __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime); - // eax: result string - __ mov(ecx, eax); - // Locate first character of result. - __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Load first argument and locate first character. - __ mov(edx, Operand(esp, 2 * kPointerSize)); - __ mov(edi, FieldOperand(edx, String::kLengthOffset)); - __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // eax: result string - // ecx: first character of result - // edx: first char of first argument - // edi: length of first argument - StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true); - // Load second argument and locate first character. - __ mov(edx, Operand(esp, 1 * kPointerSize)); - __ mov(edi, FieldOperand(edx, String::kLengthOffset)); - __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // eax: result string - // ecx: next character of result - // edx: first char of second argument - // edi: length of second argument - StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - // Handle creating a flat two byte result. - // eax: first string - known to be two byte - // ebx: length of resulting flat string as a smi - // edx: second string - __ bind(&non_ascii_string_add_flat_result); - __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset)); - __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag); - __ j(not_zero, &string_add_runtime); - // Both strings are two byte strings. As they are short they are both - // flat. - __ SmiUntag(ebx); - __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime); - // eax: result string - __ mov(ecx, eax); - // Locate first character of result. - __ add(Operand(ecx), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Load first argument and locate first character. 
- __ mov(edx, Operand(esp, 2 * kPointerSize)); - __ mov(edi, FieldOperand(edx, String::kLengthOffset)); - __ SmiUntag(edi); - __ add(Operand(edx), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // eax: result string - // ecx: first character of result - // edx: first char of first argument - // edi: length of first argument - StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false); - // Load second argument and locate first character. - __ mov(edx, Operand(esp, 1 * kPointerSize)); - __ mov(edi, FieldOperand(edx, String::kLengthOffset)); - __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // eax: result string - // ecx: next character of result - // edx: first char of second argument - // edi: length of second argument - StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false); - __ IncrementCounter(&Counters::string_add_native, 1); - __ ret(2 * kPointerSize); - - // Just jump to runtime to add the two strings. - __ bind(&string_add_runtime); - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); -} - - -void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii) { - Label loop; - __ bind(&loop); - // This loop just copies one character at a time, as it is only used for very - // short strings. - if (ascii) { - __ mov_b(scratch, Operand(src, 0)); - __ mov_b(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(1)); - __ add(Operand(dest), Immediate(1)); - } else { - __ mov_w(scratch, Operand(src, 0)); - __ mov_w(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(2)); - __ add(Operand(dest), Immediate(2)); - } - __ sub(Operand(count), Immediate(1)); - __ j(not_zero, &loop); -} - - -void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii) { - // Copy characters using rep movs of doublewords. - // The destination is aligned on a 4 byte boundary because we are - // copying to the beginning of a newly allocated string. - ASSERT(dest.is(edi)); // rep movs destination - ASSERT(src.is(esi)); // rep movs source - ASSERT(count.is(ecx)); // rep movs count - ASSERT(!scratch.is(dest)); - ASSERT(!scratch.is(src)); - ASSERT(!scratch.is(count)); - - // Nothing to do for zero characters. - Label done; - __ test(count, Operand(count)); - __ j(zero, &done); - - // Make count the number of bytes to copy. - if (!ascii) { - __ shl(count, 1); - } - - // Don't enter the rep movs if there are less than 4 bytes to copy. - Label last_bytes; - __ test(count, Immediate(~3)); - __ j(zero, &last_bytes); - - // Copy from edi to esi using rep movs instruction. - __ mov(scratch, count); - __ sar(count, 2); // Number of doublewords to copy. - __ cld(); - __ rep_movs(); - - // Find number of bytes left. - __ mov(count, scratch); - __ and_(count, 3); - - // Check if there are more bytes to copy. - __ bind(&last_bytes); - __ test(count, Operand(count)); - __ j(zero, &done); - - // Copy remaining characters. 
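GenerateCopyCharactersREP above copies in two phases: whole doublewords via rep movs, then the remaining zero to three bytes one at a time (its byte-wise tail loop continues just below). An equivalent C++ sketch, with illustrative names:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    // Two-phase copy: count/4 doublewords (the 'rep movs' part), then the 0-3
    // leftover bytes. Overlapping ranges are not supported, as in the stub.
    void CopyCharactersRepSketch(uint8_t* dest, const uint8_t* src,
                                 size_t byte_count) {
      size_t words = byte_count >> 2;   // __ sar(count, 2): doublewords to move
      size_t tail = byte_count & 3;     // __ and_(count, 3): leftover bytes
      memcpy(dest, src, words * 4);     // stands in for the rep movs block copy
      for (size_t i = 0; i < tail; i++) {
        dest[words * 4 + i] = src[words * 4 + i];  // byte-wise tail loop
      }
    }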
- Label loop; - __ bind(&loop); - __ mov_b(scratch, Operand(src, 0)); - __ mov_b(Operand(dest, 0), scratch); - __ add(Operand(src), Immediate(1)); - __ add(Operand(dest), Immediate(1)); - __ sub(Operand(count), Immediate(1)); - __ j(not_zero, &loop); - - __ bind(&done); -} - - -void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Label* not_found) { - // Register scratch3 is the general scratch register in this function. - Register scratch = scratch3; - - // Make sure that both characters are not digits as such strings has a - // different hash algorithm. Don't try to look for these in the symbol table. - Label not_array_index; - __ mov(scratch, c1); - __ sub(Operand(scratch), Immediate(static_cast('0'))); - __ cmp(Operand(scratch), Immediate(static_cast('9' - '0'))); - __ j(above, ¬_array_index); - __ mov(scratch, c2); - __ sub(Operand(scratch), Immediate(static_cast('0'))); - __ cmp(Operand(scratch), Immediate(static_cast('9' - '0'))); - __ j(below_equal, not_found); - - __ bind(¬_array_index); - // Calculate the two character string hash. - Register hash = scratch1; - GenerateHashInit(masm, hash, c1, scratch); - GenerateHashAddCharacter(masm, hash, c2, scratch); - GenerateHashGetHash(masm, hash, scratch); - - // Collect the two characters in a register. - Register chars = c1; - __ shl(c2, kBitsPerByte); - __ or_(chars, Operand(c2)); - - // chars: two character string, char 1 in byte 0 and char 2 in byte 1. - // hash: hash of two character string. - - // Load the symbol table. - Register symbol_table = c2; - ExternalReference roots_address = ExternalReference::roots_address(); - __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex)); - __ mov(symbol_table, - Operand::StaticArray(scratch, times_pointer_size, roots_address)); - - // Calculate capacity mask from the symbol table capacity. - Register mask = scratch2; - __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); - __ SmiUntag(mask); - __ sub(Operand(mask), Immediate(1)); - - // Registers - // chars: two character string, char 1 in byte 0 and char 2 in byte 1. - // hash: hash of two character string - // symbol_table: symbol table - // mask: capacity mask - // scratch: - - - // Perform a number of probes in the symbol table. - static const int kProbes = 4; - Label found_in_symbol_table; - Label next_probe[kProbes], next_probe_pop_mask[kProbes]; - for (int i = 0; i < kProbes; i++) { - // Calculate entry in symbol table. - __ mov(scratch, hash); - if (i > 0) { - __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i))); - } - __ and_(scratch, Operand(mask)); - - // Load the entry from the symbol table. - Register candidate = scratch; // Scratch register contains candidate. - STATIC_ASSERT(SymbolTable::kEntrySize == 1); - __ mov(candidate, - FieldOperand(symbol_table, - scratch, - times_pointer_size, - SymbolTable::kElementsStartOffset)); - - // If entry is undefined no string with this hash can be found. - __ cmp(candidate, Factory::undefined_value()); - __ j(equal, not_found); - - // If length is not 2 the string is not a candidate. - __ cmp(FieldOperand(candidate, String::kLengthOffset), - Immediate(Smi::FromInt(2))); - __ j(not_equal, &next_probe[i]); - - // As we are out of registers save the mask on the stack and use that - // register as a temporary. - __ push(mask); - Register temp = mask; - - // Check that the candidate is a non-external ascii string. 
- __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset)); - __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset)); - __ JumpIfInstanceTypeIsNotSequentialAscii( - temp, temp, &next_probe_pop_mask[i]); - - // Check if the two characters match. - __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); - __ and_(temp, 0x0000ffff); - __ cmp(chars, Operand(temp)); - __ j(equal, &found_in_symbol_table); - __ bind(&next_probe_pop_mask[i]); - __ pop(mask); - __ bind(&next_probe[i]); - } - - // No matching 2 character string found by probing. - __ jmp(not_found); - - // Scratch register contains result when we fall through to here. - Register result = scratch; - __ bind(&found_in_symbol_table); - __ pop(mask); // Pop saved mask from the stack. - if (!result.is(eax)) { - __ mov(eax, result); - } -} - - -void StringHelper::GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character, - Register scratch) { - // hash = character + (character << 10); - __ mov(hash, character); - __ shl(hash, 10); - __ add(hash, Operand(character)); - // hash ^= hash >> 6; - __ mov(scratch, hash); - __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); -} - - -void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character, - Register scratch) { - // hash += character; - __ add(hash, Operand(character)); - // hash += hash << 10; - __ mov(scratch, hash); - __ shl(scratch, 10); - __ add(hash, Operand(scratch)); - // hash ^= hash >> 6; - __ mov(scratch, hash); - __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); -} - - -void StringHelper::GenerateHashGetHash(MacroAssembler* masm, - Register hash, - Register scratch) { - // hash += hash << 3; - __ mov(scratch, hash); - __ shl(scratch, 3); - __ add(hash, Operand(scratch)); - // hash ^= hash >> 11; - __ mov(scratch, hash); - __ sar(scratch, 11); - __ xor_(hash, Operand(scratch)); - // hash += hash << 15; - __ mov(scratch, hash); - __ shl(scratch, 15); - __ add(hash, Operand(scratch)); - - // if (hash == 0) hash = 27; - Label hash_not_zero; - __ test(hash, Operand(hash)); - __ j(not_zero, &hash_not_zero); - __ mov(hash, Immediate(27)); - __ bind(&hash_not_zero); -} - - -void SubStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - // Stack frame on entry. - // esp[0]: return address - // esp[4]: to - // esp[8]: from - // esp[12]: string - - // Make sure first argument is a string. - __ mov(eax, Operand(esp, 3 * kPointerSize)); - STATIC_ASSERT(kSmiTag == 0); - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &runtime); - Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); - __ j(NegateCondition(is_string), &runtime); - - // eax: string - // ebx: instance type - - // Calculate length of sub string using the smi values. - Label result_longer_than_two; - __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index. - __ test(ecx, Immediate(kSmiTagMask)); - __ j(not_zero, &runtime); - __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. - __ test(edx, Immediate(kSmiTagMask)); - __ j(not_zero, &runtime); - __ sub(ecx, Operand(edx)); - __ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); - Label return_eax; - __ j(equal, &return_eax); - // Special handling of sub-strings of length 1 and 2. One character strings - // are handled in the runtime system (looked up in the single character - // cache). Two character strings are looked for in the symbol cache. - __ SmiUntag(ecx); // Result length is no longer smi. 
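The three hash helpers above (GenerateHashInit, GenerateHashAddCharacter, GenerateHashGetHash) compute a small shift-and-xor running hash; the SubStringStub code resumes just below. Transcribed into C++ directly from the emitted operations, assuming unsigned 32-bit arithmetic and using an illustrative function name:

    #include <stdint.h>

    uint32_t TwoCharHashSketch(uint8_t c1, uint8_t c2) {
      uint32_t hash = c1 + (c1 << 10);   // GenerateHashInit
      hash ^= hash >> 6;
      hash += c2;                        // GenerateHashAddCharacter
      hash += hash << 10;
      hash ^= hash >> 6;
      hash += hash << 3;                 // GenerateHashGetHash
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;          // never return a zero hash
      return hash;
    }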
- __ cmp(ecx, 2); - __ j(greater, &result_longer_than_two); - __ j(less, &runtime); - - // Sub string of length 2 requested. - // eax: string - // ebx: instance type - // ecx: sub string length (value is 2) - // edx: from index (smi) - __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime); - - // Get the two characters forming the sub string. - __ SmiUntag(edx); // From index is no longer smi. - __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize)); - __ movzx_b(ecx, - FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1)); - - // Try to lookup two character string in symbol table. - Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( - masm, ebx, ecx, eax, edx, edi, &make_two_character_string); - __ ret(3 * kPointerSize); - - __ bind(&make_two_character_string); - // Setup registers for allocating the two character string. - __ mov(eax, Operand(esp, 3 * kPointerSize)); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); - __ Set(ecx, Immediate(2)); - - __ bind(&result_longer_than_two); - // eax: string - // ebx: instance type - // ecx: result string length - // Check for flat ascii string - Label non_ascii_flat; - __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat); - - // Allocate the result. - __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime); - - // eax: result string - // ecx: result string length - __ mov(edx, esi); // esi used by following code. - // Locate first character of result. - __ mov(edi, eax); - __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - // Load string argument and locate character of sub string start. - __ mov(esi, Operand(esp, 3 * kPointerSize)); - __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); - __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from - __ SmiUntag(ebx); - __ add(esi, Operand(ebx)); - - // eax: result string - // ecx: result length - // edx: original value of esi - // edi: first character of result - // esi: character of sub string start - StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true); - __ mov(esi, edx); // Restore esi. - __ IncrementCounter(&Counters::sub_string_native, 1); - __ ret(3 * kPointerSize); - - __ bind(&non_ascii_flat); - // eax: string - // ebx: instance type & kStringRepresentationMask | kStringEncodingMask - // ecx: result string length - // Check for flat two byte string - __ cmp(ebx, kSeqStringTag | kTwoByteStringTag); - __ j(not_equal, &runtime); - - // Allocate the result. - __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime); - - // eax: result string - // ecx: result string length - __ mov(edx, esi); // esi used by following code. - // Locate first character of result. - __ mov(edi, eax); - __ add(Operand(edi), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - // Load string argument and locate character of sub string start. - __ mov(esi, Operand(esp, 3 * kPointerSize)); - __ add(Operand(esi), - Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from - // As from is a smi it is 2 times the value which matches the size of a two - // byte character. 
- STATIC_ASSERT(kSmiTag == 0); - STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(esi, Operand(ebx)); - - // eax: result string - // ecx: result length - // edx: original value of esi - // edi: first character of result - // esi: character of sub string start - StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false); - __ mov(esi, edx); // Restore esi. - - __ bind(&return_eax); - __ IncrementCounter(&Counters::sub_string_native, 1); - __ ret(3 * kPointerSize); - - // Just jump to runtime to create the sub string. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kSubString, 3, 1); -} - - -void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3) { - Label result_not_equal; - Label result_greater; - Label compare_lengths; - - __ IncrementCounter(&Counters::string_compare_native, 1); - - // Find minimum length. - Label left_shorter; - __ mov(scratch1, FieldOperand(left, String::kLengthOffset)); - __ mov(scratch3, scratch1); - __ sub(scratch3, FieldOperand(right, String::kLengthOffset)); - - Register length_delta = scratch3; - - __ j(less_equal, &left_shorter); - // Right string is shorter. Change scratch1 to be length of right string. - __ sub(scratch1, Operand(length_delta)); - __ bind(&left_shorter); - - Register min_length = scratch1; - - // If either length is zero, just compare lengths. - __ test(min_length, Operand(min_length)); - __ j(zero, &compare_lengths); - - // Change index to run from -min_length to -1 by adding min_length - // to string start. This means that loop ends when index reaches zero, - // which doesn't need an additional compare. - __ SmiUntag(min_length); - __ lea(left, - FieldOperand(left, - min_length, times_1, - SeqAsciiString::kHeaderSize)); - __ lea(right, - FieldOperand(right, - min_length, times_1, - SeqAsciiString::kHeaderSize)); - __ neg(min_length); - - Register index = min_length; // index = -min_length; - - { - // Compare loop. - Label loop; - __ bind(&loop); - // Compare characters. - __ mov_b(scratch2, Operand(left, index, times_1, 0)); - __ cmpb(scratch2, Operand(right, index, times_1, 0)); - __ j(not_equal, &result_not_equal); - __ add(Operand(index), Immediate(1)); - __ j(not_zero, &loop); - } - - // Compare lengths - strings up to min-length are equal. - __ bind(&compare_lengths); - __ test(length_delta, Operand(length_delta)); - __ j(not_zero, &result_not_equal); - - // Result is EQUAL. - STATIC_ASSERT(EQUAL == 0); - STATIC_ASSERT(kSmiTag == 0); - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ ret(0); - - __ bind(&result_not_equal); - __ j(greater, &result_greater); - - // Result is LESS. - __ Set(eax, Immediate(Smi::FromInt(LESS))); - __ ret(0); - - // Result is GREATER. - __ bind(&result_greater); - __ Set(eax, Immediate(Smi::FromInt(GREATER))); - __ ret(0); -} - - -void StringCompareStub::Generate(MacroAssembler* masm) { - Label runtime; - - // Stack frame on entry. - // esp[0]: return address - // esp[4]: right string - // esp[8]: left string - - __ mov(edx, Operand(esp, 2 * kPointerSize)); // left - __ mov(eax, Operand(esp, 1 * kPointerSize)); // right - - Label not_same; - __ cmp(edx, Operand(eax)); - __ j(not_equal, ¬_same); - STATIC_ASSERT(EQUAL == 0); - STATIC_ASSERT(kSmiTag == 0); - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ IncrementCounter(&Counters::string_compare_native, 1); - __ ret(2 * kPointerSize); - - __ bind(¬_same); - - // Check that both objects are sequential ascii strings. 
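GenerateCompareFlatAsciiStrings above indexes both strings from -min_length up to zero, so the loop-end test is simply "index == 0" and no separate bounds compare is needed. The same control flow in C++, with return values mirroring LESS/EQUAL/GREATER (names are illustrative):

    int CompareFlatAsciiSketch(const char* left, int left_length,
                               const char* right, int right_length) {
      int min_length = left_length < right_length ? left_length : right_length;
      const char* left_end = left + min_length;    // lea(left, ... min_length ...)
      const char* right_end = right + min_length;
      for (int index = -min_length; index != 0; index++) {
        if (left_end[index] != right_end[index]) { // first difference decides
          return left_end[index] < right_end[index] ? -1 : 1;  // LESS / GREATER
        }
      }
      int length_delta = left_length - right_length;
      if (length_delta == 0) return 0;             // EQUAL
      return length_delta < 0 ? -1 : 1;            // shorter prefix compares lower
    }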
- __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime); - - // Compare flat ascii strings. - // Drop arguments from the stack. - __ pop(ecx); - __ add(Operand(esp), Immediate(2 * kPointerSize)); - __ push(ecx); - GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); - - // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kStringCompare, 2, 1); -} - #undef __ #define __ masm. diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index 37b70110cc..adc00058df 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -574,6 +574,11 @@ class CodeGenerator: public AstVisitor { void Int32BinaryOperation(BinaryOperation* node); + // Generate a stub call from the virtual frame. + Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub, + Result* left, + Result* right); + void Comparison(AstNode* node, Condition cc, bool strict, @@ -627,9 +632,6 @@ class CodeGenerator: public AstVisitor { static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle name); bool CheckForInlineRuntimeCall(CallRuntime* node); - static bool PatchInlineRuntimeEntry(Handle name, - const InlineRuntimeLUT& new_entry, - InlineRuntimeLUT* old_entry); void ProcessDeclarations(ZoneList* declarations); @@ -699,8 +701,14 @@ class CodeGenerator: public AstVisitor { // Support for direct calls from JavaScript to native RegExp code. void GenerateRegExpExec(ZoneList* args); + // Construct a RegExp exec result with two in-object properties. void GenerateRegExpConstructResult(ZoneList* args); + // Clone the result of a regexp function. + // Must be an object created by GenerateRegExpConstructResult with + // no extra properties. + void GenerateRegExpCloneResult(ZoneList* args); + // Support for fast native caches. void GenerateGetFromCache(ZoneList* args); @@ -724,6 +732,9 @@ class CodeGenerator: public AstVisitor { // Check whether two RegExps are equivalent void GenerateIsRegExpEquivalent(ZoneList* args); + void GenerateHasCachedArrayIndex(ZoneList* args); + void GenerateGetCachedArrayIndex(ZoneList* args); + // Simple condition analysis. enum ConditionAnalysis { ALWAYS_TRUE, @@ -797,327 +808,6 @@ class CodeGenerator: public AstVisitor { }; -// Compute a transcendental math function natively, or call the -// TranscendentalCache runtime function. -class TranscendentalCacheStub: public CodeStub { - public: - explicit TranscendentalCacheStub(TranscendentalCache::Type type) - : type_(type) {} - void Generate(MacroAssembler* masm); - private: - TranscendentalCache::Type type_; - Major MajorKey() { return TranscendentalCache; } - int MinorKey() { return type_; } - Runtime::FunctionId RuntimeFunction(); - void GenerateOperation(MacroAssembler* masm); -}; - - -class ToBooleanStub: public CodeStub { - public: - ToBooleanStub() { } - - void Generate(MacroAssembler* masm); - - private: - Major MajorKey() { return ToBoolean; } - int MinorKey() { return 0; } -}; - - -// Flag that indicates how to generate code for the stub GenericBinaryOpStub. -enum GenericBinaryFlags { - NO_GENERIC_BINARY_FLAGS = 0, - NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub. 
-}; - - -class GenericBinaryOpStub: public CodeStub { - public: - GenericBinaryOpStub(Token::Value op, - OverwriteMode mode, - GenericBinaryFlags flags, - TypeInfo operands_type) - : op_(op), - mode_(mode), - flags_(flags), - args_in_registers_(false), - args_reversed_(false), - static_operands_type_(operands_type), - runtime_operands_type_(BinaryOpIC::DEFAULT), - name_(NULL) { - if (static_operands_type_.IsSmi()) { - mode_ = NO_OVERWRITE; - } - use_sse3_ = CpuFeatures::IsSupported(SSE3); - ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); - } - - GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - flags_(FlagBits::decode(key)), - args_in_registers_(ArgsInRegistersBits::decode(key)), - args_reversed_(ArgsReversedBits::decode(key)), - use_sse3_(SSE3Bits::decode(key)), - static_operands_type_(TypeInfo::ExpandedRepresentation( - StaticTypeInfoBits::decode(key))), - runtime_operands_type_(runtime_operands_type), - name_(NULL) { - } - - // Generate code to call the stub with the supplied arguments. This will add - // code at the call site to prepare arguments either in registers or on the - // stack together with the actual call. - void GenerateCall(MacroAssembler* masm, Register left, Register right); - void GenerateCall(MacroAssembler* masm, Register left, Smi* right); - void GenerateCall(MacroAssembler* masm, Smi* left, Register right); - - Result GenerateCall(MacroAssembler* masm, - VirtualFrame* frame, - Result* left, - Result* right); - - private: - Token::Value op_; - OverwriteMode mode_; - GenericBinaryFlags flags_; - bool args_in_registers_; // Arguments passed in registers not on the stack. - bool args_reversed_; // Left and right argument are swapped. - bool use_sse3_; - - // Number type information of operands, determined by code generator. - TypeInfo static_operands_type_; - - // Operand type information determined at runtime. - BinaryOpIC::TypeInfo runtime_operands_type_; - - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("GenericBinaryOpStub %d (op %s), " - "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast(mode_), - static_cast(flags_), - static_cast(args_in_registers_), - static_cast(args_reversed_), - static_operands_type_.ToString()); - } -#endif - - // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM. - class ModeBits: public BitField {}; - class OpBits: public BitField {}; - class SSE3Bits: public BitField {}; - class ArgsInRegistersBits: public BitField {}; - class ArgsReversedBits: public BitField {}; - class FlagBits: public BitField {}; - class StaticTypeInfoBits: public BitField {}; - class RuntimeTypeInfoBits: public BitField {}; - - Major MajorKey() { return GenericBinaryOp; } - int MinorKey() { - // Encode the parameters in a unique 18 bit value. 
- return OpBits::encode(op_) - | ModeBits::encode(mode_) - | FlagBits::encode(flags_) - | SSE3Bits::encode(use_sse3_) - | ArgsInRegistersBits::encode(args_in_registers_) - | ArgsReversedBits::encode(args_reversed_) - | StaticTypeInfoBits::encode( - static_operands_type_.ThreeBitRepresentation()) - | RuntimeTypeInfoBits::encode(runtime_operands_type_); - } - - void Generate(MacroAssembler* masm); - void GenerateSmiCode(MacroAssembler* masm, Label* slow); - void GenerateLoadArguments(MacroAssembler* masm); - void GenerateReturn(MacroAssembler* masm); - void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure); - void GenerateRegisterArgsPush(MacroAssembler* masm); - void GenerateTypeTransition(MacroAssembler* masm); - - bool ArgsInRegistersSupported() { - return op_ == Token::ADD || op_ == Token::SUB - || op_ == Token::MUL || op_ == Token::DIV; - } - bool IsOperationCommutative() { - return (op_ == Token::ADD) || (op_ == Token::MUL); - } - - void SetArgsInRegisters() { args_in_registers_ = true; } - void SetArgsReversed() { args_reversed_ = true; } - bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; } - bool HasArgsInRegisters() { return args_in_registers_; } - bool HasArgsReversed() { return args_reversed_; } - - bool ShouldGenerateSmiCode() { - return HasSmiCodeInStub() && - runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && - runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - bool ShouldGenerateFPCode() { - return runtime_operands_type_ != BinaryOpIC::STRINGS; - } - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(runtime_operands_type_); - } -}; - - -class StringHelper : public AllStatic { - public: - // Generate code for copying characters using a simple loop. This should only - // be used in places where the number of characters is small and the - // additional setup and checking in GenerateCopyCharactersREP adds too much - // overhead. Copying of overlapping regions is not supported. - static void GenerateCopyCharacters(MacroAssembler* masm, - Register dest, - Register src, - Register count, - Register scratch, - bool ascii); - - // Generate code for copying characters using the rep movs instruction. - // Copies ecx characters from esi to edi. Copying of overlapping regions is - // not supported. - static void GenerateCopyCharactersREP(MacroAssembler* masm, - Register dest, // Must be edi. - Register src, // Must be esi. - Register count, // Must be ecx. - Register scratch, // Neither of above. - bool ascii); - - // Probe the symbol table for a two character string. If the string is - // not found by probing a jump to the label not_found is performed. This jump - // does not guarantee that the string is not in the symbol table. If the - // string is found the code falls through with the string in register eax. - static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, - Register c1, - Register c2, - Register scratch1, - Register scratch2, - Register scratch3, - Label* not_found); - - // Generate string hash. 
- static void GenerateHashInit(MacroAssembler* masm, - Register hash, - Register character, - Register scratch); - static void GenerateHashAddCharacter(MacroAssembler* masm, - Register hash, - Register character, - Register scratch); - static void GenerateHashGetHash(MacroAssembler* masm, - Register hash, - Register scratch); - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); -}; - - -// Flag that indicates how to generate code for the stub StringAddStub. -enum StringAddFlags { - NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. -}; - - -class StringAddStub: public CodeStub { - public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } - - private: - Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } - - void Generate(MacroAssembler* masm); - - // Should the stub check whether arguments are strings? - bool string_check_; -}; - - -class SubStringStub: public CodeStub { - public: - SubStringStub() {} - - private: - Major MajorKey() { return SubString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - -class StringCompareStub: public CodeStub { - public: - explicit StringCompareStub() { - } - - // Compare two flat ascii strings and returns result in eax after popping two - // arguments from the stack. - static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm, - Register left, - Register right, - Register scratch1, - Register scratch2, - Register scratch3); - - private: - Major MajorKey() { return StringCompare; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - -class NumberToStringStub: public CodeStub { - public: - NumberToStringStub() { } - - // Generate code to do a lookup in the number string cache. If the number in - // the register object is found in the cache the generated code falls through - // with the result in the result register. The object and the result register - // can be the same. If the number is not found in the cache the code jumps to - // the label not_found with only the content of register object unchanged. - static void GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - bool object_is_smi, - Label* not_found); - - private: - Major MajorKey() { return NumberToString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "NumberToStringStub"; } - -#ifdef DEBUG - void Print() { - PrintF("NumberToStringStub\n"); - } -#endif -}; - - } } // namespace v8::internal #endif // V8_IA32_CODEGEN_IA32_H_ diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc index b57cf3d07d..ee9456564c 100644 --- a/deps/v8/src/ia32/debug-ia32.cc +++ b/deps/v8/src/ia32/debug-ia32.cc @@ -94,22 +94,33 @@ void BreakLocationIterator::ClearDebugBreakAtSlot() { static void Generate_DebugBreakCallHelper(MacroAssembler* masm, - RegList pointer_regs, + RegList object_regs, + RegList non_object_regs, bool convert_call_to_jmp) { - // Save the content of all general purpose registers in memory. This copy in - // memory is later pushed onto the JS expression stack for the fake JS frame - // generated and also to the C frame generated on top of that. In the JS - // frame ONLY the registers containing pointers will be pushed on the - // expression stack. 
This causes the GC to update these pointers so that - // they will have the correct value when returning from the debugger. - __ SaveRegistersToMemory(kJSCallerSaved); - // Enter an internal frame. __ EnterInternalFrame(); - // Store the registers containing object pointers on the expression stack to - // make sure that these are correctly updated during GC. - __ PushRegistersFromMemory(pointer_regs); + // Store the registers containing live values on the expression stack to + // make sure that these are correctly updated during GC. Non object values + // are stored as a smi causing it to be untouched by GC. + ASSERT((object_regs & ~kJSCallerSaved) == 0); + ASSERT((non_object_regs & ~kJSCallerSaved) == 0); + ASSERT((object_regs & non_object_regs) == 0); + for (int i = 0; i < kNumJSCallerSaved; i++) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if ((object_regs & (1 << r)) != 0) { + __ push(reg); + } + if ((non_object_regs & (1 << r)) != 0) { + if (FLAG_debug_code) { + __ test(reg, Immediate(0xc0000000)); + __ Assert(zero, "Unable to encode value as smi"); + } + __ SmiTag(reg); + __ push(reg); + } + } #ifdef DEBUG __ RecordComment("// Calling from debug break to runtime - come in - over"); @@ -117,12 +128,25 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, __ Set(eax, Immediate(0)); // no arguments __ mov(ebx, Immediate(ExternalReference::debug_break())); - CEntryStub ceb(1, ExitFrame::MODE_DEBUG); + CEntryStub ceb(1); __ CallStub(&ceb); // Restore the register values containing object pointers from the expression - // stack in the reverse order as they where pushed. - __ PopRegistersToMemory(pointer_regs); + // stack. + for (int i = kNumJSCallerSaved; --i >= 0;) { + int r = JSCallerSavedCode(i); + Register reg = { r }; + if (FLAG_debug_code) { + __ Set(reg, Immediate(kDebugZapValue)); + } + if ((object_regs & (1 << r)) != 0) { + __ pop(reg); + } + if ((non_object_regs & (1 << r)) != 0) { + __ pop(reg); + __ SmiUntag(reg); + } + } // Get rid of the internal frame. __ LeaveInternalFrame(); @@ -130,12 +154,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, // If this call did not replace a call but patched other code then there will // be an unwanted return address left on the stack. Here we get rid of that. if (convert_call_to_jmp) { - __ pop(eax); + __ add(Operand(esp), Immediate(kPointerSize)); } - // Finally restore all registers. - __ RestoreRegistersFromMemory(kJSCallerSaved); - // Now that the break point has been handled, resume normal execution by // jumping to the target address intended by the caller and that was // overwritten by the address of DebugBreakXXX. 
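[Editor's note — illustrative sketch, not part of the patch.] The rewritten Generate_DebugBreakCallHelper above keeps raw (non-object) register values safe across GC by smi-tagging them before pushing them on the expression stack, so the collector sees an integer rather than a pointer. A minimal standalone C++ sketch of that encoding, assuming the ia32 convention of a zero one-bit smi tag and 31-bit payloads (the same property the helper asserts with test(reg, Immediate(0xc0000000))); it only mirrors the idea behind the SmiTag/SmiUntag macro instructions, it is not V8 code:

#include <cassert>
#include <cstdint>

static const int kSmiTagSize = 1;                        // low bit is the tag
static const uint32_t kSmiTagMask = (1u << kSmiTagSize) - 1;

// Encode a raw value as a smi so the GC treats it as an integer, not a pointer.
static uint32_t SmiTag(uint32_t value) {
  assert((value & 0xc0000000u) == 0);                    // must fit in 31 bits
  return value << kSmiTagSize;                           // tag bit ends up 0
}

// Recover the original raw value after the GC has run.
static uint32_t SmiUntag(uint32_t tagged) {
  assert((tagged & kSmiTagMask) == 0);
  return tagged >> kSmiTagSize;
}

int main() {
  uint32_t raw = 0x12345678;
  assert(SmiUntag(SmiTag(raw)) == raw);                  // round-trips cleanly
  return 0;
}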
@@ -151,7 +172,7 @@ void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) { // -- eax : receiver // -- ecx : name // ----------------------------------- - Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), false); + Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit(), 0, false); } @@ -162,7 +183,8 @@ void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) { // -- ecx : name // -- edx : receiver // ----------------------------------- - Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false); + Generate_DebugBreakCallHelper( + masm, eax.bit() | ecx.bit() | edx.bit(), 0, false); } @@ -172,7 +194,7 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) { // -- edx : receiver // -- eax : key // ----------------------------------- - Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), false); + Generate_DebugBreakCallHelper(masm, eax.bit() | edx.bit(), 0, false); } @@ -183,19 +205,17 @@ void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { // -- ecx : key // -- edx : receiver // ----------------------------------- - // Register eax contains an object that needs to be pushed on the - // expression stack of the fake JS frame. - Generate_DebugBreakCallHelper(masm, eax.bit() | ecx.bit() | edx.bit(), false); + Generate_DebugBreakCallHelper( + masm, eax.bit() | ecx.bit() | edx.bit(), 0, false); } void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) { // Register state for keyed IC call call (from ic-ia32.cc) // ----------- S t a t e ------------- - // -- eax: number of arguments + // -- ecx: name // ----------------------------------- - // The number of arguments in eax is not smi encoded. - Generate_DebugBreakCallHelper(masm, 0, false); + Generate_DebugBreakCallHelper(masm, ecx.bit(), 0, false); } @@ -204,10 +224,11 @@ void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) { // eax is the actual number of arguments not encoded as a smi see comment // above IC call. // ----------- S t a t e ------------- - // -- eax: number of arguments + // -- eax: number of arguments (not smi) + // -- edi: constructor function // ----------------------------------- // The number of arguments in eax is not smi encoded. - Generate_DebugBreakCallHelper(masm, 0, false); + Generate_DebugBreakCallHelper(masm, edi.bit(), eax.bit(), false); } @@ -216,7 +237,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- eax: return value // ----------------------------------- - Generate_DebugBreakCallHelper(masm, eax.bit(), true); + Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true); } @@ -225,7 +246,7 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) { // ----------- S t a t e ------------- // No registers used on entry. // ----------------------------------- - Generate_DebugBreakCallHelper(masm, 0, false); + Generate_DebugBreakCallHelper(masm, 0, 0, false); } @@ -245,7 +266,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) { void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) { // In the places where a debug break slot is inserted no registers can contain // object pointers. 
- Generate_DebugBreakCallHelper(masm, 0, true); + Generate_DebugBreakCallHelper(masm, 0, 0, true); } diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc index 212cfdeaa0..9baf76336b 100644 --- a/deps/v8/src/ia32/frames-ia32.cc +++ b/deps/v8/src/ia32/frames-ia32.cc @@ -35,21 +35,6 @@ namespace v8 { namespace internal { -StackFrame::Type StackFrame::ComputeType(State* state) { - ASSERT(state->fp != NULL); - if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) { - return ARGUMENTS_ADAPTOR; - } - // The marker and function offsets overlap. If the marker isn't a - // smi then the frame is a JavaScript frame -- and the marker is - // really the function. - const int offset = StandardFrameConstants::kMarkerOffset; - Object* marker = Memory::Object_at(state->fp + offset); - if (!marker->IsSmi()) return JAVA_SCRIPT; - return static_cast(Smi::cast(marker)->value()); -} - - StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) { if (fp == 0) return NONE; // Compute the stack pointer. @@ -58,58 +43,11 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) { state->fp = fp; state->sp = sp; state->pc_address = reinterpret_cast(sp - 1 * kPointerSize); + ASSERT(*state->pc_address != NULL); return EXIT; } -void ExitFrame::Iterate(ObjectVisitor* v) const { - v->VisitPointer(&code_slot()); - // The arguments are traversed as part of the expression stack of - // the calling frame. -} - - -int JavaScriptFrame::GetProvidedParametersCount() const { - return ComputeParametersCount(); -} - - -Address JavaScriptFrame::GetCallerStackPointer() const { - int arguments; - if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) { - // The arguments for cooked frames are traversed as if they were - // expression stack elements of the calling frame. The reason for - // this rather strange decision is that we cannot access the - // function during mark-compact GCs when the stack is cooked. - // In fact accessing heap objects (like function->shared() below) - // at all during GC is problematic. - arguments = 0; - } else { - // Compute the number of arguments by getting the number of formal - // parameters of the function. We must remember to take the - // receiver into account (+1). - JSFunction* function = JSFunction::cast(this->function()); - arguments = function->shared()->formal_parameter_count() + 1; - } - const int offset = StandardFrameConstants::kCallerSPOffset; - return fp() + offset + (arguments * kPointerSize); -} - - -Address ArgumentsAdaptorFrame::GetCallerStackPointer() const { - const int arguments = Smi::cast(GetExpression(0))->value(); - const int offset = StandardFrameConstants::kCallerSPOffset; - return fp() + offset + (arguments + 1) * kPointerSize; -} - - -Address InternalFrame::GetCallerStackPointer() const { - // Internal frames have no arguments. The stack pointer of the - // caller is at a fixed offset from the frame pointer. 
- return fp() + StandardFrameConstants::kCallerSPOffset; -} - - } } // namespace v8::internal #endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index cb36904ee3..1631b04327 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -29,6 +29,7 @@ #if defined(V8_TARGET_ARCH_IA32) +#include "code-stubs.h" #include "codegen-inl.h" #include "compiler.h" #include "debug.h" @@ -216,12 +217,28 @@ void FullCodeGenerator::EmitReturnSequence() { // Check that the size of the code used for returning matches what is // expected by the debugger. ASSERT_EQ(Assembler::kJSReturnSequenceLength, - masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); + masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); #endif } } +FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand( + Token::Value op, Expression* left, Expression* right) { + ASSERT(ShouldInlineSmiCase(op)); + if (op == Token::DIV || op == Token::MOD || op == Token::MUL) { + // We never generate inlined constant smi operations for these. + return kNoConstants; + } else if (right->IsSmiLiteral()) { + return kRightConstant; + } else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) { + return kLeftConstant; + } else { + return kNoConstants; + } +} + + void FullCodeGenerator::Apply(Expression::Context context, Register reg) { switch (context) { case Expression::kUninitialized: @@ -246,20 +263,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Register reg) { case Expression::kTest: // For simplicity we always test the accumulator register. if (!reg.is(result_register())) __ mov(result_register(), reg); - DoTest(context); - break; - - case Expression::kValueTest: - case Expression::kTestValue: - if (!reg.is(result_register())) __ mov(result_register(), reg); - switch (location_) { - case kAccumulator: - break; - case kStack: - __ push(result_register()); - break; - } - DoTest(context); + DoTest(true_label_, false_label_, fall_through_); break; } } @@ -289,20 +293,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) { case Expression::kTest: // For simplicity we always test the accumulator register. Move(result_register(), slot); - DoTest(context); - break; - - case Expression::kValueTest: - case Expression::kTestValue: - Move(result_register(), slot); - switch (location_) { - case kAccumulator: - break; - case kStack: - __ push(result_register()); - break; - } - DoTest(context); + DoTest(true_label_, false_label_, fall_through_); break; } } @@ -330,20 +321,7 @@ void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) { case Expression::kTest: // For simplicity we always test the accumulator register. __ mov(result_register(), lit->handle()); - DoTest(context); - break; - - case Expression::kValueTest: - case Expression::kTestValue: - __ mov(result_register(), lit->handle()); - switch (location_) { - case kAccumulator: - break; - case kStack: - __ push(result_register()); - break; - } - DoTest(context); + DoTest(true_label_, false_label_, fall_through_); break; } } @@ -371,20 +349,7 @@ void FullCodeGenerator::ApplyTOS(Expression::Context context) { case Expression::kTest: // For simplicity we always test the accumulator register. 
__ pop(result_register()); - DoTest(context); - break; - - case Expression::kValueTest: - case Expression::kTestValue: - switch (location_) { - case kAccumulator: - __ pop(result_register()); - break; - case kStack: - __ mov(result_register(), Operand(esp, 0)); - break; - } - DoTest(context); + DoTest(true_label_, false_label_, fall_through_); break; } } @@ -420,56 +385,7 @@ void FullCodeGenerator::DropAndApply(int count, // For simplicity we always test the accumulator register. __ Drop(count); if (!reg.is(result_register())) __ mov(result_register(), reg); - DoTest(context); - break; - - case Expression::kValueTest: - case Expression::kTestValue: - switch (location_) { - case kAccumulator: - __ Drop(count); - if (!reg.is(result_register())) __ mov(result_register(), reg); - break; - case kStack: - if (count > 1) __ Drop(count - 1); - __ mov(result_register(), reg); - __ mov(Operand(esp, 0), result_register()); - break; - } - DoTest(context); - break; - } -} - - -void FullCodeGenerator::PrepareTest(Label* materialize_true, - Label* materialize_false, - Label** if_true, - Label** if_false) { - switch (context_) { - case Expression::kUninitialized: - UNREACHABLE(); - break; - case Expression::kEffect: - // In an effect context, the true and the false case branch to the - // same label. - *if_true = *if_false = materialize_true; - break; - case Expression::kValue: - *if_true = materialize_true; - *if_false = materialize_false; - break; - case Expression::kTest: - *if_true = true_label_; - *if_false = false_label_; - break; - case Expression::kValueTest: - *if_true = materialize_true; - *if_false = false_label_; - break; - case Expression::kTestValue: - *if_true = true_label_; - *if_false = materialize_false; + DoTest(true_label_, false_label_, fall_through_); break; } } @@ -510,32 +426,6 @@ void FullCodeGenerator::Apply(Expression::Context context, case Expression::kTest: break; - - case Expression::kValueTest: - __ bind(materialize_true); - switch (location_) { - case kAccumulator: - __ mov(result_register(), Factory::true_value()); - break; - case kStack: - __ push(Immediate(Factory::true_value())); - break; - } - __ jmp(true_label_); - break; - - case Expression::kTestValue: - __ bind(materialize_false); - switch (location_) { - case kAccumulator: - __ mov(result_register(), Factory::false_value()); - break; - case kStack: - __ push(Immediate(Factory::false_value())); - break; - } - __ jmp(false_label_); - break; } } @@ -563,78 +453,19 @@ void FullCodeGenerator::Apply(Expression::Context context, bool flag) { break; } case Expression::kTest: - __ jmp(flag ? true_label_ : false_label_); - break; - case Expression::kTestValue: - switch (location_) { - case kAccumulator: - // If value is false it's needed. - if (!flag) __ mov(result_register(), Factory::false_value()); - break; - case kStack: - // If value is false it's needed. - if (!flag) __ push(Immediate(Factory::false_value())); - break; - } - __ jmp(flag ? true_label_ : false_label_); - break; - case Expression::kValueTest: - switch (location_) { - case kAccumulator: - // If value is true it's needed. - if (flag) __ mov(result_register(), Factory::true_value()); - break; - case kStack: - // If value is true it's needed. - if (flag) __ push(Immediate(Factory::true_value())); - break; + if (flag) { + if (true_label_ != fall_through_) __ jmp(true_label_); + } else { + if (false_label_ != fall_through_) __ jmp(false_label_); } - __ jmp(flag ? 
true_label_ : false_label_); break; } } -void FullCodeGenerator::DoTest(Expression::Context context) { - // The value to test is in the accumulator. If the value might be needed - // on the stack (value/test and test/value contexts with a stack location - // desired), then the value is already duplicated on the stack. - ASSERT_NE(NULL, true_label_); - ASSERT_NE(NULL, false_label_); - - // In value/test and test/value expression contexts with stack as the - // desired location, there is already an extra value on the stack. Use a - // label to discard it if unneeded. - Label discard; - Label* if_true = true_label_; - Label* if_false = false_label_; - switch (context) { - case Expression::kUninitialized: - case Expression::kEffect: - case Expression::kValue: - UNREACHABLE(); - case Expression::kTest: - break; - case Expression::kValueTest: - switch (location_) { - case kAccumulator: - break; - case kStack: - if_false = &discard; - break; - } - break; - case Expression::kTestValue: - switch (location_) { - case kAccumulator: - break; - case kStack: - if_true = &discard; - break; - } - break; - } - +void FullCodeGenerator::DoTest(Label* if_true, + Label* if_false, + Label* fall_through) { // Emit the inlined tests assumed by the stub. __ cmp(result_register(), Factory::undefined_value()); __ j(equal, if_false); @@ -648,83 +479,28 @@ void FullCodeGenerator::DoTest(Expression::Context context) { __ test(result_register(), Immediate(kSmiTagMask)); __ j(zero, if_true); - // Save a copy of the value if it may be needed and isn't already saved. - switch (context) { - case Expression::kUninitialized: - case Expression::kEffect: - case Expression::kValue: - UNREACHABLE(); - case Expression::kTest: - break; - case Expression::kValueTest: - switch (location_) { - case kAccumulator: - __ push(result_register()); - break; - case kStack: - break; - } - break; - case Expression::kTestValue: - switch (location_) { - case kAccumulator: - __ push(result_register()); - break; - case kStack: - break; - } - break; - } - // Call the ToBoolean stub for all other cases. ToBooleanStub stub; __ push(result_register()); __ CallStub(&stub); __ test(eax, Operand(eax)); - // The stub returns nonzero for true. Complete based on the context. - switch (context) { - case Expression::kUninitialized: - case Expression::kEffect: - case Expression::kValue: - UNREACHABLE(); - - case Expression::kTest: - __ j(not_zero, true_label_); - __ jmp(false_label_); - break; + // The stub returns nonzero for true. 
+ Split(not_zero, if_true, if_false, fall_through); +} - case Expression::kValueTest: - switch (location_) { - case kAccumulator: - __ j(zero, &discard); - __ pop(result_register()); - __ jmp(true_label_); - break; - case kStack: - __ j(not_zero, true_label_); - break; - } - __ bind(&discard); - __ Drop(1); - __ jmp(false_label_); - break; - case Expression::kTestValue: - switch (location_) { - case kAccumulator: - __ j(not_zero, &discard); - __ pop(result_register()); - __ jmp(false_label_); - break; - case kStack: - __ j(zero, false_label_); - break; - } - __ bind(&discard); - __ Drop(1); - __ jmp(true_label_); - break; +void FullCodeGenerator::Split(Condition cc, + Label* if_true, + Label* if_false, + Label* fall_through) { + if (if_false == fall_through) { + __ j(cc, if_true); + } else if (if_true == fall_through) { + __ j(NegateCondition(cc), if_false); + } else { + __ j(cc, if_true); + __ jmp(if_false); } } @@ -908,20 +684,21 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Compile the label expression. VisitForValue(clause->label(), kAccumulator); - // Perform the comparison as if via '==='. The comparison stub expects - // the smi vs. smi case to be handled before it is called. - Label slow_case; + // Perform the comparison as if via '==='. __ mov(edx, Operand(esp, 0)); // Switch value. - __ mov(ecx, edx); - __ or_(ecx, Operand(eax)); - __ test(ecx, Immediate(kSmiTagMask)); - __ j(not_zero, &slow_case, not_taken); - __ cmp(edx, Operand(eax)); - __ j(not_equal, &next_test); - __ Drop(1); // Switch value is no longer needed. - __ jmp(clause->body_target()->entry_label()); + if (ShouldInlineSmiCase(Token::EQ_STRICT)) { + Label slow_case; + __ mov(ecx, edx); + __ or_(ecx, Operand(eax)); + __ test(ecx, Immediate(kSmiTagMask)); + __ j(not_zero, &slow_case, not_taken); + __ cmp(edx, Operand(eax)); + __ j(not_equal, &next_test); + __ Drop(1); // Switch value is no longer needed. 
+ __ jmp(clause->body_target()->entry_label()); + __ bind(&slow_case); + } - __ bind(&slow_case); CompareStub stub(equal, true); __ CallStub(&stub); __ test(eax, Operand(eax)); @@ -1203,7 +980,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset)); int literal_offset = - FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; + FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; __ mov(ebx, FieldOperand(ecx, literal_offset)); __ cmp(ebx, Factory::undefined_value()); __ j(not_equal, &materialized); @@ -1326,12 +1103,18 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset)); __ push(Immediate(Smi::FromInt(expr->literal_index()))); __ push(Immediate(expr->constant_elements())); - if (expr->depth() > 1) { + if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) { + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); + __ CallStub(&stub); + __ IncrementCounter(&Counters::cow_arrays_created_stub, 1); + } else if (expr->depth() > 1) { __ CallRuntime(Runtime::kCreateArrayLiteral, 3); - } else if (length > FastCloneShallowArrayStub::kMaximumLength) { + } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); } else { - FastCloneShallowArrayStub stub(length); + FastCloneShallowArrayStub stub( + FastCloneShallowArrayStub::CLONE_ELEMENTS, length); __ CallStub(&stub); } @@ -1385,10 +1168,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY. enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; LhsKind assign_type = VARIABLE; - Property* prop = expr->target()->AsProperty(); - if (prop != NULL) { - assign_type = - (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY; + Property* property = expr->target()->AsProperty(); + if (property != NULL) { + assign_type = (property->key()->IsPropertyName()) + ? NAMED_PROPERTY + : KEYED_PROPERTY; } // Evaluate LHS expression. @@ -1399,57 +1183,70 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { case NAMED_PROPERTY: if (expr->is_compound()) { // We need the receiver both on the stack and in the accumulator. - VisitForValue(prop->obj(), kAccumulator); + VisitForValue(property->obj(), kAccumulator); __ push(result_register()); } else { - VisitForValue(prop->obj(), kStack); + VisitForValue(property->obj(), kStack); } break; case KEYED_PROPERTY: if (expr->is_compound()) { - VisitForValue(prop->obj(), kStack); - VisitForValue(prop->key(), kAccumulator); + VisitForValue(property->obj(), kStack); + VisitForValue(property->key(), kAccumulator); __ mov(edx, Operand(esp, 0)); __ push(eax); } else { - VisitForValue(prop->obj(), kStack); - VisitForValue(prop->key(), kStack); + VisitForValue(property->obj(), kStack); + VisitForValue(property->key(), kStack); } break; } - // If we have a compound assignment: Get value of LHS expression and - // store in on top of the stack. 
if (expr->is_compound()) { Location saved_location = location_; - location_ = kStack; + location_ = kAccumulator; switch (assign_type) { case VARIABLE: EmitVariableLoad(expr->target()->AsVariableProxy()->var(), Expression::kValue); break; case NAMED_PROPERTY: - EmitNamedPropertyLoad(prop); - __ push(result_register()); + EmitNamedPropertyLoad(property); break; case KEYED_PROPERTY: - EmitKeyedPropertyLoad(prop); - __ push(result_register()); + EmitKeyedPropertyLoad(property); break; } - location_ = saved_location; - } - // Evaluate RHS expression. - Expression* rhs = expr->value(); - VisitForValue(rhs, kAccumulator); + Token::Value op = expr->binary_op(); + ConstantOperand constant = ShouldInlineSmiCase(op) + ? GetConstantOperand(op, expr->target(), expr->value()) + : kNoConstants; + ASSERT(constant == kRightConstant || constant == kNoConstants); + if (constant == kNoConstants) { + __ push(eax); // Left operand goes on the stack. + VisitForValue(expr->value(), kAccumulator); + } - // If we have a compound assignment: Apply operator. - if (expr->is_compound()) { - Location saved_location = location_; - location_ = kAccumulator; - EmitBinaryOp(expr->binary_op(), Expression::kValue); + OverwriteMode mode = expr->value()->ResultOverwriteAllowed() + ? OVERWRITE_RIGHT + : NO_OVERWRITE; + SetSourcePosition(expr->position() + 1); + if (ShouldInlineSmiCase(op)) { + EmitInlineSmiBinaryOp(expr, + op, + Expression::kValue, + mode, + expr->target(), + expr->value(), + constant); + } else { + EmitBinaryOp(op, Expression::kValue, mode); + } location_ = saved_location; + + } else { + VisitForValue(expr->value(), kAccumulator); } // Record source position before possible IC call. @@ -1490,14 +1287,325 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { } -void FullCodeGenerator::EmitBinaryOp(Token::Value op, - Expression::Context context) { - __ push(result_register()); - GenericBinaryOpStub stub(op, - NO_OVERWRITE, - NO_GENERIC_BINARY_FLAGS, - TypeInfo::Unknown()); +void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr, + Expression::Context context, + OverwriteMode mode, + bool left_is_constant_smi, + Smi* value) { + Label call_stub, done; + __ add(Operand(eax), Immediate(value)); + __ j(overflow, &call_stub); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &done); + + // Undo the optimistic add operation and call the shared stub. + __ bind(&call_stub); + __ sub(Operand(eax), Immediate(value)); + Token::Value op = Token::ADD; + GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown()); + if (left_is_constant_smi) { + __ push(Immediate(value)); + __ push(eax); + } else { + __ push(eax); + __ push(Immediate(value)); + } __ CallStub(&stub); + __ bind(&done); + Apply(context, eax); +} + + +void FullCodeGenerator::EmitConstantSmiSub(Expression* expr, + Expression::Context context, + OverwriteMode mode, + bool left_is_constant_smi, + Smi* value) { + Label call_stub, done; + if (left_is_constant_smi) { + __ mov(ecx, eax); + __ mov(eax, Immediate(value)); + __ sub(Operand(eax), ecx); + } else { + __ sub(Operand(eax), Immediate(value)); + } + __ j(overflow, &call_stub); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &done); + + __ bind(&call_stub); + if (left_is_constant_smi) { + __ push(Immediate(value)); + __ push(ecx); + } else { + // Undo the optimistic sub operation. 
+ __ add(Operand(eax), Immediate(value)); + + __ push(eax); + __ push(Immediate(value)); + } + + Token::Value op = Token::SUB; + GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown()); + __ CallStub(&stub); + __ bind(&done); + Apply(context, eax); +} + + +void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr, + Token::Value op, + Expression::Context context, + OverwriteMode mode, + Smi* value) { + Label call_stub, smi_case, done; + int shift_value = value->value() & 0x1f; + + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &smi_case); + + __ bind(&call_stub); + GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown()); + __ push(eax); + __ push(Immediate(value)); + __ CallStub(&stub); + __ jmp(&done); + + __ bind(&smi_case); + switch (op) { + case Token::SHL: + if (shift_value != 0) { + __ mov(edx, eax); + if (shift_value > 1) { + __ shl(edx, shift_value - 1); + } + // Convert int result to smi, checking that it is in int range. + ASSERT(kSmiTagSize == 1); // Adjust code if not the case. + __ add(edx, Operand(edx)); + __ j(overflow, &call_stub); + __ mov(eax, edx); // Put result back into eax. + } + break; + case Token::SAR: + if (shift_value != 0) { + __ sar(eax, shift_value); + __ and_(eax, ~kSmiTagMask); + } + break; + case Token::SHR: + if (shift_value < 2) { + __ mov(edx, eax); + __ SmiUntag(edx); + __ shr(edx, shift_value); + __ test(edx, Immediate(0xc0000000)); + __ j(not_zero, &call_stub); + __ SmiTag(edx); + __ mov(eax, edx); // Put result back into eax. + } else { + __ SmiUntag(eax); + __ shr(eax, shift_value); + __ SmiTag(eax); + } + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + Apply(context, eax); +} + + +void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr, + Token::Value op, + Expression::Context context, + OverwriteMode mode, + Smi* value) { + Label smi_case, done; + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &smi_case); + + GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown()); + // The order of the arguments does not matter for bit-ops with a + // constant operand. 
+ __ push(Immediate(value)); + __ push(eax); + __ CallStub(&stub); + __ jmp(&done); + + __ bind(&smi_case); + switch (op) { + case Token::BIT_OR: + __ or_(Operand(eax), Immediate(value)); + break; + case Token::BIT_XOR: + __ xor_(Operand(eax), Immediate(value)); + break; + case Token::BIT_AND: + __ and_(Operand(eax), Immediate(value)); + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + Apply(context, eax); +} + + +void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr, + Token::Value op, + Expression::Context context, + OverwriteMode mode, + bool left_is_constant_smi, + Smi* value) { + switch (op) { + case Token::BIT_OR: + case Token::BIT_XOR: + case Token::BIT_AND: + EmitConstantSmiBitOp(expr, op, context, mode, value); + break; + case Token::SHL: + case Token::SAR: + case Token::SHR: + ASSERT(!left_is_constant_smi); + EmitConstantSmiShiftOp(expr, op, context, mode, value); + break; + case Token::ADD: + EmitConstantSmiAdd(expr, context, mode, left_is_constant_smi, value); + break; + case Token::SUB: + EmitConstantSmiSub(expr, context, mode, left_is_constant_smi, value); + break; + default: + UNREACHABLE(); + } +} + + +void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr, + Token::Value op, + Expression::Context context, + OverwriteMode mode, + Expression* left, + Expression* right, + ConstantOperand constant) { + if (constant == kRightConstant) { + Smi* value = Smi::cast(*right->AsLiteral()->handle()); + EmitConstantSmiBinaryOp(expr, op, context, mode, false, value); + return; + } else if (constant == kLeftConstant) { + Smi* value = Smi::cast(*left->AsLiteral()->handle()); + EmitConstantSmiBinaryOp(expr, op, context, mode, true, value); + return; + } + + // Do combined smi check of the operands. Left operand is on the + // stack. Right operand is in eax. + Label done, stub_call, smi_case; + __ pop(edx); + __ mov(ecx, eax); + __ or_(eax, Operand(edx)); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &smi_case); + + __ bind(&stub_call); + GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown()); + if (stub.ArgsInRegistersSupported()) { + stub.GenerateCall(masm_, edx, ecx); + } else { + __ push(edx); + __ push(ecx); + __ CallStub(&stub); + } + __ jmp(&done); + + __ bind(&smi_case); + __ mov(eax, edx); // Copy left operand in case of a stub call. + + switch (op) { + case Token::SAR: + __ SmiUntag(eax); + __ SmiUntag(ecx); + __ sar_cl(eax); // No checks of result necessary + __ SmiTag(eax); + break; + case Token::SHL: { + Label result_ok; + __ SmiUntag(eax); + __ SmiUntag(ecx); + __ shl_cl(eax); + // Check that the *signed* result fits in a smi. 
+ __ cmp(eax, 0xc0000000); + __ j(positive, &result_ok); + __ SmiTag(ecx); + __ jmp(&stub_call); + __ bind(&result_ok); + __ SmiTag(eax); + break; + } + case Token::SHR: { + Label result_ok; + __ SmiUntag(eax); + __ SmiUntag(ecx); + __ shr_cl(eax); + __ test(eax, Immediate(0xc0000000)); + __ j(zero, &result_ok); + __ SmiTag(ecx); + __ jmp(&stub_call); + __ bind(&result_ok); + __ SmiTag(eax); + break; + } + case Token::ADD: + __ add(eax, Operand(ecx)); + __ j(overflow, &stub_call); + break; + case Token::SUB: + __ sub(eax, Operand(ecx)); + __ j(overflow, &stub_call); + break; + case Token::MUL: { + __ SmiUntag(eax); + __ imul(eax, Operand(ecx)); + __ j(overflow, &stub_call); + __ test(eax, Operand(eax)); + __ j(not_zero, &done, taken); + __ mov(ebx, edx); + __ or_(ebx, Operand(ecx)); + __ j(negative, &stub_call); + break; + } + case Token::BIT_OR: + __ or_(eax, Operand(ecx)); + break; + case Token::BIT_AND: + __ and_(eax, Operand(ecx)); + break; + case Token::BIT_XOR: + __ xor_(eax, Operand(ecx)); + break; + default: + UNREACHABLE(); + } + + __ bind(&done); + Apply(context, eax); +} + + +void FullCodeGenerator::EmitBinaryOp(Token::Value op, + Expression::Context context, + OverwriteMode mode) { + TypeInfo type = TypeInfo::Unknown(); + GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS, type); + if (stub.ArgsInRegistersSupported()) { + __ pop(edx); + stub.GenerateCall(masm_, edx, eax); + } else { + __ push(result_register()); + __ CallStub(&stub); + } Apply(context, eax); } @@ -1914,11 +2022,11 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { // According to ECMA-262, section 11.2.2, page 44, the function // expression in new calls must be evaluated before the // arguments. - // Push function on the stack. - VisitForValue(expr->expression(), kStack); - // Push global object (receiver). - __ push(CodeGenerator::GlobalObject()); + // Push constructor on the stack. If it's not a function it's used as + // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is + // ignored. + VisitForValue(expr->expression(), kStack); // Push the arguments ("left-to-right") on the stack. ZoneList* args = expr->arguments(); @@ -1931,16 +2039,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { // constructor invocation. SetSourcePosition(expr->position()); - // Load function, arg_count into edi and eax. + // Load function and argument count into edi and eax. __ Set(eax, Immediate(arg_count)); - // Function is in esp[arg_count + 1]. - __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize)); + __ mov(edi, Operand(esp, arg_count * kPointerSize)); Handle construct_builtin(Builtins::builtin(Builtins::JSConstructCall)); __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL); - - // Replace function on TOS with result in eax, or pop it. 
- DropAndApply(1, context_, eax); + Apply(context_, eax); } @@ -1952,11 +2057,12 @@ void FullCodeGenerator::EmitIsSmi(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, if_true); - __ jmp(if_false); + Split(zero, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -1970,11 +2076,12 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ test(eax, Immediate(kSmiTagMask | 0x80000000)); - __ j(zero, if_true); - __ jmp(if_false); + Split(zero, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -1988,7 +2095,9 @@ void FullCodeGenerator::EmitIsObject(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ test(eax, Immediate(kSmiTagMask)); __ j(zero, if_false); @@ -2003,8 +2112,7 @@ void FullCodeGenerator::EmitIsObject(ZoneList* args) { __ cmp(ecx, FIRST_JS_OBJECT_TYPE); __ j(below, if_false); __ cmp(ecx, LAST_JS_OBJECT_TYPE); - __ j(below_equal, if_true); - __ jmp(if_false); + Split(below_equal, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2018,13 +2126,14 @@ void FullCodeGenerator::EmitIsSpecObject(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ test(eax, Immediate(kSmiTagMask)); __ j(equal, if_false); __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx); - __ j(above_equal, if_true); - __ jmp(if_false); + Split(above_equal, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2038,15 +2147,16 @@ void FullCodeGenerator::EmitIsUndetectableObject(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ test(eax, Immediate(kSmiTagMask)); __ j(zero, if_false); __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset)); __ test(ebx, Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, if_true); - __ jmp(if_false); + Split(not_zero, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2061,7 +2171,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + 
PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only // used in a few functions in runtime.js which should not normally be hit by @@ -2079,13 +2191,14 @@ void FullCodeGenerator::EmitIsFunction(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ test(eax, Immediate(kSmiTagMask)); __ j(zero, if_false); __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); - __ j(equal, if_true); - __ jmp(if_false); + Split(equal, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2099,13 +2212,14 @@ void FullCodeGenerator::EmitIsArray(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ test(eax, Immediate(kSmiTagMask)); __ j(equal, if_false); __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); - __ j(equal, if_true); - __ jmp(if_false); + Split(equal, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2119,13 +2233,14 @@ void FullCodeGenerator::EmitIsRegExp(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ test(eax, Immediate(kSmiTagMask)); __ j(equal, if_false); __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx); - __ j(equal, if_true); - __ jmp(if_false); + Split(equal, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2138,7 +2253,9 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); // Get the frame pointer for the calling frame. 
__ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset)); @@ -2154,8 +2271,7 @@ void FullCodeGenerator::EmitIsConstructCall(ZoneList* args) { __ bind(&check_frame_marker); __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset), Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); - __ j(equal, if_true); - __ jmp(if_false); + Split(equal, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2171,12 +2287,13 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList* args) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); __ pop(ebx); __ cmp(eax, Operand(ebx)); - __ j(equal, if_true); - __ jmp(if_false); + Split(equal, if_true, if_false, fall_through); Apply(context_, if_true, if_false); } @@ -2731,6 +2848,46 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList* args) { } +void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + if (FLAG_debug_code) { + __ AbortIfNotString(eax); + } + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ test(FieldOperand(eax, String::kHashFieldOffset), + Immediate(String::kContainsCachedArrayIndexMask)); + Split(zero, if_true, if_false, fall_through); + + Apply(context_, if_true, if_false); +} + + +void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList* args) { + ASSERT(args->length() == 1); + + VisitForValue(args->at(0), kAccumulator); + + if (FLAG_debug_code) { + __ AbortIfNotString(eax); + } + + __ mov(eax, FieldOperand(eax, String::kHashFieldOffset)); + __ IndexFromHash(eax, eax); + + Apply(context_, eax); +} + + void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { Handle name = expr->name(); if (name->length() > 0 && name->Get(0) == '_') { @@ -2831,19 +2988,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { break; } break; - case Expression::kTestValue: - // Value is false so it's needed. - switch (location_) { - case kAccumulator: - __ mov(result_register(), Factory::undefined_value()); - break; - case kStack: - __ push(Immediate(Factory::undefined_value())); - break; - } - // Fall through. case Expression::kTest: - case Expression::kValueTest: __ jmp(false_label_); break; } @@ -2852,45 +2997,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); + Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - + Label* fall_through = NULL; // Notice that the labels are swapped. - PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true); - - VisitForControl(expr->expression(), if_true, if_false); - + PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + VisitForControl(expr->expression(), if_true, if_false, fall_through); Apply(context_, if_false, if_true); // Labels swapped. 
break; } case Token::TYPEOF: { Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)"); - VariableProxy* proxy = expr->expression()->AsVariableProxy(); - if (proxy != NULL && - !proxy->var()->is_this() && - proxy->var()->is_global()) { - Comment cmnt(masm_, "Global variable"); - __ mov(eax, CodeGenerator::GlobalObject()); - __ mov(ecx, Immediate(proxy->name())); - Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); - // Use a regular load, not a contextual load, to avoid a reference - // error. - __ call(ic, RelocInfo::CODE_TARGET); - __ push(eax); - } else if (proxy != NULL && - proxy->var()->slot() != NULL && - proxy->var()->slot()->type() == Slot::LOOKUP) { - __ push(esi); - __ push(Immediate(proxy->name())); - __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); - __ push(eax); - } else { - // This expression cannot throw a reference error at the top level. - VisitForValue(expr->expression(), kStack); - } - + VisitForTypeofValue(expr->expression(), kStack); __ CallRuntime(Runtime::kTypeof, 1); Apply(context_, eax); break; @@ -2911,9 +3033,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::SUB: { Comment cmt(masm_, "[ UnaryOperation (SUB)"); - bool can_overwrite = - (expr->expression()->AsBinaryOperation() != NULL && - expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); + bool can_overwrite = expr->expression()->ResultOverwriteAllowed(); UnaryOverwriteMode overwrite = can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; GenericUnaryOpStub stub(Token::SUB, overwrite); @@ -2927,28 +3047,26 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::BIT_NOT: { Comment cmt(masm_, "[ UnaryOperation (BIT_NOT)"); - bool can_overwrite = - (expr->expression()->AsBinaryOperation() != NULL && - expr->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); - UnaryOverwriteMode overwrite = - can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; - GenericUnaryOpStub stub(Token::BIT_NOT, overwrite); - // GenericUnaryOpStub expects the argument to be in the - // accumulator register eax. + // The generic unary operation stub expects the argument to be + // in the accumulator register eax. VisitForValue(expr->expression(), kAccumulator); - // Avoid calling the stub for Smis. - Label smi, done; - __ test(result_register(), Immediate(kSmiTagMask)); - __ j(zero, &smi); - // Non-smi: call stub leaving result in accumulator register. + Label done; + if (ShouldInlineSmiCase(expr->op())) { + Label call_stub; + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &call_stub); + __ lea(eax, Operand(eax, kSmiTagMask)); + __ not_(eax); + __ jmp(&done); + __ bind(&call_stub); + } + bool overwrite = expr->expression()->ResultOverwriteAllowed(); + UnaryOverwriteMode mode = + overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; + GenericUnaryOpStub stub(Token::BIT_NOT, mode); __ CallStub(&stub); - __ jmp(&done); - // Perform operation directly on Smis. - __ bind(&smi); - __ not_(result_register()); - __ and_(result_register(), ~kSmiTagMask); // Remove inverted smi-tag. __ bind(&done); - Apply(context_, result_register()); + Apply(context_, eax); break; } @@ -2960,6 +3078,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { Comment cmnt(masm_, "[ CountOperation"); + SetSourcePosition(expr->position()); + // Invalid left-hand sides are rewritten to have a 'throw ReferenceError' // as the left-hand side. 
if (!expr->expression()->IsValidLeftHandSide()) { @@ -3008,8 +3128,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Call ToNumber only if operand is not a smi. Label no_conversion; - __ test(eax, Immediate(kSmiTagMask)); - __ j(zero, &no_conversion); + if (ShouldInlineSmiCase(expr->op())) { + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &no_conversion); + } __ push(eax); __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); __ bind(&no_conversion); @@ -3024,8 +3146,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { break; case Expression::kValue: case Expression::kTest: - case Expression::kValueTest: - case Expression::kTestValue: // Save the result on the stack. If we have a named or keyed property // we store the result under the receiver that is currently on top // of the stack. @@ -3046,7 +3166,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Inline smi case if we are in a loop. Label stub_call, done; - if (loop_depth() > 0) { + if (ShouldInlineSmiCase(expr->op())) { if (expr->op() == Token::INC) { __ add(Operand(eax), Immediate(Smi::FromInt(1))); } else { @@ -3132,68 +3252,117 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { } -void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { - Comment cmnt(masm_, "[ BinaryOperation"); - switch (expr->op()) { - case Token::COMMA: - VisitForEffect(expr->left()); - Visit(expr->right()); - break; - - case Token::OR: - case Token::AND: - EmitLogicalOperation(expr); - break; - - case Token::ADD: - case Token::SUB: - case Token::DIV: - case Token::MOD: - case Token::MUL: - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SHL: - case Token::SHR: - case Token::SAR: - VisitForValue(expr->left(), kStack); - VisitForValue(expr->right(), kAccumulator); - EmitBinaryOp(expr->op(), context_); - break; - - default: - UNREACHABLE(); +void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) { + VariableProxy* proxy = expr->AsVariableProxy(); + if (proxy != NULL && !proxy->var()->is_this() && proxy->var()->is_global()) { + Comment cmnt(masm_, "Global variable"); + __ mov(eax, CodeGenerator::GlobalObject()); + __ mov(ecx, Immediate(proxy->name())); + Handle ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + // Use a regular load, not a contextual load, to avoid a reference + // error. + __ call(ic, RelocInfo::CODE_TARGET); + if (where == kStack) __ push(eax); + } else if (proxy != NULL && + proxy->var()->slot() != NULL && + proxy->var()->slot()->type() == Slot::LOOKUP) { + __ push(esi); + __ push(Immediate(proxy->name())); + __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + if (where == kStack) __ push(eax); + } else { + // This expression cannot throw a reference error at the top level. + VisitForValue(expr, where); } } -void FullCodeGenerator::EmitNullCompare(bool strict, - Register obj, - Register null_const, - Label* if_true, - Label* if_false, - Register scratch) { - __ cmp(obj, Operand(null_const)); - if (strict) { +bool FullCodeGenerator::TryLiteralCompare(Token::Value op, + Expression* left, + Expression* right, + Label* if_true, + Label* if_false, + Label* fall_through) { + if (op != Token::EQ && op != Token::EQ_STRICT) return false; + + // Check for the pattern: typeof == . 
+ Literal* right_literal = right->AsLiteral(); + if (right_literal == NULL) return false; + Handle right_literal_value = right_literal->handle(); + if (!right_literal_value->IsString()) return false; + UnaryOperation* left_unary = left->AsUnaryOperation(); + if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false; + Handle check = Handle::cast(right_literal_value); + + VisitForTypeofValue(left_unary->expression(), kAccumulator); + if (check->Equals(Heap::number_symbol())) { + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, if_true); + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + Factory::heap_number_map()); + Split(equal, if_true, if_false, fall_through); + } else if (check->Equals(Heap::string_symbol())) { + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, if_false); + // Check for undetectable objects => false. + __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset)); + __ test(ecx, Immediate(1 << Map::kIsUndetectable)); + __ j(not_zero, if_false); + __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE); + Split(below, if_true, if_false, fall_through); + } else if (check->Equals(Heap::boolean_symbol())) { + __ cmp(eax, Factory::true_value()); __ j(equal, if_true); - } else { + __ cmp(eax, Factory::false_value()); + Split(equal, if_true, if_false, fall_through); + } else if (check->Equals(Heap::undefined_symbol())) { + __ cmp(eax, Factory::undefined_value()); __ j(equal, if_true); - __ cmp(obj, Factory::undefined_value()); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, if_false); + // Check for undetectable objects => true. + __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset)); + __ test(ecx, Immediate(1 << Map::kIsUndetectable)); + Split(not_zero, if_true, if_false, fall_through); + } else if (check->Equals(Heap::function_symbol())) { + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, if_false); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx); __ j(equal, if_true); - __ test(obj, Immediate(kSmiTagMask)); + // Regular expressions => 'function' (they are callable). + __ CmpInstanceType(edx, JS_REGEXP_TYPE); + Split(equal, if_true, if_false, fall_through); + } else if (check->Equals(Heap::object_symbol())) { + __ test(eax, Immediate(kSmiTagMask)); __ j(zero, if_false); - // It can be an undetectable object. - __ mov(scratch, FieldOperand(obj, HeapObject::kMapOffset)); - __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset)); - __ test(scratch, Immediate(1 << Map::kIsUndetectable)); - __ j(not_zero, if_true); + __ cmp(eax, Factory::null_value()); + __ j(equal, if_true); + // Regular expressions => 'function', not 'object'. + __ CmpObjectType(eax, JS_REGEXP_TYPE, edx); + __ j(equal, if_false); + // Check for undetectable objects => false. + __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset)); + __ test(ecx, Immediate(1 << Map::kIsUndetectable)); + __ j(not_zero, if_false); + // Check for JS objects => true. + __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset)); + __ cmp(ecx, FIRST_JS_OBJECT_TYPE); + __ j(less, if_false); + __ cmp(ecx, LAST_JS_OBJECT_TYPE); + Split(less_equal, if_true, if_false, fall_through); + } else { + if (if_false != fall_through) __ jmp(if_false); } - __ jmp(if_false); + + return true; } void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Comment cmnt(masm_, "[ CompareOperation"); + SetSourcePosition(expr->position()); // Always perform the comparison for its control flow. 
Pack the result // into the expression's context after the comparison is performed. @@ -3201,7 +3370,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { Label materialize_true, materialize_false; Label* if_true = NULL; Label* if_false = NULL; - PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false); + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + // First we try a fast inlined version of the compare when one of + // the operands is a literal. + Token::Value op = expr->op(); + Expression* left = expr->left(); + Expression* right = expr->right(); + if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) { + Apply(context_, if_true, if_false); + return; + } VisitForValue(expr->left(), kStack); switch (expr->op()) { @@ -3209,8 +3390,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { VisitForValue(expr->right(), kStack); __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); __ cmp(eax, Factory::true_value()); - __ j(equal, if_true); - __ jmp(if_false); + Split(equal, if_true, if_false, fall_through); break; case Token::INSTANCEOF: { @@ -3218,8 +3398,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { InstanceofStub stub; __ CallStub(&stub); __ test(eax, Operand(eax)); - __ j(zero, if_true); // The stub returns 0 for true. - __ jmp(if_false); + // The stub returns 0 for true. + Split(zero, if_true, if_false, fall_through); break; } @@ -3227,28 +3407,14 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { VisitForValue(expr->right(), kAccumulator); Condition cc = no_condition; bool strict = false; - switch (expr->op()) { + switch (op) { case Token::EQ_STRICT: strict = true; // Fall through - case Token::EQ: { + case Token::EQ: cc = equal; __ pop(edx); - // If either operand is constant null we do a fast compare - // against null. - Literal* right_literal = expr->right()->AsLiteral(); - Literal* left_literal = expr->left()->AsLiteral(); - if (right_literal != NULL && right_literal->handle()->IsNull()) { - EmitNullCompare(strict, edx, eax, if_true, if_false, ecx); - Apply(context_, if_true, if_false); - return; - } else if (left_literal != NULL && left_literal->handle()->IsNull()) { - EmitNullCompare(strict, eax, edx, if_true, if_false, ecx); - Apply(context_, if_true, if_false); - return; - } break; - } case Token::LT: cc = less; __ pop(edx); @@ -3275,23 +3441,21 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { UNREACHABLE(); } - // The comparison stub expects the smi vs. smi case to be handled - // before it is called. 
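The inlined fast case that follows tests both operands with a single OR: a smi
carries a zero tag bit, so the OR of two values has a zero tag bit exactly when both
values are smis. A small stand-alone sketch of that check (kSmiTagMask is the real
constant; the one-bit, zero-valued smi tag shown is the layout the code relies on):

  #include <cassert>
  #include <stdint.h>

  const uint32_t kSmiTagMask = 1;  // the low bit is the tag; smis use tag 0

  // Mirrors `or_(ecx, eax); test(ecx, Immediate(kSmiTagMask))` in the
  // generated code: non-zero means at least one operand is a heap object.
  bool BothSmis(uint32_t a, uint32_t b) {
    return ((a | b) & kSmiTagMask) == 0;
  }

  int main() {
    uint32_t smi_3 = 3u << 1;        // smi-tagged 3
    uint32_t smi_7 = 7u << 1;        // smi-tagged 7
    uint32_t heap_object = 0x1235;   // heap pointers carry a tag bit of 1
    assert(BothSmis(smi_3, smi_7));
    assert(!BothSmis(smi_3, heap_object));
    return 0;
  }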
- Label slow_case; - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); - __ test(ecx, Immediate(kSmiTagMask)); - __ j(not_zero, &slow_case, not_taken); - __ cmp(edx, Operand(eax)); - __ j(cc, if_true); - __ jmp(if_false); + if (ShouldInlineSmiCase(op)) { + Label slow_case; + __ mov(ecx, Operand(edx)); + __ or_(ecx, Operand(eax)); + __ test(ecx, Immediate(kSmiTagMask)); + __ j(not_zero, &slow_case, not_taken); + __ cmp(edx, Operand(eax)); + Split(cc, if_true, if_false, NULL); + __ bind(&slow_case); + } - __ bind(&slow_case); CompareStub stub(cc, strict); __ CallStub(&stub); __ test(eax, Operand(eax)); - __ j(cc, if_true); - __ jmp(if_false); + Split(cc, if_true, if_false, fall_through); } } @@ -3301,6 +3465,34 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } +void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + VisitForValue(expr->expression(), kAccumulator); + __ cmp(eax, Factory::null_value()); + if (expr->is_strict()) { + Split(equal, if_true, if_false, fall_through); + } else { + __ j(equal, if_true); + __ cmp(eax, Factory::undefined_value()); + __ j(equal, if_true); + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, if_false); + // It can be an undetectable object. + __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); + __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset)); + __ test(edx, Immediate(1 << Map::kIsUndetectable)); + Split(not_zero, if_true, if_false, fall_through); + } + Apply(context_, if_true, if_false); +} + + void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) { __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); Apply(context_, eax); diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index 2cd41a15bb..3d0bd796a0 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -452,6 +452,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, // Loads an indexed element from a fast case array. +// If not_fast_array is NULL, doesn't perform the elements map check. static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, Register key, @@ -468,8 +469,12 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // we fall through. __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset)); - // Check that the object is in fast mode (not dictionary). - __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true); + if (not_fast_array != NULL) { + // Check that the object is in fast mode and writable. + __ CheckMap(scratch, Factory::fixed_array_map(), not_fast_array, true); + } else { + __ AssertFastElements(scratch); + } // Check that the key (index) is within bounds. __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset)); __ j(above_equal, out_of_range); @@ -514,31 +519,6 @@ static void GenerateKeyStringCheck(MacroAssembler* masm, } -// Picks out an array index from the hash field. -static void GenerateIndexFromHash(MacroAssembler* masm, - Register key, - Register hash) { - // Register use: - // key - holds the overwritten key on exit. - // hash - holds the key's hash. Clobbered. - - // The assert checks that the constants for the maximum number of digits - // for an array index cached in the hash field and the number of bits - // reserved for it does not conflict. 
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < - (1 << String::kArrayIndexValueBits)); - // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in - // the low kHashShift bits. - ASSERT(String::kHashShift >= kSmiTagSize); - __ and_(hash, String::kArrayIndexValueMask); - __ shr(hash, String::kHashShift - kSmiTagSize); - // Here we actually clobber the key which will be used if calling into - // runtime later. However as the new key is the numeric value of a string key - // there is no difference in using either key. - __ mov(key, hash); -} - - void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- eax : key @@ -558,12 +538,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { GenerateKeyedLoadReceiverCheck( masm, edx, ecx, Map::kHasIndexedInterceptor, &slow); + // Check the "has fast elements" bit in the receiver's map which is + // now in ecx. + __ test_b(FieldOperand(ecx, Map::kBitField2Offset), + 1 << Map::kHasFastElements); + __ j(zero, &check_pixel_array, not_taken); + GenerateFastArrayLoad(masm, edx, eax, ecx, eax, - &check_pixel_array, + NULL, &slow); __ IncrementCounter(&Counters::keyed_load_generic_smi, 1); __ ret(0); @@ -572,7 +558,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Check whether the elements is a pixel array. // edx: receiver // eax: key - // ecx: elements + __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset)); __ mov(ebx, eax); __ SmiUntag(ebx); __ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true); @@ -693,7 +679,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ ret(0); __ bind(&index_string); - GenerateIndexFromHash(masm, eax, ebx); + __ IndexFromHash(ebx, eax); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); } @@ -967,7 +953,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // edx: JSObject // ecx: key (a smi) __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - // Check that the object is in fast mode (not dictionary). + // Check that the object is in fast mode and writable. __ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true); __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); __ j(below, &fast, taken); @@ -1023,8 +1009,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ jmp(&fast); // Array case: Get the length and the elements array from the JS - // array. Check that the array is in fast mode; if it is the - // length is always a smi. + // array. Check that the array is in fast mode (and writable); if it + // is the length is always a smi. __ bind(&array); // eax: value // edx: receiver, a JSArray @@ -1554,7 +1540,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { GenerateMiss(masm, argc); __ bind(&index_string); - GenerateIndexFromHash(masm, ecx, ebx); + __ IndexFromHash(ebx, ecx); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); } @@ -1872,6 +1858,8 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) { __ j(not_equal, &miss, not_taken); // Check that elements are FixedArray. + // We rely on StoreIC_ArrayLength below to deal with all types of + // fast elements (including COW). 
__ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset)); __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch); __ j(not_equal, &miss, not_taken); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 24538461ff..87e25d73db 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -191,81 +191,6 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) { #ifdef ENABLE_DEBUGGER_SUPPORT -void MacroAssembler::SaveRegistersToMemory(RegList regs) { - ASSERT((regs & ~kJSCallerSaved) == 0); - // Copy the content of registers to memory location. - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - if ((regs & (1 << r)) != 0) { - Register reg = { r }; - ExternalReference reg_addr = - ExternalReference(Debug_Address::Register(i)); - mov(Operand::StaticVariable(reg_addr), reg); - } - } -} - - -void MacroAssembler::RestoreRegistersFromMemory(RegList regs) { - ASSERT((regs & ~kJSCallerSaved) == 0); - // Copy the content of memory location to registers. - for (int i = kNumJSCallerSaved; --i >= 0;) { - int r = JSCallerSavedCode(i); - if ((regs & (1 << r)) != 0) { - Register reg = { r }; - ExternalReference reg_addr = - ExternalReference(Debug_Address::Register(i)); - mov(reg, Operand::StaticVariable(reg_addr)); - } - } -} - - -void MacroAssembler::PushRegistersFromMemory(RegList regs) { - ASSERT((regs & ~kJSCallerSaved) == 0); - // Push the content of the memory location to the stack. - for (int i = 0; i < kNumJSCallerSaved; i++) { - int r = JSCallerSavedCode(i); - if ((regs & (1 << r)) != 0) { - ExternalReference reg_addr = - ExternalReference(Debug_Address::Register(i)); - push(Operand::StaticVariable(reg_addr)); - } - } -} - - -void MacroAssembler::PopRegistersToMemory(RegList regs) { - ASSERT((regs & ~kJSCallerSaved) == 0); - // Pop the content from the stack to the memory location. - for (int i = kNumJSCallerSaved; --i >= 0;) { - int r = JSCallerSavedCode(i); - if ((regs & (1 << r)) != 0) { - ExternalReference reg_addr = - ExternalReference(Debug_Address::Register(i)); - pop(Operand::StaticVariable(reg_addr)); - } - } -} - - -void MacroAssembler::CopyRegistersFromStackToMemory(Register base, - Register scratch, - RegList regs) { - ASSERT((regs & ~kJSCallerSaved) == 0); - // Copy the content of the stack to the memory location and adjust base. 
- for (int i = kNumJSCallerSaved; --i >= 0;) { - int r = JSCallerSavedCode(i); - if ((regs & (1 << r)) != 0) { - mov(scratch, Operand(base, 0)); - ExternalReference reg_addr = - ExternalReference(Debug_Address::Register(i)); - mov(Operand::StaticVariable(reg_addr), scratch); - lea(base, Operand(base, kPointerSize)); - } - } -} - void MacroAssembler::DebugBreak() { Set(eax, Immediate(0)); mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak))); @@ -274,6 +199,7 @@ void MacroAssembler::DebugBreak() { } #endif + void MacroAssembler::Set(Register dst, const Immediate& x) { if (x.is_zero()) { xor_(dst, Operand(dst)); // shorter than mov @@ -377,6 +303,17 @@ void MacroAssembler::AbortIfNotSmi(Register object) { } +void MacroAssembler::AbortIfNotString(Register object) { + test(object, Immediate(kSmiTagMask)); + Assert(not_equal, "Operand is not a string"); + push(object); + mov(object, FieldOperand(object, HeapObject::kMapOffset)); + CmpInstanceType(object, FIRST_NONSTRING_TYPE); + pop(object); + Assert(below, "Operand is not a string"); +} + + void MacroAssembler::AbortIfSmi(Register object) { test(object, Immediate(kSmiTagMask)); Assert(not_equal, "Operand is a smi"); @@ -405,7 +342,8 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { leave(); } -void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) { + +void MacroAssembler::EnterExitFramePrologue() { // Setup the frame structure on the stack. ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); @@ -413,7 +351,7 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) { push(ebp); mov(ebp, Operand(esp)); - // Reserve room for entry stack pointer and push the debug marker. + // Reserve room for entry stack pointer and push the code object. ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); push(Immediate(0)); // Saved entry sp, patched before call. push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot. @@ -425,21 +363,8 @@ void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) { mov(Operand::StaticVariable(context_address), esi); } -void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) { -#ifdef ENABLE_DEBUGGER_SUPPORT - // Save the state of all registers to the stack from the memory - // location. This is needed to allow nested break points. - if (mode == ExitFrame::MODE_DEBUG) { - // TODO(1243899): This should be symmetric to - // CopyRegistersFromStackToMemory() but it isn't! esp is assumed - // correct here, but computed for the other call. Very error - // prone! FIX THIS. Actually there are deeper problems with - // register saving than this asymmetry (see the bug report - // associated with this issue). - PushRegistersFromMemory(kJSCallerSaved); - } -#endif +void MacroAssembler::EnterExitFrameEpilogue(int argc) { // Reserve space for arguments. sub(Operand(esp), Immediate(argc * kPointerSize)); @@ -455,44 +380,30 @@ void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) { } -void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) { - EnterExitFramePrologue(mode); +void MacroAssembler::EnterExitFrame() { + EnterExitFramePrologue(); // Setup argc and argv in callee-saved registers. 
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; mov(edi, Operand(eax)); lea(esi, Operand(ebp, eax, times_4, offset)); - EnterExitFrameEpilogue(mode, 2); + EnterExitFrameEpilogue(2); } -void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode, - int stack_space, +void MacroAssembler::EnterApiExitFrame(int stack_space, int argc) { - EnterExitFramePrologue(mode); + EnterExitFramePrologue(); int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset)); - EnterExitFrameEpilogue(mode, argc); + EnterExitFrameEpilogue(argc); } -void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { -#ifdef ENABLE_DEBUGGER_SUPPORT - // Restore the memory copy of the registers by digging them out from - // the stack. This is needed to allow nested break points. - if (mode == ExitFrame::MODE_DEBUG) { - // It's okay to clobber register ebx below because we don't need - // the function pointer after this. - const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize; - int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize; - lea(ebx, Operand(ebp, kOffset)); - CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved); - } -#endif - +void MacroAssembler::LeaveExitFrame() { // Get the return address from the stack and restore the frame pointer. mov(ecx, Operand(ebp, 1 * kPointerSize)); mov(ebp, Operand(ebp, 0 * kPointerSize)); @@ -871,6 +782,31 @@ void MacroAssembler::AllocateAsciiString(Register result, } +void MacroAssembler::AllocateAsciiString(Register result, + int length, + Register scratch1, + Register scratch2, + Label* gc_required) { + ASSERT(length > 0); + + // Allocate ascii string in new space. + AllocateInNewSpace(SeqAsciiString::SizeFor(length), + result, + scratch1, + scratch2, + gc_required, + TAG_OBJECT); + + // Set the map, length and hash field. + mov(FieldOperand(result, HeapObject::kMapOffset), + Immediate(Factory::ascii_string_map())); + mov(FieldOperand(result, String::kLengthOffset), + Immediate(Smi::FromInt(length))); + mov(FieldOperand(result, String::kHashFieldOffset), + Immediate(String::kEmptyHashField)); +} + + void MacroAssembler::AllocateConsString(Register result, Register scratch1, Register scratch2, @@ -1040,6 +976,25 @@ void MacroAssembler::IllegalOperation(int num_arguments) { } +void MacroAssembler::IndexFromHash(Register hash, Register index) { + // The assert checks that the constants for the maximum number of digits + // for an array index cached in the hash field and the number of bits + // reserved for it does not conflict. + ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + (1 << String::kArrayIndexValueBits)); + // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in + // the low kHashShift bits. 
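A stand-alone illustration of the mask-and-shift that follows in IndexFromHash,
using illustrative stand-ins for the hash-field layout constants (the real
kHashShift, kArrayIndexValueBits and kSmiTagSize values may differ):

  #include <stdint.h>
  #include <stdio.h>

  int main() {
    // Illustrative stand-ins for the String hash-field layout constants.
    const uint32_t kSmiTagSize = 1;            // one zero tag bit for smis
    const uint32_t kHashShift = 2;             // hash flags live below the index
    const uint32_t kArrayIndexValueBits = 24;  // width of the cached index
    const uint32_t kArrayIndexValueMask =
        ((1u << kArrayIndexValueBits) - 1) << kHashShift;

    uint32_t index = 42;
    uint32_t hash_field = index << kHashShift;  // index cached above the flag bits

    // Mask, then shift so that exactly kSmiTagSize zero bits remain below the
    // value: the result is already the smi-tagged index, no separate tagging.
    uint32_t smi = (hash_field & kArrayIndexValueMask) >> (kHashShift - kSmiTagSize);
    printf("smi-tagged %u -> index %u\n", smi, smi >> kSmiTagSize);  // 84 -> 42
    return 0;
  }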
+ and_(hash, String::kArrayIndexValueMask); + STATIC_ASSERT(String::kHashShift >= kSmiTagSize && kSmiTag == 0); + if (String::kHashShift > kSmiTagSize) { + shr(hash, String::kHashShift - kSmiTagSize); + } + if (!index.is(hash)) { + mov(index, hash); + } +} + + void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) { CallRuntime(Runtime::FunctionForId(id), num_arguments); } @@ -1298,11 +1253,10 @@ void MacroAssembler::InvokeFunction(Register fun, mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); SmiUntag(ebx); - mov(edx, FieldOperand(edi, JSFunction::kCodeOffset)); - lea(edx, FieldOperand(edx, Code::kHeaderSize)); ParameterCount expected(ebx); - InvokeCode(Operand(edx), expected, actual, flag); + InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), + expected, actual, flag); } @@ -1313,7 +1267,6 @@ void MacroAssembler::InvokeFunction(JSFunction* function, // Get the function and setup the context. mov(edi, Immediate(Handle(function))); mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); - // Invoke the cached code. Handle code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); @@ -1329,33 +1282,26 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) { // arguments match the expected number of arguments. Fake a // parameter count to avoid emitting code to do the check. ParameterCount expected(0); - GetBuiltinEntry(edx, id); - InvokeCode(Operand(edx), expected, expected, flag); + GetBuiltinFunction(edi, id); + InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), + expected, expected, flag); } - -void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { - ASSERT(!target.is(edi)); - - // Load the builtins object into target register. +void MacroAssembler::GetBuiltinFunction(Register target, + Builtins::JavaScript id) { + // Load the JavaScript builtin function from the builtins object. mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset)); + mov(target, FieldOperand(target, + JSBuiltinsObject::OffsetOfFunctionWithId(id))); +} +void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { + ASSERT(!target.is(edi)); // Load the JavaScript builtin function from the builtins object. - mov(edi, FieldOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id))); - - // Load the code entry point from the builtins object. - mov(target, FieldOperand(target, JSBuiltinsObject::OffsetOfCodeWithId(id))); - if (FLAG_debug_code) { - // Make sure the code objects in the builtins object and in the - // builtin function are the same. - push(target); - mov(target, FieldOperand(edi, JSFunction::kCodeOffset)); - cmp(target, Operand(esp, 0)); - Assert(equal, "Builtin code object changed"); - pop(target); - } - lea(target, FieldOperand(target, Code::kHeaderSize)); + GetBuiltinFunction(edi, id); + // Load the code entry point from the function into the target register. + mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset)); } @@ -1378,6 +1324,30 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) { } +void MacroAssembler::LoadGlobalFunction(int index, Register function) { + // Load the global or builtins object from the current context. + mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); + // Load the global context from the global or builtins object. 
+ mov(function, FieldOperand(function, GlobalObject::kGlobalContextOffset)); + // Load the function from the global context. + mov(function, Operand(function, Context::SlotOffset(index))); +} + + +void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, + Register map) { + // Load the initial map. The global functions all have initial maps. + mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); + if (FLAG_debug_code) { + Label ok, fail; + CheckMap(map, Factory::meta_map(), &fail, false); + jmp(&ok); + bind(&fail); + Abort("Global functions must have initial map"); + bind(&ok); + } +} + void MacroAssembler::Ret() { ret(0); @@ -1464,6 +1434,21 @@ void MacroAssembler::Assert(Condition cc, const char* msg) { } +void MacroAssembler::AssertFastElements(Register elements) { + if (FLAG_debug_code) { + Label ok; + cmp(FieldOperand(elements, HeapObject::kMapOffset), + Immediate(Factory::fixed_array_map())); + j(equal, &ok); + cmp(FieldOperand(elements, HeapObject::kMapOffset), + Immediate(Factory::fixed_cow_array_map())); + j(equal, &ok); + Abort("JSObject with fast elements map has slow elements"); + bind(&ok); + } +} + + void MacroAssembler::Check(Condition cc, const char* msg) { Label L; j(cc, &L, taken); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index 0b16f0d40b..a7534cbd47 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -99,13 +99,6 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Debugger Support - void SaveRegistersToMemory(RegList regs); - void RestoreRegistersFromMemory(RegList regs); - void PushRegistersFromMemory(RegList regs); - void PopRegistersToMemory(RegList regs); - void CopyRegistersFromStackToMemory(Register base, - Register scratch, - RegList regs); void DebugBreak(); #endif @@ -128,18 +121,25 @@ class MacroAssembler: public Assembler { // Expects the number of arguments in register eax and // sets up the number of arguments in register edi and the pointer // to the first argument in register esi. - void EnterExitFrame(ExitFrame::Mode mode); + void EnterExitFrame(); - void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc); + void EnterApiExitFrame(int stack_space, int argc); // Leave the current exit frame. Expects the return value in // register eax:edx (untouched) and the pointer to the first // argument in register esi. - void LeaveExitFrame(ExitFrame::Mode mode); + void LeaveExitFrame(); // Find the function context up the context chain. void LoadContext(Register dst, int context_chain_length); + // Load the global function with the given index. + void LoadGlobalFunction(int index, Register function); + + // Load the initial map from the global function. The registers + // function and map can be the same. + void LoadGlobalFunctionInitialMap(Register function, Register map); + // --------------------------------------------------------------------------- // JavaScript invokes @@ -169,6 +169,9 @@ class MacroAssembler: public Assembler { // the unresolved list if the name does not resolve. void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag); + // Store the function for the given builtin in the target register. + void GetBuiltinFunction(Register target, Builtins::JavaScript id); + // Store the code object for the given builtin in the target register. 
void GetBuiltinEntry(Register target, Builtins::JavaScript id); @@ -264,6 +267,9 @@ class MacroAssembler: public Assembler { // Abort execution if argument is a smi. Used in debug code. void AbortIfSmi(Register object); + // Abort execution if argument is a string. Used in debug code. + void AbortIfNotString(Register object); + // --------------------------------------------------------------------------- // Exception handling @@ -350,6 +356,11 @@ class MacroAssembler: public Assembler { Register scratch2, Register scratch3, Label* gc_required); + void AllocateAsciiString(Register result, + int length, + Register scratch1, + Register scratch2, + Label* gc_required); // Allocate a raw cons string object. Only the map field of the result is // initialized. @@ -393,6 +404,12 @@ class MacroAssembler: public Assembler { // occurred. void IllegalOperation(int num_arguments); + // Picks out an array index from the hash field. + // Register use: + // hash - holds the index's hash. Clobbered. + // index - holds the overwritten index on exit. + void IndexFromHash(Register hash, Register index); + // --------------------------------------------------------------------------- // Runtime calls @@ -508,6 +525,8 @@ class MacroAssembler: public Assembler { // Use --debug_code to enable. void Assert(Condition cc, const char* msg); + void AssertFastElements(Register elements); + // Like Assert(), but always enabled. void Check(Condition cc, const char* msg); @@ -559,8 +578,8 @@ class MacroAssembler: public Assembler { void EnterFrame(StackFrame::Type type); void LeaveFrame(StackFrame::Type type); - void EnterExitFramePrologue(ExitFrame::Mode mode); - void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc); + void EnterExitFramePrologue(); + void EnterExitFrameEpilogue(int argc); // Allocation support helpers. void LoadAllocationTopHelper(Register result, diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index a7930fb1ed..2aab7a8d94 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -31,11 +31,9 @@ #include "unicode.h" #include "log.h" -#include "ast.h" #include "regexp-stack.h" #include "macro-assembler.h" #include "regexp-macro-assembler.h" -#include "ia32/macro-assembler-ia32.h" #include "ia32/regexp-macro-assembler-ia32.h" namespace v8 { diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index b2c9dab83b..7fc3f8114d 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -257,16 +257,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, int index, Register prototype) { - // Load the global or builtins object from the current context. - __ mov(prototype, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX))); - // Load the global context from the global or builtins object. - __ mov(prototype, - FieldOperand(prototype, GlobalObject::kGlobalContextOffset)); - // Load the function from the global context. - __ mov(prototype, Operand(prototype, Context::SlotOffset(index))); - // Load the initial map. The global functions all have initial maps. - __ mov(prototype, - FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); + __ LoadGlobalFunction(index, prototype); + __ LoadGlobalFunctionInitialMap(prototype, prototype); // Load the prototype from the initial map. 
__ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset)); } @@ -1366,16 +1358,18 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); __ ret((argc + 1) * kPointerSize); } else { + Label call_builtin; + // Get the elements array of the object. __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset)); - // Check that the elements are in fast mode (not dictionary). + // Check that the elements are in fast mode and writable. __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), Immediate(Factory::fixed_array_map())); - __ j(not_equal, &miss); + __ j(not_equal, &call_builtin); if (argc == 1) { // Otherwise fall through to call builtin. - Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements; + Label exit, with_write_barrier, attempt_to_grow_elements; // Get the array's length into eax and calculate new length. __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); @@ -1456,10 +1450,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object, // Elements are in new space, so write barrier is not required. __ ret((argc + 1) * kPointerSize); - - __ bind(&call_builtin); } + __ bind(&call_builtin); __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush), argc + 1, 1); @@ -1511,10 +1504,10 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object, // Get the elements array of the object. __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset)); - // Check that the elements are in fast mode (not dictionary). + // Check that the elements are in fast mode and writable. __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), Immediate(Factory::fixed_array_map())); - __ j(not_equal, &miss); + __ j(not_equal, &call_builtin); // Get the array's length into ecx and calculate new length. __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset)); diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc index ff9132cf71..5f1e1e4e5e 100644 --- a/deps/v8/src/ia32/virtual-frame-ia32.cc +++ b/deps/v8/src/ia32/virtual-frame-ia32.cc @@ -1143,9 +1143,9 @@ Result VirtualFrame::CallConstructor(int arg_count) { // and receiver on the stack. Handle ic(Builtins::builtin(Builtins::JSConstructCall)); // Duplicate the function before preparing the frame. - PushElementAt(arg_count + 1); + PushElementAt(arg_count); Result function = Pop(); - PrepareForCall(arg_count + 1, arg_count + 1); // Spill args and receiver. + PrepareForCall(arg_count + 1, arg_count + 1); // Spill function and args. function.ToRegister(edi); // Constructors are called with the number of arguments in register diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h index 70bbaf8c96..94dbd5f51f 100644 --- a/deps/v8/src/ic-inl.h +++ b/deps/v8/src/ic-inl.h @@ -108,10 +108,10 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object, } -Map* IC::GetCodeCacheMap(Object* object, InlineCacheHolderFlag holder) { +JSObject* IC::GetCodeCacheHolder(Object* object, InlineCacheHolderFlag holder) { Object* map_owner = (holder == OWN_MAP ? 
object : object->GetPrototype()); ASSERT(map_owner->IsJSObject()); - return JSObject::cast(map_owner)->map(); + return JSObject::cast(map_owner); } diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index a5370a6f85..b4a333ec90 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -165,14 +165,14 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) { if (cache_holder == OWN_MAP && !receiver->IsJSObject()) { // The stub was generated for JSObject but called for non-JSObject. - // IC::GetCodeCacheMap is not applicable. + // IC::GetCodeCacheHolder is not applicable. return MONOMORPHIC; } else if (cache_holder == PROTOTYPE_MAP && receiver->GetPrototype()->IsNull()) { - // IC::GetCodeCacheMap is not applicable. + // IC::GetCodeCacheHolder is not applicable. return MONOMORPHIC; } - Map* map = IC::GetCodeCacheMap(receiver, cache_holder); + Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map(); // Decide whether the inline cache failed because of changes to the // receiver itself or changes to one of its prototypes. diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index a02f272fc7..17450cc356 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -123,8 +123,8 @@ class IC { JSObject* holder); static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object, JSObject* holder); - static inline Map* GetCodeCacheMap(Object* object, - InlineCacheHolderFlag holder); + static inline JSObject* GetCodeCacheHolder(Object* object, + InlineCacheHolderFlag holder); protected: Address fp() const { return fp_; } diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js index e7ec6100e5..a39d7c4a97 100644 --- a/deps/v8/src/json.js +++ b/deps/v8/src/json.js @@ -68,15 +68,13 @@ function JSONParse(text, reviver) { } var characterQuoteCache = { + '\b': '\\b', // ASCII 8, Backspace + '\t': '\\t', // ASCII 9, Tab + '\n': '\\n', // ASCII 10, Newline + '\f': '\\f', // ASCII 12, Formfeed + '\r': '\\r', // ASCII 13, Carriage Return '\"': '\\"', - '\\': '\\\\', - '/': '\\/', - '\b': '\\b', - '\f': '\\f', - '\n': '\\n', - '\r': '\\r', - '\t': '\\t', - '\x0B': '\\u000b' + '\\': '\\\\' }; function QuoteSingleJSONCharacter(c) { @@ -95,7 +93,7 @@ function QuoteSingleJSONCharacter(c) { } function QuoteJSONString(str) { - var quotable = /[\\\"\x00-\x1f\x80-\uffff]/g; + var quotable = /[\\\"\x00-\x1f]/g; return '"' + str.replace(quotable, QuoteSingleJSONCharacter) + '"'; } diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index c9afc62e15..30d4dcbf23 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -380,10 +380,11 @@ int RegExpImpl::IrregexpPrepare(Handle regexp, } -RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce(Handle regexp, - Handle subject, - int index, - Vector output) { +RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce( + Handle regexp, + Handle subject, + int index, + Vector output) { Handle irregexp(FixedArray::cast(regexp->data())); ASSERT(index >= 0); @@ -478,11 +479,9 @@ Handle RegExpImpl::IrregexpExec(Handle jsregexp, OffsetsVector registers(required_registers); - IrregexpResult res = IrregexpExecOnce(jsregexp, - subject, - previous_index, - Vector(registers.vector(), - registers.length())); + IrregexpResult res = RegExpImpl::IrregexpExecOnce( + jsregexp, subject, previous_index, Vector(registers.vector(), + registers.length())); if (res == RE_SUCCESS) { int capture_register_count = (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2; diff --git a/deps/v8/src/jump-target-heavy.h 
b/deps/v8/src/jump-target-heavy.h index b2113a5a4c..8cec86926a 100644 --- a/deps/v8/src/jump-target-heavy.h +++ b/deps/v8/src/jump-target-heavy.h @@ -117,17 +117,17 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated. // the target and the fall-through. virtual void Branch(Condition cc, Hint hint = no_hint); virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint); - virtual void Branch(Condition cc, - Result* arg0, - Result* arg1, - Hint hint = no_hint); + void Branch(Condition cc, + Result* arg0, + Result* arg1, + Hint hint = no_hint); // Bind a jump target. If there is no current frame at the binding // site, there must be at least one frame reaching via a forward // jump. virtual void Bind(); virtual void Bind(Result* arg); - virtual void Bind(Result* arg0, Result* arg1); + void Bind(Result* arg0, Result* arg1); // Emit a call to a jump target. There must be a current frame at // the call. The frame at the target is the same as the current diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc index 769ac35c85..5a8749e513 100644 --- a/deps/v8/src/liveedit.cc +++ b/deps/v8/src/liveedit.cc @@ -739,7 +739,7 @@ void LiveEdit::WrapSharedFunctionInfos(Handle array) { Handle name_handle(String::cast(info->name())); info_wrapper.SetProperties(name_handle, info->start_position(), info->end_position(), info); - array->SetElement(i, *(info_wrapper.GetJSArray())); + SetElement(array, i, info_wrapper.GetJSArray()); } } @@ -750,7 +750,7 @@ void LiveEdit::WrapSharedFunctionInfos(Handle array) { class ReferenceCollectorVisitor : public ObjectVisitor { public: explicit ReferenceCollectorVisitor(Code* original) - : original_(original), rvalues_(10), reloc_infos_(10) { + : original_(original), rvalues_(10), reloc_infos_(10), code_entries_(10) { } virtual void VisitPointers(Object** start, Object** end) { @@ -761,7 +761,13 @@ class ReferenceCollectorVisitor : public ObjectVisitor { } } - void VisitCodeTarget(RelocInfo* rinfo) { + virtual void VisitCodeEntry(Address entry) { + if (Code::GetObjectFromEntryAddress(entry) == original_) { + code_entries_.Add(entry); + } + } + + virtual void VisitCodeTarget(RelocInfo* rinfo) { if (RelocInfo::IsCodeTarget(rinfo->rmode()) && Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) { reloc_infos_.Add(*rinfo); @@ -778,8 +784,13 @@ class ReferenceCollectorVisitor : public ObjectVisitor { for (int i = 0; i < rvalues_.length(); i++) { *(rvalues_[i]) = substitution; } + Address substitution_entry = substitution->instruction_start(); for (int i = 0; i < reloc_infos_.length(); i++) { - reloc_infos_[i].set_target_address(substitution->instruction_start()); + reloc_infos_[i].set_target_address(substitution_entry); + } + for (int i = 0; i < code_entries_.length(); i++) { + Address entry = code_entries_[i]; + Memory::Address_at(entry) = substitution_entry; } } @@ -787,28 +798,10 @@ class ReferenceCollectorVisitor : public ObjectVisitor { Code* original_; ZoneList rvalues_; ZoneList reloc_infos_; + ZoneList
code_entries_; }; -class FrameCookingThreadVisitor : public ThreadVisitor { - public: - void VisitThread(ThreadLocalTop* top) { - StackFrame::CookFramesForThread(top); - } -}; - -class FrameUncookingThreadVisitor : public ThreadVisitor { - public: - void VisitThread(ThreadLocalTop* top) { - StackFrame::UncookFramesForThread(top); - } -}; - -static void IterateAllThreads(ThreadVisitor* visitor) { - Top::IterateThread(visitor); - ThreadManager::IterateArchivedThreads(visitor); -} - // Finds all references to original and replaces them with substitution. static void ReplaceCodeObject(Code* original, Code* substitution) { ASSERT(!Heap::InNewSpace(substitution)); @@ -824,13 +817,7 @@ static void ReplaceCodeObject(Code* original, Code* substitution) { // so temporary replace the pointers with offset numbers // in prologue/epilogue. { - FrameCookingThreadVisitor cooking_visitor; - IterateAllThreads(&cooking_visitor); - Heap::IterateStrongRoots(&visitor, VISIT_ALL); - - FrameUncookingThreadVisitor uncooking_visitor; - IterateAllThreads(&uncooking_visitor); } // Now iterate over all pointers of all objects, including code_target @@ -1372,8 +1359,9 @@ static const char* DropActivationsInActiveThread( for (int i = 0; i < array_len; i++) { if (result->GetElement(i) == Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) { - result->SetElement(i, Smi::FromInt( - LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK)); + Handle replaced( + Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK)); + SetElement(result, i, replaced); } } return NULL; diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index e083f01a44..0bca5ebd86 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -30,6 +30,7 @@ #include "v8.h" #include "bootstrapper.h" +#include "code-stubs.h" #include "global-handles.h" #include "log.h" #include "macro-assembler.h" @@ -1266,7 +1267,8 @@ void Logger::LogCodeObject(Object* object) { case Code::BINARY_OP_IC: // fall through case Code::STUB: - description = CodeStub::MajorName(code_object->major_key(), true); + description = + CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true); if (description == NULL) description = "A stub from the snapshot"; tag = Logger::STUB_TAG; diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h index 686a61c367..d261f57da7 100644 --- a/deps/v8/src/macro-assembler.h +++ b/deps/v8/src/macro-assembler.h @@ -83,4 +83,31 @@ const int kInvalidProtoDepth = -1; #error Unsupported target architecture. #endif +namespace v8 { +namespace internal { + +// Support for "structured" code comments. +#ifdef DEBUG + +class Comment { + public: + Comment(MacroAssembler* masm, const char* msg); + ~Comment(); + + private: + MacroAssembler* masm_; + const char* msg_; +}; + +#else + +class Comment { + public: + Comment(MacroAssembler*, const char*) {} +}; + +#endif // DEBUG + +} } // namespace v8::internal + #endif // V8_MACRO_ASSEMBLER_H_ diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py index 643a2851a1..1ceb620146 100644 --- a/deps/v8/src/macros.py +++ b/deps/v8/src/macros.py @@ -120,7 +120,7 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg)); # Inline macros. Use %IS_VAR to make sure arg is evaluated only once. macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg)); -macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg)); +macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg))); macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? 
arg : %NumberToIntegerMapMinusZero(ToNumber(arg))); macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0)); macro TO_UINT32(arg) = (arg >>> 0); diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 1a020e55d1..a9e852ef32 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -27,6 +27,7 @@ #include "v8.h" +#include "compilation-cache.h" #include "execution.h" #include "heap-profiler.h" #include "global-handles.h" @@ -84,11 +85,15 @@ void MarkCompactCollector::CollectGarbage() { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT); EncodeForwardingAddresses(); + Heap::MarkMapPointersAsEncoded(true); UpdatePointers(); + Heap::MarkMapPointersAsEncoded(false); + PcToCodeCache::FlushPcToCodeCache(); RelocateObjects(); } else { SweepSpaces(); + PcToCodeCache::FlushPcToCodeCache(); } Finish(); @@ -252,6 +257,14 @@ class StaticMarkingVisitor : public StaticVisitorBase { table_.GetVisitor(map)(map, obj); } + static void EnableCodeFlushing(bool enabled) { + if (enabled) { + table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode); + } else { + table_.Register(kVisitJSFunction, &VisitJSFunction); + } + } + static void Initialize() { table_.Register(kVisitShortcutCandidate, &FixedBodyVisitor(object)->CodeIterateBody(); } + // Code flushing support. + + // How many collections newly compiled code object will survive before being + // flushed. + static const int kCodeAgeThreshold = 5; + + inline static bool HasSourceCode(SharedFunctionInfo* info) { + Object* undefined = Heap::raw_unchecked_undefined_value(); + return (info->script() != undefined) && + (reinterpret_cast(info->script())->source() != undefined); + } + + + inline static bool IsCompiled(JSFunction* function) { + return + function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile); + } + + + inline static bool IsCompiled(SharedFunctionInfo* function) { + return + function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile); + } + + + static void FlushCodeForFunction(JSFunction* function) { + SharedFunctionInfo* shared_info = function->unchecked_shared(); + + if (shared_info->IsMarked()) return; + + // Special handling if the function and shared info objects + // have different code objects. + if (function->unchecked_code() != shared_info->unchecked_code()) { + // If the shared function has been flushed but the function has not, + // we flush the function if possible. + if (!IsCompiled(shared_info) && + IsCompiled(function) && + !function->unchecked_code()->IsMarked()) { + function->set_code(shared_info->unchecked_code()); + } + return; + } + + // Code is either on stack or in compilation cache. + if (shared_info->unchecked_code()->IsMarked()) { + shared_info->set_code_age(0); + return; + } + + // The function must be compiled and have the source code available, + // to be able to recompile it in case we need the function again. + if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return; + + // We never flush code for Api functions. + Object* function_data = shared_info->function_data(); + if (function_data->IsHeapObject() && + (SafeMap(function_data)->instance_type() == + FUNCTION_TEMPLATE_INFO_TYPE)) { + return; + } + + // Only flush code for functions. + if (shared_info->code()->kind() != Code::FUNCTION) return; + + // Function must be lazy compilable. + if (!shared_info->allows_lazy_compilation()) return; + + // If this is a full script wrapped in a function we do no flush the code. 
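The aging policy that follows lets a compiled function survive kCodeAgeThreshold
collections without being run before its code is dropped. A rough stand-alone sketch
of that policy (SharedInfo is a simplified stand-in; the real code also resets the
age to zero whenever the code is found marked):

  #include <stdio.h>

  const int kCodeAgeThreshold = 5;  // same threshold as in the patch

  struct SharedInfo { int code_age; bool has_compiled_code; };

  // Called once per mark-compact collection for a function whose code was
  // not marked (not on the stack and not otherwise referenced).
  void AgeOrFlush(SharedInfo* info) {
    if (!info->has_compiled_code) return;
    if (info->code_age < kCodeAgeThreshold) {
      info->code_age++;                  // not old enough yet, keep the code
    } else {
      info->has_compiled_code = false;   // flush; recompile lazily if needed
    }
  }

  int main() {
    SharedInfo f = {0, true};
    for (int gc = 0; gc < 6; gc++) AgeOrFlush(&f);
    printf("still compiled after 6 idle GCs: %s\n",
           f.has_compiled_code ? "yes" : "no");  // prints "no"
    return 0;
  }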
+ if (shared_info->is_toplevel()) return; + + // Age this shared function info. + if (shared_info->code_age() < kCodeAgeThreshold) { + shared_info->set_code_age(shared_info->code_age() + 1); + return; + } + + // Compute the lazy compilable version of the code. + Code* code = Builtins::builtin(Builtins::LazyCompile); + shared_info->set_code(code); + function->set_code(code); + } + + + static inline Map* SafeMap(Object* obj) { + MapWord map_word = HeapObject::cast(obj)->map_word(); + map_word.ClearMark(); + map_word.ClearOverflow(); + return map_word.ToMap(); + } + + + static inline bool IsJSBuiltinsObject(Object* obj) { + return obj->IsHeapObject() && + (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE); + } + + + static inline bool IsValidNotBuiltinContext(Object* ctx) { + if (!ctx->IsHeapObject()) return false; + + Map* map = SafeMap(ctx); + if (!(map == Heap::raw_unchecked_context_map() || + map == Heap::raw_unchecked_catch_context_map() || + map == Heap::raw_unchecked_global_context_map())) { + return false; + } + + Context* context = reinterpret_cast(ctx); + + if (IsJSBuiltinsObject(context->global())) { + return false; + } + + return true; + } + + + static void VisitCodeEntry(Address entry_address) { + Object* code = Code::GetObjectFromEntryAddress(entry_address); + Object* old_code = code; + VisitPointer(&code); + if (code != old_code) { + Memory::Address_at(entry_address) = + reinterpret_cast(code)->entry(); + } + } + + + static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) { + JSFunction* jsfunction = reinterpret_cast(object); + // The function must have a valid context and not be a builtin. + if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) { + FlushCodeForFunction(jsfunction); + } + VisitJSFunction(map, object); + } + + + static void VisitJSFunction(Map* map, HeapObject* object) { +#define SLOT_ADDR(obj, offset) \ + reinterpret_cast((obj)->address() + offset) + + VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset), + SLOT_ADDR(object, JSFunction::kCodeEntryOffset)); + + VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset); + + VisitPointers(SLOT_ADDR(object, + JSFunction::kCodeEntryOffset + kPointerSize), + SLOT_ADDR(object, JSFunction::kSize)); +#undef SLOT_ADDR + } + + typedef void (*Callback)(Map* map, HeapObject* object); static VisitorDispatchTable table_; @@ -435,6 +604,66 @@ class MarkingVisitor : public ObjectVisitor { }; +class CodeMarkingVisitor : public ThreadVisitor { + public: + void VisitThread(ThreadLocalTop* top) { + for (StackFrameIterator it(top); !it.done(); it.Advance()) { + MarkCompactCollector::MarkObject(it.frame()->unchecked_code()); + } + } +}; + + +class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { + public: + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) VisitPointer(p); + } + + void VisitPointer(Object** slot) { + Object* obj = *slot; + if (obj->IsHeapObject()) { + MarkCompactCollector::MarkObject(HeapObject::cast(obj)); + } + } +}; + + +void MarkCompactCollector::PrepareForCodeFlushing() { + if (!FLAG_flush_code) { + StaticMarkingVisitor::EnableCodeFlushing(false); + return; + } + +#ifdef ENABLE_DEBUGGER_SUPPORT + if (Debug::IsLoaded() || Debug::has_break_points()) { + StaticMarkingVisitor::EnableCodeFlushing(false); + return; + } +#endif + StaticMarkingVisitor::EnableCodeFlushing(true); + + // Ensure that empty descriptor array is marked. Method MarkDescriptorArray + // relies on it being marked before any other descriptor array. 
+ MarkObject(Heap::raw_unchecked_empty_descriptor_array()); + + // Make sure we are not referencing the code from the stack. + for (StackFrameIterator it; !it.done(); it.Advance()) { + MarkObject(it.frame()->unchecked_code()); + } + + // Iterate the archived stacks in all threads to check if + // the code is referenced. + CodeMarkingVisitor code_marking_visitor; + ThreadManager::IterateArchivedThreads(&code_marking_visitor); + + SharedFunctionInfoMarkingVisitor visitor; + CompilationCache::IterateFunctions(&visitor); + + ProcessMarkingStack(); +} + + // Visitor class for marking heap roots. class RootMarkingVisitor : public ObjectVisitor { public: @@ -793,6 +1022,8 @@ void MarkCompactCollector::MarkLiveObjects() { ASSERT(!marking_stack.overflowed()); + PrepareForCodeFlushing(); + RootMarkingVisitor root_visitor; MarkRoots(&root_visitor); @@ -962,8 +1193,6 @@ void MarkCompactCollector::ClearNonLiveTransitions() { // pair of distinguished invalid map encodings (for single word and multiple // words) to indicate free regions in the page found during computation of // forwarding addresses and skipped over in subsequent sweeps. -static const uint32_t kSingleFreeEncoding = 0; -static const uint32_t kMultiFreeEncoding = 1; // Encode a free region, defined by the given start address and size, in the @@ -971,10 +1200,10 @@ static const uint32_t kMultiFreeEncoding = 1; void EncodeFreeRegion(Address free_start, int free_size) { ASSERT(free_size >= kIntSize); if (free_size == kIntSize) { - Memory::uint32_at(free_start) = kSingleFreeEncoding; + Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding; } else { ASSERT(free_size >= 2 * kIntSize); - Memory::uint32_at(free_start) = kMultiFreeEncoding; + Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding; Memory::int_at(free_start + kIntSize) = free_size; } @@ -1404,7 +1633,7 @@ static void SweepNewSpace(NewSpace* space) { } -static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { +static void SweepSpace(PagedSpace* space) { PageIterator it(space, PageIterator::PAGES_IN_USE); // During sweeping of paged space we are trying to find longest sequences @@ -1445,10 +1674,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { MarkCompactCollector::tracer()->decrement_marked_count(); if (!is_previous_alive) { // Transition from free to live. - dealloc(free_start, - static_cast(current - free_start), - true, - false); + space->DeallocateBlock(free_start, + static_cast(current - free_start), + true); is_previous_alive = true; } } else { @@ -1478,7 +1706,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { // without putting anything into free list. int size_in_bytes = static_cast(p->AllocationTop() - free_start); if (size_in_bytes > 0) { - dealloc(free_start, size_in_bytes, false, true); + space->DeallocateBlock(free_start, size_in_bytes, false); } } } else { @@ -1494,7 +1722,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { if (last_free_size > 0) { Page::FromAddress(last_free_start)-> SetAllocationWatermark(last_free_start); - dealloc(last_free_start, last_free_size, true, true); + space->DeallocateBlock(last_free_start, last_free_size, true); last_free_start = NULL; last_free_size = 0; } @@ -1525,7 +1753,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { // There was a free ending area on the previous page. 
// Deallocate it without putting it into freelist and move allocation // top to the beginning of this free area. - dealloc(last_free_start, last_free_size, false, true); + space->DeallocateBlock(last_free_start, last_free_size, false); new_allocation_top = last_free_start; } @@ -1546,61 +1774,6 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { } -void MarkCompactCollector::DeallocateOldPointerBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page) { - Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist); -} - - -void MarkCompactCollector::DeallocateOldDataBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page) { - Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist); -} - - -void MarkCompactCollector::DeallocateCodeBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page) { - Heap::code_space()->Free(start, size_in_bytes, add_to_freelist); -} - - -void MarkCompactCollector::DeallocateMapBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page) { - // Objects in map space are assumed to have size Map::kSize and a - // valid map in their first word. Thus, we break the free block up into - // chunks and free them separately. - ASSERT(size_in_bytes % Map::kSize == 0); - Address end = start + size_in_bytes; - for (Address a = start; a < end; a += Map::kSize) { - Heap::map_space()->Free(a, add_to_freelist); - } -} - - -void MarkCompactCollector::DeallocateCellBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page) { - // Free-list elements in cell space are assumed to have a fixed size. - // We break the free block into chunks and add them to the free list - // individually. - int size = Heap::cell_space()->object_size_in_bytes(); - ASSERT(size_in_bytes % size == 0); - Address end = start + size_in_bytes; - for (Address a = start; a < end; a += size) { - Heap::cell_space()->Free(a, add_to_freelist); - } -} - - void MarkCompactCollector::EncodeForwardingAddresses() { ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); // Objects in the active semispace of the young generation may be @@ -1865,14 +2038,14 @@ void MarkCompactCollector::SweepSpaces() { // the map space last because freeing non-live maps overwrites them and // the other spaces rely on possibly non-live maps to get the sizes for // non-live objects. - SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock); - SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock); - SweepSpace(Heap::code_space(), &DeallocateCodeBlock); - SweepSpace(Heap::cell_space(), &DeallocateCellBlock); + SweepSpace(Heap::old_pointer_space()); + SweepSpace(Heap::old_data_space()); + SweepSpace(Heap::code_space()); + SweepSpace(Heap::cell_space()); { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); SweepNewSpace(Heap::new_space()); } - SweepSpace(Heap::map_space(), &DeallocateMapBlock); + SweepSpace(Heap::map_space()); Heap::IterateDirtyRegions(Heap::map_space(), &Heap::IteratePointersInDirtyMapsRegion, diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h index ad635867c3..72a6fa3b62 100644 --- a/deps/v8/src/mark-compact.h +++ b/deps/v8/src/mark-compact.h @@ -36,15 +36,6 @@ namespace internal { // to the first live object in the page (only used for old and map objects). typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset); -// Callback function for non-live blocks in the old generation. 
-// If add_to_freelist is false then just accounting stats are updated and -// no attempt to add area to free list is made. -typedef void (*DeallocateFunction)(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page); - - // Forward declarations. class RootMarkingVisitor; class MarkingVisitor; @@ -121,11 +112,17 @@ class MarkCompactCollector: public AllStatic { #ifdef DEBUG // Checks whether performing mark-compact collection. static bool in_use() { return state_ > PREPARE_GC; } + static bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; } #endif // Determine type of object and emit deletion log event. static void ReportDeleteIfNeeded(HeapObject* obj); + // Distinguishable invalid map encodings (for single word and multiple words) + // that indicate free regions. + static const uint32_t kSingleFreeEncoding = 0; + static const uint32_t kMultiFreeEncoding = 1; + private: #ifdef DEBUG enum CollectorState { @@ -175,6 +172,10 @@ class MarkCompactCollector: public AllStatic { friend class RootMarkingVisitor; friend class MarkingVisitor; friend class StaticMarkingVisitor; + friend class CodeMarkingVisitor; + friend class SharedFunctionInfoMarkingVisitor; + + static void PrepareForCodeFlushing(); // Marking operations for objects reachable from roots. static void MarkLiveObjects(); @@ -319,33 +320,6 @@ class MarkCompactCollector: public AllStatic { static int IterateLiveObjectsInRange(Address start, Address end, HeapObjectCallback size_func); - // Callback functions for deallocating non-live blocks in the old - // generation. - static void DeallocateOldPointerBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page); - - static void DeallocateOldDataBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page); - - static void DeallocateCodeBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page); - - static void DeallocateMapBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page); - - static void DeallocateCellBlock(Address start, - int size_in_bytes, - bool add_to_freelist, - bool last_on_page); - // If we are not compacting the heap, we simply sweep the spaces except // for the large object space, clearing mark bits and adding unmarked // regions to each space's free list. 
diff --git a/deps/v8/src/memory.h b/deps/v8/src/memory.h index 503492a4b5..27f32f7a2e 100644 --- a/deps/v8/src/memory.h +++ b/deps/v8/src/memory.h @@ -36,6 +36,10 @@ namespace internal { class Memory { public: + static uint8_t& uint8_at(Address addr) { + return *reinterpret_cast(addr); + } + static uint16_t& uint16_at(Address addr) { return *reinterpret_cast(addr); } diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index 0375e8a173..f26c3b501d 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -831,11 +831,11 @@ function FormatSourcePosition(frame) { } var line = ""; var functionName = frame.getFunction().name; - var methodName = frame.getMethodName(); var addPrefix = true; var isConstructor = frame.isConstructor(); var isMethodCall = !(frame.isToplevel() || isConstructor); if (isMethodCall) { + var methodName = frame.getMethodName(); line += frame.getTypeName() + "."; if (functionName) { line += functionName; diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc index 26fea25153..95329389e1 100644 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -94,7 +94,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // args // Clear the context before we push it when entering the JS frame. - __ li(cp, Operand(0)); + __ li(cp, Operand(0, RelocInfo::NONE)); // Enter an internal frame. __ EnterInternalFrame(); diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h index 3ad94e86d3..75e7a293f9 100644 --- a/deps/v8/src/mips/codegen-mips.h +++ b/deps/v8/src/mips/codegen-mips.h @@ -309,9 +309,6 @@ class CodeGenerator: public AstVisitor { static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle name); bool CheckForInlineRuntimeCall(CallRuntime* node); - static bool PatchInlineRuntimeEntry(Handle name, - const InlineRuntimeLUT& new_entry, - InlineRuntimeLUT* old_entry); static Handle ComputeLazyCompile(int argc); void ProcessDeclarations(ZoneList* declarations); diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index d340e4b598..6d49d7503c 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -30,6 +30,7 @@ #include "disassembler.h" #include "disasm.h" #include "jsregexp.h" +#include "objects-visiting.h" namespace v8 { namespace internal { @@ -540,7 +541,8 @@ void JSObject::JSObjectVerify() { map()->NextFreePropertyIndex())); } ASSERT(map()->has_fast_elements() == - (elements()->map() == Heap::fixed_array_map())); + (elements()->map() == Heap::fixed_array_map() || + elements()->map() == Heap::fixed_cow_array_map())); ASSERT(map()->has_fast_elements() == HasFastElements()); } @@ -639,13 +641,25 @@ void Map::MapPrint() { void Map::MapVerify() { ASSERT(!Heap::InNewSpace(this)); ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE); - ASSERT(kPointerSize <= instance_size() - && instance_size() < Heap::Capacity()); + ASSERT(instance_size() == kVariableSizeSentinel || + (kPointerSize <= instance_size() && + instance_size() < Heap::Capacity())); VerifyHeapPointer(prototype()); VerifyHeapPointer(instance_descriptors()); } +void Map::NormalizedMapVerify() { + MapVerify(); + ASSERT_EQ(Heap::empty_descriptor_array(), instance_descriptors()); + ASSERT_EQ(Heap::empty_fixed_array(), code_cache()); + ASSERT_EQ(0, pre_allocated_property_fields()); + ASSERT_EQ(0, unused_property_fields()); + ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()), + visitor_id()); +} + + void CodeCache::CodeCachePrint() { 
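The MIPS change above matches the ChangeLog entry about newer compilers finding Operand(0) ambiguous: passing the relocation mode explicitly pins the call to the immediate-taking constructor. One plausible way such an ambiguity arises (this is an assumption about the mechanism, with invented types) is a literal 0 that converts equally well to an integer immediate and to a pointer overload; adding the second argument removes the tie.

#include <cstdint>

namespace operand_sketch {

enum Mode { NONE, EMBEDDED_OBJECT };

// Both constructors accept a literal 0: 0 -> int64_t is an integral
// conversion, 0 -> void* is a null-pointer conversion, and the two rank
// equally, so overload resolution cannot pick one.
struct Operand {
  explicit Operand(int64_t immediate, Mode mode = NONE)
      : immediate_(immediate), mode_(mode), handle_(0) {}
  explicit Operand(void* handle)
      : immediate_(0), mode_(NONE), handle_(handle) {}

  int64_t immediate_;
  Mode mode_;
  void* handle_;
};

inline Operand ClearRegisterValue() {
  // Operand zero(0);            // ambiguous on stricter compilers
  return Operand(0, NONE);       // naming the mode selects the immediate form
}

}  // namespace operand_sketch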
HeapObject::PrintHeader("CodeCache"); PrintF("\n - default_cache: "); @@ -1361,6 +1375,21 @@ void JSFunctionResultCache::JSFunctionResultCacheVerify() { } +void NormalizedMapCache::NormalizedMapCacheVerify() { + FixedArray::cast(this)->Verify(); + if (FLAG_enable_slow_asserts) { + for (int i = 0; i < length(); i++) { + Object* e = get(i); + if (e->IsMap()) { + Map::cast(e)->NormalizedMapVerify(); + } else { + ASSERT(e->IsUndefined()); + } + } + } +} + + #endif // DEBUG } } // namespace v8::internal diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 5e8022e51f..bac224f4e9 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -38,7 +38,10 @@ #include "objects.h" #include "contexts.h" #include "conversions-inl.h" +#include "heap.h" +#include "memory.h" #include "property.h" +#include "spaces.h" namespace v8 { namespace internal { @@ -574,6 +577,18 @@ bool Object::IsJSFunctionResultCache() { } +bool Object::IsNormalizedMapCache() { + if (!IsFixedArray()) return false; + if (FixedArray::cast(this)->length() != NormalizedMapCache::kEntries) { + return false; + } +#ifdef DEBUG + reinterpret_cast(this)->NormalizedMapCacheVerify(); +#endif + return true; +} + + bool Object::IsCompilationCacheTable() { return IsHashTable(); } @@ -1167,7 +1182,8 @@ HeapObject* JSObject::elements() { void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) { ASSERT(map()->has_fast_elements() == - (value->map() == Heap::fixed_array_map())); + (value->map() == Heap::fixed_array_map() || + value->map() == Heap::fixed_cow_array_map())); // In the assert below Dictionary is covered under FixedArray. ASSERT(value->IsFixedArray() || value->IsPixelArray() || value->IsExternalArray()); @@ -1397,6 +1413,7 @@ Object* FixedArray::get(int index) { void FixedArray::set(int index, Smi* value) { + ASSERT(map() != Heap::fixed_cow_array_map()); ASSERT(reinterpret_cast(value)->IsSmi()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(this, offset, value); @@ -1404,6 +1421,7 @@ void FixedArray::set(int index, Smi* value) { void FixedArray::set(int index, Object* value) { + ASSERT(map() != Heap::fixed_cow_array_map()); ASSERT(index >= 0 && index < this->length()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(this, offset, value); @@ -1420,6 +1438,7 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) { void FixedArray::set(int index, Object* value, WriteBarrierMode mode) { + ASSERT(map() != Heap::fixed_cow_array_map()); ASSERT(index >= 0 && index < this->length()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(this, offset, value); @@ -1428,6 +1447,7 @@ void FixedArray::set(int index, void FixedArray::fast_set(FixedArray* array, int index, Object* value) { + ASSERT(array->map() != Heap::raw_unchecked_fixed_cow_array_map()); ASSERT(index >= 0 && index < array->length()); ASSERT(!Heap::InNewSpace(value)); WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value); @@ -1435,6 +1455,7 @@ void FixedArray::fast_set(FixedArray* array, int index, Object* value) { void FixedArray::set_undefined(int index) { + ASSERT(map() != Heap::fixed_cow_array_map()); ASSERT(index >= 0 && index < this->length()); ASSERT(!Heap::InNewSpace(Heap::undefined_value())); WRITE_FIELD(this, kHeaderSize + index * kPointerSize, @@ -1443,6 +1464,7 @@ void FixedArray::set_undefined(int index) { void FixedArray::set_null(int index) { + ASSERT(map() != Heap::fixed_cow_array_map()); ASSERT(index >= 0 && index < this->length()); 
ASSERT(!Heap::InNewSpace(Heap::null_value())); WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::null_value()); @@ -1450,12 +1472,27 @@ void FixedArray::set_null(int index) { void FixedArray::set_the_hole(int index) { + ASSERT(map() != Heap::fixed_cow_array_map()); ASSERT(index >= 0 && index < this->length()); ASSERT(!Heap::InNewSpace(Heap::the_hole_value())); WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::the_hole_value()); } +void FixedArray::set_unchecked(int index, Smi* value) { + ASSERT(reinterpret_cast(value)->IsSmi()); + int offset = kHeaderSize + index * kPointerSize; + WRITE_FIELD(this, offset, value); +} + + +void FixedArray::set_null_unchecked(int index) { + ASSERT(index >= 0 && index < this->length()); + ASSERT(!Heap::InNewSpace(Heap::null_value())); + WRITE_FIELD(this, kHeaderSize + index * kPointerSize, Heap::null_value()); +} + + Object** FixedArray::data_start() { return HeapObject::RawField(this, kHeaderSize); } @@ -1637,6 +1674,7 @@ CAST_ACCESSOR(FixedArray) CAST_ACCESSOR(DescriptorArray) CAST_ACCESSOR(SymbolTable) CAST_ACCESSOR(JSFunctionResultCache) +CAST_ACCESSOR(NormalizedMapCache) CAST_ACCESSOR(CompilationCacheTable) CAST_ACCESSOR(CodeCacheHashTable) CAST_ACCESSOR(MapCache) @@ -2071,7 +2109,16 @@ void ExternalFloatArray::set(int index, float value) { } -INT_ACCESSORS(Map, visitor_id, kScavengerCallbackOffset) +int Map::visitor_id() { + return READ_BYTE_FIELD(this, kVisitorIdOffset); +} + + +void Map::set_visitor_id(int id) { + ASSERT(0 <= id && id < 256); + WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast(id)); +} + int Map::instance_size() { return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2; @@ -2089,20 +2136,28 @@ int Map::pre_allocated_property_fields() { int HeapObject::SizeFromMap(Map* map) { - InstanceType instance_type = map->instance_type(); + int instance_size = map->instance_size(); + if (instance_size != kVariableSizeSentinel) return instance_size; + // We can ignore the "symbol" bit becase it is only set for symbols + // and implies a string type. + int instance_type = static_cast(map->instance_type()) & ~kIsSymbolMask; // Only inline the most frequent cases. - if (instance_type == JS_OBJECT_TYPE || - (instance_type & (kIsNotStringMask | kStringRepresentationMask)) == - (kStringTag | kConsStringTag) || - instance_type == JS_ARRAY_TYPE) return map->instance_size(); if (instance_type == FIXED_ARRAY_TYPE) { return FixedArray::BodyDescriptor::SizeOf(map, this); } + if (instance_type == ASCII_STRING_TYPE) { + return SeqAsciiString::SizeFor( + reinterpret_cast(this)->length()); + } if (instance_type == BYTE_ARRAY_TYPE) { return reinterpret_cast(this)->ByteArraySize(); } - // Otherwise do the general size computation. 
- return SlowSizeFromMap(map); + if (instance_type == STRING_TYPE) { + return SeqTwoByteString::SizeFor( + reinterpret_cast(this)->length()); + } + ASSERT(instance_type == CODE_TYPE); + return reinterpret_cast(this)->CodeSize(); } @@ -2275,14 +2330,13 @@ int Code::arguments_count() { } -CodeStub::Major Code::major_key() { +int Code::major_key() { ASSERT(kind() == STUB || kind() == BINARY_OP_IC); - return static_cast(READ_BYTE_FIELD(this, - kStubMajorKeyOffset)); + return READ_BYTE_FIELD(this, kStubMajorKeyOffset); } -void Code::set_major_key(CodeStub::Major major) { +void Code::set_major_key(int major) { ASSERT(kind() == STUB || kind() == BINARY_OP_IC); ASSERT(0 <= major && major < 256); WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major); @@ -2380,6 +2434,12 @@ Code* Code::GetCodeFromTargetAddress(Address address) { } +Object* Code::GetObjectFromEntryAddress(Address location_of_address) { + return HeapObject:: + FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize); +} + + Object* Map::prototype() { return READ_FIELD(this, kPrototypeOffset); } @@ -2398,6 +2458,7 @@ Object* Map::GetFastElementsMap() { if (obj->IsFailure()) return obj; Map* new_map = Map::cast(obj); new_map->set_has_fast_elements(true); + Counters::map_slow_to_fast_elements.Increment(); return new_map; } @@ -2408,6 +2469,7 @@ Object* Map::GetSlowElementsMap() { if (obj->IsFailure()) return obj; Map* new_map = Map::cast(obj); new_map->set_has_fast_elements(false); + Counters::map_fast_to_slow_elements.Increment(); return new_map; } @@ -2539,6 +2601,7 @@ BOOL_ACCESSORS(SharedFunctionInfo, allows_lazy_compilation, kAllowLazyCompilation) + #if V8_HOST_ARCH_32_BIT SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset) SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count, @@ -2638,6 +2701,11 @@ Code* SharedFunctionInfo::code() { } +Code* SharedFunctionInfo::unchecked_code() { + return reinterpret_cast(READ_FIELD(this, kCodeOffset)); +} + + void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) { WRITE_FIELD(this, kCodeOffset, value); CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode); @@ -2684,20 +2752,38 @@ int SharedFunctionInfo::custom_call_generator_id() { } +int SharedFunctionInfo::code_age() { + return (compiler_hints() >> kCodeAgeShift) & kCodeAgeMask; +} + + +void SharedFunctionInfo::set_code_age(int code_age) { + set_compiler_hints(compiler_hints() | + ((code_age & kCodeAgeMask) << kCodeAgeShift)); +} + + bool JSFunction::IsBuiltin() { return context()->global()->IsJSBuiltinsObject(); } Code* JSFunction::code() { - return Code::cast(READ_FIELD(this, kCodeOffset)); + return Code::cast(unchecked_code()); +} + + +Code* JSFunction::unchecked_code() { + return reinterpret_cast( + Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset))); } void JSFunction::set_code(Code* value) { // Skip the write barrier because code is never in new space. 
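The rewritten SizeFromMap above hinges on kVariableSizeSentinel: maps of fixed-size objects carry their real instance size, while strings, fixed arrays, byte arrays and code objects carry the sentinel and have their size computed from the instance itself, with only the hottest cases inlined. A condensed sketch of that dispatch, with made-up object layouts and header sizes standing in for the real ones.

#include <cassert>
#include <cstdint>

namespace size_sketch {

static const int kVariableSizeSentinel = 0;
static const int kPointerSize = static_cast<int>(sizeof(void*));

enum InstanceType { JS_OBJECT_TYPE, FIXED_ARRAY_TYPE, ASCII_STRING_TYPE };

struct Map {
  InstanceType instance_type;
  int instance_size;  // kVariableSizeSentinel for variable-size objects
};

struct HeapObject {
  const Map* map;
  int length;  // element or character count; only used by the variable cases

  int SizeFromMap() const {
    int instance_size = map->instance_size;
    // Fast path: most objects know their size from the map alone.
    if (instance_size != kVariableSizeSentinel) return instance_size;
    // Variable-size objects compute the size from the instance.
    switch (map->instance_type) {
      case FIXED_ARRAY_TYPE:
        return 2 * kPointerSize + length * kPointerSize;  // header + slots
      case ASCII_STRING_TYPE:
        return 3 * kPointerSize + length;                 // header + chars
      default:
        assert(false);
        return 0;
    }
  }
};

}  // namespace size_sketch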
ASSERT(!Heap::InNewSpace(value)); - WRITE_FIELD(this, kCodeOffset, value); + Address entry = value->entry(); + WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast(entry)); } @@ -2711,6 +2797,12 @@ Object* JSFunction::unchecked_context() { } +SharedFunctionInfo* JSFunction::unchecked_shared() { + return reinterpret_cast( + READ_FIELD(this, kSharedFunctionInfoOffset)); +} + + void JSFunction::set_context(Object* value) { ASSERT(value == Heap::undefined_value() || value->IsContext()); WRITE_FIELD(this, kContextOffset, value); @@ -2867,7 +2959,7 @@ byte* Code::entry() { bool Code::contains(byte* pc) { return (instruction_start() <= pc) && - (pc < instruction_start() + instruction_size()); + (pc <= instruction_start() + instruction_size()); } @@ -2928,18 +3020,18 @@ void JSRegExp::SetDataAt(int index, Object* value) { JSObject::ElementsKind JSObject::GetElementsKind() { + if (map()->has_fast_elements()) { + ASSERT(elements()->map() == Heap::fixed_array_map() || + elements()->map() == Heap::fixed_cow_array_map()); + return FAST_ELEMENTS; + } HeapObject* array = elements(); if (array->IsFixedArray()) { - // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray. - if (array->map() == Heap::fixed_array_map()) { - ASSERT(map()->has_fast_elements()); - return FAST_ELEMENTS; - } + // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a + // FixedArray, but FAST_ELEMENTS is already handled above. ASSERT(array->IsDictionary()); - ASSERT(!map()->has_fast_elements()); return DICTIONARY_ELEMENTS; } - ASSERT(!map()->has_fast_elements()); if (array->IsExternalArray()) { switch (array->map()->instance_type()) { case EXTERNAL_BYTE_ARRAY_TYPE: @@ -3042,6 +3134,19 @@ bool JSObject::AllowsSetElementsLength() { } +Object* JSObject::EnsureWritableFastElements() { + ASSERT(HasFastElements()); + FixedArray* elems = FixedArray::cast(elements()); + if (elems->map() != Heap::fixed_cow_array_map()) return elems; + Object* writable_elems = Heap::CopyFixedArray(elems); + if (writable_elems->IsFailure()) return writable_elems; + FixedArray::cast(writable_elems)->set_map(Heap::fixed_array_map()); + set_elements(FixedArray::cast(writable_elems)); + Counters::cow_arrays_converted.Increment(); + return writable_elems; +} + + StringDictionary* JSObject::property_dictionary() { ASSERT(!HasFastProperties()); return StringDictionary::cast(properties()); diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc index 293c9bf8de..c35e02cc9c 100644 --- a/deps/v8/src/objects-visiting.cc +++ b/deps/v8/src/objects-visiting.cc @@ -101,7 +101,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId( case JS_VALUE_TYPE: case JS_ARRAY_TYPE: case JS_REGEXP_TYPE: - case JS_FUNCTION_TYPE: case JS_GLOBAL_PROXY_TYPE: case JS_GLOBAL_OBJECT_TYPE: case JS_BUILTINS_OBJECT_TYPE: @@ -109,6 +108,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId( kVisitJSObjectGeneric, instance_size); + case JS_FUNCTION_TYPE: + return kVisitJSFunction; + case HEAP_NUMBER_TYPE: case PIXEL_ARRAY_TYPE: case EXTERNAL_BYTE_ARRAY_TYPE: diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h index 6280bac4ed..a6d6b12c69 100644 --- a/deps/v8/src/objects-visiting.h +++ b/deps/v8/src/objects-visiting.h @@ -100,11 +100,15 @@ class StaticVisitorBase : public AllStatic { kVisitMap, kVisitPropertyCell, kVisitSharedFunctionInfo, + kVisitJSFunction, kVisitorIdCount, kMinObjectSizeInWords = 2 }; + // Visitor ID should fit in one byte. 
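JSFunction::set_code above stops storing a tagged Code pointer and instead writes the address of the code's first instruction into the code entry slot; unchecked_code recovers the Code object by subtracting the header size again, which is what Code::GetObjectFromEntryAddress does. A small sketch of that round trip, with an invented kHeaderSize and raw pointers in place of tagged ones.

#include <cassert>
#include <cstdint>

namespace entry_sketch {

typedef uint8_t* Address;

struct Code {
  static const int kHeaderSize = 32;      // illustrative, not V8's real value
  uint8_t bytes[kHeaderSize + 64];        // header followed by instructions
  Address entry() { return bytes + kHeaderSize; }
};

struct JSFunction {
  Address code_entry;                     // direct pointer to the instructions

  void set_code(Code* code) { code_entry = code->entry(); }

  // Mirror of Code::GetObjectFromEntryAddress: step back over the header to
  // get from the entry address to the start of the Code object.
  Code* unchecked_code() {
    return reinterpret_cast<Code*>(code_entry - Code::kHeaderSize);
  }
};

inline void RoundTripCheck() {
  Code code;
  JSFunction fn;
  fn.set_code(&code);
  assert(fn.unchecked_code() == &code);   // entry - kHeaderSize == object start
}

}  // namespace entry_sketch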
+ STATIC_ASSERT(kVisitorIdCount <= 256); + // Determine which specialized visitor should be used for given instance type // and instance type. static VisitorId GetVisitorId(int instance_type, int instance_size); @@ -198,13 +202,16 @@ class FlexibleBodyVisitor : public BodyVisitorBase { public: static inline ReturnType Visit(Map* map, HeapObject* object) { int object_size = BodyDescriptor::SizeOf(map, object); - IteratePointers(object, BodyDescriptor::kStartOffset, object_size); + BodyVisitorBase::IteratePointers( + object, BodyDescriptor::kStartOffset, object_size); return static_cast(object_size); } template static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) { - IteratePointers(object, BodyDescriptor::kStartOffset, object_size); + ASSERT(BodyDescriptor::SizeOf(map, object) == object_size); + BodyVisitorBase::IteratePointers( + object, BodyDescriptor::kStartOffset, object_size); return static_cast(object_size); } }; @@ -214,9 +221,8 @@ template class FixedBodyVisitor : public BodyVisitorBase { public: static inline ReturnType Visit(Map* map, HeapObject* object) { - IteratePointers(object, - BodyDescriptor::kStartOffset, - BodyDescriptor::kEndOffset); + BodyVisitorBase::IteratePointers( + object, BodyDescriptor::kStartOffset, BodyDescriptor::kEndOffset); return static_cast(BodyDescriptor::kSize); } }; @@ -268,6 +274,10 @@ class StaticNewSpaceVisitor : public StaticVisitorBase { table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString); + table_.Register(kVisitJSFunction, + &JSObjectVisitor:: + template VisitSpecialized); + table_.RegisterSpecializations(); @@ -275,8 +285,8 @@ class StaticNewSpaceVisitor : public StaticVisitorBase { kVisitJSObject, kVisitJSObjectGeneric>(); table_.RegisterSpecializations(); + kVisitStruct, + kVisitStructGeneric>(); } static inline int IterateBody(Map* map, HeapObject* obj) { diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 5687a3a53f..ef5185145a 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -54,7 +54,8 @@ const int kGetterIndex = 0; const int kSetterIndex = 1; -static Object* CreateJSValue(JSFunction* constructor, Object* value) { +MUST_USE_RESULT static Object* CreateJSValue(JSFunction* constructor, + Object* value) { Object* result = Heap::AllocateJSObject(constructor); if (result->IsFailure()) return result; JSValue::cast(result)->set_value(value); @@ -1024,38 +1025,6 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) { } -int HeapObject::SlowSizeFromMap(Map* map) { - // Avoid calling functions such as FixedArray::cast during GC, which - // read map pointer of this object again. 
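The visitor id is now kept in a single spare byte of the map (the setter asserts 0 <= id < 256, and the static assertion above pins the enum to that budget), so the GC can dispatch through a table indexed by one byte instead of re-deriving the visitor from the instance type. A small sketch of keeping a dispatch id in a header byte; the header layout and enum members here are invented for illustration.

#include <cassert>
#include <cstdint>

namespace visitor_id_sketch {

enum VisitorId {
  kVisitFixedArray,
  kVisitJSObject,
  kVisitJSFunction,
  kVisitorIdCount
};

// The whole point of squeezing the id into one byte: it has to fit.
static_assert(kVisitorIdCount <= 256, "visitor id must fit in one byte");

struct MapHeader {
  uint8_t instance_size_in_words;
  uint8_t inobject_properties;
  uint8_t pre_allocated_property_fields;
  uint8_t visitor_id;  // the spare byte the patch starts using

  int GetVisitorId() const { return visitor_id; }
  void SetVisitorId(int id) {
    assert(0 <= id && id < 256);
    visitor_id = static_cast<uint8_t>(id);
  }
};

// Dispatch goes straight through a table indexed by the byte-sized id.
typedef int (*VisitFn)(const MapHeader* map, void* object);

inline int Dispatch(const MapHeader* map, void* object, VisitFn table[]) {
  return table[map->GetVisitorId()](map, object);
}

}  // namespace visitor_id_sketch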
- InstanceType instance_type = map->instance_type(); - uint32_t type = static_cast(instance_type); - - if (instance_type < FIRST_NONSTRING_TYPE - && (StringShape(instance_type).IsSequential())) { - if ((type & kStringEncodingMask) == kAsciiStringTag) { - SeqAsciiString* seq_ascii_this = reinterpret_cast(this); - return seq_ascii_this->SeqAsciiStringSize(instance_type); - } else { - SeqTwoByteString* self = reinterpret_cast(this); - return self->SeqTwoByteStringSize(instance_type); - } - } - - switch (instance_type) { - case FIXED_ARRAY_TYPE: - return FixedArray::BodyDescriptor::SizeOf(map, this); - case BYTE_ARRAY_TYPE: - return reinterpret_cast(this)->ByteArraySize(); - case CODE_TYPE: - return reinterpret_cast(this)->CodeSize(); - case MAP_TYPE: - return Map::kSize; - default: - return map->instance_size(); - } -} - - void HeapObject::Iterate(ObjectVisitor* v) { // Handle header IteratePointer(v, kMapOffset); @@ -1098,12 +1067,15 @@ void HeapObject::IterateBody(InstanceType type, int object_size, case JS_VALUE_TYPE: case JS_ARRAY_TYPE: case JS_REGEXP_TYPE: - case JS_FUNCTION_TYPE: case JS_GLOBAL_PROXY_TYPE: case JS_GLOBAL_OBJECT_TYPE: case JS_BUILTINS_OBJECT_TYPE: JSObject::BodyDescriptor::IterateBody(this, object_size, v); break; + case JS_FUNCTION_TYPE: + reinterpret_cast(this) + ->JSFunctionIterateBody(object_size, v); + break; case ODDBALL_TYPE: Oddball::BodyDescriptor::IterateBody(this, v); break; @@ -1148,11 +1120,6 @@ void HeapObject::IterateBody(InstanceType type, int object_size, } -void HeapObject::IterateStructBody(int object_size, ObjectVisitor* v) { - IteratePointers(v, HeapObject::kHeaderSize, object_size); -} - - Object* HeapNumber::HeapNumberToBoolean() { // NaN, +0, and -0 should return the false object #if __BYTE_ORDER == __LITTLE_ENDIAN @@ -2132,6 +2099,121 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) { } +bool NormalizedMapCache::IsCacheable(JSObject* object) { + // Caching for global objects is not worth it (there are too few of them). + return !object->IsGlobalObject(); +} + + +Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) { + Object* result; + + Map* fast = obj->map(); + if (!IsCacheable(obj)) { + result = fast->CopyNormalized(mode); + if (result->IsFailure()) return result; + } else { + int index = Hash(fast) % kEntries; + result = get(index); + + if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) { +#ifdef DEBUG + if (FLAG_enable_slow_asserts) { + // Make sure that the new slow map has exactly the same hash as the + // original fast map. This way we can use hash to check if a slow map + // is already in the hash (see Contains method). + ASSERT(Hash(fast) == Hash(Map::cast(result))); + // The cached map should match newly created normalized map bit-by-bit. + Object* fresh = fast->CopyNormalized(mode); + if (!fresh->IsFailure()) { + ASSERT(memcmp(Map::cast(fresh)->address(), + Map::cast(result)->address(), + Map::kSize) == 0); + } + } +#endif + return result; + } + + result = fast->CopyNormalized(mode); + if (result->IsFailure()) return result; + set(index, result); + } + Counters::normalized_maps.Increment(); + + return result; +} + + +bool NormalizedMapCache::Contains(Map* map) { + // If the map is present in the cache it can only be at one place: + // at the index calculated from the hash. We assume that a slow map has the + // same hash as a fast map it has been generated from. 
+ int index = Hash(map) % kEntries; + return get(index) == map; +} + + +void NormalizedMapCache::Clear() { + int entries = length(); + for (int i = 0; i != entries; i++) { + set_undefined(i); + } +} + + +int NormalizedMapCache::Hash(Map* fast) { + // For performance reasons we only hash the 3 most variable fields of a map: + // constructor, prototype and bit_field2. + + // Shift away the tag. + int hash = (static_cast( + reinterpret_cast(fast->constructor())) >> 2); + + // XOR-ing the prototype and constructor directly yields too many zero bits + // when the two pointers are close (which is fairly common). + // To avoid this we shift the prototype 4 bits relatively to the constructor. + hash ^= (static_cast( + reinterpret_cast(fast->prototype())) << 2); + + return hash ^ (hash >> 16) ^ fast->bit_field2(); +} + + +bool NormalizedMapCache::CheckHit(Map* slow, + Map* fast, + PropertyNormalizationMode mode) { +#ifdef DEBUG + slow->NormalizedMapVerify(); +#endif + return + slow->constructor() == fast->constructor() && + slow->prototype() == fast->prototype() && + slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ? + 0 : + fast->inobject_properties()) && + slow->instance_type() == fast->instance_type() && + slow->bit_field() == fast->bit_field() && + slow->bit_field2() == fast->bit_field2(); +} + + +Object* JSObject::UpdateMapCodeCache(String* name, Code* code) { + if (!HasFastProperties() && + NormalizedMapCache::IsCacheable(this) && + Top::context()->global_context()->normalized_map_cache()-> + Contains(map())) { + // Replace the map with the identical copy that can be safely modified. + Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES); + if (obj->IsFailure()) return obj; + Counters::normalized_maps.Increment(); + + set_map(Map::cast(obj)); + } + return map()->UpdateCodeCache(name, code); +} + + Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode, int expected_additional_properties) { if (!HasFastProperties()) return this; @@ -2196,28 +2278,22 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode, int index = map()->instance_descriptors()->NextEnumerationIndex(); dictionary->SetNextEnumerationIndex(index); - // Allocate new map. - obj = map()->CopyDropDescriptors(); + obj = Top::context()->global_context()-> + normalized_map_cache()->Get(this, mode); if (obj->IsFailure()) return obj; Map* new_map = Map::cast(obj); - // Clear inobject properties if needed by adjusting the instance size and - // putting in a filler object instead of the inobject properties. - if (mode == CLEAR_INOBJECT_PROPERTIES && map()->inobject_properties() > 0) { - int instance_size_delta = map()->inobject_properties() * kPointerSize; - int new_instance_size = map()->instance_size() - instance_size_delta; - new_map->set_inobject_properties(0); - new_map->set_instance_size(new_instance_size); - new_map->set_visitor_id(StaticVisitorBase::GetVisitorId(new_map)); - Heap::CreateFillerObjectAt(this->address() + new_instance_size, - instance_size_delta); - } - new_map->set_unused_property_fields(0); - // We have now successfully allocated all the necessary objects. // Changes can now be made with the guarantee that all of them take effect. + + // Resize the object in the heap if necessary. 
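The Hash function above deliberately shifts the prototype before XOR-ing it with the constructor, because two heap pointers allocated close together share most of their bits and would otherwise cancel to near zero. A standalone sketch of the same mixing on plain pointers; the shift amounts and the fold follow the hunk, while the unsigned arithmetic and scaffolding are simplifications of my own.

#include <cstdint>

namespace map_hash_sketch {

static const int kEntries = 64;

// Mix the three most variable map fields: constructor, prototype, bit_field2.
inline uint32_t HashFastMap(const void* constructor, const void* prototype,
                            uint8_t bit_field2) {
  // Shift away the low tag bits of the constructor pointer.
  uint32_t hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(constructor)) >> 2;
  // Offset the prototype by 4 bits relative to the constructor so that two
  // nearby pointers do not XOR to a mostly zero value.
  hash ^= static_cast<uint32_t>(reinterpret_cast<uintptr_t>(prototype)) << 2;
  // Fold in the upper half and the bit field.
  return hash ^ (hash >> 16) ^ bit_field2;
}

// A map can only ever live in one slot, which is what makes Contains a
// single probe: recompute the hash and compare against that one entry.
inline int CacheIndex(const void* constructor, const void* prototype,
                      uint8_t bit_field2) {
  return static_cast<int>(
      HashFastMap(constructor, prototype, bit_field2) % kEntries);
}

}  // namespace map_hash_sketch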
+ int new_instance_size = new_map->instance_size(); + int instance_size_delta = map()->instance_size() - new_instance_size; + ASSERT(instance_size_delta >= 0); + Heap::CreateFillerObjectAt(this->address() + new_instance_size, + instance_size_delta); + set_map(new_map); - map()->set_instance_descriptors(Heap::empty_descriptor_array()); set_properties(dictionary); @@ -2338,6 +2414,8 @@ Object* JSObject::DeleteElementPostInterceptor(uint32_t index, ASSERT(!HasPixelElements() && !HasExternalArrayElements()); switch (GetElementsKind()) { case FAST_ELEMENTS: { + Object* obj = EnsureWritableFastElements(); + if (obj->IsFailure()) return obj; uint32_t length = IsJSArray() ? static_cast(Smi::cast(JSArray::cast(this)->length())->value()) : static_cast(FixedArray::cast(elements())->length()); @@ -2418,6 +2496,8 @@ Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) { switch (GetElementsKind()) { case FAST_ELEMENTS: { + Object* obj = EnsureWritableFastElements(); + if (obj->IsFailure()) return obj; uint32_t length = IsJSArray() ? static_cast(Smi::cast(JSArray::cast(this)->length())->value()) : static_cast(FixedArray::cast(elements())->length()); @@ -2601,7 +2681,8 @@ bool JSObject::ReferencesObject(Object* obj) { Object* JSObject::PreventExtensions() { // If there are fast elements we normalize. if (HasFastElements()) { - NormalizeElements(); + Object* ok = NormalizeElements(); + if (ok->IsFailure()) return ok; } // Make sure that we never go back to fast case. element_dictionary()->set_requires_slow_elements(); @@ -3113,6 +3194,33 @@ Object* Map::CopyDropDescriptors() { } +Object* Map::CopyNormalized(PropertyNormalizationMode mode) { + int new_instance_size = instance_size(); + if (mode == CLEAR_INOBJECT_PROPERTIES) { + new_instance_size -= inobject_properties() * kPointerSize; + } + + Object* result = Heap::AllocateMap(instance_type(), new_instance_size); + if (result->IsFailure()) return result; + + if (mode != CLEAR_INOBJECT_PROPERTIES) { + Map::cast(result)->set_inobject_properties(inobject_properties()); + } + + Map::cast(result)->set_prototype(prototype()); + Map::cast(result)->set_constructor(constructor()); + + Map::cast(result)->set_bit_field(bit_field()); + Map::cast(result)->set_bit_field2(bit_field2()); + +#ifdef DEBUG + Map::cast(result)->NormalizedMapVerify(); +#endif + + return result; +} + + Object* Map::CopyDropTransitions() { Object* new_map = CopyDropDescriptors(); if (new_map->IsFailure()) return new_map; @@ -4880,24 +4988,21 @@ bool String::SlowAsArrayIndex(uint32_t* index) { } -static inline uint32_t HashField(uint32_t hash, - bool is_array_index, - int length = -1) { - uint32_t result = (hash << String::kHashShift); - if (is_array_index) { - // For array indexes mix the length into the hash as an array index could - // be zero. - ASSERT(length > 0); - ASSERT(length <= String::kMaxArrayIndexSize); - ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < - (1 << String::kArrayIndexValueBits)); - ASSERT(String::kMaxArrayIndexSize < (1 << String::kArrayIndexValueBits)); - result &= ~String::kIsNotArrayIndexMask; - result |= length << String::kArrayIndexHashLengthShift; - } else { - result |= String::kIsNotArrayIndexMask; - } - return result; +uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) { + // For array indexes mix the length into the hash as an array index could + // be zero. 
+ ASSERT(length > 0); + ASSERT(length <= String::kMaxArrayIndexSize); + ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < + (1 << String::kArrayIndexValueBits)); + + value <<= String::kHashShift; + value |= length << String::kArrayIndexHashLengthShift; + + ASSERT((value & String::kIsNotArrayIndexMask) == 0); + ASSERT((length > String::kMaxCachedArrayIndexLength) || + (value & String::kContainsCachedArrayIndexMask) == 0); + return value; } @@ -4905,14 +5010,11 @@ uint32_t StringHasher::GetHashField() { ASSERT(is_valid()); if (length_ <= String::kMaxHashCalcLength) { if (is_array_index()) { - return v8::internal::HashField(array_index(), true, length_); - } else { - return v8::internal::HashField(GetHash(), false); + return MakeArrayIndexHash(array_index(), length_); } - uint32_t payload = v8::internal::HashField(GetHash(), false); - return payload; + return (GetHash() << String::kHashShift) | String::kIsNotArrayIndexMask; } else { - return v8::internal::HashField(length_, false); + return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask; } } @@ -5009,8 +5111,8 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) { ASSERT(target->IsHeapObject()); if (!target->IsMarked()) { ASSERT(target->IsMap()); - contents->set(i + 1, NullDescriptorDetails); - contents->set_null(i); + contents->set_unchecked(i + 1, NullDescriptorDetails); + contents->set_null_unchecked(i); ASSERT(target->prototype() == this || target->prototype() == real_prototype); // Getter prototype() is read-only, set_prototype() has side effects. @@ -5021,6 +5123,15 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) { } +void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) { + // Iterate over all fields in the body but take care in dealing with + // the code entry. 
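MakeArrayIndexHash above packs both the numeric value and the digit count of an array-index string into the string's hash field, so the index can later be read straight back out of the hash. A compact sketch of the packing; the field widths and flag layout here are illustrative assumptions, not String's real constants.

#include <cassert>
#include <cstdint>

namespace index_hash_sketch {

// Illustrative layout: low bits hold flags, then the index value, then the
// length of its decimal representation.
static const int kHashShift = 2;               // room for two flag bits
static const int kArrayIndexValueBits = 24;
static const int kLengthShift = kHashShift + kArrayIndexValueBits;
static const uint32_t kIsNotArrayIndexMask = 1u << 0;

inline uint32_t MakeArrayIndexHash(uint32_t value, int length) {
  assert(length > 0);
  assert(value < (1u << kArrayIndexValueBits));
  uint32_t field = value << kHashShift;                      // value above flags
  field |= static_cast<uint32_t>(length) << kLengthShift;    // digit count on top
  assert((field & kIsNotArrayIndexMask) == 0);               // flag stays clear
  return field;
}

// The payoff: converting the string to an index is a shift and a mask.
inline uint32_t ArrayIndexFromHash(uint32_t field) {
  return (field >> kHashShift) & ((1u << kArrayIndexValueBits) - 1);
}

}  // namespace index_hash_sketch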
+ IteratePointers(v, kPropertiesOffset, kCodeEntryOffset); + v->VisitCodeEntry(this->address() + kCodeEntryOffset); + IteratePointers(v, kCodeEntryOffset + kPointerSize, object_size); +} + + Object* JSFunction::SetInstancePrototype(Object* value) { ASSERT(value->IsJSObject()); @@ -5037,7 +5148,6 @@ Object* JSFunction::SetInstancePrototype(Object* value) { } - Object* JSFunction::SetPrototype(Object* value) { ASSERT(should_have_prototype()); Object* construct_prototype = value; @@ -5095,7 +5205,7 @@ Object* Oddball::Initialize(const char* to_string, Object* to_number) { bool SharedFunctionInfo::HasSourceCode() { return !script()->IsUndefined() && - !Script::cast(script())->source()->IsUndefined(); + !reinterpret_cast(script())->source()->IsUndefined(); } @@ -5265,6 +5375,16 @@ void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) { } +void ObjectVisitor::VisitCodeEntry(Address entry_address) { + Object* code = Code::GetObjectFromEntryAddress(entry_address); + Object* old_code = code; + VisitPointer(&code); + if (code != old_code) { + Memory::Address_at(entry_address) = reinterpret_cast(code)->entry(); + } +} + + void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) { ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsPatchedReturnSequence()) || @@ -5591,6 +5711,8 @@ Object* JSObject::SetElementsLength(Object* len) { int old_capacity = FixedArray::cast(elements())->length(); if (value <= old_capacity) { if (IsJSArray()) { + Object* obj = EnsureWritableFastElements(); + if (obj->IsFailure()) return obj; int old_length = FastD2I(JSArray::cast(this)->length()->Number()); // NOTE: We may be able to optimize this by removing the // last part of the elements backing storage array and @@ -6051,7 +6173,9 @@ Object* JSObject::SetElementWithCallback(Object* structure, Object* JSObject::SetFastElement(uint32_t index, Object* value) { ASSERT(HasFastElements()); - FixedArray* elms = FixedArray::cast(elements()); + Object* elms_obj = EnsureWritableFastElements(); + if (elms_obj->IsFailure()) return elms_obj; + FixedArray* elms = FixedArray::cast(elms_obj); uint32_t elms_length = static_cast(elms->length()); if (!IsJSArray() && (index >= elms_length || elms->get(index)->IsTheHole())) { @@ -6095,6 +6219,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) { return SetElement(index, value); } + Object* JSObject::SetElement(uint32_t index, Object* value) { // Check access rights if needed. if (IsAccessCheckNeeded() && @@ -7573,6 +7698,9 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) { set_map(new_map); set_elements(fast_elements); + } else { + Object* obj = EnsureWritableFastElements(); + if (obj->IsFailure()) return obj; } ASSERT(HasFastElements()); diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index d2f6d3559b..7f6538cf9c 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -29,7 +29,6 @@ #define V8_OBJECTS_H_ #include "builtins.h" -#include "code-stubs.h" #include "smart-pointer.h" #include "unicode-inl.h" #if V8_TARGET_ARCH_ARM @@ -201,6 +200,10 @@ enum PropertyNormalizationMode { }; +// Instance size sentinel for objects of variable size. +static const int kVariableSizeSentinel = 0; + + // All Maps have a field instance_type containing a InstanceType. // It describes the type of the instances. 
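JSFunctionIterateBody and the new VisitCodeEntry hook exist because the code entry slot holds a raw instruction address rather than a tagged pointer: the visitor converts it back to the containing object, lets the GC mark or move that object, and then writes the (possibly new) entry address back. A self-contained sketch of that visit step; the header arithmetic and visitor interface here are simplified stand-ins.

#include <cstdint>

namespace visit_entry_sketch {

typedef uint8_t* Address;

struct Code {
  static const int kHeaderSize = 32;   // illustrative
  uint8_t body[kHeaderSize + 64];
  Address entry() { return body + kHeaderSize; }
};

// Minimal stand-in for the GC visitor: VisitPointer may replace the pointer
// it is shown, for example with the object's new location after evacuation.
struct ObjectVisitor {
  virtual ~ObjectVisitor() {}
  virtual void VisitPointer(void** p) = 0;

  // Mirrors ObjectVisitor::VisitCodeEntry in the patch: turn the raw entry
  // address into an object pointer, visit it, and write back the entry of
  // whatever object the visitor left behind.
  void VisitCodeEntry(Address* entry_address) {
    void* code = *entry_address - Code::kHeaderSize;
    void* old_code = code;
    VisitPointer(&code);
    if (code != old_code) {
      *entry_address = static_cast<Code*>(code)->entry();
    }
  }
};

}  // namespace visit_entry_sketch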
// @@ -230,19 +233,21 @@ enum PropertyNormalizationMode { V(CONS_SYMBOL_TYPE) \ V(CONS_ASCII_SYMBOL_TYPE) \ V(EXTERNAL_SYMBOL_TYPE) \ + V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \ V(EXTERNAL_ASCII_SYMBOL_TYPE) \ V(STRING_TYPE) \ V(ASCII_STRING_TYPE) \ V(CONS_STRING_TYPE) \ V(CONS_ASCII_STRING_TYPE) \ V(EXTERNAL_STRING_TYPE) \ + V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \ V(EXTERNAL_ASCII_STRING_TYPE) \ V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \ \ V(MAP_TYPE) \ V(CODE_TYPE) \ - V(JS_GLOBAL_PROPERTY_CELL_TYPE) \ V(ODDBALL_TYPE) \ + V(JS_GLOBAL_PROPERTY_CELL_TYPE) \ \ V(HEAP_NUMBER_TYPE) \ V(PROXY_TYPE) \ @@ -260,11 +265,9 @@ enum PropertyNormalizationMode { V(EXTERNAL_FLOAT_ARRAY_TYPE) \ V(FILLER_TYPE) \ \ - V(FIXED_ARRAY_TYPE) \ V(ACCESSOR_INFO_TYPE) \ V(ACCESS_CHECK_INFO_TYPE) \ V(INTERCEPTOR_INFO_TYPE) \ - V(SHARED_FUNCTION_INFO_TYPE) \ V(CALL_HANDLER_INFO_TYPE) \ V(FUNCTION_TEMPLATE_INFO_TYPE) \ V(OBJECT_TEMPLATE_INFO_TYPE) \ @@ -273,6 +276,9 @@ enum PropertyNormalizationMode { V(SCRIPT_TYPE) \ V(CODE_CACHE_TYPE) \ \ + V(FIXED_ARRAY_TYPE) \ + V(SHARED_FUNCTION_INFO_TYPE) \ + \ V(JS_VALUE_TYPE) \ V(JS_OBJECT_TYPE) \ V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \ @@ -301,11 +307,11 @@ enum PropertyNormalizationMode { // iterate over them. #define STRING_TYPE_LIST(V) \ V(SYMBOL_TYPE, \ - SeqTwoByteString::kAlignedSize, \ + kVariableSizeSentinel, \ symbol, \ Symbol) \ V(ASCII_SYMBOL_TYPE, \ - SeqAsciiString::kAlignedSize, \ + kVariableSizeSentinel, \ ascii_symbol, \ AsciiSymbol) \ V(CONS_SYMBOL_TYPE, \ @@ -329,11 +335,11 @@ enum PropertyNormalizationMode { external_ascii_symbol, \ ExternalAsciiSymbol) \ V(STRING_TYPE, \ - SeqTwoByteString::kAlignedSize, \ + kVariableSizeSentinel, \ string, \ String) \ V(ASCII_STRING_TYPE, \ - SeqAsciiString::kAlignedSize, \ + kVariableSizeSentinel, \ ascii_string, \ AsciiString) \ V(CONS_STRING_TYPE, \ @@ -355,7 +361,7 @@ enum PropertyNormalizationMode { V(EXTERNAL_ASCII_STRING_TYPE, \ ExternalAsciiString::kSize, \ external_ascii_string, \ - ExternalAsciiString) \ + ExternalAsciiString) // A struct is a simple object a set of object-valued fields. Including an // object type in this causes the compiler to generate most of the boilerplate @@ -631,6 +637,7 @@ class Object BASE_EMBEDDED { inline bool IsDictionary(); inline bool IsSymbolTable(); inline bool IsJSFunctionResultCache(); + inline bool IsNormalizedMapCache(); inline bool IsCompilationCacheTable(); inline bool IsCodeCacheHashTable(); inline bool IsMapCache(); @@ -1015,10 +1022,6 @@ class HeapObject: public Object { // object, and so is safe to call while the map pointer is modified. void IterateBody(InstanceType type, int object_size, ObjectVisitor* v); - // This method only applies to struct objects. Iterates over all the fields - // of this struct. - void IterateStructBody(int object_size, ObjectVisitor* v); - // Returns the heap object's size in bytes inline int Size(); @@ -1097,10 +1100,6 @@ class HeapObject: public Object { // as above, for the single element at "offset" inline void IteratePointer(ObjectVisitor* v, int offset); - // Computes the object size from the map. - // Should only be used from SizeFromMap. - int SlowSizeFromMap(Map* map); - private: DISALLOW_IMPLICIT_CONSTRUCTORS(HeapObject); }; @@ -1211,7 +1210,9 @@ class JSObject: public HeapObject { public: enum DeleteMode { NORMAL_DELETION, FORCE_DELETION }; enum ElementsKind { + // The only "fast" kind. FAST_ELEMENTS, + // All the kinds below are "slow". 
DICTIONARY_ELEMENTS, PIXEL_ELEMENTS, EXTERNAL_BYTE_ELEMENTS, @@ -1232,8 +1233,21 @@ class JSObject: public HeapObject { inline StringDictionary* property_dictionary(); // Gets slow properties. // [elements]: The elements (properties with names that are integers). - // elements is a FixedArray in the fast case, a Dictionary in the slow - // case, and a PixelArray or ExternalArray in special cases. + // + // Elements can be in two general modes: fast and slow. Each mode + // corrensponds to a set of object representations of elements that + // have something in common. + // + // In the fast mode elements is a FixedArray and so each element can + // be quickly accessed. This fact is used in the generated code. The + // elements array can have one of the two maps in this mode: + // fixed_array_map or fixed_cow_array_map (for copy-on-write + // arrays). In the latter case the elements array may be shared by a + // few objects and so before writing to any element the array must + // be copied. Use EnsureWritableFastElements in this case. + // + // In the slow mode elements is either a NumberDictionary or a + // PixelArray or an ExternalArray. DECL_ACCESSORS(elements, HeapObject) inline void initialize_elements(); inline Object* ResetElements(); @@ -1251,6 +1265,8 @@ class JSObject: public HeapObject { inline bool HasExternalFloatElements(); inline bool AllowsSetElementsLength(); inline NumberDictionary* element_dictionary(); // Gets slow elements. + // Requires: this->HasFastElements(). + inline Object* EnsureWritableFastElements(); // Collects elements starting at index 0. // Undefined values are placed after non-undefined values. @@ -1258,7 +1274,7 @@ class JSObject: public HeapObject { Object* PrepareElementsForSort(uint32_t limit); // As PrepareElementsForSort, but only on objects where elements is // a dictionary, and it will stay a dictionary. - Object* PrepareSlowElementsForSort(uint32_t limit); + MUST_USE_RESULT Object* PrepareSlowElementsForSort(uint32_t limit); Object* SetProperty(String* key, Object* value, @@ -1296,12 +1312,13 @@ class JSObject: public HeapObject { // Sets the property value in a normalized object given (key, value, details). // Handles the special representation of JS global objects. - Object* SetNormalizedProperty(String* name, - Object* value, - PropertyDetails details); + MUST_USE_RESULT Object* SetNormalizedProperty(String* name, + Object* value, + PropertyDetails details); // Deletes the named property in a normalized object. - Object* DeleteNormalizedProperty(String* name, DeleteMode mode); + MUST_USE_RESULT Object* DeleteNormalizedProperty(String* name, + DeleteMode mode); // Returns the class name ([[Class]] property in the specification). String* class_name(); @@ -1319,11 +1336,13 @@ class JSObject: public HeapObject { String* name); PropertyAttributes GetLocalPropertyAttribute(String* name); - Object* DefineAccessor(String* name, bool is_getter, JSFunction* fun, - PropertyAttributes attributes); + MUST_USE_RESULT Object* DefineAccessor(String* name, + bool is_getter, + JSFunction* fun, + PropertyAttributes attributes); Object* LookupAccessor(String* name, bool is_getter); - Object* DefineAccessor(AccessorInfo* info); + MUST_USE_RESULT Object* DefineAccessor(AccessorInfo* info); // Used from Object::GetProperty(). 
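The elements comment above describes the two modes; the copy-on-write case is the subtle one: a fast-elements backing store carrying fixed_cow_array_map may be shared by several objects, so EnsureWritableFastElements copies it and swaps in the ordinary map before the first in-place write. A stripped-down model of the pattern, using a shared flag in place of the map check; the class and field names are invented for the example.

#include <memory>
#include <vector>

namespace cow_sketch {

// Backing store shared between objects until one of them needs to write.
struct ElementsStore {
  std::vector<int> values;
  bool copy_on_write;  // plays the role of the fixed_cow_array_map check
};

class ObjectModel {
 public:
  explicit ObjectModel(std::shared_ptr<ElementsStore> elements)
      : elements_(elements) {}

  int Get(size_t i) const { return elements_->values[i]; }

  // Mirrors EnsureWritableFastElements: copy the shared store and mark the
  // copy writable before the first mutation.
  void Set(size_t i, int value) {
    if (elements_->copy_on_write) {
      std::shared_ptr<ElementsStore> writable(new ElementsStore(*elements_));
      writable->copy_on_write = false;
      elements_ = writable;
    }
    elements_->values[i] = value;
  }

 private:
  std::shared_ptr<ElementsStore> elements_;
};

}  // namespace cow_sketch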
Object* GetPropertyWithFailedAccessCheck(Object* receiver, @@ -1374,8 +1393,8 @@ class JSObject: public HeapObject { inline Object* GetHiddenPropertiesObject(); inline Object* SetHiddenPropertiesObject(Object* hidden_obj); - Object* DeleteProperty(String* name, DeleteMode mode); - Object* DeleteElement(uint32_t index, DeleteMode mode); + MUST_USE_RESULT Object* DeleteProperty(String* name, DeleteMode mode); + MUST_USE_RESULT Object* DeleteElement(uint32_t index, DeleteMode mode); // Tests for the fast common case for property enumeration. bool IsSimpleEnum(); @@ -1403,19 +1422,20 @@ class JSObject: public HeapObject { bool HasElementWithInterceptor(JSObject* receiver, uint32_t index); bool HasElementPostInterceptor(JSObject* receiver, uint32_t index); - Object* SetFastElement(uint32_t index, Object* value); + MUST_USE_RESULT Object* SetFastElement(uint32_t index, Object* value); // Set the index'th array element. // A Failure object is returned if GC is needed. - Object* SetElement(uint32_t index, Object* value); + MUST_USE_RESULT Object* SetElement(uint32_t index, Object* value); // Returns the index'th element. // The undefined object if index is out of bounds. Object* GetElementWithReceiver(JSObject* receiver, uint32_t index); Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index); - Object* SetFastElementsCapacityAndLength(int capacity, int length); - Object* SetSlowElements(Object* length); + MUST_USE_RESULT Object* SetFastElementsCapacityAndLength(int capacity, + int length); + MUST_USE_RESULT Object* SetSlowElements(Object* length); // Lookup interceptors are used for handling properties controlled by host // objects. @@ -1428,7 +1448,7 @@ class JSObject: public HeapObject { bool HasRealNamedCallbackProperty(String* key); // Initializes the array to a certain length - Object* SetElementsLength(Object* length); + MUST_USE_RESULT Object* SetElementsLength(Object* length); // Get the header size for a JSObject. Used to compute the index of // internal fields as well as the number of internal fields. @@ -1535,6 +1555,8 @@ class JSObject: public HeapObject { int expected_additional_properties); Object* NormalizeElements(); + Object* UpdateMapCodeCache(String* name, Code* code); + // Transform slow named properties to fast variants. // Returns failure if allocation failed. Object* TransformToFastProperties(int unused_property_fields); @@ -1563,7 +1585,7 @@ class JSObject: public HeapObject { static inline JSObject* cast(Object* obj); // Disalow further properties to be added to the object. - Object* PreventExtensions(); + MUST_USE_RESULT Object* PreventExtensions(); // Dispatched behavior. 
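A large share of the header churn in this file is simply adding MUST_USE_RESULT to every function that can return a Failure object in place of its normal result, so callers who drop the return value, and therefore miss an allocation failure, get a compile-time warning. A minimal illustration of the macro and the calling convention it protects; the function and sentinel names are stand-ins.

#if defined(__GNUC__)
#define MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

namespace failure_sketch {

struct Object {};
static Object kFailure;   // stands in for V8's Failure sentinel

inline bool IsFailure(const Object* o) { return o == &kFailure; }

// Allocation-style API: the result must be checked because it may be the
// failure sentinel instead of the requested object.
MUST_USE_RESULT inline Object* NormalizeElements() {
  static Object normalized;
  return &normalized;             // would return &kFailure on allocation failure
}

inline Object* PreventExtensionsModel() {
  Object* ok = NormalizeElements();  // ignoring this result warns under GCC
  if (IsFailure(ok)) return ok;      // propagate the failure to the caller
  // ... proceed knowing the allocation succeeded ...
  return ok;
}

}  // namespace failure_sketch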
@@ -1636,16 +1658,20 @@ class JSObject: public HeapObject { uint32_t index, Object* value, JSObject* holder); - Object* SetElementWithInterceptor(uint32_t index, Object* value); - Object* SetElementWithoutInterceptor(uint32_t index, Object* value); + MUST_USE_RESULT Object* SetElementWithInterceptor(uint32_t index, + Object* value); + MUST_USE_RESULT Object* SetElementWithoutInterceptor(uint32_t index, + Object* value); Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index); - Object* DeletePropertyPostInterceptor(String* name, DeleteMode mode); - Object* DeletePropertyWithInterceptor(String* name); + MUST_USE_RESULT Object* DeletePropertyPostInterceptor(String* name, + DeleteMode mode); + MUST_USE_RESULT Object* DeletePropertyWithInterceptor(String* name); - Object* DeleteElementPostInterceptor(uint32_t index, DeleteMode mode); - Object* DeleteElementWithInterceptor(uint32_t index); + MUST_USE_RESULT Object* DeleteElementPostInterceptor(uint32_t index, + DeleteMode mode); + MUST_USE_RESULT Object* DeleteElementWithInterceptor(uint32_t index); PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver, String* name, @@ -1667,13 +1693,14 @@ class JSObject: public HeapObject { bool HasDenseElements(); bool CanSetCallback(String* name); - Object* SetElementCallback(uint32_t index, - Object* structure, - PropertyAttributes attributes); - Object* SetPropertyCallback(String* name, - Object* structure, - PropertyAttributes attributes); - Object* DefineGetterSetter(String* name, PropertyAttributes attributes); + MUST_USE_RESULT Object* SetElementCallback(uint32_t index, + Object* structure, + PropertyAttributes attributes); + MUST_USE_RESULT Object* SetPropertyCallback(String* name, + Object* structure, + PropertyAttributes attributes); + MUST_USE_RESULT Object* DefineGetterSetter(String* name, + PropertyAttributes attributes); void LookupInDescriptor(String* name, LookupResult* result); @@ -1703,18 +1730,22 @@ class FixedArray: public HeapObject { inline void set_null(int index); inline void set_the_hole(int index); + // Setters with less debug checks for the GC to use. + inline void set_unchecked(int index, Smi* value); + inline void set_null_unchecked(int index); + // Gives access to raw memory which stores the array's data. inline Object** data_start(); // Copy operations. inline Object* Copy(); - Object* CopySize(int new_length); + MUST_USE_RESULT Object* CopySize(int new_length); // Add the elements of a JSArray to this FixedArray. - Object* AddKeysFromJSArray(JSArray* array); + MUST_USE_RESULT Object* AddKeysFromJSArray(JSArray* array); // Compute the union of this and other. - Object* UnionOfKeys(FixedArray* other); + MUST_USE_RESULT Object* UnionOfKeys(FixedArray* other); // Copy a sub array from the receiver to dest. void CopyTo(int pos, FixedArray* dest, int dest_pos, int len); @@ -1853,11 +1884,12 @@ class DescriptorArray: public FixedArray { // or null), its enumeration index is kept as is. // If adding a real property, map transitions must be removed. If adding // a transition, they must not be removed. All null descriptors are removed. - Object* CopyInsert(Descriptor* descriptor, TransitionFlag transition_flag); + MUST_USE_RESULT Object* CopyInsert(Descriptor* descriptor, + TransitionFlag transition_flag); // Remove all transitions. Return a copy of the array with all transitions // removed, or a Failure object if the new array could not be allocated. 
- Object* RemoveTransitions(); + MUST_USE_RESULT Object* RemoveTransitions(); // Sort the instance descriptors by the hash codes of their keys. void Sort(); @@ -1885,7 +1917,7 @@ class DescriptorArray: public FixedArray { // Allocates a DescriptorArray, but returns the singleton // empty descriptor array object if number_of_descriptors is 0. - static Object* Allocate(int number_of_descriptors); + MUST_USE_RESULT static Object* Allocate(int number_of_descriptors); // Casting. static inline DescriptorArray* cast(Object* obj); @@ -2025,8 +2057,9 @@ class HashTable: public FixedArray { } // Returns a new HashTable object. Might return Failure. - static Object* Allocate(int at_least_space_for, - PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT static Object* Allocate( + int at_least_space_for, + PretenureFlag pretenure = NOT_TENURED); // Returns the key at entry. Object* KeyAt(int entry) { return get(EntryToIndex(entry)); } @@ -2120,7 +2153,7 @@ class HashTable: public FixedArray { } // Ensure enough space for n additional elements. - Object* EnsureCapacity(int n, Key key); + MUST_USE_RESULT Object* EnsureCapacity(int n, Key key); }; @@ -2136,7 +2169,7 @@ class HashTableKey { virtual uint32_t HashForObject(Object* key) = 0; // Returns the key object for storing into the hash table. // If allocations fails a failure object is returned. - virtual Object* AsObject() = 0; + MUST_USE_RESULT virtual Object* AsObject() = 0; // Required. virtual ~HashTableKey() {} }; @@ -2152,7 +2185,7 @@ class SymbolTableShape { static uint32_t HashForObject(HashTableKey* key, Object* object) { return key->HashForObject(object); } - static Object* AsObject(HashTableKey* key) { + MUST_USE_RESULT static Object* AsObject(HashTableKey* key) { return key->AsObject(); } @@ -2202,7 +2235,7 @@ class MapCacheShape { return key->HashForObject(object); } - static Object* AsObject(HashTableKey* key) { + MUST_USE_RESULT static Object* AsObject(HashTableKey* key) { return key->AsObject(); } @@ -2290,7 +2323,7 @@ class Dictionary: public HashTable { } // Returns a new array for dictionary usage. Might return Failure. - static Object* Allocate(int at_least_space_for); + MUST_USE_RESULT static Object* Allocate(int at_least_space_for); // Ensure enough space for n additional elements. Object* EnsureCapacity(int n, Key key); @@ -2332,7 +2365,7 @@ class StringDictionaryShape { static inline bool IsMatch(String* key, Object* other); static inline uint32_t Hash(String* key); static inline uint32_t HashForObject(String* key, Object* object); - static inline Object* AsObject(String* key); + MUST_USE_RESULT static inline Object* AsObject(String* key); static const int kPrefixSize = 2; static const int kEntrySize = 3; static const bool kIsEnumerable = true; @@ -2364,7 +2397,7 @@ class NumberDictionaryShape { static inline bool IsMatch(uint32_t key, Object* other); static inline uint32_t Hash(uint32_t key); static inline uint32_t HashForObject(uint32_t key, Object* object); - static inline Object* AsObject(uint32_t key); + MUST_USE_RESULT static inline Object* AsObject(uint32_t key); static const int kPrefixSize = 2; static const int kEntrySize = 3; static const bool kIsEnumerable = false; @@ -2445,6 +2478,35 @@ class JSFunctionResultCache: public FixedArray { }; +// The cache for maps used by normalized (dictionary mode) objects. +// Such maps do not have property descriptors, so a typical program +// needs very limited number of distinct normalized maps. 
+class NormalizedMapCache: public FixedArray { + public: + static const int kEntries = 64; + + static bool IsCacheable(JSObject* object); + + Object* Get(JSObject* object, PropertyNormalizationMode mode); + + bool Contains(Map* map); + + void Clear(); + + // Casting + static inline NormalizedMapCache* cast(Object* obj); + +#ifdef DEBUG + void NormalizedMapCacheVerify(); +#endif + + private: + static int Hash(Map* fast); + + static bool CheckHit(Map* slow, Map* fast, PropertyNormalizationMode mode); +}; + + // ByteArray represents fixed sized byte arrays. Used by the outside world, // such as PCRE, and also by the memory allocator and garbage collector to // fill in free blocks in the heap. @@ -2762,7 +2824,12 @@ class Code: public HeapObject { public: // Opaque data type for encapsulating code flags like kind, inline // cache state, and arguments count. - enum Flags { }; + // FLAGS_MIN_VALUE and FLAGS_MAX_VALUE are specified to ensure that + // enumeration type has correct value range (see Issue 830 for more details). + enum Flags { + FLAGS_MIN_VALUE = kMinInt, + FLAGS_MAX_VALUE = kMaxInt + }; enum Kind { FUNCTION, @@ -2829,8 +2896,8 @@ class Code: public HeapObject { inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; } // [major_key]: For kind STUB or BINARY_OP_IC, the major key. - inline CodeStub::Major major_key(); - inline void set_major_key(CodeStub::Major major); + inline int major_key(); + inline void set_major_key(int major); // Flags operations. static inline Flags ComputeFlags(Kind kind, @@ -2858,6 +2925,9 @@ class Code: public HeapObject { // Convert a target address into a code object. static inline Code* GetCodeFromTargetAddress(Address address); + // Convert an entry address into an object. + static inline Object* GetObjectFromEntryAddress(Address location_of_address); + // Returns the address of the first instruction. inline byte* instruction_start(); @@ -2964,6 +3034,8 @@ class Code: public HeapObject { class Map: public HeapObject { public: // Instance size. + // Size in bytes or kVariableSizeSentinel if instances do not have + // a fixed size. inline int instance_size(); inline void set_instance_size(int value); @@ -3061,7 +3133,8 @@ class Map: public HeapObject { inline bool is_extensible(); // Tells whether the instance has fast elements. - void set_has_fast_elements(bool value) { + // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS. + inline void set_has_fast_elements(bool value) { if (value) { set_bit_field2(bit_field2() | (1 << kHasFastElements)); } else { @@ -3069,7 +3142,7 @@ class Map: public HeapObject { } } - bool has_fast_elements() { + inline bool has_fast_elements() { return ((1 << kHasFastElements) & bit_field2()) != 0; } @@ -3090,11 +3163,13 @@ class Map: public HeapObject { // [stub cache]: contains stubs compiled for this map. DECL_ACCESSORS(code_cache, Object) - Object* CopyDropDescriptors(); + MUST_USE_RESULT Object* CopyDropDescriptors(); + + MUST_USE_RESULT Object* CopyNormalized(PropertyNormalizationMode mode); // Returns a copy of the map, with all transitions dropped from the // instance descriptors. - Object* CopyDropTransitions(); + MUST_USE_RESULT Object* CopyDropTransitions(); // Returns this map if it has the fast elements bit set, otherwise // returns a copy of the map, with all transitions dropped from the @@ -3127,7 +3202,7 @@ class Map: public HeapObject { inline void ClearCodeCache(); // Update code cache. 
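The FLAGS_MIN_VALUE and FLAGS_MAX_VALUE enumerators added to Code::Flags address the portability issue referenced by issue 830: an enum's guaranteed value range only has to cover its listed enumerators, so an empty enum need not represent the arbitrary bit patterns that get cast into it, and an optimizing compiler may exploit that. Pinning enumerators to the extremes of int forces the full range. A tiny illustration of the pattern, with invented field packing.

#include <climits>

namespace flags_sketch {

// Without the MIN/MAX members this enum would only be guaranteed to hold its
// named enumerators, and a compiler could assume a Flags value never carries
// an arbitrary packed bit pattern.
enum Flags {
  FLAGS_MIN_VALUE = INT_MIN,   // forces the range to cover every int
  FLAGS_MAX_VALUE = INT_MAX
};

// Typical usage: flags are really bit fields produced by shifting and OR-ing,
// then cast into the enum type.
inline Flags ComputeFlags(int kind, int ic_state, int argc) {
  int bits = (kind << 0) | (ic_state << 4) | (argc << 8);
  return static_cast<Flags>(bits);
}

}  // namespace flags_sketch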
- Object* UpdateCodeCache(String* name, Code* code); + MUST_USE_RESULT Object* UpdateCodeCache(String* name, Code* code); // Returns the found code or undefined if absent. Object* FindInCodeCache(String* name, Code::Flags flags); @@ -3154,6 +3229,7 @@ class Map: public HeapObject { #ifdef DEBUG void MapPrint(); void MapVerify(); + void NormalizedMapVerify(); #endif inline int visitor_id(); @@ -3169,8 +3245,7 @@ class Map: public HeapObject { static const int kInstanceDescriptorsOffset = kConstructorOffset + kPointerSize; static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize; - static const int kScavengerCallbackOffset = kCodeCacheOffset + kPointerSize; - static const int kPadStart = kScavengerCallbackOffset + kPointerSize; + static const int kPadStart = kCodeCacheOffset + kPointerSize; static const int kSize = MAP_POINTER_ALIGN(kPadStart); // Layout of pointer fields. Heap iteration code relies on them @@ -3187,7 +3262,8 @@ class Map: public HeapObject { static const int kPreAllocatedPropertyFieldsByte = 2; static const int kPreAllocatedPropertyFieldsOffset = kInstanceSizesOffset + kPreAllocatedPropertyFieldsByte; - // The byte at position 3 is not in use at the moment. + static const int kVisitorIdByte = 3; + static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte; // Byte offsets within kInstanceAttributesOffset attributes. static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0; @@ -3344,6 +3420,8 @@ class SharedFunctionInfo: public HeapObject { // [construct stub]: Code stub for constructing instances of this function. DECL_ACCESSORS(construct_stub, Code) + inline Code* unchecked_code(); + // Returns if this function has been compiled to native code yet. inline bool is_compiled(); @@ -3451,6 +3529,15 @@ class SharedFunctionInfo: public HeapObject { inline bool allows_lazy_compilation(); inline void set_allows_lazy_compilation(bool flag); + // Indicates how many full GCs this function has survived with assigned + // code object. Used to determine when it is relatively safe to flush + // this code object and replace it with lazy compilation stub. + // Age is reset when GC notices that the code object is referenced + // from the stack or compilation cache. + inline int code_age(); + inline void set_code_age(int age); + + // Check whether a inlined constructor can be generated with the given // prototype. bool CanGenerateInlineConstructor(Object* prototype); @@ -3581,6 +3668,8 @@ class SharedFunctionInfo: public HeapObject { static const int kHasOnlySimpleThisPropertyAssignments = 0; static const int kTryFullCodegen = 1; static const int kAllowLazyCompilation = 2; + static const int kCodeAgeShift = 3; + static const int kCodeAgeMask = 7; DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo); }; @@ -3596,6 +3685,8 @@ class JSFunction: public JSObject { // can be shared by instances. DECL_ACCESSORS(shared, SharedFunctionInfo) + inline SharedFunctionInfo* unchecked_shared(); + // [context]: The context for this function. inline Context* context(); inline Object* unchecked_context(); @@ -3608,6 +3699,8 @@ class JSFunction: public JSObject { inline Code* code(); inline void set_code(Code* value); + inline Code* unchecked_code(); + // Tells whether this function is builtin. 
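code_age above is a small counter packed into three bits of compiler_hints (kCodeAgeShift = 3, kCodeAgeMask = 7); it is meant to grow across full GCs and be reset when the code is seen on a stack or in the compilation cache, which is what the code-flushing preparation earlier in this patch checks. A sketch of the bit packing; note that, unlike the patch's OR-only setter, this version clears the field first so the age can also be reset, which is an assumption about intended use rather than a copy of the patch.

#include <cstdint>

namespace code_age_sketch {

static const int kCodeAgeShift = 3;   // bits 0-2 hold other compiler hints
static const int kCodeAgeMask = 7;    // three bits, ages 0-7

struct SharedInfoModel {
  uint32_t compiler_hints;

  int code_age() const {
    return static_cast<int>((compiler_hints >> kCodeAgeShift) & kCodeAgeMask);
  }

  void set_code_age(int age) {
    compiler_hints = (compiler_hints & ~(kCodeAgeMask << kCodeAgeShift)) |
                     ((age & kCodeAgeMask) << kCodeAgeShift);
  }
};

// A collection pass might age a function and flush its code once the age
// passes some limit (the age wraps at 8 in this simplified model).
inline bool AgeAndCheckFlush(SharedInfoModel* shared, int age_limit) {
  shared->set_code_age(shared->code_age() + 1);
  return shared->code_age() > age_limit;
}

}  // namespace code_age_sketch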
inline bool IsBuiltin(); @@ -3636,7 +3729,7 @@ class JSFunction: public JSObject { inline Object* prototype(); inline Object* instance_prototype(); Object* SetInstancePrototype(Object* value); - Object* SetPrototype(Object* value); + MUST_USE_RESULT Object* SetPrototype(Object* value); // After prototype is removed, it will not be created when accessed, and // [[Construct]] from this function will not be allowed. @@ -3660,6 +3753,10 @@ class JSFunction: public JSObject { // Casting. static inline JSFunction* cast(Object* obj); + // Iterates the objects, including code objects indirectly referenced + // through pointers to the first instruction in the code object. + void JSFunctionIterateBody(int object_size, ObjectVisitor* v); + // Dispatched behavior. #ifdef DEBUG void JSFunctionPrint(); @@ -3673,9 +3770,9 @@ class JSFunction: public JSObject { static Context* GlobalContextFromLiterals(FixedArray* literals); // Layout descriptors. - static const int kCodeOffset = JSObject::kHeaderSize; + static const int kCodeEntryOffset = JSObject::kHeaderSize; static const int kPrototypeOrInitialMapOffset = - kCodeOffset + kPointerSize; + kCodeEntryOffset + kPointerSize; static const int kSharedFunctionInfoOffset = kPrototypeOrInitialMapOffset + kPointerSize; static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize; @@ -3973,7 +4070,7 @@ class CompilationCacheShape { return key->HashForObject(object); } - static Object* AsObject(HashTableKey* key) { + MUST_USE_RESULT static Object* AsObject(HashTableKey* key) { return key->AsObject(); } @@ -4006,7 +4103,7 @@ class CodeCache: public Struct { DECL_ACCESSORS(normal_type_cache, Object) // Add the code object to the cache. - Object* Update(String* name, Code* code); + MUST_USE_RESULT Object* Update(String* name, Code* code); // Lookup code object in the cache. Returns code object if found and undefined // if not. @@ -4034,8 +4131,8 @@ class CodeCache: public Struct { static const int kSize = kNormalTypeCacheOffset + kPointerSize; private: - Object* UpdateDefaultCache(String* name, Code* code); - Object* UpdateNormalTypeCache(String* name, Code* code); + MUST_USE_RESULT Object* UpdateDefaultCache(String* name, Code* code); + MUST_USE_RESULT Object* UpdateNormalTypeCache(String* name, Code* code); Object* LookupDefaultCache(String* name, Code::Flags flags); Object* LookupNormalTypeCache(String* name, Code::Flags flags); @@ -4063,7 +4160,7 @@ class CodeCacheHashTableShape { return key->HashForObject(object); } - static Object* AsObject(HashTableKey* key) { + MUST_USE_RESULT static Object* AsObject(HashTableKey* key) { return key->AsObject(); } @@ -4076,7 +4173,7 @@ class CodeCacheHashTable: public HashTable { public: Object* Lookup(String* name, Code::Flags flags); - Object* Put(String* name, Code* code); + MUST_USE_RESULT Object* Put(String* name, Code* code); int GetIndex(String* name, Code::Flags flags); void RemoveByIndex(int index); @@ -4123,6 +4220,11 @@ class StringHasher { void invalidate() { is_valid_ = false; } + // Calculated hash value for a string consisting of 1 to + // String::kMaxArrayIndexSize digits with no leading zeros (except "0"). + // The value is the represented decimal value.
+ static uint32_t MakeArrayIndexHash(uint32_t value, int length); + private: uint32_t array_index() { @@ -4365,6 +4467,7 @@ class String: public HeapObject { kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields; STATIC_CHECK((kArrayIndexLengthBits > 0)); + STATIC_CHECK(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits)); static const int kArrayIndexHashLengthShift = kArrayIndexValueBits + kNofHashBitFields; @@ -4938,12 +5041,13 @@ class JSArray: public JSObject { // is set to a smi. This matches the set function on FixedArray. inline void set_length(Smi* length); - Object* JSArrayUpdateLengthFromIndex(uint32_t index, Object* value); + MUST_USE_RESULT Object* JSArrayUpdateLengthFromIndex(uint32_t index, + Object* value); // Initialize the array with the given capacity. The function may // fail due to out-of-memory situations, but only if the requested // capacity is non-zero. - Object* Initialize(int capacity); + MUST_USE_RESULT Object* Initialize(int capacity); // Set the content of the array to the content of storage. inline void SetContent(FixedArray* storage); @@ -5390,6 +5494,9 @@ class ObjectVisitor BASE_EMBEDDED { // Visits a code target in the instruction stream. virtual void VisitCodeTarget(RelocInfo* rinfo); + // Visits a code entry in a JS function. + virtual void VisitCodeEntry(Address entry_address); + // Visits a runtime entry in the instruction stream. virtual void VisitRuntimeEntry(RelocInfo* rinfo) {} diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 1df7c21450..7667e89a3c 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -32,6 +32,7 @@ #include "bootstrapper.h" #include "codegen.h" #include "compiler.h" +#include "func-name-inferrer.h" #include "messages.h" #include "parser.h" #include "platform.h" @@ -153,7 +154,7 @@ class Parser { ParserLog* log_; bool is_pre_parsing_; ScriptDataImpl* pre_data_; - bool seen_loop_stmt_; // Used for inner loop detection. + FuncNameInferrer* fni_; bool inside_with() const { return with_nesting_level_ > 0; } ParserFactory* factory() const { return factory_; } @@ -213,6 +214,11 @@ class Parser { ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok); Expression* ParseRegExpLiteral(bool seen_equal, bool* ok); + Expression* NewCompareNode(Token::Value op, + Expression* x, + Expression* y, + int position); + // Populate the constant properties fixed array for a materialized object // literal. void BuildObjectLiteralConstantProperties( @@ -260,6 +266,8 @@ class Parser { bool Check(Token::Value token); void ExpectSemicolon(bool* ok); + Handle GetSymbol(bool* ok); + // Get odd-ball literals. Literal* GetLiteralUndefined(); Literal* GetLiteralTheHole(); @@ -338,9 +346,7 @@ class Parser { template class BufferedZoneList { public: - - BufferedZoneList() : - list_(NULL), last_(NULL) {} + BufferedZoneList() : list_(NULL), last_(NULL) {} // Adds element at end of list. This element is buffered and can // be read using last() or removed using RemoveLast until a new Add or until @@ -411,6 +417,7 @@ class BufferedZoneList { T* last_; }; + // Accumulates RegExp atoms and assertions into lists of terms and alternatives. 
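// A standalone sketch of what a MakeArrayIndexHash-style helper does: when a
// string is a valid array index, its numeric value and digit count are packed
// directly into the hash field instead of a computed hash. The bit widths
// below are illustrative, not String's actual layout; the STATIC_CHECK added
// above guards exactly this kind of length/width invariant.
#include <cassert>
#include <stdint.h>

static const int kNofHashBitFields = 2;      // e.g. "is array index", "hash computed"
static const int kArrayIndexValueBits = 24;  // room for the numeric value
static const int kArrayIndexLengthBits =
    32 - kArrayIndexValueBits - kNofHashBitFields;
static const int kMaxArrayIndexSize = 10;    // decimal digits in 2^32 - 1

static uint32_t MakeArrayIndexHash(uint32_t value, int length) {
  uint32_t hash = value << kNofHashBitFields;
  hash |= static_cast<uint32_t>(length)
          << (kNofHashBitFields + kArrayIndexValueBits);
  return hash;
}

int main() {
  // The length field must be wide enough for the longest allowed index.
  assert(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
  uint32_t hash = MakeArrayIndexHash(42, 2);  // the string "42"
  assert(((hash >> kNofHashBitFields) & ((1u << kArrayIndexValueBits) - 1)) == 42);
  assert(static_cast<int>(hash >> (kNofHashBitFields + kArrayIndexValueBits)) == 2);
  return 0;
}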
class RegExpBuilder: public ZoneObject { public: @@ -649,6 +656,7 @@ class RegExpParser { static const int kMaxCaptures = 1 << 16; static const uc32 kEndMarker = (1 << 21); + private: enum SubexpressionType { INITIAL, @@ -744,6 +752,10 @@ class TemporaryScope BASE_EMBEDDED { void AddProperty() { expected_property_count_++; } int expected_property_count() { return expected_property_count_; } + + void AddLoop() { loop_count_++; } + bool ContainsLoops() const { return loop_count_ > 0; } + private: // Captures the number of literals that need materialization in the // function. Includes regexp literals, and boilerplate for object @@ -753,9 +765,14 @@ class TemporaryScope BASE_EMBEDDED { // Properties count estimation. int expected_property_count_; + // Keeps track of assignments to properties of this. Used for + // optimizing constructors. bool only_simple_this_property_assignments_; Handle this_property_assignments_; + // Captures the number of loops inside the scope. + int loop_count_; + // Bookkeeping Parser* parser_; TemporaryScope* parent_; @@ -769,6 +786,7 @@ TemporaryScope::TemporaryScope(Parser* parser) expected_property_count_(0), only_simple_this_property_assignments_(false), this_property_assignments_(Factory::empty_fixed_array()), + loop_count_(0), parser_(parser), parent_(parser->temp_scope_) { parser->temp_scope_ = this; @@ -812,7 +830,7 @@ class ParserFactory BASE_EMBEDDED { virtual Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with); - virtual Handle LookupSymbol(const char* string, int length) { + virtual Handle LookupSymbol(int index, Vector string) { return Handle(); } @@ -851,23 +869,48 @@ class ParserLog BASE_EMBEDDED { public: virtual ~ParserLog() { } - // Records the occurrence of a function. The returned object is - // only guaranteed to be valid until the next function has been - // logged. + // Records the occurrence of a function. virtual FunctionEntry LogFunction(int start) { return FunctionEntry(); } - + virtual void LogSymbol(int start, Vector symbol) {} + // Return the current position in the function entry log. + virtual int function_position() { return 0; } + virtual int symbol_position() { return 0; } + virtual int symbol_ids() { return 0; } virtual void LogError() { } }; class AstBuildingParserFactory : public ParserFactory { public: - AstBuildingParserFactory() : ParserFactory(false) { } + explicit AstBuildingParserFactory(int expected_symbols) + : ParserFactory(false), symbol_cache_(expected_symbols) { } virtual Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with); - virtual Handle LookupSymbol(const char* string, int length) { - return Factory::LookupSymbol(Vector(string, length)); + virtual Handle LookupSymbol(int symbol_id, + Vector string) { + // If there is no preparse data, we have no simpler way to identify similar + // symbols. + if (symbol_id < 0) return Factory::LookupSymbol(string); + return LookupCachedSymbol(symbol_id, string); + } + + Handle LookupCachedSymbol(int symbol_id, + Vector string) { + // Make sure the cache is large enough to hold the symbol identifier. + if (symbol_cache_.length() <= symbol_id) { + // Increase length to index + 1. 
+ symbol_cache_.AddBlock(Handle::null(), + symbol_id + 1 - symbol_cache_.length()); + } + Handle result = symbol_cache_.at(symbol_id); + if (result.is_null()) { + result = Factory::LookupSymbol(string); + symbol_cache_.at(symbol_id) = result; + return result; + } + Counters::total_preparse_symbols_skipped.Increment(); + return result; } virtual Handle EmptySymbol() { @@ -885,6 +928,8 @@ class AstBuildingParserFactory : public ParserFactory { } virtual Statement* EmptyStatement(); + private: + List > symbol_cache_; }; @@ -892,80 +937,190 @@ class ParserRecorder: public ParserLog { public: ParserRecorder(); virtual FunctionEntry LogFunction(int start); + virtual void LogSymbol(int start, Vector literal) { + int hash = vector_hash(literal); + HashMap::Entry* entry = symbol_table_.Lookup(&literal, hash, true); + int id = static_cast(reinterpret_cast(entry->value)); + if (id == 0) { + // Put (symbol_id_ + 1) into entry and increment it. + symbol_id_++; + entry->value = reinterpret_cast(symbol_id_); + Vector > symbol = symbol_entries_.AddBlock(1, literal); + entry->key = &symbol[0]; + } else { + // Log a reuse of an earlier seen symbol. + symbol_store_.Add(start); + symbol_store_.Add(id - 1); + } + } virtual void LogError() { } virtual void LogMessage(Scanner::Location loc, const char* message, Vector args); - void WriteString(Vector str); - static const char* ReadString(unsigned* start, int* chars); - List* store() { return &store_; } - private: - bool has_error_; - List store_; -}; + Vector ExtractData() { + int function_size = function_store_.size(); + int symbol_size = symbol_store_.size(); + int total_size = ScriptDataImpl::kHeaderSize + function_size + symbol_size; + Vector data = Vector::New(total_size); + preamble_[ScriptDataImpl::kFunctionsSizeOffset] = function_size; + preamble_[ScriptDataImpl::kSymbolCountOffset] = symbol_id_; + memcpy(data.start(), preamble_, sizeof(preamble_)); + int symbol_start = ScriptDataImpl::kHeaderSize + function_size; + if (function_size > 0) { + function_store_.WriteTo(data.SubVector(ScriptDataImpl::kHeaderSize, + symbol_start)); + } + if (symbol_size > 0) { + symbol_store_.WriteTo(data.SubVector(symbol_start, total_size)); + } + return data; + } + virtual int function_position() { return function_store_.size(); } + virtual int symbol_position() { return symbol_store_.size(); } + virtual int symbol_ids() { return symbol_id_; } + private: + Collector function_store_; + Collector symbol_store_; + Collector > symbol_entries_; + HashMap symbol_table_; + int symbol_id_; + + static int vector_hash(Vector string) { + int hash = 0; + for (int i = 0; i < string.length(); i++) { + int c = string[i]; + hash += c; + hash += (hash << 10); + hash ^= (hash >> 6); + } + return hash; + } -FunctionEntry ScriptDataImpl::GetFunctionEnd(int start) { - if (nth(last_entry_).start_pos() > start) { - // If the last entry we looked up is higher than what we're - // looking for then it's useless and we reset it. 
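// A condensed sketch of the preparse symbol machinery above: during the
// preparse pass, repeated identifiers are logged as (position, id) pairs;
// during the real parse, GetSymbolIdentifier-style playback consumes a pair
// only when its recorded position matches the parser's current position, so
// equal symbols can share one cache slot. Types and names below are
// illustrative stand-ins for the Collector/HashMap/Vector machinery.
#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

class SymbolRecorder {
 public:
  // Assigns an id to a first occurrence, or records a reuse of an earlier id.
  void LogSymbol(int position, const std::string& literal) {
    std::map<std::string, int>::iterator it = first_seen_.find(literal);
    if (it == first_seen_.end()) {
      int id = static_cast<int>(first_seen_.size());
      first_seen_.insert(std::make_pair(literal, id));
    } else {
      store_.push_back(static_cast<unsigned>(position));    // reuse record:
      store_.push_back(static_cast<unsigned>(it->second));   // [position, id]
    }
  }
  const std::vector<unsigned>& store() const { return store_; }

 private:
  std::map<std::string, int> first_seen_;
  std::vector<unsigned> store_;
};

class SymbolPlayback {
 public:
  explicit SymbolPlayback(const std::vector<unsigned>& store)
      : store_(store), index_(0), fresh_id_(0) {}

  int GetSymbolIdentifier(int position) {
    size_t next = index_ + 2;
    if (next <= store_.size() &&
        static_cast<int>(store_[index_]) == position) {
      index_ = next;
      return static_cast<int>(store_[next - 1]);  // replay the recorded id
    }
    return fresh_id_++;                           // no record: fresh symbol
  }

 private:
  const std::vector<unsigned>& store_;
  size_t index_;
  int fresh_id_;
};

int main() {
  SymbolRecorder recorder;
  recorder.LogSymbol(0, "foo");    // first occurrence, gets id 0
  recorder.LogSymbol(10, "bar");   // first occurrence, gets id 1
  recorder.LogSymbol(20, "foo");   // reuse of id 0, recorded as (20, 0)

  SymbolPlayback playback(recorder.store());
  std::printf("%d %d\n",
              playback.GetSymbolIdentifier(5),    // unrecorded: fresh id
              playback.GetSymbolIdentifier(20));  // recorded: replays id 0
  return 0;
}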
- last_entry_ = 0; + static bool vector_compare(void* a, void* b) { + Vector* string1 = reinterpret_cast* >(a); + Vector* string2 = reinterpret_cast* >(b); + int length = string1->length(); + if (string2->length() != length) return false; + return memcmp(string1->start(), string2->start(), length) == 0; } - for (int i = last_entry_; i < EntryCount(); i++) { - FunctionEntry entry = nth(i); - if (entry.start_pos() == start) { - last_entry_ = i; - return entry; - } + + unsigned preamble_[ScriptDataImpl::kHeaderSize]; +#ifdef DEBUG + int prev_start; +#endif + + bool has_error() { + return static_cast(preamble_[ScriptDataImpl::kHasErrorOffset]); } - return FunctionEntry(); + void WriteString(Vector str); +}; + + +void ScriptDataImpl::SkipFunctionEntry(int start) { + ASSERT(function_index_ + FunctionEntry::kSize <= store_.length()); + ASSERT(static_cast(store_[function_index_]) == start); + function_index_ += FunctionEntry::kSize; } -bool ScriptDataImpl::SanityCheck() { - if (store_.length() < static_cast(ScriptDataImpl::kHeaderSize)) - return false; - if (magic() != ScriptDataImpl::kMagicNumber) - return false; - if (version() != ScriptDataImpl::kCurrentVersion) - return false; - return true; +FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) { + // The current pre-data entry must be a FunctionEntry with the given + // start position. + if ((function_index_ + FunctionEntry::kSize <= store_.length()) + && (static_cast(store_[function_index_]) == start)) { + int index = function_index_; + function_index_ += FunctionEntry::kSize; + return FunctionEntry(store_.SubVector(index, + index + FunctionEntry::kSize)); + } + return FunctionEntry(); } -int ScriptDataImpl::EntryCount() { - return (store_.length() - kHeaderSize) / FunctionEntry::kSize; +int ScriptDataImpl::GetSymbolIdentifier(int start) { + int next = symbol_index_ + 2; + if (next <= store_.length() + && static_cast(store_[symbol_index_]) == start) { + symbol_index_ = next; + return store_[next - 1]; + } + return symbol_id_++; } -FunctionEntry ScriptDataImpl::nth(int n) { - int offset = kHeaderSize + n * FunctionEntry::kSize; - return FunctionEntry(Vector(store_.start() + offset, - FunctionEntry::kSize)); + +bool ScriptDataImpl::SanityCheck() { + // Check that the header data is valid and doesn't + // point to positions outside the store. + if (store_.length() < ScriptDataImpl::kHeaderSize) return false; + if (magic() != ScriptDataImpl::kMagicNumber) return false; + if (version() != ScriptDataImpl::kCurrentVersion) return false; + if (has_error()) { + // Extra sanity check for error message encoding. + if (store_.length() <= kHeaderSize + kMessageTextPos) return false; + if (Read(kMessageStartPos) > Read(kMessageEndPos)) return false; + unsigned arg_count = Read(kMessageArgCountPos); + int pos = kMessageTextPos; + for (unsigned int i = 0; i <= arg_count; i++) { + if (store_.length() <= kHeaderSize + pos) return false; + int length = static_cast(Read(pos)); + if (length < 0) return false; + pos += 1 + length; + } + if (store_.length() < kHeaderSize + pos) return false; + return true; + } + // Check that the space allocated for function entries is sane. + int functions_size = + static_cast(store_[ScriptDataImpl::kFunctionsSizeOffset]); + if (functions_size < 0) return false; + if (functions_size % FunctionEntry::kSize != 0) return false; + // Check that the count of symbols is non-negative.
+ int symbol_count = + static_cast(store_[ScriptDataImpl::kSymbolCountOffset]); + if (symbol_count < 0) return false; + // Check that the total size has room for the function entries. + int minimum_size = + ScriptDataImpl::kHeaderSize + functions_size; + if (store_.length() < minimum_size) return false; + return true; } ParserRecorder::ParserRecorder() - : has_error_(false), store_(4) { - Vector preamble = store()->AddBlock(0, ScriptDataImpl::kHeaderSize); - preamble[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber; - preamble[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion; - preamble[ScriptDataImpl::kHasErrorOffset] = false; + : function_store_(0), + symbol_store_(0), + symbol_entries_(0), + symbol_table_(vector_compare), + symbol_id_(0) { +#ifdef DEBUG + prev_start = -1; +#endif + preamble_[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber; + preamble_[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion; + preamble_[ScriptDataImpl::kHasErrorOffset] = false; + preamble_[ScriptDataImpl::kFunctionsSizeOffset] = 0; + preamble_[ScriptDataImpl::kSymbolCountOffset] = 0; + preamble_[ScriptDataImpl::kSizeOffset] = 0; + ASSERT_EQ(6, ScriptDataImpl::kHeaderSize); } void ParserRecorder::WriteString(Vector str) { - store()->Add(str.length()); - for (int i = 0; i < str.length(); i++) - store()->Add(str[i]); + function_store_.Add(str.length()); + for (int i = 0; i < str.length(); i++) { + function_store_.Add(str[i]); + } } -const char* ParserRecorder::ReadString(unsigned* start, int* chars) { +const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) { int length = start[0]; char* result = NewArray(length + 1); - for (int i = 0; i < length; i++) + for (int i = 0; i < length; i++) { result[i] = start[i + 1]; + } result[length] = '\0'; if (chars != NULL) *chars = length; return result; @@ -974,38 +1129,44 @@ const char* ParserRecorder::ReadString(unsigned* start, int* chars) { void ParserRecorder::LogMessage(Scanner::Location loc, const char* message, Vector args) { - if (has_error_) return; - store()->Rewind(ScriptDataImpl::kHeaderSize); - store()->at(ScriptDataImpl::kHasErrorOffset) = true; - store()->Add(loc.beg_pos); - store()->Add(loc.end_pos); - store()->Add(args.length()); + if (has_error()) return; + preamble_[ScriptDataImpl::kHasErrorOffset] = true; + function_store_.Reset(); + STATIC_ASSERT(ScriptDataImpl::kMessageStartPos == 0); + function_store_.Add(loc.beg_pos); + STATIC_ASSERT(ScriptDataImpl::kMessageEndPos == 1); + function_store_.Add(loc.end_pos); + STATIC_ASSERT(ScriptDataImpl::kMessageArgCountPos == 2); + function_store_.Add(args.length()); + STATIC_ASSERT(ScriptDataImpl::kMessageTextPos == 3); WriteString(CStrVector(message)); - for (int i = 0; i < args.length(); i++) + for (int i = 0; i < args.length(); i++) { WriteString(CStrVector(args[i])); + } } Scanner::Location ScriptDataImpl::MessageLocation() { - int beg_pos = Read(0); - int end_pos = Read(1); + int beg_pos = Read(kMessageStartPos); + int end_pos = Read(kMessageEndPos); return Scanner::Location(beg_pos, end_pos); } const char* ScriptDataImpl::BuildMessage() { - unsigned* start = ReadAddress(3); - return ParserRecorder::ReadString(start, NULL); + unsigned* start = ReadAddress(kMessageTextPos); + return ReadString(start, NULL); } Vector ScriptDataImpl::BuildArgs() { - int arg_count = Read(2); + int arg_count = Read(kMessageArgCountPos); const char** array = NewArray(arg_count); - int pos = ScriptDataImpl::kHeaderSize + Read(3); + // Position after the string starting at position 3. + int pos = kMessageTextPos + 1 + Read(kMessageTextPos); for (int i = 0; i < arg_count; i++) { int count = 0; - array[i] = ParserRecorder::ReadString(ReadAddress(pos), &count); + array[i] = ReadString(ReadAddress(pos), &count); pos += count + 1; } return Vector(array, arg_count); @@ -1023,8 +1184,12 @@ unsigned* ScriptDataImpl::ReadAddress(int position) { FunctionEntry ParserRecorder::LogFunction(int start) { - if (has_error_) return FunctionEntry(); - FunctionEntry result(store()->AddBlock(0, FunctionEntry::kSize)); +#ifdef DEBUG + ASSERT(start > prev_start); + prev_start = start; +#endif + if (has_error()) return FunctionEntry(); + FunctionEntry result(function_store_.AddBlock(FunctionEntry::kSize, 0)); result.set_start_pos(start); return result; } @@ -1034,8 +1199,14 @@ class AstBuildingParser : public Parser { public: AstBuildingParser(Handle