
Merge branch 'v0.4'

v0.7.4-release
Ryan Dahl, 14 years ago · parent commit 98515742b7
Changed files (change count per file):

  1. CMakeLists.txt (4)
  2. benchmark/report-startup-memory.js (1)
  3. benchmark/v8_bench.js (15)
  4. cmake/CTestCustom.cmake (2)
  5. cmake/configure.cmake (29)
  6. cmake/node_build.cmake (54)
  7. cmake/v8_build.cmake (18)
  8. deps/libev/Changes (9)
  9. deps/libev/configure.ac (2)
  10. deps/libev/ev++.h (6)
  11. deps/libev/ev.3 (41)
  12. deps/libev/ev.c (40)
  13. deps/libev/ev.h (10)
  14. deps/libev/ev.pod (39)
  15. deps/libev/ev_epoll.c (8)
  16. deps/libev/ev_kqueue.c (10)
  17. deps/libev/ev_poll.c (10)
  18. deps/libev/ev_port.c (8)
  19. deps/libev/ev_select.c (19)
  20. deps/libev/ev_vars.h (6)
  21. deps/libev/ev_win32.c (12)
  22. deps/libev/event.c (6)
  23. deps/libev/event.h (6)
  24. deps/libev/libev.m4 (4)
  25. deps/v8/.gitignore (2)
  26. deps/v8/AUTHORS (1)
  27. deps/v8/ChangeLog (65)
  28. deps/v8/SConstruct (143)
  29. deps/v8/include/v8.h (1)
  30. deps/v8/samples/shell.cc (3)
  31. deps/v8/src/SConscript (7)
  32. deps/v8/src/accessors.cc (1)
  33. deps/v8/src/api.cc (68)
  34. deps/v8/src/arguments.h (2)
  35. deps/v8/src/arm/assembler-arm-inl.h (4)
  36. deps/v8/src/arm/assembler-arm.cc (58)
  37. deps/v8/src/arm/assembler-arm.h (9)
  38. deps/v8/src/arm/builtins-arm.cc (58)
  39. deps/v8/src/arm/code-stubs-arm.cc (381)
  40. deps/v8/src/arm/code-stubs-arm.h (46)
  41. deps/v8/src/arm/codegen-arm.cc (24)
  42. deps/v8/src/arm/constants-arm.h (2)
  43. deps/v8/src/arm/deoptimizer-arm.cc (204)
  44. deps/v8/src/arm/full-codegen-arm.cc (557)
  45. deps/v8/src/arm/ic-arm.cc (117)
  46. deps/v8/src/arm/lithium-arm.cc (88)
  47. deps/v8/src/arm/lithium-arm.h (106)
  48. deps/v8/src/arm/lithium-codegen-arm.cc (208)
  49. deps/v8/src/arm/lithium-codegen-arm.h (8)
  50. deps/v8/src/arm/macro-assembler-arm.cc (172)
  51. deps/v8/src/arm/macro-assembler-arm.h (27)
  52. deps/v8/src/arm/regexp-macro-assembler-arm.cc (71)
  53. deps/v8/src/arm/regexp-macro-assembler-arm.h (3)
  54. deps/v8/src/arm/simulator-arm.h (15)
  55. deps/v8/src/arm/stub-cache-arm.cc (41)
  56. deps/v8/src/arm/virtual-frame-arm.cc (13)
  57. deps/v8/src/arm/virtual-frame-arm.h (3)
  58. deps/v8/src/array.js (10)
  59. deps/v8/src/assembler.cc (2)
  60. deps/v8/src/assembler.h (8)
  61. deps/v8/src/ast.cc (4)
  62. deps/v8/src/bignum.cc (11)
  63. deps/v8/src/bootstrapper.cc (51)
  64. deps/v8/src/builtins.cc (45)
  65. deps/v8/src/builtins.h (170)
  66. deps/v8/src/code-stubs.cc (4)
  67. deps/v8/src/code-stubs.h (3)
  68. deps/v8/src/codegen.cc (1)
  69. deps/v8/src/compiler.cc (25)
  70. deps/v8/src/compiler.h (4)
  71. deps/v8/src/d8.cc (2)
  72. deps/v8/src/date.js (7)
  73. deps/v8/src/debug.cc (4)
  74. deps/v8/src/deoptimizer.cc (14)
  75. deps/v8/src/execution.cc (1)
  76. deps/v8/src/execution.h (2)
  77. deps/v8/src/factory.cc (9)
  78. deps/v8/src/factory.h (2)
  79. deps/v8/src/flag-definitions.h (10)
  80. deps/v8/src/full-codegen.cc (2)
  81. deps/v8/src/full-codegen.h (6)
  82. deps/v8/src/gdb-jit.cc (121)
  83. deps/v8/src/handles.cc (12)
  84. deps/v8/src/handles.h (7)
  85. deps/v8/src/heap.cc (2)
  86. deps/v8/src/heap.h (1)
  87. deps/v8/src/hydrogen-instructions.cc (186)
  88. deps/v8/src/hydrogen-instructions.h (441)
  89. deps/v8/src/hydrogen.cc (462)
  90. deps/v8/src/hydrogen.h (38)
  91. deps/v8/src/ia32/assembler-ia32.cc (17)
  92. deps/v8/src/ia32/assembler-ia32.h (53)
  93. deps/v8/src/ia32/builtins-ia32.cc (15)
  94. deps/v8/src/ia32/code-stubs-ia32.cc (223)
  95. deps/v8/src/ia32/code-stubs-ia32.h (38)
  96. deps/v8/src/ia32/codegen-ia32.cc (27)
  97. deps/v8/src/ia32/deoptimizer-ia32.cc (58)
  98. deps/v8/src/ia32/disasm-ia32.cc (12)
  99. deps/v8/src/ia32/full-codegen-ia32.cc (133)
  100. deps/v8/src/ia32/ic-ia32.cc (49)

CMakeLists.txt (4)

@ -25,7 +25,9 @@ option(SHARED_V8 "use system shared V8 library")
option(SHARED_LIBEV "use system shared libev library")
option(SHARED_CARES "use system shared c-ares library")
option(V8_SNAPSHOT "turn on snapshot when building stock v8")
option(V8_OPROFILE "Add oprofile support")
option(V8_GDBJIT "add gdbjit support")
option(DTRACE "build with DTrace (experimental)")
# cmake policies to get rid of some warnings
cmake_policy(SET CMP0009 NEW) # GLOB_RECURSE should not follow symlinks

benchmark/report-startup-memory.js (1)

@ -0,0 +1 @@
console.log(process.memoryUsage().rss);

benchmark/v8_bench.js (15)

@ -0,0 +1,15 @@
// compare with "google-chrome deps/v8/benchmarks/run.html"
var fs = require('fs');
var path = require('path');
var vm = require('vm');
var dir = path.join(__dirname, '..', 'deps', 'v8', 'benchmarks');
global.print = console.log;
global.load = function (x) {
var source = fs.readFileSync(path.join(dir, x), 'utf8');
vm.runInThisContext(source, x);
}
load('run.js');

cmake/CTestCustom.cmake (2)

@ -1,2 +1,4 @@
set(CTEST_CUSTOM_PRE_TEST "sh -c \"rm -rf ../test/tmp && mkdir ../test/tmp\"")
set(CTEST_CUSTOM_POST_TEST ${CTEST_CUSTOM_PRE_TEST})
set(CTEST_CUSTOM_PRE_MEMCHECK ${CTEST_CUSTOM_PRE_TEST})
set(CTEST_CUSTOM_POST_MEMCHECK ${CTEST_CUSTOM_PRE_TEST})

cmake/configure.cmake (29)

@ -10,6 +10,16 @@ endif()
string(TOLOWER ${CMAKE_SYSTEM_NAME} node_platform)
if(${node_platform} MATCHES darwin)
execute_process(COMMAND sw_vers -productVersion OUTPUT_VARIABLE OSX_VERSION)
string(REGEX REPLACE "^([0-9]+\\.[0-9]+).*$" "\\1" OSX_VERSION "${OSX_VERSION}")
if(OSX_VERSION GREATER 10.5)
# 10.6 builds are 64-bit
set(CMAKE_SYSTEM_PROCESSOR x86_64)
endif()
endif()
# Get system architecture
if(${CMAKE_SYSTEM_PROCESSOR} MATCHES i686*)
set(node_arch x86)
@ -23,7 +33,6 @@ if(${node_arch} MATCHES unknown)
set(node_arch x86)
endif()
# Copy tools directory for out-of-source build
string(COMPARE EQUAL $(PROJECT_BINARY_DIR) ${PROJECT_SOURCE_DIR} in_source_build)
if(NOT ${in_source_build})
@ -37,7 +46,7 @@ set(CMAKE_CXX_FLAGS_DEBUG "-O0 -Wall -g -Wextra -DDEBUG $ENV{CXXFLAGS}")
set(CMAKE_C_FLAGS_RELEASE "-g -O3 -DNDEBUG $ENV{CFLAGS}")
set(CMAKE_CXX_FLAGS_RELEASE "-g -O3 -DNDEBUG $ENV{CXXFLAGS}")
if(NOT ${node_platform} MATCHES "Windows")
if(NOT ${node_platform} MATCHES windows)
add_definitions(-D__POSIX__=1)
endif()
@ -52,21 +61,35 @@ endif()
if(${node_platform} MATCHES darwin)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -framework Carbon")
else()
# OSX fdatasync() check wrong: http://public.kitware.com/Bug/view.php?id=10044
check_function_exists(fdatasync HAVE_FDATASYNC)
endif()
check_function_exists(fdatasync HAVE_FDATASYNC)
if(HAVE_FDATASYNC)
add_definitions(-DHAVE_FDATASYNC=1)
else()
add_definitions(-DHAVE_FDATASYNC=0)
endif()
if(DTRACE)
if(NOT ${node_platform} MATCHES sunos)
message(FATAL_ERROR "DTrace support only currently available on Solaris")
endif()
find_program(dtrace_bin dtrace)
if(NOT dtrace_bin)
message(FATAL_ERROR "DTrace binary not found")
endif()
add_definitions(-DHAVE_DTRACE=1)
endif()
add_definitions(
-DPLATFORM="${node_platform}"
-DX_STACKSIZE=65536
-D_LARGEFILE_SOURCE
-D_FILE_OFFSET_BITS=64
-DEV_MULTIPLICITY=0
-D_FORTIFY_SOURCE=2
)
# set the exec output path to be compatible with the current waf build system
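Worth noting for consumers of the HAVE_FDATASYNC definition above: with it always set to 0 or 1, source code can branch on it at compile time. A minimal sketch, assuming a POSIX file descriptor (flush_data is an illustrative name, not node's actual code):

    /* Hypothetical illustration of how a HAVE_FDATASYNC definition is
     * typically consumed; node's real sources differ in detail. */
    #include <unistd.h>

    static int flush_data(int fd) {
    #if HAVE_FDATASYNC
      return fdatasync(fd);   /* flush file data only */
    #else
      return fsync(fd);       /* fall back: flush data and metadata */
    #endif
    }

On Darwin the check is skipped entirely because of the kitware bug linked above, so OSX builds always take the fsync branch.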

cmake/node_build.cmake (54)

@ -2,6 +2,32 @@
# node build stuff
#
set(macros_file ${PROJECT_BINARY_DIR}/macros.py)
# replace debug(x) and assert(x) with nothing in release build
if(${CMAKE_BUILD_TYPE} MATCHES Release)
file(APPEND ${macros_file} "macro debug(x) = ;\n")
file(APPEND ${macros_file} "macro assert(x) = ;\n")
endif()
if(NOT DTRACE)
set(dtrace_probes
DTRACE_HTTP_CLIENT_REQUEST
DTRACE_HTTP_CLIENT_RESPONSE
DTRACE_HTTP_SERVER_REQUEST
DTRACE_HTTP_SERVER_RESPONSE
DTRACE_NET_SERVER_CONNECTION
DTRACE_NET_STREAM_END
DTRACE_NET_SOCKET_READ
DTRACE_NET_SOCKET_WRITE)
foreach(probe ${dtrace_probes})
file(APPEND ${macros_file} "macro ${probe}(x) = ;\n")
endforeach()
endif()
# include macros file in generation
set(js2c_files ${js2c_files} ${macros_file})
add_custom_command(
OUTPUT ${PROJECT_BINARY_DIR}/src/node_natives.h
COMMAND ${PYTHON_EXECUTABLE} tools/js2c.py ${PROJECT_BINARY_DIR}/src/node_natives.h ${js2c_files}
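For reference, the net effect of the appends above is easiest to see in the generated macros.py itself. With a Release build and DTrace disabled it ends up containing, verbatim:

    macro debug(x) = ;
    macro assert(x) = ;
    macro DTRACE_HTTP_CLIENT_REQUEST(x) = ;
    macro DTRACE_HTTP_CLIENT_RESPONSE(x) = ;
    macro DTRACE_HTTP_SERVER_REQUEST(x) = ;
    macro DTRACE_HTTP_SERVER_RESPONSE(x) = ;
    macro DTRACE_NET_SERVER_CONNECTION(x) = ;
    macro DTRACE_NET_STREAM_END(x) = ;
    macro DTRACE_NET_SOCKET_READ(x) = ;
    macro DTRACE_NET_SOCKET_WRITE(x) = ;

js2c.py applies these macro definitions to the bundled JavaScript sources, so debug(), assert() and the probe call sites are compiled away entirely in the generated node_natives.h.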
@ -48,13 +74,10 @@ else()
endif()
get_directory_property(compile_defs COMPILE_DEFINITIONS)
foreach(def ${compile_defs})
# escape " in CPPFLAGS (-DPLATFORM="${node_platform}" would fuck stuff up
# otherwise)
string(REPLACE "\"" "\\\"" def ${def})
set(CPPFLAGS "${CPPFLAGS} -D${def}")
endforeach()
configure_file(src/node_config.h.in ${PROJECT_BINARY_DIR}/src/node_config.h)
configure_file(src/node_config.h.in ${PROJECT_BINARY_DIR}/src/node_config.h ESCAPE_QUOTES)
configure_file(config.h.cmake ${PROJECT_BINARY_DIR}/config.h)
include_directories(
@ -69,6 +92,15 @@ include_directories(
${PROJECT_BINARY_DIR}/src
)
if(DTRACE)
add_custom_command(OUTPUT ${PROJECT_BINARY_DIR}/src/node_provider.h
COMMAND ${dtrace_bin} -x nolibs -h -o ${PROJECT_BINARY_DIR}/src/node_provider.h -s ${PROJECT_SOURCE_DIR}/src/node_provider.d
DEPENDS ${PROJECT_SOURCE_DIR}/src/node_provider.d)
set(node_sources ${node_sources} src/node_provider.o)
set(node_sources src/node_provider.h ${node_sources})
endif()
add_executable(node ${node_sources})
set_target_properties(node PROPERTIES DEBUG_POSTFIX "_g")
target_link_libraries(node
@ -80,6 +112,20 @@ target_link_libraries(node
${CMAKE_THREAD_LIBS_INIT}
${extra_libs})
if(DTRACE)
# manually gather up the object files for dtrace
get_property(sourcefiles TARGET node PROPERTY SOURCES)
foreach(src_file ${sourcefiles})
if(src_file MATCHES ".*\\.cc$")
set(node_objs ${node_objs} ${PROJECT_BINARY_DIR}/CMakeFiles/node.dir/${src_file}.o)
endif()
endforeach()
add_custom_command(OUTPUT ${PROJECT_BINARY_DIR}/src/node_provider.o
#COMMAND cmake -E echo ${node_objs}
COMMAND ${dtrace_bin} -G -x nolibs -s ${PROJECT_SOURCE_DIR}/src/node_provider.d -o ${PROJECT_BINARY_DIR}/src/node_provider.o ${node_objs}
DEPENDS ${node_objs})
endif()
install(TARGETS node RUNTIME DESTINATION bin)
install(FILES
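For readers unfamiliar with USDT builds: the earlier custom command runs dtrace -h to generate node_provider.h (C macros for each probe), and this one runs dtrace -G to post-process the listed .o files so the probe sites become patchable call sites linked through node_provider.o. A minimal sketch of how such a generated header is consumed; the probe name is hypothetical, not read from node_provider.d:

    /* Sketch only: probe name is illustrative, not taken from node_provider.d. */
    #include "node_provider.h"

    void on_connection(void) {
      if (NODE_NET_SERVER_CONNECTION_ENABLED())   /* cheap is-enabled test */
        NODE_NET_SERVER_CONNECTION();             /* fire the USDT probe */
    }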

cmake/v8_build.cmake (18)

@ -14,7 +14,15 @@ endif()
if(NOT SHARED_V8)
if(V8_SNAPSHOT)
set(v8snapshot snapshot=on)
set(v8_snapshot snapshot=on)
endif()
if(V8_OPROFILE)
set(v8_oprofile prof=oprofile)
endif()
if(V8_GDBJIT)
set(v8_gdbjit gdbjit=on)
endif()
if(${node_platform} MATCHES darwin)
@ -40,6 +48,9 @@ if(NOT SHARED_V8)
set(compile_env_vars "CC=${CMAKE_C_COMPILER} CXX=${CMAKE_CXX_COMPILER} AR=${CMAKE_AR} RANLIB=${CMAKE_RANLIB} CFLAGS=\"${CMAKE_C_FLAGS}\" CXXFLAGS=\"${CMAKE_CXX_FLAGS}\" LDFLAGS=\"${CMAKE_EXE_LINKER_FLAGS}\"")
set(compile_cmd "${compile_env_vars} ${PYTHON_EXECUTABLE} ${PROJECT_BINARY_DIR}/tools/scons/scons.py -j ${parallel_jobs} visibility=default mode=${v8mode} arch=${v8arch} library=static ${v8_snapshot} ${v8_oprofile} ${v8_gdbjit} verbose=on")
if(CMAKE_VERSION VERSION_GREATER 2.8 OR CMAKE_VERSION VERSION_EQUAL 2.8)
# use ExternalProject for CMake >2.8
include(ExternalProject)
@ -48,8 +59,7 @@ if(NOT SHARED_V8)
URL ${PROJECT_SOURCE_DIR}/deps/v8
BUILD_IN_SOURCE True
BUILD_COMMAND sh -c "${compile_env_vars} ${PYTHON_EXECUTABLE} ${PROJECT_BINARY_DIR}/tools/scons/scons.py library=static visibility=default ${v8snapshot} mode=${v8mode} verbose=on arch=${v8arch} -j ${parallel_jobs}"
BUILD_COMMAND sh -c "${compile_cmd}"
SOURCE_DIR ${PROJECT_BINARY_DIR}/deps/v8
# ignore this stuff, it's not needed for building v8 but ExternalProject
# demands these steps
@ -79,7 +89,7 @@ if(NOT SHARED_V8)
add_custom_command(
OUTPUT ${PROJECT_BINARY_DIR}/deps/v8/${v8_fn}
COMMAND sh -c "${compile_env_vars} ${PYTHON_EXECUTABLE} ${PROJECT_BINARY_DIR}/tools/scons/scons.py library=static visibility=default ${v8snapshot} mode=${v8mode} verbose=on arch=${v8arch} -j ${parallel_jobs}"
COMMAND sh -c "${compile_cmd}"
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/deps/v8/
DEPENDS ${v8_sources_dest}
)

deps/libev/Changes (9)

@ -1,5 +1,14 @@
Revision history for libev, a high-performance and full-featured event loop.
4.04 Wed Feb 16 09:01:51 CET 2011
- fix two problems in the native win32 backend, where reuse of fd's
with different underlying handles caused handles not to be removed
or added to the select set (analyzed and tested by Bert Belder).
- do not rely on ceil() in ev_e?poll.c.
- backport libev to HP-UX versions before 11 v3.
- configure did not detect nanosleep and clock_gettime properly when
they are available in the libc (as opposed to -lrt).
4.03 Tue Jan 11 14:37:25 CET 2011
- officially support polling files with all backends.
- support files, /dev/zero etc. the same way as select in the epoll

deps/libev/configure.ac (2)

@ -1,7 +1,7 @@
AC_INIT
AC_CONFIG_SRCDIR([ev_epoll.c])
AM_INIT_AUTOMAKE(libev,4.03) dnl also update ev.h!
AM_INIT_AUTOMAKE(libev,4.04) dnl also update ev.h!
AC_CONFIG_HEADERS([config.h])
AM_MAINTAINER_MODE

deps/libev/ev++.h (6)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO

deps/libev/ev.3 (41)

@ -124,7 +124,7 @@
.\" ========================================================================
.\"
.IX Title "LIBEV 3"
.TH LIBEV 3 "2011-01-11" "libev-4.03" "libev - high performance full featured event loop"
.TH LIBEV 3 "2011-02-16" "libev-4.04" "libev - high performance full featured event loop"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@ -191,7 +191,7 @@ libev \- a high performance full\-featured event loop written in C
\& // now wait for events to arrive
\& ev_run (loop, 0);
\&
\& // unloop was called, so exit
\& // break was called, so exit
\& return 0;
\& }
.Ve
@ -562,6 +562,9 @@ This behaviour is useful when you want to do your own signal handling, or
want to handle signals only in specific threads and want to avoid libev
unblocking the signals.
.Sp
It's also required by \s-1POSIX\s0 in a threaded program, as libev calls
\&\f(CW\*(C`sigprocmask\*(C'\fR, whose behaviour is officially unspecified.
.Sp
This flag's behaviour will become the default in future versions of libev.
.ie n .IP """EVBACKEND_SELECT"" (value 1, portable select backend)" 4
.el .IP "\f(CWEVBACKEND_SELECT\fR (value 1, portable select backend)" 4
@ -987,7 +990,7 @@ anymore.
\& ... queue jobs here, make sure they register event watchers as long
\& ... as they still have work to do (even an idle watcher will do..)
\& ev_run (my_loop, 0);
\& ... jobs done or somebody called unloop. yeah!
\& ... jobs done or somebody called break. yeah!
.Ve
.IP "ev_break (loop, how)" 4
.IX Item "ev_break (loop, how)"
@ -1492,8 +1495,10 @@ Before a watcher can be registered with the event looop it has to be
initialised. This can be done with a call to \f(CW\*(C`ev_TYPE_init\*(C'\fR, or calls to
\&\f(CW\*(C`ev_init\*(C'\fR followed by the watcher-specific \f(CW\*(C`ev_TYPE_set\*(C'\fR function.
.Sp
In this state it is simply some block of memory that is suitable for use
in an event loop. It can be moved around, freed, reused etc. at will.
In this state it is simply some block of memory that is suitable for
use in an event loop. It can be moved around, freed, reused etc. at
will \- as long as you either keep the memory contents intact, or call
\&\f(CW\*(C`ev_TYPE_init\*(C'\fR again.
.IP "started/running/active" 4
.IX Item "started/running/active"
Once a watcher has been started with a call to \f(CW\*(C`ev_TYPE_start\*(C'\fR it becomes
@ -1528,8 +1533,9 @@ of whether it was active or not, so stopping a watcher explicitly before
freeing it is often a good idea.
.Sp
While stopped (and not pending) the watcher is essentially in the
initialised state, that is it can be reused, moved, modified in any way
you wish.
initialised state, that is, it can be reused, moved, modified in any way
you wish (but when you trash the memory block, you need to \f(CW\*(C`ev_TYPE_init\*(C'\fR
it again).
.SS "\s-1WATCHER\s0 \s-1PRIORITY\s0 \s-1MODELS\s0"
.IX Subsection "WATCHER PRIORITY MODELS"
Many event loops support \fIwatcher priorities\fR, which are usually small
@ -2431,7 +2437,8 @@ and unblock them in an \f(CW\*(C`ev_prepare\*(C'\fR watcher.
Both the signal mask (\f(CW\*(C`sigprocmask\*(C'\fR) and the signal disposition
(\f(CW\*(C`sigaction\*(C'\fR) are unspecified after starting a signal watcher (and after
stopping it again), that is, libev might or might not block the signal,
and might or might not set or restore the installed signal handler.
and might or might not set or restore the installed signal handler (but
see \f(CW\*(C`EVFLAG_NOSIGMASK\*(C'\fR).
.PP
While this does not matter for the signal disposition (libev never
sets signals to \f(CW\*(C`SIG_IGN\*(C'\fR, so handlers will be reset to \f(CW\*(C`SIG_DFL\*(C'\fR on
@ -3309,7 +3316,7 @@ cleanup functions are called.
.ie n .SS """ev_async"" \- how to wake up an event loop"
.el .SS "\f(CWev_async\fP \- how to wake up an event loop"
.IX Subsection "ev_async - how to wake up an event loop"
In general, you cannot use an \f(CW\*(C`ev_run\*(C'\fR from multiple threads or other
In general, you cannot use an \f(CW\*(C`ev_loop\*(C'\fR from multiple threads or other
asynchronous sources such as signal handlers (as opposed to multiple event
loops \- those are of course safe to use in different threads).
.PP
@ -3424,10 +3431,12 @@ trust me.
.IP "ev_async_send (loop, ev_async *)" 4
.IX Item "ev_async_send (loop, ev_async *)"
Sends/signals/activates the given \f(CW\*(C`ev_async\*(C'\fR watcher, that is, feeds
an \f(CW\*(C`EV_ASYNC\*(C'\fR event on the watcher into the event loop. Unlike
\&\f(CW\*(C`ev_feed_event\*(C'\fR, this call is safe to do from other threads, signal or
similar contexts (see the discussion of \f(CW\*(C`EV_ATOMIC_T\*(C'\fR in the embedding
section below on what exactly this means).
an \f(CW\*(C`EV_ASYNC\*(C'\fR event on the watcher into the event loop, and instantly
returns.
.Sp
Unlike \f(CW\*(C`ev_feed_event\*(C'\fR, this call is safe to do from other threads,
signal or similar contexts (see the discussion of \f(CW\*(C`EV_ATOMIC_T\*(C'\fR in the
embedding section below on what exactly this means).
.Sp
Note that, as with other watchers in libev, multiple events might get
compressed into a single callback invocation (another way to look at this
@ -3660,7 +3669,7 @@ First, you need to associate some data with the event loop:
\& ev_set_invoke_pending_cb (EV_A_ l_invoke);
\& ev_set_loop_release_cb (EV_A_ l_release, l_acquire);
\&
\& // then create the thread running ev_loop
\& // then create the thread running ev_run
\& pthread_create (&u\->tid, 0, l_run, EV_A);
\& }
.Ve
@ -5290,7 +5299,7 @@ The physical time that is observed. It is apparently strictly monotonic :)
.IP "wall-clock time" 4
.IX Item "wall-clock time"
The time and date as shown on clocks. Unlike real time, it can actually
be wrong and jump forwards and backwards, e.g. when the you adjust your
be wrong and jump forwards and backwards, e.g. when you adjust your
clock.
.IP "watcher" 4
.IX Item "watcher"
@ -5299,4 +5308,4 @@ to be started (attached to an event loop) before they can receive events.
.SH "AUTHOR"
.IX Header "AUTHOR"
Marc Lehmann <libev@schmorp.de>, with repeated corrections by Mikael
Magnusson and Emanuele Giaquinta.
Magnusson and Emanuele Giaquinta, and minor corrections by many others.

deps/libev/ev.c (40)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@ -378,7 +378,8 @@ EV_CPP(extern "C" {)
#endif
#if !EV_USE_NANOSLEEP
# ifndef _WIN32
/* hp-ux has it in sys/time.h, which we unconditionally include above */
# if !defined(_WIN32) && !defined(__hpux)
# include <sys/select.h>
# endif
#endif
@ -817,6 +818,14 @@ ev_sleep (ev_tstamp delay)
}
}
inline_speed int
ev_timeout_to_ms (ev_tstamp timeout)
{
int ms = timeout * 1000. + .999999;
return expect_true (ms) ? ms : timeout < 1e-6 ? 0 : 1;
}
/*****************************************************************************/
#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
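For clarity: the new ev_timeout_to_ms helper rounds a fractional-second timeout up to whole milliseconds with plain arithmetic instead of ceil(), which is what the 4.04 Changes entry "do not rely on ceil()" refers to. A standalone check of the same arithmetic, with libev's expect_true macro dropped:

    #include <stdio.h>

    /* same arithmetic as ev_timeout_to_ms, minus expect_true */
    static int timeout_to_ms(double timeout) {
      int ms = timeout * 1000. + .999999;   /* truncation rounds up */
      return ms ? ms : timeout < 1e-6 ? 0 : 1;
    }

    int main(void) {
      printf("%d %d %d\n",
             timeout_to_ms(0.0),      /* 0 */
             timeout_to_ms(0.0004),   /* 1: 0.4 ms rounds up to 1 ms */
             timeout_to_ms(1.5));     /* 1500 */
      return 0;
    }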
@ -1363,14 +1372,16 @@ pipecb (EV_P_ ev_io *iow, int revents)
#endif
}
#if EV_SIGNAL_ENABLE
if (sig_pending)
{
{
sig_pending = 0;
for (i = EV_NSIG - 1; i--; )
if (expect_false (signals [i].pending))
ev_feed_signal_event (EV_A_ i + 1);
}
#endif
#if EV_ASYNC_ENABLE
if (async_pending)
@ -2201,6 +2212,15 @@ timers_reify (EV_P)
}
#if EV_PERIODIC_ENABLE
inline_speed void
periodic_recalc (EV_P_ ev_periodic *w)
{
/* TODO: use slow but potentially more correct incremental algo, */
/* also do not rely on ceil */
ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
}
/* make periodics pending */
inline_size void
periodics_reify (EV_P)
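The new helper centralizes the next-trigger computation: the first multiple of interval, measured from offset, that is not before ev_rt_now. A worked instance of the formula with assumed sample values (offset 0, a 60-second interval, current time 130):

    /* build: cc demo.c -lm */
    #include <math.h>
    #include <stdio.h>

    int main(void) {
      double offset = 0., interval = 60., ev_rt_now = 130.;
      /* same formula as periodic_recalc */
      double at = offset + ceil((ev_rt_now - offset) / interval) * interval;
      printf("%g\n", at);  /* 180: the next 60-second boundary after 130 */
      return 0;
    }

The TODO in the source notes that this still relies on ceil(); centralizing it in one function is what makes a later incremental replacement practical.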
@ -2229,7 +2249,8 @@ periodics_reify (EV_P)
}
else if (w->interval)
{
ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
periodic_recalc (EV_A_ w);
/* if next trigger time is not sufficiently in the future, put it there */
/* this might happen because of floating point inexactness */
if (ev_at (w) - ev_rt_now < TIME_EPSILON)
@ -2273,7 +2294,7 @@ periodics_reschedule (EV_P)
if (w->reschedule_cb)
ev_at (w) = w->reschedule_cb (w, ev_rt_now);
else if (w->interval)
ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
periodic_recalc (EV_A_ w);
ANHE_at_cache (periodics [i]);
}
@ -2775,8 +2796,7 @@ ev_periodic_start (EV_P_ ev_periodic *w)
else if (w->interval)
{
assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.));
/* this formula differs from the one in periodic_reify because we do not always round up */
ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
periodic_recalc (EV_A_ w);
}
else
ev_at (w) = w->offset;
@ -3063,7 +3083,7 @@ infy_add (EV_P_ ev_stat *w)
*pend = 0;
w->wd = inotify_add_watch (fs_fd, path, mask);
}
}
while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
}
}

deps/libev/ev.h (10)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@ -185,7 +185,7 @@ struct ev_loop;
# define EV_INLINE static
#endif
/* EV_PROTOTYPES can be sued to switch of prototype declarations */
/* EV_PROTOTYPES can be used to switch of prototype declarations */
#ifndef EV_PROTOTYPES
# define EV_PROTOTYPES 1
#endif
@ -193,7 +193,7 @@ struct ev_loop;
/*****************************************************************************/
#define EV_VERSION_MAJOR 4
#define EV_VERSION_MINOR 3
#define EV_VERSION_MINOR 4
/* eventmask, revents, events... */
enum {

deps/libev/ev.pod (39)

@ -60,7 +60,7 @@ libev - a high performance full-featured event loop written in C
// now wait for events to arrive
ev_run (loop, 0);
// unloop was called, so exit
// break was called, so exit
return 0;
}
@ -444,6 +444,9 @@ This behaviour is useful when you want to do your own signal handling, or
want to handle signals only in specific threads and want to avoid libev
unblocking the signals.
It's also required by POSIX in a threaded program, as libev calls
C<sigprocmask>, whose behaviour is officially unspecified.
This flag's behaviour will become the default in future versions of libev.
=item C<EVBACKEND_SELECT> (value 1, portable select backend)
@ -867,7 +870,7 @@ anymore.
... queue jobs here, make sure they register event watchers as long
... as they still have work to do (even an idle watcher will do..)
ev_run (my_loop, 0);
... jobs done or somebody called unloop. yeah!
... jobs done or somebody called break. yeah!
=item ev_break (loop, how)
@ -1377,8 +1380,10 @@ Before a watcher can be registered with the event looop it has to be
initialised. This can be done with a call to C<ev_TYPE_init>, or calls to
C<ev_init> followed by the watcher-specific C<ev_TYPE_set> function.
In this state it is simply some block of memory that is suitable for use
in an event loop. It can be moved around, freed, reused etc. at will.
In this state it is simply some block of memory that is suitable for
use in an event loop. It can be moved around, freed, reused etc. at
will - as long as you either keep the memory contents intact, or call
C<ev_TYPE_init> again.
=item started/running/active
@ -1416,8 +1421,9 @@ of whether it was active or not, so stopping a watcher explicitly before
freeing it is often a good idea.
While stopped (and not pending) the watcher is essentially in the
initialised state, that is it can be reused, moved, modified in any way
you wish.
initialised state, that is, it can be reused, moved, modified in any way
you wish (but when you trash the memory block, you need to C<ev_TYPE_init>
it again).
=back
@ -2302,7 +2308,8 @@ and unblock them in an C<ev_prepare> watcher.
Both the signal mask (C<sigprocmask>) and the signal disposition
(C<sigaction>) are unspecified after starting a signal watcher (and after
stopping it again), that is, libev might or might not block the signal,
and might or might not set or restore the installed signal handler.
and might or might not set or restore the installed signal handler (but
see C<EVFLAG_NOSIGMASK>).
While this does not matter for the signal disposition (libev never
sets signals to C<SIG_IGN>, so handlers will be reset to C<SIG_DFL> on
@ -3183,7 +3190,7 @@ cleanup functions are called.
=head2 C<ev_async> - how to wake up an event loop
In general, you cannot use an C<ev_run> from multiple threads or other
In general, you cannot use an C<ev_loop> from multiple threads or other
asynchronous sources such as signal handlers (as opposed to multiple event
loops - those are of course safe to use in different threads).
@ -3303,10 +3310,12 @@ trust me.
=item ev_async_send (loop, ev_async *)
Sends/signals/activates the given C<ev_async> watcher, that is, feeds
an C<EV_ASYNC> event on the watcher into the event loop. Unlike
C<ev_feed_event>, this call is safe to do from other threads, signal or
similar contexts (see the discussion of C<EV_ATOMIC_T> in the embedding
section below on what exactly this means).
an C<EV_ASYNC> event on the watcher into the event loop, and instantly
returns.
Unlike C<ev_feed_event>, this call is safe to do from other threads,
signal or similar contexts (see the discussion of C<EV_ATOMIC_T> in the
embedding section below on what exactly this means).
Note that, as with other watchers in libev, multiple events might get
compressed into a single callback invocation (another way to look at this
@ -3542,7 +3551,7 @@ First, you need to associate some data with the event loop:
ev_set_invoke_pending_cb (EV_A_ l_invoke);
ev_set_loop_release_cb (EV_A_ l_release, l_acquire);
// then create the thread running ev_loop
// then create the thread running ev_run
pthread_create (&u->tid, 0, l_run, EV_A);
}
@ -5217,7 +5226,7 @@ The physical time that is observed. It is apparently strictly monotonic :)
=item wall-clock time
The time and date as shown on clocks. Unlike real time, it can actually
be wrong and jump forwards and backwards, e.g. when the you adjust your
be wrong and jump forwards and backwards, e.g. when you adjust your
clock.
=item watcher
@ -5230,5 +5239,5 @@ to be started (attached to an event loop) before they can receive events.
=head1 AUTHOR
Marc Lehmann <libev@schmorp.de>, with repeated corrections by Mikael
Magnusson and Emanuele Giaquinta.
Magnusson and Emanuele Giaquinta, and minor corrections by many others.

deps/libev/ev_epoll.c (8)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@ -148,7 +148,7 @@ epoll_poll (EV_P_ ev_tstamp timeout)
/* the default libev max wait time, however. */
EV_RELEASE_CB;
eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax,
epoll_epermcnt ? 0 : (int)ceil (timeout * 1000.));
epoll_epermcnt ? 0 : ev_timeout_to_ms (timeout));
EV_ACQUIRE_CB;
if (expect_false (eventcnt < 0))

deps/libev/ev_kqueue.c (10)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@ -103,12 +103,12 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
kqueue_changecnt = 0;
if (expect_false (res < 0))
{
{
if (errno != EINTR)
ev_syserr ("(libev) kevent");
return;
}
}
for (i = 0; i < res; ++i)
{

deps/libev/ev_poll.c (10)

@ -1,19 +1,19 @@
/*
* libev poll fd activity backend
*
* Copyright (c) 2007,2008,2009,2010 Marc Alexander Lehmann <libev@schmorp.de>
* Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@ -92,7 +92,7 @@ poll_poll (EV_P_ ev_tstamp timeout)
int res;
EV_RELEASE_CB;
res = poll (polls, pollcnt, (int)ceil (timeout * 1000.));
res = poll (polls, pollcnt, ev_timeout_to_ms (timeout));
EV_ACQUIRE_CB;
if (expect_false (res < 0))

deps/libev/ev_port.c (8)

@ -1,19 +1,19 @@
/*
* libev solaris event port backend
*
* Copyright (c) 2007,2008,2009,2010 Marc Alexander Lehmann <libev@schmorp.de>
* Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO

deps/libev/ev_select.c (19)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@ -39,8 +39,11 @@
#ifndef _WIN32
/* for unix systems */
# include <sys/select.h>
# include <inttypes.h>
# ifndef __hpux
/* for REAL unix systems */
# include <sys/select.h>
# endif
#endif
#ifndef EV_SELECT_USE_FD_SET
@ -280,10 +283,10 @@ select_init (EV_P_ int flags)
#endif
#else
vec_max = 0;
vec_ri = 0;
vec_ro = 0;
vec_wi = 0;
vec_wo = 0;
vec_ri = 0;
vec_ro = 0;
vec_wi = 0;
vec_wo = 0;
#ifdef _WIN32
vec_eo = 0;
#endif

deps/libev/ev_vars.h (6)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO

deps/libev/ev_win32.c (12)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
@ -46,7 +46,7 @@
/* MSDN says this is required to handle SIGFPE */
/* my wild guess would be that using something floating-pointy is required */
/* for the crt to do something about it */
volatile double SIGFPE_REQ = 0.0f;
volatile double SIGFPE_REQ = 0.0f;
/* oh, the humanity! */
static int
@ -59,7 +59,7 @@ ev_pipe (int filedes [2])
SOCKET listener;
SOCKET sock [2] = { -1, -1 };
if ((listener = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
if ((listener = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
return -1;
addr.sin_family = AF_INET;
@ -75,7 +75,7 @@ ev_pipe (int filedes [2])
if (listen (listener, 1))
goto fail;
if ((sock [0] = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
if ((sock [0] = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
goto fail;
if (connect (sock [0], (struct sockaddr *)&addr, addr_size))

deps/libev/event.c (6)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO

deps/libev/event.h (6)

@ -6,14 +6,14 @@
*
* Redistribution and use in source and binary forms, with or without modifica-
* tion, are permitted provided that the following conditions are met:
*
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
* CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO

deps/libev/libev.m4 (4)

@ -6,7 +6,7 @@ AC_CHECK_HEADERS(sys/inotify.h sys/epoll.h sys/event.h port.h poll.h sys/select.
AC_CHECK_FUNCS(inotify_init epoll_ctl kqueue port_create poll select eventfd signalfd)
AC_CHECK_FUNC(clock_gettime, [], [
AC_CHECK_FUNCS(clock_gettime, [], [
dnl on linux, try syscall wrapper first
if test $(uname) = Linux; then
AC_MSG_CHECKING(for clock_gettime syscall)
@ -27,7 +27,7 @@ AC_CHECK_FUNC(clock_gettime, [], [
fi
])
AC_CHECK_FUNC(nanosleep, [], [
AC_CHECK_FUNCS(nanosleep, [], [
if test -z "$LIBEV_M4_AVOID_LIBRT"; then
AC_CHECK_LIB(rt, nanosleep)
unset ac_cv_func_nanosleep
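This is the fix referenced by the 4.04 Changes entry about nanosleep/clock_gettime detection: AC_CHECK_FUNC only runs its action-if-found, whereas AC_CHECK_FUNCS additionally defines HAVE_<FUNC> in config.h, which is what the feature tests key off. A sketch of code guarded on the resulting symbol (illustrative, not libev's actual guard):

    /* With AC_CHECK_FUNCS, config.h gains HAVE_NANOSLEEP when libc has it. */
    #include "config.h"
    #include <time.h>

    static void sleep_one_ms(void) {
    #if HAVE_NANOSLEEP
      struct timespec ts = { 0, 1000000 };  /* 1 ms */
      nanosleep(&ts, 0);
    #endif
    }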

deps/v8/.gitignore (2)

@ -20,6 +20,8 @@ d8_g
shell
shell_g
/obj/
/test/es5conform/data/
/test/mozilla/data/
/test/sputnik/sputniktests/
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o

deps/v8/AUTHORS (1)

@ -9,6 +9,7 @@ ARM Ltd.
Hewlett-Packard Development Company, LP
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
Alexandre Vassalotti <avassalotti@gmail.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Bert Belder <bertbelder@gmail.com>

deps/v8/ChangeLog (65)

@ -1,3 +1,64 @@
2011-02-16: Version 3.1.5
Change RegExp parsing to disallow /(*)/.
Added GDB JIT support for ARM.
Fixed several crash bugs.
Performance improvements on the IA32 platform.
2011-02-14: Version 3.1.4
Fixed incorrect compare of prototypes of the global object (issue
1082).
Fixed a bug in optimizing calls to global functions (issue 1106).
Made optimized Function.prototype.apply safe for non-JSObject first
arguments (issue 1128).
Fixed an error related to element accessors on Object.prototype and
parser errors (issue 1130).
Fixed a bug in sorting an array with large array indices (issue 1131).
Properly treat exceptions thrown while compiling (issue 1132).
Fixed bug in register requirements for function.apply (issue 1133).
Fixed a representation change bug in the Hydrogen graph construction
(issue 1134).
Fixed the semantics of delete on parameters (issue 1136).
Fixed a optimizer bug related to moving instructions with side effects
(issue 1138).
Added support for the global object in Object.keys (issue 1150).
Fixed incorrect value for Math.LOG10E
(issue http://code.google.com/p/chromium/issues/detail?id=72555)
Performance improvements on the IA32 platform.
Implement assignment to undefined reference in ES5 Strict Mode.
2011-02-09: Version 3.1.3
Fixed a bug triggered by functions with huge numbers of declared
arguments.
Fixed zap value aliasing a real object - debug mode only (issue 866).
Fixed issue where Array.prototype.__proto__ had been set to null
(issue 1121).
Fixed stability bugs in Crankshaft for x86.
2011-02-07: Version 3.1.2
Added better security checks when accessing properties via
@ -56,8 +117,8 @@
Introduced partial strict mode support.
Changed formatting of recursive error messages to match Firefox and Safari
(issue http://crbug.com/70334).
Changed formatting of recursive error messages to match Firefox and
Safari (issue http://crbug.com/70334).
Fixed incorrect rounding for float-to-integer conversions for external
array types, which implement the Typed Array spec

deps/v8/SConstruct (143)

@ -27,6 +27,7 @@
import platform
import re
import subprocess
import sys
import os
from os.path import join, dirname, abspath
@ -136,12 +137,15 @@ LIBRARY_FLAGS = {
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions', '-fno-builtin-memcpy'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
},
'visibility:hidden': {
# Use visibility=default to disable this.
'CXXFLAGS': ['-fvisibility=hidden']
},
'strictaliasing:off': {
'CCFLAGS': ['-fno-strict-aliasing']
},
'mode:debug': {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
@ -188,9 +192,6 @@ LIBRARY_FLAGS = {
'LIBPATH' : ['/usr/local/lib'],
'CCFLAGS': ['-ansi'],
},
'os:cygwin': {
'WARNINGFLAGS': ['-Werror'],
},
'os:win32': {
'CCFLAGS': ['-DWIN32'],
'CXXFLAGS': ['-DWIN32'],
@ -237,9 +238,6 @@ LIBRARY_FLAGS = {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64'],
},
'prof:oprofile': {
'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
},
'gdbjit:on': {
'CPPDEFINES': ['ENABLE_GDB_JIT_INTERFACE']
}
@ -541,10 +539,6 @@ SAMPLE_FLAGS = {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['DEBUG']
},
'prof:oprofile': {
'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
'LIBS': ['opagent']
}
},
'msvc': {
'all': {
@ -661,8 +655,16 @@ def Abort(message):
sys.exit(1)
def GuessToolchain(os):
tools = Environment()['TOOLS']
def GuessOS(env):
return utils.GuessOS()
def GuessArch(env):
return utils.GuessArchitecture()
def GuessToolchain(env):
tools = env['TOOLS']
if 'gcc' in tools:
return 'gcc'
elif 'msvc' in tools:
@ -671,9 +673,11 @@ def GuessToolchain(os):
return None
def GuessVisibility(os, toolchain):
if os == 'win32' and toolchain == 'gcc':
# MinGW can't do it.
def GuessVisibility(env):
os = env['os']
toolchain = env['toolchain'];
if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc':
# MinGW / Cygwin can't do it.
return 'default'
elif os == 'solaris':
return 'default'
@ -681,27 +685,35 @@ def GuessVisibility(os, toolchain):
return 'hidden'
OS_GUESS = utils.GuessOS()
TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
ARCH_GUESS = utils.GuessArchitecture()
VISIBILITY_GUESS = GuessVisibility(OS_GUESS, TOOLCHAIN_GUESS)
def GuessStrictAliasing(env):
# There seems to be a problem with gcc 4.5.x
# see http://code.google.com/p/v8/issues/detail?id=884
# it can be worked around by disabling strict aliasing
toolchain = env['toolchain'];
if toolchain == 'gcc':
env = Environment(tools=['gcc'])
version = subprocess.Popen([env['CC'], '-dumpversion'],
stdout=subprocess.PIPE).communicate()[0]
if version.find('4.5.') == 0:
return 'off'
return 'default'
SIMPLE_OPTIONS = {
'toolchain': {
'values': ['gcc', 'msvc'],
'default': TOOLCHAIN_GUESS,
'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
'guess': GuessToolchain,
'help': 'the toolchain to use'
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
'default': OS_GUESS,
'help': 'the os to build for (%s)' % OS_GUESS
'guess': GuessOS,
'help': 'the os to build for'
},
'arch': {
'values':['arm', 'ia32', 'x64', 'mips'],
'default': ARCH_GUESS,
'help': 'the architecture to build for (%s)' % ARCH_GUESS
'guess': GuessArch,
'help': 'the architecture to build for'
},
'regexp': {
'values': ['native', 'interpreted'],
@ -714,7 +726,7 @@ SIMPLE_OPTIONS = {
'help': 'build using snapshots for faster start-up'
},
'prof': {
'values': ['on', 'off', 'oprofile'],
'values': ['on', 'off'],
'default': 'off',
'help': 'enable profiling of build target'
},
@ -810,8 +822,15 @@ SIMPLE_OPTIONS = {
},
'visibility': {
'values': ['default', 'hidden'],
'default': VISIBILITY_GUESS,
'help': 'shared library symbol visibility (%s)' % VISIBILITY_GUESS
'guess': GuessVisibility,
'depends': ['os', 'toolchain'],
'help': 'shared library symbol visibility'
},
'strictaliasing': {
'values': ['default', 'off'],
'guess': GuessStrictAliasing,
'depends': ['toolchain'],
'help': 'assume strict aliasing while optimizing'
},
'pgo': {
'values': ['off', 'instrument', 'optimize'],
@ -821,6 +840,26 @@ SIMPLE_OPTIONS = {
}
def AddOption(result, name, option):
if 'guess' in option:
# Option has a guess function
guess = option.get('guess')
guess_env = Environment(options=result)
# Check if all options that the guess function depends on are set
if 'depends' in option:
for dependency in option.get('depends'):
if not dependency in guess_env:
return False
default = guess(guess_env)
else:
# Option has a fixed default
default = option.get('default')
help = '%s (%s)' % (option.get('help'), ", ".join(option['values']))
result.Add(name, help, default)
return True
def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
@ -828,12 +867,28 @@ def GetOptions():
result.Add('cache', 'directory to use for scons build cache', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
for (name, option) in SIMPLE_OPTIONS.iteritems():
help = '%s (%s)' % (name, ", ".join(option['values']))
result.Add(name, help, option.get('default'))
options = SIMPLE_OPTIONS
while len(options):
postpone = {}
for (name, option) in options.iteritems():
if not AddOption(result, name, option):
postpone[name] = option
options = postpone
return result
def GetTools(opts):
env = Environment(options=opts)
os = env['os']
toolchain = env['toolchain']
if os == 'win32' and toolchain == 'gcc':
return ['mingw']
elif os == 'win32' and toolchain == 'msvc':
return ['msvc', 'mslink', 'mslib', 'msvs']
else:
return ['default']
def GetVersionComponents():
MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)")
MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)")
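The while loop in GetOptions above implements a small fixed-point pass: any option whose guess function declares depends that are not yet registered is postponed to the next round, until a round adds everything. The same resolve-then-postpone pattern in a self-contained C sketch (option names illustrative; unlike the SConstruct version, this one also bails out on circular dependencies):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for SIMPLE_OPTIONS: each option may depend on
     * one other option before its default can be guessed. */
    struct option { const char *name; const char *depends; int resolved; };

    int main(void) {
      struct option opts[] = {
        { "visibility",     "toolchain", 0 },
        { "toolchain",      NULL,        0 },
        { "strictaliasing", "toolchain", 0 },
      };
      size_t n = sizeof opts / sizeof *opts, remaining = n;

      while (remaining) {
        size_t progressed = 0;
        for (size_t i = 0; i < n; i++) {
          if (opts[i].resolved) continue;
          if (opts[i].depends) {            /* postpone until dependency is in */
            int dep_ok = 0;
            for (size_t j = 0; j < n; j++)
              if (opts[j].resolved && !strcmp(opts[j].name, opts[i].depends))
                dep_ok = 1;
            if (!dep_ok) continue;
          }
          opts[i].resolved = 1;             /* SConstruct calls guess() here */
          printf("added %s\n", opts[i].name);
          progressed++; remaining--;
        }
        if (!progressed) break;             /* circular depends: don't spin */
      }
      return 0;
    }

The original relies on the depends graph being well-formed; a cycle would make its while loop spin forever, hence the extra guard here.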
@ -899,10 +954,8 @@ def VerifyOptions(env):
return False
if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
Abort("Profiling on windows only supported for static library.")
if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64')):
if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
if env['prof'] == 'oprofile' and env['os'] != 'linux':
Abort("OProfile is only supported on Linux.")
if env['os'] == 'win32' and env['soname'] == 'on':
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':
@ -916,7 +969,7 @@ def VerifyOptions(env):
print env['simulator']
Abort("Option unalignedaccesses only supported for the ARM architecture.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
if (not option.get('default')) and (name not in ARGUMENTS):
if (not name in env):
message = ("A value for option %s must be specified (%s)." %
(name, ", ".join(option['values'])))
Abort(message)
@ -1044,7 +1097,7 @@ def ParseEnvOverrides(arg, imports):
return overrides
def BuildSpecific(env, mode, env_overrides):
def BuildSpecific(env, mode, env_overrides, tools):
options = {'mode': mode}
for option in SIMPLE_OPTIONS:
options[option] = env[option]
@ -1097,7 +1150,7 @@ def BuildSpecific(env, mode, env_overrides):
(object_files, shell_files, mksnapshot) = env.SConscript(
join('src', 'SConscript'),
build_dir=join('obj', target_id),
exports='context',
exports='context tools',
duplicate=False
)
@ -1117,21 +1170,21 @@ def BuildSpecific(env, mode, env_overrides):
library = env.SharedLibrary(library_name, object_files, PDB=pdb_name)
context.library_targets.append(library)
d8_env = Environment()
d8_env = Environment(tools=tools)
d8_env.Replace(**context.flags['d8'])
context.ApplyEnvOverrides(d8_env)
shell = d8_env.Program('d8' + suffix, object_files + shell_files)
context.d8_targets.append(shell)
for sample in context.samples:
sample_env = Environment()
sample_env = Environment(tools=tools)
sample_env.Replace(**context.flags['sample'])
sample_env.Prepend(LIBS=[library_name])
context.ApplyEnvOverrides(sample_env)
sample_object = sample_env.SConscript(
join('samples', 'SConscript'),
build_dir=join('obj', 'sample', sample, target_id),
exports='sample context',
exports='sample context tools',
duplicate=False
)
sample_name = sample + suffix
@ -1144,7 +1197,7 @@ def BuildSpecific(env, mode, env_overrides):
cctest_program = cctest_env.SConscript(
join('test', 'cctest', 'SConscript'),
build_dir=join('obj', 'test', target_id),
exports='context object_files',
exports='context object_files tools',
duplicate=False
)
context.cctest_targets.append(cctest_program)
@ -1154,7 +1207,9 @@ def BuildSpecific(env, mode, env_overrides):
def Build():
opts = GetOptions()
env = Environment(options=opts)
tools = GetTools(opts)
env = Environment(options=opts, tools=tools)
Help(opts.GenerateHelpText(env))
VerifyOptions(env)
env_overrides = ParseEnvOverrides(env['env'], env['importenv'])
@ -1168,7 +1223,7 @@ def Build():
d8s = []
modes = SplitList(env['mode'])
for mode in modes:
context = BuildSpecific(env.Copy(), mode, env_overrides)
context = BuildSpecific(env.Copy(), mode, env_overrides, tools)
libraries += context.library_targets
mksnapshots += context.mksnapshot_targets
cctests += context.cctest_targets

deps/v8/include/v8.h (1)

@ -462,7 +462,6 @@ class V8EXPORT HandleScope {
void Leave();
internal::Object** prev_next_;
internal::Object** prev_limit_;

deps/v8/samples/shell.cc (3)

@ -27,6 +27,7 @@
#include <v8.h>
#include <v8-testing.h>
#include <assert.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
@ -290,11 +291,13 @@ bool ExecuteString(v8::Handle<v8::String> source,
} else {
v8::Handle<v8::Value> result = script->Run();
if (result.IsEmpty()) {
assert(try_catch.HasCaught());
// Print errors that happened during execution.
if (report_exceptions)
ReportException(&try_catch);
return false;
} else {
assert(!try_catch.HasCaught());
if (print_result && !result->IsUndefined()) {
// If all went well and the result wasn't undefined then print
// the returned value.

deps/v8/src/SConscript (7)

@ -31,6 +31,7 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c
Import('context')
Import('tools')
SOURCES = {
@ -97,7 +98,6 @@ SOURCES = {
objects.cc
objects-printer.cc
objects-visiting.cc
oprofile-agent.cc
parser.cc
preparser.cc
preparse-data.cc
@ -266,6 +266,9 @@ D8_FILES = {
'os:solaris': [
'd8-posix.cc'
],
'os:cygwin': [
'd8-posix.cc'
],
'os:win32': [
'd8-windows.cc'
],
@ -302,7 +305,7 @@ def Abort(message):
def ConfigureObjectFiles():
env = Environment()
env = Environment(tools=tools)
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)

deps/v8/src/accessors.cc (1)

@ -447,6 +447,7 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
if (!function->has_prototype()) {
if (!function->should_have_prototype()) return Heap::undefined_value();
Object* prototype;
{ MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;

deps/v8/src/api.cc (68)

@ -115,7 +115,9 @@ static FatalErrorCallback exception_behavior = NULL;
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
ENTER_V8;
#ifdef ENABLE_VMSTATE_TRACKING
i::VMState __state__(i::OTHER);
#endif
API_Fatal(location, message);
}
@ -668,7 +670,7 @@ static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
v8::PropertyAttribute attribute) {
if (IsDeadCheck("v8::Template::SetProperty()")) return;
if (IsDeadCheck("v8::Template::Set()")) return;
ENTER_V8;
HandleScope scope;
i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
@ -2204,6 +2206,12 @@ bool Value::Equals(Handle<Value> that) const {
ENTER_V8;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
// If both obj and other are JSObjects, we'd better compare by identity
// immediately when going into JS builtin. The reason is Invoke
// would overwrite global object receiver with global proxy.
if (obj->IsJSObject() && other->IsJSObject()) {
return *obj == *other;
}
i::Object** args[1] = { other.location() };
EXCEPTION_PREAMBLE();
i::Handle<i::Object> result =
@ -2653,26 +2661,38 @@ int v8::Object::GetIdentityHash() {
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> hidden_props(i::GetHiddenProperties(self, true));
i::Handle<i::Object> hash_symbol = i::Factory::identity_hash_symbol();
i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
int hash_value;
if (hash->IsSmi()) {
hash_value = i::Smi::cast(*hash)->value();
} else {
int attempts = 0;
do {
// Generate a random 32-bit hash value but limit range to fit
// within a smi.
hash_value = i::V8::Random() & i::Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
i::SetProperty(hidden_props,
hash_symbol,
i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
static_cast<PropertyAttributes>(None));
i::Handle<i::Object> hidden_props_obj(i::GetHiddenProperties(self, true));
if (!hidden_props_obj->IsJSObject()) {
// We failed to create hidden properties. That's a detached
// global proxy.
ASSERT(hidden_props_obj->IsUndefined());
return 0;
}
i::Handle<i::JSObject> hidden_props =
i::Handle<i::JSObject>::cast(hidden_props_obj);
i::Handle<i::String> hash_symbol = i::Factory::identity_hash_symbol();
if (hidden_props->HasLocalProperty(*hash_symbol)) {
i::Handle<i::Object> hash = i::GetProperty(hidden_props, hash_symbol);
CHECK(!hash.is_null());
CHECK(hash->IsSmi());
return i::Smi::cast(*hash)->value();
}
int hash_value;
int attempts = 0;
do {
// Generate a random 32-bit hash value but limit range to fit
// within a smi.
hash_value = i::V8::Random() & i::Smi::kMaxValue;
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
CHECK(!i::SetLocalPropertyIgnoreAttributes(
hidden_props,
hash_symbol,
i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
static_cast<PropertyAttributes>(None)).is_null());
return hash_value;
}
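The rewritten path caches the hash under identity_hash_symbol after first use and returns the stored smi thereafter; only the generation step is subtle. A distilled sketch of that step, where rand() and the constant stand in for i::V8::Random() and i::Smi::kMaxValue:

    #include <stdlib.h>

    enum { kSmiMaxValue = 0x3fffffff };  /* stand-in for i::Smi::kMaxValue */

    /* Random nonzero hash that fits in a smi, as in GetIdentityHash. */
    static int generate_identity_hash(void) {
      int hash_value, attempts = 0;
      do {
        hash_value = rand() & kSmiMaxValue;  /* rand() stands in for Random() */
        attempts++;
      } while (hash_value == 0 && attempts < 30);
      return hash_value != 0 ? hash_value : 1;  /* never return 0 */
    }

Masking to the smi range and rejecting 0 lets 0 serve as the "no hash" sentinel, which is also why a detached global proxy (no hidden properties) returns 0 above.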
@ -2749,9 +2769,9 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
return;
}
i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
i::Handle<i::Map> slow_map =
i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map()));
self->set_map(*slow_map);
i::Handle<i::Map> pixel_array_map =
i::Factory::GetPixelArrayElementsMap(i::Handle<i::Map>(self->map()));
self->set_map(*pixel_array_map);
self->set_elements(*pixels);
}

2
deps/v8/src/arguments.h

@ -78,7 +78,7 @@ class Arguments BASE_EMBEDDED {
class CustomArguments : public Relocatable {
public:
inline CustomArguments(Object* data,
JSObject* self,
Object* self,
JSObject* holder) {
values_[2] = self;
values_[1] = holder;

4
deps/v8/src/arm/assembler-arm-inl.h

@ -198,6 +198,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -221,6 +223,8 @@ void RelocInfo::Visit() {
StaticVisitor::VisitPointer(target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT

58
deps/v8/src/arm/assembler-arm.cc

@ -272,7 +272,6 @@ static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size)
: positions_recorder_(this),
allow_peephole_optimization_(false) {
// BUG(3245989): disable peephole optimization if crankshaft is enabled.
allow_peephole_optimization_ = FLAG_peephole_optimization;
if (buffer == NULL) {
// Do our own buffer management.
@ -352,6 +351,11 @@ void Assembler::CodeTargetAlign() {
}
Condition Assembler::GetCondition(Instr instr) {
return Instruction::ConditionField(instr);
}
bool Assembler::IsBranch(Instr instr) {
return (instr & (B27 | B25)) == (B27 | B25);
}
@ -428,6 +432,20 @@ Register Assembler::GetRd(Instr instr) {
}
Register Assembler::GetRn(Instr instr) {
Register reg;
reg.code_ = Instruction::RnValue(instr);
return reg;
}
Register Assembler::GetRm(Instr instr) {
Register reg;
reg.code_ = Instruction::RmValue(instr);
return reg;
}
bool Assembler::IsPush(Instr instr) {
return ((instr & ~kRdMask) == kPushRegPattern);
}
@ -465,6 +483,35 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
}
bool Assembler::IsTstImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | TST | S);
}
bool Assembler::IsCmpRegister(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
(CMP | S);
}
bool Assembler::IsCmpImmediate(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
(I | CMP | S);
}
Register Assembler::GetCmpImmediateRegister(Instr instr) {
ASSERT(IsCmpImmediate(instr));
return GetRn(instr);
}
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
ASSERT(IsCmpImmediate(instr));
return instr & kOff12Mask;
}
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@ -1052,6 +1099,13 @@ void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
}
void Assembler::cmp_raw_immediate(
Register src, int raw_immediate, Condition cond) {
ASSERT(is_uint12(raw_immediate));
emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
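
cmp_raw_immediate packs the untouched 12-bit immediate straight into an ARM data-processing word. A hedged sketch of that encoding, with the field values written out from the ARM instruction format (assumed values, not taken from this diff):

#include <cassert>
#include <cstdint>

// Assumed ARM data-processing field values for the names used above.
constexpr uint32_t al  = 0xEu << 28;  // "always" condition
constexpr uint32_t I   = 1u << 25;    // immediate second operand
constexpr uint32_t CMP = 0xAu << 21;  // compare opcode
constexpr uint32_t S   = 1u << 20;    // set condition flags

uint32_t EncodeCmpRawImmediate(int rn, int raw_immediate) {
  assert(raw_immediate >= 0 && raw_immediate < (1 << 12));  // is_uint12
  return al | I | CMP | S | (static_cast<uint32_t>(rn) << 16) |
         static_cast<uint32_t>(raw_immediate);
}
// EncodeCmpRawImmediate(0, 1) == 0xE3500001, i.e. "cmp r0, #1".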
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
addrmod1(cond | CMN | S, src1, r0, src2);
}
@ -2363,7 +2417,7 @@ void Assembler::nop(int type) {
bool Assembler::IsNop(Instr instr, int type) {
// Check for mov rx, rx.
// Check for mov rx, rx where x = type.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
return instr == (al | 13*B21 | type*B12 | type);
}

9
deps/v8/src/arm/assembler-arm.h

@ -729,6 +729,7 @@ class Assembler : public Malloced {
void cmp(Register src1, Register src2, Condition cond = al) {
cmp(src1, Operand(src2), cond);
}
void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
void cmn(Register src1, const Operand& src2, Condition cond = al);
@ -1099,6 +1100,7 @@ class Assembler : public Malloced {
static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
static Condition GetCondition(Instr instr);
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
@ -1109,6 +1111,8 @@ class Assembler : public Malloced {
static bool IsAddRegisterImmediate(Instr instr);
static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
static Register GetRd(Instr instr);
static Register GetRn(Instr instr);
static Register GetRm(Instr instr);
static bool IsPush(Instr instr);
static bool IsPop(Instr instr);
static bool IsStrRegFpOffset(Instr instr);
@ -1116,6 +1120,11 @@ class Assembler : public Malloced {
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
static bool IsTstImmediate(Instr instr);
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
// Check if is time to emit a constant pool for pending reloc info entries

58
deps/v8/src/arm/builtins-arm.cc

@ -1156,12 +1156,48 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
__ stop("builtins-arm.cc: NotifyOSR");
// For now, we are relying on the fact that Runtime::NotifyOSR
// doesn't do any garbage collection which allows us to save/restore
// the registers without worrying about which of them contain
// pointers. This seems a bit fragile.
__ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ EnterInternalFrame();
__ CallRuntime(Runtime::kNotifyOSR, 0);
__ LeaveInternalFrame();
__ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
__ Ret();
}
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ stop("builtins-arm.cc: OnStackReplacement");
// Probe the CPU to set the supported features, because this builtin
// may be called before the initialization performs CPU setup.
CpuFeatures::Probe(false);
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ EnterInternalFrame();
__ push(r0);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
__ LeaveInternalFrame();
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
Label skip;
__ cmp(r0, Operand(Smi::FromInt(-1)));
__ b(ne, &skip);
__ Ret();
__ bind(&skip);
// Untag the AST id and push it on the stack.
__ SmiUntag(r0);
__ push(r0);
// Generate the code for doing the frame-to-frame translation using
// the deoptimizer infrastructure.
Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
generator.Generate();
}
@ -1195,6 +1231,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Change context eagerly in case we need the global receiver.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ b(ne, &shift_arguments);
// Compute the receiver in non-strict mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
@ -1358,10 +1402,20 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Change context eagerly to get the right global object if necessary.
__ ldr(r0, MemOperand(fp, kFunctionOffset));
__ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
// Load the shared function info while the function is still in r0.
__ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ ldr(r0, MemOperand(fp, kRecvOffset));
// Do not transform the receiver for strict mode functions.
__ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ b(ne, &push_receiver);
// Compute the receiver in non-strict mode.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
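
Both receiver fixups test the same SharedFunctionInfo hint. The compiler-hints word is a smi, so a hint at bit k of the untagged value sits at bit k + kSmiTagSize of the raw word; that is why the tst shifts the mask by kSmiTagSize. A small sketch of the test (the bit position here is illustrative, not the real value):

#include <cstdint>

constexpr int kSmiTagSize = 1;
constexpr int kStrictModeFunction = 2;  // hypothetical bit position

bool IsStrictModeFunction(uint32_t compiler_hints_word) {
  return (compiler_hints_word &
          (1u << (kStrictModeFunction + kSmiTagSize))) != 0;
}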

381
deps/v8/src/arm/code-stubs-arm.cc

@ -1298,7 +1298,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result;
Label not_heap_number;
Register scratch = r7;
Register scratch = r9.is(tos_) ? r7 : r9;
__ LoadRoot(ip, Heap::kNullValueRootIndex);
__ cmp(tos_, ip);
@ -2588,6 +2588,39 @@ void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
__ eor(right, left, Operand(right));
__ Ret();
break;
case Token::SAR:
// Remove tags from right operand.
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ mov(right, Operand(left, ASR, scratch1));
// Smi tag result.
__ bic(right, right, Operand(kSmiTagMask));
__ Ret();
break;
case Token::SHR:
// Remove tags from operands. We can't do this on a 31 bit number
// because then the 0s get shifted into bit 30 instead of bit 31.
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSR, scratch2));
// Unsigned shift is not allowed to produce a negative number, so
// check the sign bit and the sign bit after Smi tagging.
__ tst(scratch1, Operand(0xc0000000));
__ b(ne, &not_smi_result);
// Smi tag result.
__ SmiTag(right, scratch1);
__ Ret();
break;
case Token::SHL:
// Remove tags from operands.
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSL, scratch2));
// Check that the signed result fits in a Smi.
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
__ b(mi, &not_smi_result);
__ SmiTag(right, scratch1);
__ Ret();
break;
default:
UNREACHABLE();
}
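
Of the new shift cases, SHR is the subtle one: the operand is untagged with an arithmetic shift, shifted logically, and then bits 31 and 30 of the result are tested, because a value with either bit set cannot be retagged as a non-negative smi. A C-level sketch of that fast path, assuming the smi tag is the low bit:

#include <cstdint>

bool ShrAsSmi(int32_t tagged_left, int32_t tagged_right, int32_t* result) {
  int32_t value = tagged_left >> 1;                          // SmiUntag (ASR)
  uint32_t shift = (static_cast<uint32_t>(tagged_right) >> 1) & 0x1f;
  uint32_t shifted = static_cast<uint32_t>(value) >> shift;  // LSR
  if (shifted & 0xc0000000u) return false;  // cannot retag as a positive smi
  *result = static_cast<int32_t>(shifted << 1);              // SmiTag
  return true;
}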
@ -2703,7 +2736,10 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
}
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND: {
case Token::BIT_AND:
case Token::SAR:
case Token::SHR:
case Token::SHL: {
if (smi_operands) {
__ SmiUntag(r3, left);
__ SmiUntag(r2, right);
@ -2726,6 +2762,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
d0,
not_numbers);
}
Label result_not_a_smi;
switch (op_) {
case Token::BIT_OR:
__ orr(r2, r3, Operand(r2));
@ -2736,11 +2774,35 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
case Token::BIT_AND:
__ and_(r2, r3, Operand(r2));
break;
case Token::SAR:
// Use only the 5 least significant bits of the shift count.
__ and_(r2, r2, Operand(0x1f));
__ GetLeastBitsFromInt32(r2, r2, 5);
__ mov(r2, Operand(r3, ASR, r2));
break;
case Token::SHR:
// Use only the 5 least significant bits of the shift count.
__ GetLeastBitsFromInt32(r2, r2, 5);
__ mov(r2, Operand(r3, LSR, r2), SetCC);
// SHR is special because it is required to produce a positive answer.
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int, so we fall back to the slow
// case when that happens.
if (CpuFeatures::IsSupported(VFP3)) {
__ b(mi, &result_not_a_smi);
} else {
__ b(mi, not_numbers);
}
break;
case Token::SHL:
// Use only the 5 least significant bits of the shift count.
__ GetLeastBitsFromInt32(r2, r2, 5);
__ mov(r2, Operand(r3, LSL, r2));
break;
default:
UNREACHABLE();
}
Label result_not_a_smi;
// Check that the *signed* result fits in a smi.
__ add(r3, r2, Operand(0x40000000), SetCC);
__ b(mi, &result_not_a_smi);
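
The add-0x40000000-then-branch-on-minus sequence is a compact range check: for any 32-bit v, the sign bit of v + 0x40000000 is set exactly when v lies outside the smi range [-2^30, 2^30 - 1]. The same predicate in C:

#include <cstdint>

bool FitsSmi(int32_t v) {
  return ((static_cast<uint32_t>(v) + 0x40000000u) & 0x80000000u) == 0;
}
// FitsSmi((1 << 30) - 1) and FitsSmi(-(1 << 30)) are true;
// FitsSmi(1 << 30) and FitsSmi(-(1 << 30) - 1) are false.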
@ -2760,10 +2822,15 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ mov(r0, Operand(r5));
if (CpuFeatures::IsSupported(VFP3)) {
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
// Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r2);
__ vcvt_f64_s32(d0, s0);
if (op_ == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
} else {
__ vcvt_f64_s32(d0, s0);
}
__ sub(r3, r0, Operand(kHeapObjectTag));
__ vstr(d0, r3, HeapNumber::kValueOffset);
__ Ret();
@ -2790,15 +2857,6 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Label not_smis;
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@ -2825,15 +2883,6 @@ void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label not_smis, call_runtime;
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
result_type_ == TRBinaryOpIC::SMI) {
// Only allow smi results.
@ -2864,15 +2913,6 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
ASSERT(operands_type_ == TRBinaryOpIC::INT32);
GenerateTypeTransition(masm);
@ -2880,15 +2920,6 @@ void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
Label not_numbers, call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
@ -2903,15 +2934,6 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD ||
op_ == Token::SUB ||
op_ == Token::MUL ||
op_ == Token::DIV ||
op_ == Token::MOD ||
op_ == Token::BIT_OR ||
op_ == Token::BIT_AND ||
op_ == Token::BIT_XOR);
Label call_runtime;
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
@ -2984,6 +3006,15 @@ void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
case Token::BIT_XOR:
__ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
break;
case Token::SAR:
__ InvokeBuiltin(Builtins::SAR, JUMP_JS);
break;
case Token::SHR:
__ InvokeBuiltin(Builtins::SHR, JUMP_JS);
break;
case Token::SHL:
__ InvokeBuiltin(Builtins::SHL, JUMP_JS);
break;
default:
UNREACHABLE();
}
@ -3268,105 +3299,13 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// r0 holds the exception.
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(r2);
__ str(r2, MemOperand(r3));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
__ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a
// JS entry frame.
__ cmp(fp, Operand(0, RelocInfo::NONE));
// Set cp to NULL if fp is NULL.
__ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc);
__ Throw(r0);
}
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop sp to the top stack handler.
__ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
__ ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
__ ldr(r2, MemOperand(sp, kStateOffset));
__ cmp(r2, Operand(StackHandler::ENTRY));
__ b(eq, &done);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
__ ldr(sp, MemOperand(sp, kNextOffset));
__ jmp(&loop);
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(r2);
__ str(r2, MemOperand(r3));
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
ExternalReference external_caught(Top::k_external_caught_exception_address);
__ mov(r0, Operand(false, RelocInfo::NONE));
__ mov(r2, Operand(external_caught));
__ str(r0, MemOperand(r2));
// Set pending exception and r0 to out of memory exception.
Failure* out_of_memory = Failure::OutOfMemoryException();
__ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
__ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
__ str(r0, MemOperand(r2));
}
// Stack layout at this point. See also StackHandlerConstants.
// sp -> state (ENTRY)
// fp
// lr
// Discard handler state (r2 is not used) and restore frame pointer.
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
__ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a
// JS entry frame.
__ cmp(fp, Operand(0, RelocInfo::NONE));
// Set cp to NULL if fp is NULL.
__ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ pop(pc);
__ ThrowUncatchable(type, r0);
}
@ -3453,7 +3392,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
__ LeaveExitFrame(save_doubles_);
// Callee-saved register r4 still holds argc.
__ LeaveExitFrame(save_doubles_, r4);
__ mov(pc, lr);
// check if we should retry or throw exception
Label retry;
@ -4232,24 +4173,27 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
static const int kRegExpExecuteArguments = 7;
__ push(lr);
__ PrepareCallCFunction(kRegExpExecuteArguments, r0);
static const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript.
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers.
// Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
__ mov(r0, Operand(1));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
__ str(r0, MemOperand(sp, 3 * kPointerSize));
// Argument 6 (sp[4]): Start (high end) of backtracking stack memory area.
// Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
__ mov(r0, Operand(address_of_regexp_stack_memory_address));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r2, Operand(address_of_regexp_stack_memory_size));
__ ldr(r2, MemOperand(r2, 0));
__ add(r0, r0, Operand(r2));
__ str(r0, MemOperand(sp, 1 * kPointerSize));
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[0]): static offsets vector buffer.
// Argument 5 (sp[4]): static offsets vector buffer.
__ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
__ str(r0, MemOperand(sp, 0 * kPointerSize));
__ str(r0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3 get string length, calculate start of string data and
// calculate the shift of the index (0 for ASCII and 1 for two byte).
@ -4271,8 +4215,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Locate the code entry and call it.
__ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ CallCFunction(r7, kRegExpExecuteArguments);
__ pop(lr);
DirectCEntryStub stub;
stub.GenerateCall(masm, r7);
__ LeaveExitFrame(false, no_reg);
// r0: result
// subject: subject string (callee saved)
@ -4281,6 +4227,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
__ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
__ b(eq, &success);
Label failure;
@ -4293,12 +4240,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// stack overflow (on the backtrack stack) was detected in RegExp code but
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
__ mov(r0, Operand(ExternalReference::the_hole_value_location()));
__ ldr(r0, MemOperand(r0, 0));
__ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address)));
__ mov(r1, Operand(ExternalReference::the_hole_value_location()));
__ ldr(r1, MemOperand(r1, 0));
__ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
__ ldr(r0, MemOperand(r2, 0));
__ cmp(r0, r1);
__ b(eq, &runtime);
__ str(r1, MemOperand(r2, 0)); // Clear pending exception.
// Check if the exception is a termination. If so, throw as uncatchable.
__ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
__ cmp(r0, ip);
Label termination_exception;
__ b(eq, &termination_exception);
__ Throw(r0); // Expects thrown value in r0.
__ bind(&termination_exception);
__ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0.
__ bind(&failure);
// For failure and exception return null.
__ mov(r0, Operand(Factory::null_value()));
@ -5809,10 +5770,9 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
// For equality we do not care about the sign of the result.
__ sub(r0, r0, r1, SetCC);
} else {
__ sub(r1, r1, r0, SetCC);
// Correct sign of result in case of overflow.
__ rsb(r1, r1, Operand(0), SetCC, vs);
__ mov(r0, r1);
// Untag before subtracting to avoid handling overflow.
__ SmiUntag(r1);
__ sub(r0, r1, SmiUntagOperand(r0));
}
__ Ret();
@ -5923,14 +5883,24 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
ApiFunction *function) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET));
// Push return address (accessible to GC through exit frame pc).
__ mov(r2,
Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
// Push return address (accessible to GC through exit frame pc).
__ str(pc, MemOperand(sp, 0));
__ Jump(r2); // Call the api function.
}
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
__ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
RelocInfo::CODE_TARGET));
// Push return address (accessible to GC through exit frame pc).
__ str(pc, MemOperand(sp, 0));
__ Jump(target); // Call the C++ function.
}
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@ -5998,6 +5968,91 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
}
void GenerateFastPixelArrayStore(MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register elements,
Register elements_map,
Register scratch1,
Register scratch2,
bool load_elements_from_receiver,
bool load_elements_map_from_elements,
Label* key_not_smi,
Label* value_not_smi,
Label* not_pixel_array,
Label* out_of_range) {
// Register use:
// receiver - holds the receiver and is unchanged unless the
// store succeeds.
// key - holds the key (must be a smi) and is unchanged.
// value - holds the value (must be a smi) and is unchanged.
// elements - holds the element object of the receiver on entry if
// load_elements_from_receiver is false, otherwise used
// internally to store the pixel array's elements and
// external array pointer.
// elements_map - holds the map of the element object if
// load_elements_map_from_elements is false, otherwise
// loaded with the element map.
//
Register external_pointer = elements;
Register untagged_key = scratch1;
Register untagged_value = scratch2;
if (load_elements_from_receiver) {
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
}
// By passing NULL as not_pixel_array, callers signal that they have already
// verified that the receiver has pixel array elements.
if (not_pixel_array != NULL) {
if (load_elements_map_from_elements) {
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
}
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(elements_map, ip);
__ b(ne, not_pixel_array);
} else {
if (FLAG_debug_code) {
// Map check should have already made sure that elements is a pixel array.
__ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(elements_map, ip);
__ Assert(eq, "Elements isn't a pixel array");
}
}
// Some callers have already verified that the key is a smi. key_not_smi is
// set to NULL as a sentinel for that case. Otherwise, an explicit check is
// generated to ensure that the key is a smi.
if (key_not_smi != NULL) {
__ JumpIfNotSmi(key, key_not_smi);
} else {
if (FLAG_debug_code) {
__ AbortIfNotSmi(key);
}
}
__ SmiUntag(untagged_key, key);
// Perform bounds check.
__ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
__ cmp(untagged_key, scratch2);
__ b(hs, out_of_range); // unsigned check handles negative keys.
__ JumpIfNotSmi(value, value_not_smi);
__ SmiUntag(untagged_value, value);
// Clamp the value to [0..255].
__ Usat(untagged_value, 8, Operand(untagged_value));
// Get the pointer to the external array. This clobbers elements.
__ ldr(external_pointer,
FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
__ strb(untagged_value, MemOperand(external_pointer, untagged_key));
__ Ret();
}
#undef __
} } // namespace v8::internal
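
Taken together, the store helper does an unsigned bounds check (which also rejects negative keys), clamps the value to a byte as usat does, and writes through the external pointer. The same logic at C level, as an illustration rather than the generated code:

#include <cstddef>
#include <cstdint>

bool FastPixelArrayStoreSketch(uint8_t* external_pointer, size_t length,
                               int32_t untagged_key, int32_t untagged_value) {
  // Unsigned comparison handles negative keys, as in the generated code.
  if (static_cast<uint32_t>(untagged_key) >= length) return false;
  // Clamp to [0..255], matching usat with an 8-bit saturation bound.
  int32_t clamped = untagged_value < 0 ? 0
                  : untagged_value > 255 ? 255 : untagged_value;
  external_pointer[untagged_key] = static_cast<uint8_t>(clamped);
  return true;
}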

46
deps/v8/src/arm/code-stubs-arm.h

@ -581,6 +581,7 @@ class DirectCEntryStub: public CodeStub {
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, ApiFunction *function);
void GenerateCall(MacroAssembler* masm, Register target);
private:
Major MajorKey() { return DirectCEntry; }
@ -589,14 +590,14 @@ class DirectCEntryStub: public CodeStub {
};
// Generate code the to load an element from a pixel array. The receiver is
// assumed to not be a smi and to have elements, the caller must guarantee this
// precondition. If the receiver does not have elements that are pixel arrays,
// the generated code jumps to not_pixel_array. If key is not a smi, then the
// generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key so
// that the smi check is not generated . If key is not a valid index within the
// bounds of the pixel array, the generated code jumps to out_of_range.
// Generate code to load an element from a pixel array. The receiver is assumed
// to not be a smi and to have elements, the caller must guarantee this
// precondition. If key is not a smi, then the generated code branches to
// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
// check has already been performed on key so that the smi check is not
// generated. If key is not a valid index within the bounds of the pixel array,
// the generated code jumps to out_of_range. receiver, key and elements are
// unchanged throughout the generated code sequence.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@ -609,6 +610,35 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Label* key_not_smi,
Label* out_of_range);
// Generate code to store an element into a pixel array, clamping values between
// [0..255]. The receiver is assumed to not be a smi and to have elements, the
// caller must guarantee this precondition. If key is not a smi, then the
// generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key so
// that the smi check is not generated. If value is not a smi, the generated
// code will branch to value_not_smi. If the receiver doesn't have pixel array
// elements, the generated code will branch to not_pixel_array, unless
// not_pixel_array is NULL, in which case the caller must ensure that the
// receiver has pixel array elements. If key is not a valid index within the
// bounds of the pixel array, the generated code jumps to out_of_range. If
// load_elements_from_receiver is true, then the elements of receiver is loaded
// into elements, otherwise elements is assumed to already be the receiver's
// elements. If load_elements_map_from_elements is true, elements_map is loaded
// from elements, otherwise it is assumed to already contain the element map.
void GenerateFastPixelArrayStore(MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register elements,
Register elements_map,
Register scratch1,
Register scratch2,
bool load_elements_from_receiver,
bool load_elements_map_from_elements,
Label* key_not_smi,
Label* value_not_smi,
Label* not_pixel_array,
Label* out_of_range);
} } // namespace v8::internal

24
deps/v8/src/arm/codegen-arm.cc

@ -2192,15 +2192,10 @@ void CodeGenerator::GenerateReturnSequence() {
DeleteFrame();
#ifdef DEBUG
// Check that the size of the code used for returning matches what is
// expected by the debugger. If the sp_delts above cannot be encoded in
// the add instruction the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length ==
Assembler::kJSReturnSequenceInstructions ||
return_sequence_length ==
Assembler::kJSReturnSequenceInstructions + 1);
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceInstructions <=
masm_->InstructionsGeneratedSince(&check_exit_codesize));
#endif
}
}
@ -5849,15 +5844,20 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
Load(property->obj());
Load(property->key());
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
frame_->EmitPush(r0);
} else if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->EmitPush(Operand(variable->name()));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
@ -6931,7 +6931,7 @@ void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
frame()->CallStoreIC(name, is_contextual);
frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
} else {
// Inline the in-object property case.
JumpTarget slow, done;

2
deps/v8/src/arm/constants-arm.h

@ -582,6 +582,7 @@ class Instruction {
inline int TypeValue() const { return Bits(27, 25); }
inline int RnValue() const { return Bits(19, 16); }
DECLARE_STATIC_ACCESSOR(RnValue);
inline int RdValue() const { return Bits(15, 12); }
DECLARE_STATIC_ACCESSOR(RdValue);
@ -625,6 +626,7 @@ class Instruction {
inline int SValue() const { return Bit(20); }
// with register
inline int RmValue() const { return Bits(3, 0); }
DECLARE_STATIC_ACCESSOR(RmValue);
inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
inline ShiftOp ShiftField() const {
return static_cast<ShiftOp>(BitField(6, 5));

204
deps/v8/src/arm/deoptimizer-arm.cc

@ -124,19 +124,204 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
const int kInstrSize = Assembler::kInstrSize;
// The call of the stack guard check has the following form:
// e1 5d 00 0c cmp sp, <limit>
// 2a 00 00 01 bcs ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
(al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
// We patch the code to the following form:
// e1 5d 00 0c cmp sp, <limit>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
// and overwrite the constant containing the
// address of the stack check stub.
// Replace conditional jump with NOP.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->nop();
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address stack_check_address_pointer = pc_after + stack_check_address_offset;
ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
}
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->b(+4, cs);
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address stack_check_address_pointer = pc_after + stack_check_address_offset;
ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
}
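
Both patch routines find the constant-pool word through the ldr that loads the call target: the low 12 bits of the ldr are a pc-relative offset, and the ARM pc reads as the instruction address plus 8. A sketch of the address computation, assuming the add-mode (positive offset) form the ASSERTs imply:

#include <cstdint>

uint32_t* ConstantPoolSlot(uint8_t* ldr_address) {
  uint32_t instr = *reinterpret_cast<uint32_t*>(ldr_address);
  uint32_t offset = instr & 0xfffu;  // 12-bit immediate of the ldr
  return reinterpret_cast<uint32_t*>(ldr_address + 8 + offset);  // +8: pc bias
}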
static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
// Read the number of frames.
value = it.Next();
if (value == 1) return i;
}
}
UNREACHABLE();
return -1;
}
void Deoptimizer::DoComputeOsrOutputFrame() {
UNIMPLEMENTED();
DeoptimizationInputData* data = DeoptimizationInputData::cast(
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, ast_id);
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();
TranslationIterator iterator(translations, translation_index);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
USE(function);
ASSERT(function == function_);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);
unsigned fixed_size = ComputeFixedSize(function_);
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
ASSERT(outgoing_size == 0); // OSR does not happen in the middle of a call.
if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
PrintF(" => node=%u, frame=%d->%d]\n",
ast_id,
input_frame_size,
output_frame_size);
}
// There's only one output frame in the OSR case.
output_count_ = 1;
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
unsigned output_offset = output_frame_size - kPointerSize;
int parameter_count = function_->shared()->formal_parameter_count() + 1;
for (int i = 0; i < parameter_count; ++i) {
output_[0]->SetFrameSlot(output_offset, 0);
output_offset -= kPointerSize;
}
// Translate the incoming parameters. This may overwrite some of the
// incoming argument slots we've just cleared.
int input_offset = input_frame_size - kPointerSize;
bool ok = true;
int limit = input_offset - (parameter_count * kPointerSize);
while (ok && input_offset > limit) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
for (int i = 0; ok && i < 4; i++) {
uint32_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part)\n",
output_offset,
input_value,
input_offset);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
output_offset -= kPointerSize;
}
// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}
// If translation of any command failed, continue using the input frame.
if (!ok) {
delete output_[0];
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function));
function->PrintName();
PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
}
}
@ -318,7 +503,6 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
// TOS: bailout-id; TOS+1: return address if not EAGER.
CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@ -353,6 +537,10 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(r3, Operand(0));
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else if (type() == OSR) {
__ mov(r3, lr);
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ mov(r3, lr);
// Correct two words for bailout id and return address.
@ -375,7 +563,6 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame descriptor pointer to r1 (deoptimizer->input_);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
@ -396,7 +583,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id, the return address if present, and the saved
// registers from the stack.
if (type() == EAGER) {
if (type() == EAGER || type() == OSR) {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else {
__ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
@ -450,11 +637,6 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(r0, r1);
__ b(lt, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
UNIMPLEMENTED();
}
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));

557
deps/v8/src/arm/full-codegen-arm.cc

@ -45,6 +45,67 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
// A patch site is a location in the code that can be patched. This class
// provides methods to emit the patchable code and a method, EmitPatchInfo,
// to record a marker back to it. The marker is a cmp rx, #yyy instruction;
// x * 0x00000fff + yyy (using the raw 12-bit immediate) is the delta from
// the pc to the first instruction of the patchable code.
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
#ifdef DEBUG
info_emitted_ = false;
#endif
}
~JumpPatchSite() {
ASSERT(patch_site_.is_bound() == info_emitted_);
}
// When initially emitting this, ensure that a jump is always generated to
// skip the inlined smi code.
void EmitJumpIfNotSmi(Register reg, Label* target) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
// Don't use b(al, ...) as that might emit the constant pool right after the
// branch. After patching when the branch is no longer unconditional
// execution can continue into the constant pool.
__ b(eq, target); // Always taken before patched.
}
// When initially emitting this, ensure that a jump is never generated to
// skip the inlined smi code.
void EmitJumpIfSmi(Register reg, Label* target) {
ASSERT(!patch_site_.is_bound() && !info_emitted_);
__ bind(&patch_site_);
__ cmp(reg, Operand(reg));
__ b(ne, target); // Never taken before patched.
}
void EmitPatchInfo() {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
reg.set_code(delta_to_patch_site / kOff12Mask);
__ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
#ifdef DEBUG
info_emitted_ = true;
#endif
}
bool is_bound() const { return patch_site_.is_bound(); }
private:
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
bool info_emitted_;
#endif
};
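
EmitPatchInfo therefore packs the delta as quotient and remainder of 0x00000fff: the register number carries the high part and the raw 12-bit immediate the low part. A sketch of the round trip, assuming the delta stays small enough to yield a valid register code:

constexpr int kOff12Mask = 0x00000fff;

struct PatchInfo { int reg_code; int immediate; };

PatchInfo EncodeDelta(int delta) {
  return PatchInfo{ delta / kOff12Mask, delta % kOff12Mask };
}

int DecodeDelta(PatchInfo info) {
  return info.reg_code * kOff12Mask + info.immediate;
}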
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
@ -268,15 +329,10 @@ void FullCodeGenerator::EmitReturnSequence() {
}
#ifdef DEBUG
// Check that the size of the code used for returning matches what is
// expected by the debugger. If the sp_delts above cannot be encoded in the
// add instruction the add will generate two instructions.
int return_sequence_length =
masm_->InstructionsGeneratedSince(&check_exit_codesize);
CHECK(return_sequence_length ==
Assembler::kJSReturnSequenceInstructions ||
return_sequence_length ==
Assembler::kJSReturnSequenceInstructions + 1);
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceInstructions <=
masm_->InstructionsGeneratedSince(&check_exit_codesize));
#endif
}
}
@ -285,7 +341,17 @@ void FullCodeGenerator::EmitReturnSequence() {
FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
Token::Value op, Expression* left, Expression* right) {
ASSERT(ShouldInlineSmiCase(op));
return kNoConstants;
if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
// We never generate inlined constant smi operations for these.
return kNoConstants;
} else if (right->IsSmiLiteral()) {
return kRightConstant;
} else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
// Don't inline shifts with constant left hand side.
return kLeftConstant;
} else {
return kNoConstants;
}
}
@ -681,18 +747,24 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
} else if (prop != NULL) {
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
VisitForStackValue(prop->obj());
// property. Use (keyed) IC to set the initial value. We
// cannot visit the rewrite because it's shared and we risk
// recording duplicate AST IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
if (function != NULL) {
VisitForStackValue(prop->key());
__ push(r0);
VisitForAccumulatorValue(function);
__ pop(r1); // Key.
__ pop(r2);
} else {
VisitForAccumulatorValue(prop->key());
__ mov(r1, result_register()); // Key.
__ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
__ mov(r2, r0);
__ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
}
__ pop(r2); // Receiver.
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
__ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@ -752,24 +824,24 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Perform the comparison as if via '==='.
__ ldr(r1, MemOperand(sp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r1, r0);
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow_case);
patch_site.EmitJumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
__ bind(&slow_case);
}
CompareFlags flags = inline_smi_code
? NO_SMI_COMPARE_IN_STUB
: NO_COMPARE_FLAGS;
CompareStub stub(eq, true, flags, r1, r0);
__ CallStub(&stub);
__ cmp(r0, Operand(0, RelocInfo::NONE));
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site);
__ cmp(r0, Operand(0));
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target()->entry_label());
@ -1536,34 +1608,316 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}
void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
Label call_stub, done;
// Optimistically add smi value with unknown object. If result overflows or is
// not a smi then we had either a smi overflow or added a smi with a tagged
// pointer.
__ mov(r1, Operand(value));
__ add(r2, r0, r1, SetCC);
__ b(vs, &call_stub);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfNotSmi(r2, &call_stub);
__ mov(r0, r2);
__ b(&done);
// Call the shared stub.
__ bind(&call_stub);
if (!left_is_constant_smi) {
__ Swap(r0, r1, r2);
}
TypeRecordingBinaryOpStub stub(Token::ADD, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
context()->Plug(r0);
}
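
The "optimistic" part works because smis are tagged with a zero low bit: adding two tagged words adds the tagged values, overflow shows up in the V flag, and a set low bit in the result betrays a pointer operand (here the other operand is a known smi constant, so at most one pointer is possible). A sketch, assuming a compiler with __builtin_add_overflow:

#include <cstdint>

bool OptimisticSmiAdd(int32_t a, int32_t b, int32_t* result) {
  int32_t sum;
  if (__builtin_add_overflow(a, b, &sum)) return false;  // smi overflow
  if (sum & 1) return false;  // one operand was a tagged pointer, not a smi
  *result = sum;
  return true;
}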
void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
Label call_stub, done;
// Optimistically subtract smi value and unknown object. If result overflows
// or is not a smi then we had either a smi overflow or subtraction between a
// smi and a tagged pointer.
__ mov(r1, Operand(value));
if (left_is_constant_smi) {
__ sub(r2, r1, r0, SetCC);
} else {
__ sub(r2, r0, r1, SetCC);
}
__ b(vs, &call_stub);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfNotSmi(r2, &call_stub);
__ mov(r0, r2);
__ b(&done);
// Call the shared stub.
__ bind(&call_stub);
if (!left_is_constant_smi) {
__ Swap(r0, r1, r2);
}
TypeRecordingBinaryOpStub stub(Token::SUB, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
context()->Plug(r0);
}
void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Smi* value) {
Label call_stub, smi_case, done;
int shift_value = value->value() & 0x1f;
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(r0, &smi_case);
// Call stub.
__ bind(&call_stub);
__ mov(r1, r0);
__ mov(r0, Operand(value));
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ b(&done);
// Smi case.
__ bind(&smi_case);
switch (op) {
case Token::SHL:
if (shift_value != 0) {
__ mov(r1, r0);
if (shift_value > 1) {
__ mov(r1, Operand(r1, LSL, shift_value - 1));
}
// Convert int result to smi, checking that it is in smi range.
__ SmiTag(r1, SetCC);
__ b(vs, &call_stub);
__ mov(r0, r1); // Put result back into r0.
}
break;
case Token::SAR:
if (shift_value != 0) {
__ mov(r0, Operand(r0, ASR, shift_value));
__ bic(r0, r0, Operand(kSmiTagMask));
}
break;
case Token::SHR:
// SHR must return a positive value. When shifting by 0 or 1 we need to
// check that smi tagging the result will not create a negative value.
if (shift_value < 2) {
__ mov(r2, Operand(shift_value));
__ SmiUntag(r1, r0);
if (shift_value != 0) {
__ mov(r1, Operand(r1, LSR, shift_value));
}
__ tst(r1, Operand(0xc0000000));
__ b(ne, &call_stub);
__ SmiTag(r0, r1); // result in r0.
} else {
__ SmiUntag(r0);
__ mov(r0, Operand(r0, LSR, shift_value));
__ SmiTag(r0);
}
break;
default:
UNREACHABLE();
}
__ bind(&done);
context()->Plug(r0);
}
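
The SAR case avoids an untag/retag round trip entirely: arithmetically shifting the tagged word and then clearing the tag bit yields the correctly tagged result. The identity in C, assuming kSmiTagMask == 1:

#include <cstdint>

int32_t SmiSar(int32_t tagged, int shift) {
  return (tagged >> shift) & ~1;  // ASR, then bic of the tag bit
}
// SmiSar(20 /* smi 10 */, 1) == 10 /* smi 5 */, and negatives round toward
// negative infinity: SmiSar(-20 /* smi -10 */, 2) == -6 /* smi -3 */.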
void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Smi* value) {
Label smi_case, done;
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(r0, &smi_case);
// The order of the arguments does not matter for bit-ops with a
// constant operand.
__ mov(r1, Operand(value));
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
// Smi case.
__ bind(&smi_case);
__ mov(r1, Operand(value));
switch (op) {
case Token::BIT_OR:
__ orr(r0, r0, Operand(r1));
break;
case Token::BIT_XOR:
__ eor(r0, r0, Operand(r1));
break;
case Token::BIT_AND:
__ and_(r0, r0, Operand(r1));
break;
default:
UNREACHABLE();
}
__ bind(&done);
context()->Plug(r0);
}
void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
switch (op) {
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
EmitConstantSmiBitOp(expr, op, mode, value);
break;
case Token::SHL:
case Token::SAR:
case Token::SHR:
ASSERT(!left_is_constant_smi);
EmitConstantSmiShiftOp(expr, op, mode, value);
break;
case Token::ADD:
EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
break;
case Token::SUB:
EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
break;
default:
UNREACHABLE();
}
}
void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
Expression* right,
Expression* left_expr,
Expression* right_expr,
ConstantOperand constant) {
ASSERT(constant == kNoConstants); // Only handled case.
EmitBinaryOp(op, mode);
if (constant == kRightConstant) {
Smi* value = Smi::cast(*right_expr->AsLiteral()->handle());
EmitConstantSmiBinaryOp(expr, op, mode, false, value);
return;
} else if (constant == kLeftConstant) {
Smi* value = Smi::cast(*left_expr->AsLiteral()->handle());
EmitConstantSmiBinaryOp(expr, op, mode, true, value);
return;
}
Label done, smi_case, stub_call;
Register scratch1 = r2;
Register scratch2 = r3;
// Get the arguments.
Register left = r1;
Register right = r0;
__ pop(left);
// Perform combined smi check on both operands.
__ orr(scratch1, left, Operand(right));
STATIC_ASSERT(kSmiTag == 0);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);
__ bind(&smi_case);
// Smi case. This code works the same way as the smi-smi case in the type
// recording binary operation stub, see
// TypeRecordingBinaryOpStub::GenerateSmiSmiOperation for comments.
switch (op) {
case Token::SAR:
__ b(&stub_call);
__ GetLeastBitsFromSmi(scratch1, right, 5);
__ mov(right, Operand(left, ASR, scratch1));
__ bic(right, right, Operand(kSmiTagMask));
break;
case Token::SHL: {
__ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSL, scratch2));
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
__ b(mi, &stub_call);
__ SmiTag(right, scratch1);
break;
}
case Token::SHR: {
__ b(&stub_call);
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
__ mov(scratch1, Operand(scratch1, LSR, scratch2));
__ tst(scratch1, Operand(0xc0000000));
__ b(ne, &stub_call);
__ SmiTag(right, scratch1);
break;
}
case Token::ADD:
__ add(scratch1, left, Operand(right), SetCC);
__ b(vs, &stub_call);
__ mov(right, scratch1);
break;
case Token::SUB:
__ sub(scratch1, left, Operand(right), SetCC);
__ b(vs, &stub_call);
__ mov(right, scratch1);
break;
case Token::MUL: {
__ SmiUntag(ip, right);
__ smull(scratch1, scratch2, left, ip);
__ mov(ip, Operand(scratch1, ASR, 31));
__ cmp(ip, Operand(scratch2));
__ b(ne, &stub_call);
__ tst(scratch1, Operand(scratch1));
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ b(ne, &done);
__ add(scratch2, right, Operand(left), SetCC);
__ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
__ b(mi, &stub_call);
break;
}
case Token::BIT_OR:
__ orr(right, left, Operand(right));
break;
case Token::BIT_AND:
__ and_(right, left, Operand(right));
break;
case Token::BIT_XOR:
__ eor(right, left, Operand(right));
break;
default:
UNREACHABLE();
}
__ bind(&done);
context()->Plug(r0);
}
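
The MUL fast path mirrors the stub's smi-smi case: multiply into a 64-bit pair, accept the low word only if the high word equals its sign extension, and route a zero result with mismatched operand signs to the stub so -0 is not produced as a smi. The same checks in C, ignoring tagging for clarity:

#include <cstdint>

bool SmiMul(int32_t left, int32_t right, int32_t* result) {
  int64_t product = static_cast<int64_t>(left) * right;
  int32_t lo = static_cast<int32_t>(product);
  if ((product >> 32) != (lo >> 31)) return false;  // does not fit 32 bits
  // Exactly one negative operand with a zero product means -0: not a smi.
  if (lo == 0 && (left < 0) != (right < 0)) return false;
  *result = lo;
  return true;
}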
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
OverwriteMode mode) {
__ pop(r1);
if (op == Token::ADD ||
op == Token::SUB ||
op == Token::MUL ||
op == Token::DIV ||
op == Token::MOD ||
op == Token::BIT_OR ||
op == Token::BIT_AND ||
op == Token::BIT_XOR) {
TypeRecordingBinaryOpStub stub(op, mode);
__ CallStub(&stub);
} else {
GenericBinaryOpStub stub(op, mode, r1, r0);
__ CallStub(&stub);
}
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), NULL);
context()->Plug(r0);
}
@ -1606,10 +1960,20 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
__ pop(r2);
if (prop->is_synthetic()) {
ASSERT(prop->obj()->AsVariableProxy() != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
__ mov(r2, r0);
__ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(r1, r0);
__ pop(r2);
}
__ pop(r0); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@ -1635,8 +1999,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// r2, and the global object in r1.
__ mov(r2, Operand(var->name()));
__ ldr(r1, GlobalObjectOperand());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
Handle<Code> ic(Builtins::builtin(is_strict()
? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
@@ -2991,39 +3357,50 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
if (prop == NULL && var == NULL) {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
context()->Plug(true);
} else if (var != NULL &&
!var->is_global() &&
var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
} else {
// Property or variable reference. Call the delete builtin with
// object and property name as arguments.
if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
context()->Plug(false);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
__ push(r1);
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
} else if (var->is_global()) {
__ ldr(r1, GlobalObjectOperand());
__ mov(r0, Operand(var->name()));
__ Push(r1, r0);
context()->Plug(r0);
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
if (var->is_global()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
__ Push(r2, r1, r0);
__ InvokeBuiltin(Builtins::DELETE, CALL_JS);
context()->Plug(r0);
} else if (var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
__ push(context_register());
__ mov(r2, Operand(var->name()));
__ push(r2);
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
context()->Plug(r0);
}
context()->Plug(r0);
} else {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
context()->Plug(true);
}
break;
}
@@ -3214,13 +3591,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Inline smi case if we are in a loop.
Label stub_call, done;
JumpPatchSite patch_site(masm_);
int count_value = expr->op() == Token::INC ? 1 : -1;
if (ShouldInlineSmiCase(expr->op())) {
__ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
__ b(vs, &stub_call);
// We could eliminate this smi check if we split the code at
// the first smi check before calling ToNumber.
__ JumpIfSmi(r0, &done);
patch_site.EmitJumpIfSmi(r0, &done);
__ bind(&stub_call);
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -3230,8 +3610,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call.
SetSourcePosition(expr->position());
GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
__ CallStub(&stub);
TypeRecordingBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
// Store the value returned in r0.
@@ -3510,21 +3890,22 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ orr(r2, r0, Operand(r1));
__ JumpIfNotSmi(r2, &slow_case);
patch_site.EmitJumpIfNotSmi(r2, &slow_case);
__ cmp(r1, r0);
Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
CompareFlags flags = inline_smi_code
? NO_SMI_COMPARE_IN_STUB
: NO_COMPARE_FLAGS;
CompareStub stub(cond, strict, flags, r1, r0);
__ CallStub(&stub);
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
EmitCallIC(ic, &patch_site);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0, RelocInfo::NONE));
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
}
}
@@ -3591,6 +3972,16 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
}
void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
__ Call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
} else {
__ nop(); // Signals no inlined code.
}
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));

117
deps/v8/src/arm/ic-arm.cc

@@ -115,6 +115,9 @@ static void GenerateStringDictionaryProbes(MacroAssembler* masm,
Register name,
Register scratch1,
Register scratch2) {
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
@@ -843,7 +846,14 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- lr : return address
// -----------------------------------
// Check if the name is a string.
Label miss;
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &miss);
__ IsObjectJSStringType(r2, r0, &miss);
GenerateCallNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
@@ -1465,24 +1475,20 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
// r4: elements map.
__ bind(&check_pixel_array);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ JumpIfNotSmi(value, &slow);
__ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the key.
__ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
__ cmp(r4, Operand(ip));
__ b(hs, &slow);
__ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
__ Usat(r5, 8, Operand(r5)); // Clamp the value to [0..255].
// Get the pointer to the external array. This clobbers elements.
__ ldr(elements,
FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
__ strb(r5, MemOperand(elements, r4)); // Elements is now external array.
__ Ret();
GenerateFastPixelArrayStore(masm,
r2,
r1,
r0,
elements,
r4,
r5,
r6,
false,
false,
NULL,
&slow,
&slow,
&slow);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -1533,7 +1539,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
}
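The pixel-array path replaced above clamps the untagged value to [0..255] with a single usat instruction before the byte store, and GenerateFastPixelArrayStore keeps the same semantics. A minimal C++ sketch of that saturating clamp (illustrative, not the V8 helper):

    #include <cstdint>

    // Sketch: usat r5, #8 saturates a signed value into the unsigned 8-bit range.
    uint8_t ClampToByte(int32_t value) {
      if (value < 0) return 0;      // negative values clamp to 0
      if (value > 255) return 255;  // anything above the byte range clamps to 255
      return static_cast<uint8_t>(value);
    }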
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -1544,7 +1551,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
MONOMORPHIC,
extra_ic_state);
StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);
// Cache miss: Jump to runtime.
@@ -1700,11 +1708,78 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
Token::Name(op_));
}
#endif
// Activate inlined smi code.
if (previous_state == UNINITIALIZED) {
PatchInlinedSmiCode(address());
}
}
void PatchInlinedSmiCode(Address address) {
Address cmp_instruction_address =
address + Assembler::kCallTargetAddressOffset;
// If the instruction following the call is not a cmp rx, #yyy, nothing
// was inlined.
Instr instr = Assembler::instr_at(cmp_instruction_address);
if (!Assembler::IsCmpImmediate(instr)) {
return;
}
// The delta to the start of the map check instruction and the
// condition code uses at the patched jump.
int delta = Assembler::GetCmpImmediateRawImmediate(instr);
delta +=
Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
// If the delta is 0 the instruction is cmp r0, #0 which also signals that
// nothing was inlined.
if (delta == 0) {
return;
}
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
address, cmp_instruction_address, delta);
}
#endif
Address patch_address =
cmp_instruction_address - delta * Instruction::kInstrSize;
Instr instr_at_patch = Assembler::instr_at(patch_address);
Instr branch_instr =
Assembler::instr_at(patch_address + Instruction::kInstrSize);
ASSERT(Assembler::IsCmpRegister(instr_at_patch));
ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
Assembler::GetRm(instr_at_patch).code());
ASSERT(Assembler::IsBranch(branch_instr));
if (Assembler::GetCondition(branch_instr) == eq) {
// This is patching a "jump if not smi" site to be active.
// Changing
// cmp rx, rx
// b eq, <target>
// to
// tst rx, #kSmiTagMask
// b ne, <target>
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(ne);
} else {
ASSERT(Assembler::GetCondition(branch_instr) == ne);
// This is patching a "jump if smi" site to be active.
// Changing
// cmp rx, rx
// b ne, <target>
// to
// tst rx, #kSmiTagMask
// b eq, <target>
CodePatcher patcher(patch_address, 2);
Register reg = Assembler::GetRn(instr_at_patch);
patcher.masm()->tst(reg, Operand(kSmiTagMask));
patcher.EmitCondition(eq);
}
}
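The patching protocol here is worth spelling out: the cmp-immediate emitted after the IC call is pure metadata, encoding the distance back to the inlined smi check (the raw immediate plus the register code times kOff12Mask), and the patch itself only rewrites the marker compare into tst rx, #kSmiTagMask and flips the branch condition. A self-contained C++ sketch of the condition rewrite, using the real ARM condition-field layout but otherwise illustrative:

    #include <cstdint>

    // The ARM condition code occupies bits 28..31 of every instruction word.
    constexpr uint32_t kCondMask = 0xF0000000u;
    constexpr uint32_t kCondEq = 0x00000000u;  // Z set
    constexpr uint32_t kCondNe = 0x10000000u;  // Z clear

    // Sketch of CodePatcher::EmitCondition: keep opcode and operands, swap
    // only the condition field, turning "b eq, <target>" into "b ne, <target>"
    // (or back) once "cmp rx, rx" is rewritten to "tst rx, #kSmiTagMask".
    uint32_t PatchCondition(uint32_t instr, uint32_t cond) {
      return (instr & ~kCondMask) | cond;
    }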

88
deps/v8/src/arm/lithium-arm.cc

@@ -62,15 +62,13 @@ void LInstruction::VerifyCall() {
// Call instructions can use only fixed registers as
// temporaries and outputs because all registers
// are blocked by the calling convention.
// Inputs can use either fixed register or have a short lifetime (be
// used at start of the instruction).
// Inputs must use a fixed register.
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
for (UseIterator it(this); it.HasNext(); it.Advance()) {
LOperand* operand = it.Next();
ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
LUnallocated::cast(operand)->IsUsedAtStart() ||
!LUnallocated::cast(operand)->HasRegisterPolicy());
}
for (TempIterator it(this); it.HasNext(); it.Advance()) {
@@ -186,6 +184,9 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
case Token::SHL: return "shl-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
default:
UNREACHABLE();
return NULL;
@@ -802,6 +803,16 @@ LInstruction* LChunkBuilder::DoBit(Token::Value op,
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
if (instr->representation().IsTagged()) {
ASSERT(instr->left()->representation().IsTagged());
ASSERT(instr->right()->representation().IsTagged());
LOperand* left = UseFixed(instr->left(), r1);
LOperand* right = UseFixed(instr->right(), r0);
LArithmeticT* result = new LArithmeticT(op, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->OperandAt(0)->representation().IsInteger32());
ASSERT(instr->OperandAt(1)->representation().IsInteger32());
@@ -1021,7 +1032,7 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
UseOrConstantAtStart(right));
UseRegisterAtStart(right));
} else if (r.IsDouble()) {
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
@@ -1077,6 +1088,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
} else if (v->IsIsConstructCall()) {
return new LIsConstructCallAndBranch(TempRegister());
} else {
if (v->IsConstant()) {
if (HConstant::cast(v)->handle()->IsTrue()) {
@@ -1131,8 +1144,8 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
LOperand* function = UseFixed(instr->function(), r1);
LOperand* receiver = UseFixed(instr->receiver(), r0);
LOperand* length = UseRegisterAtStart(instr->length());
LOperand* elements = UseRegisterAtStart(instr->elements());
LOperand* length = UseFixed(instr->length(), r2);
LOperand* elements = UseFixed(instr->elements(), r3);
LApplyArguments* result = new LApplyArguments(function,
receiver,
length,
@@ -1304,10 +1317,10 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
// the generated code, which requires registers r0
// and r1 to be used. We should remove that
// when we provide a native implementation.
LOperand* value = UseFixed(instr->left(), r0);
LOperand* dividend = UseFixed(instr->left(), r0);
LOperand* divisor = UseFixed(instr->right(), r1);
return AssignEnvironment(AssignPointerMap(
DefineFixed(new LDivI(value, divisor), r0)));
DefineFixed(new LDivI(dividend, divisor), r0)));
} else {
return DoArithmeticT(Token::DIV, instr);
}
@@ -1417,7 +1430,7 @@ LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LOperand* right = UseRegisterAtStart(instr->right());
return DefineAsRegister(new LCmpID(left, right));
} else if (r.IsDouble()) {
ASSERT(instr->left()->representation().IsDouble());
@@ -1478,6 +1491,15 @@ LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
}
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegister(instr->value());
return DefineAsRegister(new LGetCachedArrayIndex(value));
}
LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
HHasCachedArrayIndex* instr) {
ASSERT(instr->value()->representation().IsTagged());
@@ -1500,6 +1522,12 @@ LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
}
LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LPixelArrayLength(array));
}
LInstruction* LChunkBuilder::DoFixedArrayLength(HFixedArrayLength* instr) {
LOperand* array = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LFixedArrayLength(array));
@@ -1642,13 +1670,11 @@ LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
Representation r = instr->representation();
if (r.IsInteger32()) {
int32_t value = instr->Integer32Value();
return DefineAsRegister(new LConstantI(value));
return DefineAsRegister(new LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
return DefineAsRegister(new LConstantD(value));
return DefineAsRegister(new LConstantD);
} else if (r.IsTagged()) {
return DefineAsRegister(new LConstantT(instr->handle()));
return DefineAsRegister(new LConstantT);
} else {
UNREACHABLE();
return NULL;
@@ -1716,7 +1742,14 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineSameAsFirst(new LLoadElements(input));
return DefineAsRegister(new LLoadElements(input));
}
LInstruction* LChunkBuilder::DoLoadPixelArrayExternalPointer(
HLoadPixelArrayExternalPointer* instr) {
LOperand* input = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadPixelArrayExternalPointer(input));
}
@@ -1731,6 +1764,19 @@ LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
}
LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
HLoadPixelArrayElement* instr) {
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer =
UseRegisterAtStart(instr->external_pointer());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadPixelArrayElement* result =
new LLoadPixelArrayElement(external_pointer, key);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* object = UseFixed(instr->object(), r1);
LOperand* key = UseFixed(instr->key(), r0);
@@ -1832,8 +1878,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LOperand* object = UseFixed(instr->object(), r0);
LOperand* key = UseFixed(instr->key(), r1);
LDeleteProperty* result = new LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1881,7 +1927,7 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
LTypeof* result = new LTypeof(UseRegisterAtStart(instr->value()));
LTypeof* result = new LTypeof(UseFixed(instr->value(), r0));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1890,6 +1936,12 @@ LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
}
LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
return DefineAsRegister(new LIsConstructCall());
}
LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
HEnvironment* env = current_block_->last_environment();
ASSERT(env != NULL);

106
deps/v8/src/arm/lithium-arm.h

@@ -41,7 +41,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Constant) \
V(Call) \
V(StoreKeyed) \
V(StoreNamed) \
@@ -95,6 +94,7 @@ class LCodeGen;
V(FixedArrayLength) \
V(FunctionLiteral) \
V(Gap) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
@@ -123,6 +123,8 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadPixelArrayElement) \
V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -132,6 +134,7 @@ class LCodeGen;
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -153,6 +156,8 @@ class LCodeGen;
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
V(IsConstructCall) \
V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
@@ -735,6 +740,17 @@ class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
};
class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -903,44 +919,30 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
class LConstant: public LTemplateInstruction<1, 0, 0> {
DECLARE_INSTRUCTION(Constant)
};
class LConstantI: public LConstant {
class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
explicit LConstantI(int32_t value) : value_(value) { }
int32_t value() const { return value_; }
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
DECLARE_HYDROGEN_ACCESSOR(Constant)
private:
int32_t value_;
int32_t value() const { return hydrogen()->Integer32Value(); }
};
class LConstantD: public LConstant {
class LConstantD: public LTemplateInstruction<1, 0, 0> {
public:
explicit LConstantD(double value) : value_(value) { }
double value() const { return value_; }
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
DECLARE_HYDROGEN_ACCESSOR(Constant)
private:
double value_;
double value() const { return hydrogen()->DoubleValue(); }
};
class LConstantT: public LConstant {
class LConstantT: public LTemplateInstruction<1, 0, 0> {
public:
explicit LConstantT(Handle<Object> value) : value_(value) { }
Handle<Object> value() const { return value_; }
DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
DECLARE_HYDROGEN_ACCESSOR(Constant)
private:
Handle<Object> value_;
Handle<Object> value() const { return hydrogen()->handle(); }
};
@@ -990,6 +992,17 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
};
class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LPixelArrayLength(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
};
class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayLength(LOperand* value) {
@@ -1139,6 +1152,17 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
};
class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadPixelArrayExternalPointer(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
"load-pixel-array-external-pointer")
};
class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
@@ -1154,6 +1178,22 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
};
class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
"load-pixel-array-element")
DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
};
class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
@@ -1716,6 +1756,24 @@ class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
};
class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
};
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
"is-construct-call-and-branch")
};
class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
LDeleteProperty(LOperand* obj, LOperand* key) {

208
deps/v8/src/arm/lithium-codegen-arm.cc

@@ -647,7 +647,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
return;
}
if (cc == kNoCondition) {
if (cc == al) {
if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
@@ -1188,8 +1188,8 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ tst(left, Operand(left));
__ b(ne, &done);
if (instr->InputAt(1)->IsConstantOperand()) {
if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) {
DeoptimizeIf(kNoCondition, instr->environment());
if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
DeoptimizeIf(al, instr->environment());
}
} else {
// Test the non-zero operand for negative sign.
@@ -1322,6 +1322,13 @@ void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
}
void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
__ ldr(result, FieldMemOperand(array, PixelArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
Register result = ToRegister(instr->result());
Register array = ToRegister(instr->InputAt(0));
@@ -1605,7 +1612,7 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
__ cmp(ToRegister(left), ToOperand(right));
__ cmp(ToRegister(left), ToRegister(right));
}
@@ -1619,8 +1626,7 @@ void LCodeGen::DoCmpID(LCmpID* instr) {
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
__ vmrs(pc);
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to unordered to return false.
__ b(vs, &unordered);
@@ -1647,8 +1653,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
if (instr->is_double()) {
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
__ vcmp(ToDoubleRegister(left), ToDoubleRegister(right));
__ vmrs(pc);
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
// If a NaN is involved, i.e. the result is unordered (V set),
// jump to false block label.
__ b(vs, chunk_->GetAssemblyLabel(false_block));
@@ -1891,14 +1896,42 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
}
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(input, String::kHashFieldOffset));
__ IndexFromHash(scratch, result);
}
void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
Abort("DoHasCachedArrayIndex unimplemented.");
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register scratch = scratch0();
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ ldr(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
__ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Abort("DoHasCachedArrayIndexAndBranch unimplemented.");
Register input = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ ldr(scratch,
FieldMemOperand(input, String::kHashFieldOffset));
__ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
EmitBranch(true_block, false_block, eq);
}
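Both cached-array-index instructions reduce to one bit test on the string's hash field: the mask bits being clear means the field caches an array index, hence the eq condition in the code above. A minimal C++ sketch (the mask value is illustrative; the real constant is String::kContainsCachedArrayIndexMask):

    #include <cstdint>

    constexpr uint32_t kContainsCachedArrayIndexMask = 1u << 1;  // illustrative

    // Sketch: "tst scratch, #mask" sets the Z flag (eq) when the bits are
    // clear, i.e. when the hash field does cache an array index.
    bool HasCachedArrayIndex(uint32_t hash_field) {
      return (hash_field & kContainsCachedArrayIndexMask) == 0;
    }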
@@ -2180,12 +2213,12 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
__ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
__ cmp(r0, Operand(0));
__ LoadRoot(ToRegister(instr->result()),
Heap::kTrueValueRootIndex,
condition);
@@ -2196,7 +2229,21 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
Abort("DoCmpTAndBranch unimplemented.");
Token::Value op = instr->op();
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Handle<Code> ic = CompareIC::GetUninitialized(op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// The compare stub expects compare condition and the input operands
// reversed for GT and LTE.
Condition condition = ComputeCompareCondition(op);
if (op == Token::GT || op == Token::LTE) {
condition = ReverseCondition(condition);
}
__ cmp(r0, Operand(0));
EmitBranch(true_block, false_block, condition);
}
@@ -2342,17 +2389,20 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
void LCodeGen::DoLoadElements(LLoadElements* instr) {
ASSERT(instr->result()->Equals(instr->InputAt(0)));
Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register input = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
__ ldr(reg, FieldMemOperand(reg, JSObject::kElementsOffset));
__ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
Label done;
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
__ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
__ cmp(scratch, ip);
__ Check(eq, "Check for fast elements failed.");
@@ -2361,6 +2411,14 @@ void LCodeGen::DoLoadElements(LLoadElements* instr) {
}
void LCodeGen::DoLoadPixelArrayExternalPointer(
LLoadPixelArrayExternalPointer* instr) {
Register to_reg = ToRegister(instr->result());
Register from_reg = ToRegister(instr->InputAt(0));
__ ldr(to_reg, FieldMemOperand(from_reg, PixelArray::kExternalPointerOffset));
}
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register length = ToRegister(instr->length());
@@ -2397,6 +2455,16 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
}
void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
Register external_elements = ToRegister(instr->external_pointer());
Register key = ToRegister(instr->key());
Register result = ToRegister(instr->result());
// Load the result.
__ ldrb(result, MemOperand(external_elements, key));
}
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
@@ -2448,29 +2516,33 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
ASSERT(receiver.is(r0));
ASSERT(function.is(r1));
ASSERT(receiver.is(r0)); // Used for parameter count.
ASSERT(function.is(r1)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(r0));
// If the receiver is null or undefined, we have to pass the
// global object as a receiver.
Label global_receiver, receiver_ok;
// If the receiver is null or undefined, we have to pass the global object
// as a receiver.
Label global_object, receiver_ok;
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
__ cmp(receiver, scratch);
__ b(eq, &global_receiver);
__ b(eq, &global_object);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ cmp(receiver, scratch);
__ b(ne, &receiver_ok);
__ bind(&global_receiver);
__ ldr(receiver, GlobalObjectOperand());
__ bind(&receiver_ok);
__ b(eq, &global_object);
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
// Deoptimize if the receiver is not a JS object.
__ tst(receiver, Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment());
__ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
DeoptimizeIf(lo, instr->environment());
__ jmp(&receiver_ok);
Label invoke;
__ bind(&global_object);
__ ldr(receiver, GlobalObjectOperand());
__ bind(&receiver_ok);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
@@ -2487,7 +2559,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Loop through the arguments pushing them onto the execution
// stack.
Label loop;
Label invoke, loop;
// length is a small non-negative integer, due to the test above.
__ tst(length, Operand(length));
__ b(eq, &invoke);
@@ -2510,6 +2582,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// by InvokeFunction.
v8::internal::ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -2899,7 +2972,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
// Name is always in r2.
__ mov(r2, Operand(instr->name()));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(info_->is_strict()
? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3778,6 +3853,55 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
}
void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
Label done;
EmitIsConstructCall(result, scratch0());
__ b(eq, &true_label);
__ LoadRoot(result, Heap::kFalseValueRootIndex);
__ b(&done);
__ bind(&true_label);
__ LoadRoot(result, Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
EmitIsConstructCall(temp1, scratch0());
EmitBranch(true_block, false_block, eq);
}
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
ASSERT(!temp1.is(temp2));
// Get the frame pointer for the calling frame.
__ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
__ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &check_frame_marker);
__ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
__ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
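EmitIsConstructCall walks one frame up from fp, skips an arguments-adaptor frame if its context slot holds the adaptor sentinel, and then compares the frame's marker slot against the CONSTRUCT sentinel. A rough C++ sketch of that walk, with an illustrative frame layout (the real offsets come from StandardFrameConstants):

    #include <cstdint>

    struct Frame {
      Frame* caller_fp;   // MemOperand(fp, kCallerFPOffset)
      intptr_t context;   // holds the ARGUMENTS_ADAPTOR sentinel in adaptor frames
      intptr_t marker;    // holds the CONSTRUCT sentinel in construct frames
    };

    bool IsConstructCall(const Frame* fp,
                         intptr_t adaptor_sentinel,
                         intptr_t construct_sentinel) {
      const Frame* caller = fp->caller_fp;
      if (caller->context == adaptor_sentinel) {
        caller = caller->caller_fp;  // skip the arguments adaptor frame
      }
      return caller->marker == construct_sentinel;  // caller branches on eq
    }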
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
// No code for lazy bailout instruction. Used to capture environment after a
// call for populating the safepoint data with deoptimization data.
@@ -3785,14 +3909,16 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
DeoptimizeIf(kNoCondition, instr->environment());
DeoptimizeIf(al, instr->environment());
}
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
Register object = ToRegister(instr->object());
Register key = ToRegister(instr->key());
__ Push(object, key);
Register strict = scratch0();
__ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
__ Push(object, key, strict);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
LEnvironment* env = instr->deoptimization_environment();
@@ -3818,7 +3944,19 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
Abort("DoOsrEntry unimplemented.");
// This is a pseudo-instruction that ensures that the environment here is
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
instr->SpilledDoubleRegisterArray());
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
ASSERT(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment);
ASSERT(osr_pc_offset_ == -1);
osr_pc_offset_ = masm()->pc_offset();
}

8
deps/v8/src/arm/lithium-codegen-arm.h

@@ -129,6 +129,10 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
return info_->is_strict() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@@ -264,6 +268,10 @@ class LCodeGen BASE_EMBEDDED {
Label* is_not_object,
Label* is_object);
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;

172
deps/v8/src/arm/macro-assembler-arm.cc

@@ -714,7 +714,8 @@ int MacroAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count) {
// Optionally restore all double registers.
if (save_doubles) {
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
@@ -736,12 +737,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
str(r3, MemOperand(ip));
#endif
// Tear down the exit frame, pop the arguments, and return. Callee-saved
// register r4 still holds argc.
// Tear down the exit frame, pop the arguments, and return.
mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
mov(pc, lr);
if (argument_count.is_valid()) {
add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
}
}
@@ -929,8 +930,8 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
void MacroAssembler::IsObjectJSStringType(Register object,
Register scratch,
Label* fail) {
ASSERT(kNotStringTag != 0);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
@@ -1005,6 +1006,117 @@ void MacroAssembler::PopTryHandler() {
}
void MacroAssembler::Throw(Register value) {
// r0 is expected to hold the exception.
if (!value.is(r0)) {
mov(r0, value);
}
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
mov(r3, Operand(ExternalReference(Top::k_handler_address)));
ldr(sp, MemOperand(r3));
// Restore the next handler and frame pointer, discard handler state.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r2);
str(r2, MemOperand(r3));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a
// JS entry frame.
cmp(fp, Operand(0, RelocInfo::NONE));
// Set cp to NULL if fp is NULL.
mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
if (FLAG_debug_code) {
mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
pop(pc);
}
void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// r0 is expected to hold the exception.
if (!value.is(r0)) {
mov(r0, value);
}
// Drop sp to the top stack handler.
mov(r3, Operand(ExternalReference(Top::k_handler_address)));
ldr(sp, MemOperand(r3));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
ldr(r2, MemOperand(sp, kStateOffset));
cmp(r2, Operand(StackHandler::ENTRY));
b(eq, &done);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
ldr(sp, MemOperand(sp, kNextOffset));
jmp(&loop);
bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r2);
str(r2, MemOperand(r3));
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
ExternalReference external_caught(Top::k_external_caught_exception_address);
mov(r0, Operand(false, RelocInfo::NONE));
mov(r2, Operand(external_caught));
str(r0, MemOperand(r2));
// Set pending exception and r0 to out of memory exception.
Failure* out_of_memory = Failure::OutOfMemoryException();
mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
str(r0, MemOperand(r2));
}
// Stack layout at this point. See also StackHandlerConstants.
// sp -> state (ENTRY)
// fp
// lr
// Discard handler state (r2 is not used) and restore frame pointer.
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of a
// JS entry frame.
cmp(fp, Operand(0, RelocInfo::NONE));
// Set cp to NULL if fp is NULL.
mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
// Restore cp otherwise.
ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
#ifdef DEBUG
if (FLAG_debug_code) {
mov(lr, Operand(pc));
}
#endif
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
pop(pc);
}
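Both Throw and ThrowUncatchable treat the handler chain as a linked list threaded through the stack: sp is dropped to the top handler, and for uncatchable exceptions the list is walked until an ENTRY handler is found, discarding everything in between without running it. A minimal C++ sketch of that unwinding (types and state encoding are illustrative; the real layout is StackHandlerConstants):

    struct StackHandler {
      StackHandler* next;  // kNextOffset == 0
      int state;           // e.g. StackHandler::ENTRY
    };

    // Sketch: pop handlers without running them until a JS-entry handler is found.
    StackHandler* UnwindToEntry(StackHandler* top, int entry_state) {
      while (top->state != entry_state) {
        top = top->next;
      }
      return top;
    }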
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
@@ -1150,7 +1262,8 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
add(scratch2, result, Operand(obj_size_reg));
add(scratch2, result, Operand(obj_size_reg), SetCC);
b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
str(scratch2, MemOperand(topaddr));
@@ -1229,10 +1342,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2));
add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
} else {
add(scratch2, result, Operand(object_size));
add(scratch2, result, Operand(object_size), SetCC);
}
b(cs, gc_required);
cmp(scratch2, Operand(ip));
b(hi, gc_required);
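The SetCC / b(cs, gc_required) added to both AllocateInNewSpace variants closes a wrap-around hole: if top + size overflows the address space the carry flag is set, and that must be treated the same as running past the limit. A sketch of the equivalent check in C++ (illustrative, not the V8 allocator):

    #include <cstdint>

    // Sketch: bump allocation that fails both on limit overrun and on address
    // wrap-around (the carry-set case the new b(cs, gc_required) catches).
    bool TryAllocate(uintptr_t top, uintptr_t limit, uintptr_t size,
                     uintptr_t* result) {
      uintptr_t new_top = top + size;
      if (new_top < top) return false;    // unsigned wrap == carry set
      if (new_top > limit) return false;  // cmp scratch2, ip; b(hi, gc_required)
      *result = top;
      return true;
    }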
@@ -1552,9 +1666,10 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
cmp(r4, r5);
b(ne, &promote_scheduled_exception);
// LeaveExitFrame expects unwind space to be in r4.
// LeaveExitFrame expects unwind space to be in a register.
mov(r4, Operand(stack_space));
LeaveExitFrame(false);
LeaveExitFrame(false, r4);
mov(pc, lr);
bind(&promote_scheduled_exception);
MaybeObject* result = TryTailCallExternalReference(
@@ -1771,6 +1886,13 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
}
void MacroAssembler::GetLeastBitsFromInt32(Register dst,
Register src,
int num_least_bits) {
and_(dst, src, Operand((1 << num_least_bits) - 1));
}
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
@@ -2113,6 +2235,19 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
}
void MacroAssembler::AbortIfNotString(Register object) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Assert(ne, "Operand is not a string");
push(object);
ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
pop(object);
Assert(lo, "Operand is not a string");
}
void MacroAssembler::AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
const char* message) {
@@ -2379,7 +2514,6 @@ void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
}
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
instructions_(instructions),
@@ -2402,15 +2536,21 @@ CodePatcher::~CodePatcher() {
}
void CodePatcher::Emit(Instr x) {
masm()->emit(x);
void CodePatcher::Emit(Instr instr) {
masm()->emit(instr);
}
void CodePatcher::Emit(Address addr) {
masm()->emit(reinterpret_cast<Instr>(addr));
}
#endif // ENABLE_DEBUGGER_SUPPORT
void CodePatcher::EmitCondition(Condition cond) {
Instr instr = Assembler::instr_at(masm_.pc_);
instr = (instr & ~kCondMask) | cond;
masm_.emit(instr);
}
} } // namespace v8::internal

27
deps/v8/src/arm/macro-assembler-arm.h

@@ -45,6 +45,12 @@ static inline MemOperand FieldMemOperand(Register object, int offset) {
}
static inline Operand SmiUntagOperand(Register object) {
return Operand(object, ASR, kSmiTagSize);
}
// Give alias names to registers
const Register cp = { 8 }; // JavaScript context pointer
const Register roots = { 10 }; // Roots array pointer.
@@ -291,7 +297,9 @@ class MacroAssembler: public Assembler {
void EnterExitFrame(bool save_doubles, int stack_space = 0);
// Leave the current exit frame. Expects the return value in r0.
void LeaveExitFrame(bool save_doubles);
// Expects the number of values pushed prior to the exit frame that are to be
// removed, passed in a register (or no_reg if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count);
// Get the actual activation frame alignment for target environment.
static int ActivationFrameAlignment();
@@ -365,6 +373,13 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopTryHandler();
// Passes thrown value (in r0) to the handler of top of the try handler chain.
void Throw(Register value);
// Propagates an uncatchable exception to the top of the current JS stack's
// handler chain.
void ThrowUncatchable(UncatchableExceptionType type, Register value);
// ---------------------------------------------------------------------------
// Inline caching support
@@ -558,6 +573,7 @@ class MacroAssembler: public Assembler {
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
// Uses VFP instructions to Convert a Smi to a double.
void IntegerToDoubleConversionWithVFP3(Register inReg,
@@ -784,6 +800,9 @@ class MacroAssembler: public Assembler {
void AbortIfSmi(Register object);
void AbortIfNotSmi(Register object);
// Abort execution if argument is not a string. Used in debug code.
void AbortIfNotString(Register object);
// Abort execution if argument is not the root value with the given index.
void AbortIfNotRootValue(Register src,
Heap::RootListIndex root_value_index,
@@ -886,11 +905,15 @@ class CodePatcher {
MacroAssembler* masm() { return &masm_; }
// Emit an instruction directly.
void Emit(Instr x);
void Emit(Instr instr);
// Emit an address directly.
void Emit(Address addr);
// Emit the condition part of an instruction leaving the rest of the current
// instruction unchanged.
void EmitCondition(Condition cond);
private:
byte* address_; // The address of the code being patched.
int instructions_; // Number of instructions of the expected patch size.

71
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -57,48 +57,57 @@ namespace internal {
* - r13/sp : points to tip of C stack.
*
* The remaining registers are free for computations.
*
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
* - direct_call (if 1, direct call from JavaScript code, if 0 call
* through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
* - int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - link address
* - backup of registers r4..r11
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
* - start index (character index of start)
* --- frame pointer ----
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a non-position.
* - At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
* --- sp ---
* - fp[48] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[44] stack_area_base (High end of the memory area to use as
* backtracking stack).
* - fp[40] int* capture_array (int[num_saved_registers_], for output).
* - fp[36] secondary link/return address used by native call.
* --- sp when called ---
* - fp[32] return address (lr).
* - fp[28] old frame pointer (r11).
* - fp[0..24] backup of registers r4..r10.
* --- frame pointer ----
* - fp[-4] end of input (Address of end of string).
* - fp[-8] start of input (Address of first character in string).
* - fp[-12] start index (character index of start).
* - fp[-16] void* input_string (location of a handle containing the string).
* - fp[-20] Offset of location before start of input (effectively character
* position -1). Used to initialize capture registers to a
* non-position.
* - fp[-24] At start (if 1, we are starting at the start of the
* string, otherwise 0)
* - fp[-28] register 0 (Only positions must be stored in the first
* - register 1 num_saved_registers_ registers)
* - ...
* - register num_registers-1
* --- sp ---
*
* The first num_saved_registers_ registers are initialized to point to
* "character -1" in the string (i.e., char_size() bytes before the first
* character of the string). The remaining registers start out as garbage.
*
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
* code and the remaining arguments are passed in registers, e.g. by calling the
* code entry as cast to a function with the signature:
* int (*match)(String* input_string,
* int start_index,
* Address start,
* Address end,
* Address secondary_return_address, // Only used by native call.
* int* capture_output_array,
* bool at_start,
* byte* stack_area_base,
* bool direct_call)
* bool direct_call = false)
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc).
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
* in arm/simulator-arm.h.
* When calling as a non-direct call (i.e., from C++ code), the return address
* area is overwritten with the LR register by the RegExp code. When doing a
* direct call from generated code, the return address is placed there by
* the calling code, as in a normal exit frame.
*/
#define __ ACCESS_MASM(masm_)
@@ -598,16 +607,17 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Entry code:
__ bind(&entry_label_);
// Push Link register.
// Push arguments
// Save callee-save registers.
// Start new stack frame.
// Store link register in existing stack-cell.
// Order here should correspond to order of offset constants in header file.
RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
__ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
// Set frame pointer just above the arguments.
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
__ push(r0); // Make room for "at start" constant (value is irrelevant).
@@ -764,10 +774,9 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
if (stack_overflow_label_.is_linked()) {
SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
Label grow_failed;
// Call GrowStack(backtrack_stackpointer())
// Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 2;
__ PrepareCallCFunction(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());

3
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -122,8 +122,9 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
static const int kRegisterOutput = kReturnAddress + kPointerSize;
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
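With 4-byte pointers these constants reproduce the fp-relative layout documented in the comment block of regexp-macro-assembler-arm.cc: the new kSecondaryReturnAddress slot at fp[36] is what pushes the stack parameters up to fp[40], fp[44] and fp[48]. A C++ sketch of the arithmetic (kFramePointer is 0 in the real header; everything else follows from it):

    constexpr int kPointerSize = 4;  // ARM
    constexpr int kFramePointer = 0;
    constexpr int kStoredRegisters = kFramePointer;
    constexpr int kReturnAddress = kStoredRegisters + 8 * kPointerSize;         // fp[32]
    constexpr int kSecondaryReturnAddress = kReturnAddress + kPointerSize;      // fp[36]
    constexpr int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;     // fp[40]
    constexpr int kStackHighEnd = kRegisterOutput + kPointerSize;               // fp[44]
    constexpr int kDirectCall = kStackHighEnd + kPointerSize;                   // fp[48]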

15
deps/v8/src/arm/simulator-arm.h

@@ -48,10 +48,16 @@ namespace internal {
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
// Call the generated regexp code directly. The entry function pointer should
// expect seven int/pointer sized arguments and return an int.
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
void*, int*, Address, int);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
(entry(p0, p1, p2, p3, p4, p5, p6))
(FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
@@ -362,8 +368,7 @@ class Simulator {
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
Simulator::current()->Call( \
FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == \

41
deps/v8/src/arm/stub-cache-arm.cc

@@ -3259,6 +3259,47 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
}
MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
JSObject* receiver) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
// -- r2 : receiver
// -- r3 : scratch
// -- r4 : scratch
// -- r5 : scratch
// -- r6 : scratch
// -- lr : return address
// -----------------------------------
Label miss;
// Check that the map matches.
__ CheckMap(r2, r6, Handle<Map>(receiver->map()), &miss, false);
GenerateFastPixelArrayStore(masm(),
r2,
r1,
r0,
r3,
r4,
r5,
r6,
true,
true,
&miss,
&miss,
NULL,
&miss);
__ bind(&miss);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
return GetCode(NORMAL, NULL);
}
MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
// ----------- S t a t e -------------
// -- r0 : argc

13
deps/v8/src/arm/virtual-frame-arm.cc

@@ -329,18 +329,25 @@ void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
}
void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
void VirtualFrame::CallStoreIC(Handle<String> name,
bool is_contextual,
StrictModeFlag strict_mode) {
Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
PopToR0();
RelocInfo::Mode mode;
if (is_contextual) {
SpillAll();
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
mode = RelocInfo::CODE_TARGET_CONTEXT;
} else {
EmitPop(r1);
SpillAll();
mode = RelocInfo::CODE_TARGET;
}
__ mov(r2, Operand(name));
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
CallCodeObject(ic, mode, 0);
}

3
deps/v8/src/arm/virtual-frame-arm.h

@@ -294,7 +294,8 @@ class VirtualFrame : public ZoneObject {
// Call store IC. If the store is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are consumed.
// Result is returned in r0.
void CallStoreIC(Handle<String> name, bool is_contextual);
void CallStoreIC(Handle<String> name, bool is_contextual,
StrictModeFlag strict_mode);
// Call keyed load IC. Key and receiver are on the stack. Both are consumed.
// Result is returned in r0.

10
deps/v8/src/array.js

@ -161,15 +161,7 @@ function Join(array, length, separator, convert) {
var result = %_FastAsciiArrayJoin(elements, separator);
if (!IS_UNDEFINED(result)) return result;
var length2 = (length << 1) - 1;
var j = length2;
var i = length;
elements[--j] = elements[--i];
while (i > 0) {
elements[--j] = separator;
elements[--j] = elements[--i];
}
return %StringBuilderConcat(elements, length2, '');
return %StringBuilderJoin(elements, length, separator);
} finally {
// Make sure to remove the last element of the visited array no
// matter what happens.
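
The removed lines above interleaved separators by hand before a single concat; the replacement delegates that work to %StringBuilderJoin in one runtime call. For reference, this standalone C++ sketch (names are ours, not V8's) reproduces the same 2n-1 slot arrangement the deleted code handed to %StringBuilderConcat:

#include <cstdio>
#include <string>
#include <vector>

// Reproduces the layout built by the removed array.js lines: for n elements,
// allocate 2n-1 slots, interleave separators back to front, then concatenate.
static std::string InterleaveJoin(const std::vector<std::string>& elements,
                                  const std::string& separator) {
  int length = static_cast<int>(elements.size());
  if (length == 0) return "";
  int length2 = (length << 1) - 1;  // 2n - 1 slots.
  std::vector<std::string> slots(length2);
  int j = length2;
  int i = length;
  slots[--j] = elements[--i];
  while (i > 0) {
    slots[--j] = separator;
    slots[--j] = elements[--i];
  }
  std::string result;
  for (const std::string& s : slots) result += s;  // The final concat step.
  return result;
}

int main() {
  std::vector<std::string> elements = {"a", "b", "c"};
  // Slots end up as ["a", ",", "b", ",", "c"], giving "a,b,c".
  printf("%s\n", InterleaveJoin(elements, ",").c_str());
  return 0;
}
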

2
deps/v8/src/assembler.cc

@ -68,7 +68,7 @@ const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
const double DoubleConstant::minus_zero = -0.0;
const double DoubleConstant::negative_infinity = -V8_INFINITY;
const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
// Implementation of Label

8
deps/v8/src/assembler.h

@ -178,10 +178,16 @@ class RelocInfo BASE_EMBEDDED {
// invalid/uninitialized position value.
static const int kNoPosition = -1;
// This string is used to add padding comments to the reloc info in cases
// where we are not sure to have enough space for patching in during
// lazy deoptimization. This is the case if we have indirect calls for which
// we do not normally record relocation info.
static const char* kFillerCommentString;
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
CODE_TARGET_CONTEXT, // Code target used for contextual loads.
CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
DEBUG_BREAK, // Code target for the debugger statement.
CODE_TARGET, // Code target which is not any of the above.
EMBEDDED_OBJECT,

4
deps/v8/src/ast.cc

@ -618,7 +618,9 @@ bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
cell_ = Handle<JSGlobalPropertyCell>::null();
LookupResult lookup;
global->Lookup(*name, &lookup);
if (lookup.IsProperty() && lookup.type() == NORMAL) {
if (lookup.IsProperty() &&
lookup.type() == NORMAL &&
lookup.holder() == *global) {
cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));

11
deps/v8/src/bignum.cc

@ -67,7 +67,7 @@ void Bignum::AssignUInt64(uint64_t value) {
int needed_bigits = kUInt64Size / kBigitSize + 1;
EnsureCapacity(needed_bigits);
for (int i = 0; i < needed_bigits; ++i) {
bigits_[i] = value & kBigitMask;
bigits_[i] = static_cast<Chunk>(value & kBigitMask);
value = value >> kBigitSize;
}
used_digits_ = needed_bigits;
@ -266,7 +266,7 @@ void Bignum::MultiplyByUInt32(uint32_t factor) {
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
bigits_[used_digits_] = carry & kBigitMask;
bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
used_digits_++;
carry >>= kBigitSize;
}
@ -287,13 +287,13 @@ void Bignum::MultiplyByUInt64(uint64_t factor) {
uint64_t product_low = low * bigits_[i];
uint64_t product_high = high * bigits_[i];
uint64_t tmp = (carry & kBigitMask) + product_low;
bigits_[i] = tmp & kBigitMask;
bigits_[i] = static_cast<Chunk>(tmp & kBigitMask);
carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
(product_high << (32 - kBigitSize));
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
bigits_[used_digits_] = carry & kBigitMask;
bigits_[used_digits_] = static_cast<Chunk>(carry & kBigitMask);
used_digits_++;
carry >>= kBigitSize;
}
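
The MultiplyByUInt64 hunk above avoids needing a 128-bit product by splitting the 64-bit factor into 32-bit halves and folding each half's partial product into a running carry. A compact standalone sketch of the same carry discipline (the 28-bit kBigitSize is inferred from the (32 - kBigitSize) shift above; the helper and test value are ours):

#include <cstdint>
#include <cstdio>

// Same scheme as Bignum::MultiplyByUInt64 above: split the 64-bit factor
// into 32-bit halves so every partial product fits in a uint64_t.
static const int kBigitSize = 28;
static const uint32_t kBigitMask = (1u << kBigitSize) - 1;

static void MultiplyByUInt64(uint32_t* bigits, int& used, uint64_t factor) {
  if (factor == 0) { used = 0; return; }
  uint64_t carry = 0;
  uint64_t low = factor & 0xFFFFFFFF;
  uint64_t high = factor >> 32;
  for (int i = 0; i < used; ++i) {
    uint64_t product_low = low * bigits[i];
    uint64_t product_high = high * bigits[i];
    uint64_t tmp = (carry & kBigitMask) + product_low;
    bigits[i] = static_cast<uint32_t>(tmp & kBigitMask);
    carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
            (product_high << (32 - kBigitSize));
  }
  while (carry != 0) {  // Assumes the array has room for the extra bigits.
    bigits[used++] = static_cast<uint32_t>(carry & kBigitMask);
    carry >>= kBigitSize;
  }
}

int main() {
  uint32_t bigits[8] = {1};  // Value 1, one 28-bit bigit in use.
  int used = 1;
  MultiplyByUInt64(bigits, used, 0x123456789ABCDEFull);
  for (int i = used - 1; i >= 0; --i) printf("%07x ", bigits[i]);
  printf("\n");  // Prints the factor split into 28-bit bigits.
  return 0;
}
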
@ -748,7 +748,8 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) {
for (int i = 0; i < other.used_digits_; ++i) {
DoubleChunk product = static_cast<DoubleChunk>(factor) * other.bigits_[i];
DoubleChunk remove = borrow + product;
Chunk difference = bigits_[i + exponent_diff] - (remove & kBigitMask);
Chunk difference =
bigits_[i + exponent_diff] - static_cast<Chunk>(remove & kBigitMask);
bigits_[i + exponent_diff] = difference & kBigitMask;
borrow = static_cast<Chunk>((difference >> (kChunkSize - 1)) +
(remove >> kBigitSize));

51
deps/v8/src/bootstrapper.cc

@ -349,7 +349,7 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
prototype,
call_code,
is_ecma_native);
SetProperty(target, symbol, function, DONT_ENUM);
SetLocalPropertyNoThrow(target, symbol, function, DONT_ENUM);
if (is_ecma_native) {
function->shared()->set_instance_class_name(*symbol);
}
@ -580,8 +580,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(js_global_function->instance_prototype()));
SetProperty(prototype, Factory::constructor_symbol(),
Top::object_function(), NONE);
SetLocalPropertyNoThrow(
prototype, Factory::constructor_symbol(), Top::object_function(), NONE);
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
FunctionTemplateInfo::cast(js_global_template->constructor()));
@ -683,7 +683,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
global_context()->set_security_token(*inner_global);
Handle<String> object_name = Handle<String>(Heap::Object_symbol());
SetProperty(inner_global, object_name, Top::object_function(), DONT_ENUM);
SetLocalPropertyNoThrow(inner_global, object_name,
Top::object_function(), DONT_ENUM);
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
@ -851,7 +852,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = Factory::NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
SetProperty(global, name, json_object, DONT_ENUM);
SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
global_context()->set_json_object(*json_object);
}
@ -880,12 +881,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
global_context()->set_arguments_boilerplate(*result);
// Note: callee must be added as the first property and
// length must be added as the second property.
SetProperty(result, Factory::callee_symbol(),
Factory::undefined_value(),
DONT_ENUM);
SetProperty(result, Factory::length_symbol(),
Factory::undefined_value(),
DONT_ENUM);
SetLocalPropertyNoThrow(result, Factory::callee_symbol(),
Factory::undefined_value(),
DONT_ENUM);
SetLocalPropertyNoThrow(result, Factory::length_symbol(),
Factory::undefined_value(),
DONT_ENUM);
#ifdef DEBUG
LookupResult lookup;
@ -1085,10 +1086,8 @@ bool Genesis::InstallNatives() {
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
Handle<String> global_symbol = Factory::LookupAsciiSymbol("global");
SetProperty(builtins,
global_symbol,
Handle<Object>(global_context()->global()),
attributes);
Handle<Object> global_obj(global_context()->global());
SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
// Setup the reference from the global object to the builtins object.
JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
@ -1480,17 +1479,17 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives_string =
Factory::LookupAsciiSymbol(FLAG_expose_natives_as);
SetProperty(js_global, natives_string,
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
SetLocalPropertyNoThrow(js_global, natives_string,
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
Handle<Object> Error = GetProperty(js_global, "Error");
if (Error->IsJSObject()) {
Handle<String> name = Factory::LookupAsciiSymbol("stackTraceLimit");
SetProperty(Handle<JSObject>::cast(Error),
name,
Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
NONE);
SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
name,
Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
NONE);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -1507,8 +1506,8 @@ void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
Handle<String> debug_string =
Factory::LookupAsciiSymbol(FLAG_expose_debug_as);
SetProperty(js_global, debug_string,
Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM);
Handle<Object> global_proxy(Debug::debug_context()->global_proxy());
SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
}
#endif
}
@ -1679,7 +1678,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<String> key = Handle<String>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
SetProperty(to, key, value, details.attributes());
SetLocalPropertyNoThrow(to, key, value, details.attributes());
break;
}
case CONSTANT_FUNCTION: {
@ -1687,7 +1686,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<JSFunction> fun =
Handle<JSFunction>(descs->GetConstantFunction(i));
SetProperty(to, key, fun, details.attributes());
SetLocalPropertyNoThrow(to, key, fun, details.attributes());
break;
}
case CALLBACKS: {
@ -1737,7 +1736,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
}
PropertyDetails details = properties->DetailsAt(i);
SetProperty(to, key, value, details.attributes());
SetLocalPropertyNoThrow(to, key, value, details.attributes());
}
}
}

45
deps/v8/src/builtins.cc

@ -368,7 +368,9 @@ static bool ArrayPrototypeHasNoElements(Context* global_context,
array_proto = JSObject::cast(array_proto->GetPrototype());
ASSERT(array_proto->elements() == Heap::empty_fixed_array());
// Object.prototype
array_proto = JSObject::cast(array_proto->GetPrototype());
Object* proto = array_proto->GetPrototype();
if (proto == Heap::null_value()) return false;
array_proto = JSObject::cast(proto);
if (array_proto != global_context->initial_object_prototype()) return false;
if (array_proto->elements() != Heap::empty_fixed_array()) return false;
ASSERT(array_proto->GetPrototype()->IsNull());
@ -1305,6 +1307,11 @@ static void Generate_StoreIC_Initialize(MacroAssembler* masm) {
}
static void Generate_StoreIC_Initialize_Strict(MacroAssembler* masm) {
StoreIC::GenerateInitialize(masm);
}
static void Generate_StoreIC_Miss(MacroAssembler* masm) {
StoreIC::GenerateMiss(masm);
}
@ -1315,8 +1322,18 @@ static void Generate_StoreIC_Normal(MacroAssembler* masm) {
}
static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
StoreIC::GenerateNormal(masm);
}
static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm);
StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICNonStrict);
}
static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICStrict);
}
@ -1325,11 +1342,21 @@ static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
}
static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) {
StoreIC::GenerateArrayLength(masm);
}
static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
StoreIC::GenerateGlobalProxy(masm);
}
static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
StoreIC::GenerateGlobalProxy(masm);
}
static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
KeyedStoreIC::GenerateGeneric(masm);
}
@ -1442,13 +1469,13 @@ void Builtins::Setup(bool create_heap_objects) {
extra_args \
},
#define DEF_FUNCTION_PTR_A(name, kind, state) \
{ FUNCTION_ADDR(Generate_##name), \
NULL, \
#name, \
name, \
Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state), \
NO_EXTRA_ARGUMENTS \
#define DEF_FUNCTION_PTR_A(name, kind, state, extra) \
{ FUNCTION_ADDR(Generate_##name), \
NULL, \
#name, \
name, \
Code::ComputeFlags(Code::kind, NOT_IN_LOOP, state, extra), \
NO_EXTRA_ARGUMENTS \
},
// Define array of pointers to generators and C builtin functions.

170
deps/v8/src/builtins.h

@ -63,73 +63,135 @@ enum BuiltinExtraArguments {
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructCall, BUILTIN, UNINITIALIZED) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(LazyCompile, BUILTIN, UNINITIALIZED) \
V(LazyRecompile, BUILTIN, UNINITIALIZED) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED) \
V(NotifyOSR, BUILTIN, UNINITIALIZED) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructCall, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LazyCompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED) \
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC) \
V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC) \
V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC) \
V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC) \
V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC) \
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC) \
V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC) \
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
StoreIC::kStoreICStrict) \
V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
StoreIC::kStoreICStrict) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
StoreIC::kStoreICStrict) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
StoreIC::kStoreICStrict) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
StoreIC::kStoreICStrict) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED) \
V(FunctionApply, BUILTIN, UNINITIALIZED) \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(FunctionApply, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(ArrayCode, BUILTIN, UNINITIALIZED) \
V(ArrayConstructCode, BUILTIN, UNINITIALIZED) \
V(ArrayCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED) \
V(StringConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED)
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK) \
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) \
V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK)
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
Code::kNoExtraICState)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
@ -152,7 +214,7 @@ enum BuiltinExtraArguments {
V(SHL, 1) \
V(SAR, 1) \
V(SHR, 1) \
V(DELETE, 1) \
V(DELETE, 2) \
V(IN, 1) \
V(INSTANCE_OF, 1) \
V(GET_KEYS, 0) \
@ -186,7 +248,7 @@ class Builtins : public AllStatic {
enum Name {
#define DEF_ENUM_C(name, ignore) name,
#define DEF_ENUM_A(name, kind, state) name,
#define DEF_ENUM_A(name, kind, state, extra) name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
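
Each BUILTIN_LIST_* above is an X-macro: the list is written once, and every client (the enum here, the function-pointer table in builtins.cc) supplies its own V to extract the columns it cares about, which is why adding the extra-state column forces every V definition, like DEF_ENUM_A, to grow a parameter. A freestanding sketch of the pattern with a toy list (names and columns are ours):

#include <cstdio>

// Toy X-macro list in the style of BUILTIN_LIST_A: every entry carries its
// columns (name, kind, extra), and each client picks what it needs.
#define TOY_BUILTIN_LIST(V)                 \
  V(LoadIC_Initialize, LOAD_IC, 0)          \
  V(StoreIC_Initialize, STORE_IC, 0)        \
  V(StoreIC_Initialize_Strict, STORE_IC, 1)

// Client 1: an enum of names (compare DEF_ENUM_A above).
#define DEF_ENUM(name, kind, extra) k##name,
enum ToyBuiltin { TOY_BUILTIN_LIST(DEF_ENUM) kToyBuiltinCount };
#undef DEF_ENUM

// Client 2: a parallel table of printable descriptors.
#define DEF_DESC(name, kind, extra) { #name, #kind, extra },
static const struct { const char* name; const char* kind; int extra; }
    kToyDescriptors[] = { TOY_BUILTIN_LIST(DEF_DESC) };
#undef DEF_DESC

int main() {
  for (int i = 0; i < kToyBuiltinCount; ++i) {
    printf("%d: %s (%s, extra=%d)\n", i, kToyDescriptors[i].name,
           kToyDescriptors[i].kind, kToyDescriptors[i].extra);
  }
  return 0;
}
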

4
deps/v8/src/code-stubs.cc

@ -32,7 +32,6 @@
#include "factory.h"
#include "gdb-jit.h"
#include "macro-assembler.h"
#include "oprofile-agent.h"
namespace v8 {
namespace internal {
@ -63,9 +62,6 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
OPROFILE(CreateNativeCodeRegion(GetName(),
code->instruction_start(),
code->instruction_size()));
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
Counters::total_stubs_code_size.Increment(code->instruction_size());

3
deps/v8/src/code-stubs.h

@ -86,9 +86,6 @@ namespace internal {
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
// Types of uncatchable exceptions.
enum UncatchableExceptionType { OUT_OF_MEMORY, TERMINATION };
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum UnaryOverwriteMode { UNARY_OVERWRITE, UNARY_NO_OVERWRITE };

1
deps/v8/src/codegen.cc

@ -31,7 +31,6 @@
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "oprofile-agent.h"
#include "prettyprinter.h"
#include "register-allocator-inl.h"
#include "rewriter.h"

25
deps/v8/src/compiler.cc

@ -39,7 +39,6 @@
#include "hydrogen.h"
#include "lithium.h"
#include "liveedit.h"
#include "oprofile-agent.h"
#include "parser.h"
#include "rewriter.h"
#include "runtime-profiler.h"
@ -289,6 +288,11 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
HGraphBuilder builder(&oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(info);
if (Top::has_pending_exception()) {
info->SetCode(Handle<Code>::null());
return false;
}
if (graph != NULL && FLAG_build_lithium) {
Handle<Code> code = graph->Compile();
if (!code.is_null()) {
@ -419,9 +423,6 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
String::cast(script->name())));
OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
info->code()->instruction_start(),
info->code()->instruction_size()));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
info->code()));
@ -432,9 +433,6 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
""));
OPROFILE(CreateNativeCodeRegion(info->is_eval() ? "Eval" : "Script",
info->code()->instruction_start(),
info->code()->instruction_size()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
@ -608,7 +606,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
// Compile the code.
if (!MakeCode(info)) {
Top::StackOverflow();
if (!Top::has_pending_exception()) {
Top::StackOverflow();
}
} else {
ASSERT(!info->code().is_null());
Handle<Code> code = info->code();
@ -783,7 +783,6 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
if (Logger::is_logging() ||
OProfileAgent::is_enabled() ||
CpuProfiler::is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
@ -795,18 +794,10 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
*name,
String::cast(script->name()),
line_num));
OPROFILE(CreateNativeCodeRegion(*name,
String::cast(script->name()),
line_num,
code->instruction_start(),
code->instruction_size()));
} else {
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*name));
OPROFILE(CreateNativeCodeRegion(*name,
code->instruction_start(),
code->instruction_size()));
}
}

4
deps/v8/src/compiler.h

@ -71,7 +71,6 @@ class CompilationInfo BASE_EMBEDDED {
flags_ |= IsGlobal::encode(true);
}
void MarkAsStrict() {
ASSERT(!is_lazy());
flags_ |= IsStrict::encode(true);
}
StrictModeFlag StrictMode() {
@ -153,6 +152,9 @@ class CompilationInfo BASE_EMBEDDED {
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
if (!shared_info_.is_null() && shared_info_->strict_mode()) {
MarkAsStrict();
}
}
void SetMode(Mode mode) {

2
deps/v8/src/d8.cc

@ -127,11 +127,13 @@ bool Shell::ExecuteString(Handle<String> source,
} else {
Handle<Value> result = script->Run();
if (result.IsEmpty()) {
ASSERT(try_catch.HasCaught());
// Print errors that happened during execution.
if (report_exceptions && !i::FLAG_debugger)
ReportException(&try_catch);
return false;
} else {
ASSERT(!try_catch.HasCaught());
if (print_result && !result->IsUndefined()) {
// If all went well and the result wasn't undefined then print
// the returned value.

7
deps/v8/src/date.js

@ -81,12 +81,7 @@ function TimeFromYear(year) {
function InLeapYear(time) {
return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
}
function DayWithinYear(time) {
return DAY(time) - DayFromYear(YearFromTime(time));
return DaysInYear(YearFromTime(time)) - 365; // Returns 1 or 0.
}
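
The rewritten InLeapYear body leans on DaysInYear returning exactly 365 or 366, so subtracting 365 yields the 0/1 flag without a compare. A quick standalone check of that arithmetic (helper bodies are ours):

#include <cassert>

// DaysInYear is 366 for leap years and 365 otherwise, so the new
// InLeapYear body reduces to a subtraction instead of a comparison.
static int DaysInYear(int year) {
  bool leap = (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
  return leap ? 366 : 365;
}

static int InLeapYear(int year) { return DaysInYear(year) - 365; }

int main() {
  assert(InLeapYear(2000) == 1);  // 366 - 365
  assert(InLeapYear(1900) == 0);  // 365 - 365
  assert(InLeapYear(2004) == 1);
  assert(InLeapYear(2011) == 0);
  return 0;
}
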

4
deps/v8/src/debug.cc

@ -835,7 +835,9 @@ bool Debug::Load() {
// Expose the builtins object in the debugger context.
Handle<String> key = Factory::LookupAsciiSymbol("builtins");
Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
SetProperty(global, key, Handle<Object>(global->builtins()), NONE);
RETURN_IF_EMPTY_HANDLE_VALUE(
SetProperty(global, key, Handle<Object>(global->builtins()), NONE),
false);
// Compile the JavaScript for the debugger in the debugger context.
Debugger::set_compiling_natives(true);

14
deps/v8/src/deoptimizer.cc

@ -663,7 +663,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::REGISTER: {
int output_reg = iterator->Next();
if (FLAG_trace_osr) {
PrintF(" %s <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
input_value,
*input_offset);
@ -690,7 +690,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
return false;
}
if (FLAG_trace_osr) {
PrintF(" %s <- %d (int32) ; [esp + %d]\n",
PrintF(" %s <- %d (int32) ; [sp + %d]\n",
converter.NameOfCPURegister(output_reg),
int32_value,
*input_offset);
@ -706,7 +706,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int output_reg = iterator->Next();
double double_value = input_object->Number();
if (FLAG_trace_osr) {
PrintF(" %s <- %g (double) ; [esp + %d]\n",
PrintF(" %s <- %g (double) ; [sp + %d]\n",
DoubleRegister::AllocationIndexToString(output_reg),
double_value,
*input_offset);
@ -720,7 +720,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
unsigned output_offset =
output->GetOffsetFromSlotIndex(this, output_index);
if (FLAG_trace_osr) {
PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
output_offset,
input_value,
*input_offset);
@ -749,7 +749,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
return false;
}
if (FLAG_trace_osr) {
PrintF(" [esp + %d] <- %d (int32) ; [esp + %d]\n",
PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
output_offset,
int32_value,
*input_offset);
@ -773,12 +773,12 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
int32_t lower = static_cast<int32_t>(int_value);
int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
if (FLAG_trace_osr) {
PrintF(" [esp + %d] <- 0x%08x (upper bits of %g) ; [esp + %d]\n",
PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
output_offset + kUpperOffset,
upper,
double_value,
*input_offset);
PrintF(" [esp + %d] <- 0x%08x (lower bits of %g) ; [esp + %d]\n",
PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
output_offset + kLowerOffset,
lower,
double_value,

1
deps/v8/src/execution.cc

@ -403,6 +403,7 @@ void StackGuard::ThreadLocal::Initialize() {
if (real_climit_ == kIllegalLimit) {
// Takes the address of the limit variable in order to find out where
// the top of stack is right now.
const uintptr_t kLimitSize = FLAG_stack_size * KB;
uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
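
The hunk above estimates the current stack top by taking the address of a stack-allocated local, then places the limit one stack budget below it. A standalone illustration of the idea (function names are ours; assumes a downward-growing stack, as the original code does):

#include <cstdint>
#include <cstdio>

// Approximates the current stack top via the address of a local variable,
// then derives a limit one stack budget below it, the same idea as
// StackGuard::ThreadLocal::Initialize above.
static uintptr_t ComputeStackLimit(uintptr_t stack_budget_bytes) {
  uintptr_t local = 0;
  uintptr_t top = reinterpret_cast<uintptr_t>(&local);
  return top - stack_budget_bytes;  // Stacks grow downward here.
}

int main() {
  const uintptr_t kBudget = 512 * 1024;  // e.g. kPointerSize * 128 KB, 32-bit
  uintptr_t limit = ComputeStackLimit(kBudget);
  printf("limit is %p\n", reinterpret_cast<void*>(limit));
  return 0;
}
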

2
deps/v8/src/execution.h

@ -243,8 +243,6 @@ class StackGuard : public AllStatic {
static void EnableInterrupts();
static void DisableInterrupts();
static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
#ifdef V8_TARGET_ARCH_X64
static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);

9
deps/v8/src/factory.cc

@ -334,6 +334,11 @@ Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
}
Handle<Map> Factory::GetPixelArrayElementsMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(src->GetPixelArrayElementsMap(), Map);
}
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
}
@ -580,7 +585,9 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
// Set function.prototype and give the prototype a constructor
// property that refers to the function.
SetPrototypeProperty(function, prototype);
SetProperty(prototype, Factory::constructor_symbol(), function, DONT_ENUM);
// Currently safe because it is only invoked from Genesis.
SetLocalPropertyNoThrow(
prototype, Factory::constructor_symbol(), function, DONT_ENUM);
return function;
}

2
deps/v8/src/factory.h

@ -196,6 +196,8 @@ class Factory : public AllStatic {
static Handle<Map> GetSlowElementsMap(Handle<Map> map);
static Handle<Map> GetPixelArrayElementsMap(Handle<Map> map);
static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
// Numbers (e.g. literals) are pretenured by the parser.

10
deps/v8/src/flag-definitions.h

@ -134,11 +134,7 @@ DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(aggressive_loop_invariant_motion, true,
"aggressive motion of instructions out of loops")
#ifdef V8_TARGET_ARCH_IA32
DEFINE_bool(use_osr, true, "use on-stack replacement")
#else
DEFINE_bool(use_osr, false, "use on-stack replacement")
#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
@ -231,6 +227,10 @@ DEFINE_bool(debugger_auto_break, true,
"in the queue")
DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
// execution.cc
DEFINE_int(stack_size, kPointerSize * 128,
"default size of stack region v8 is allowed to use (in KkBytes)")
// frames.cc
DEFINE_int(max_stack_trace_source_length, 300,
"maximum length of function source code printed in a stack trace.")
@ -374,6 +374,7 @@ DEFINE_bool(debug_script_collected_events, true,
DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
//
// Debug only flags
@ -493,7 +494,6 @@ DEFINE_bool(log_regexp, false, "Log regular expression execution.")
DEFINE_bool(sliding_state_window, false,
"Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(oprofile, false, "Enable JIT agent for OProfile.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
//

2
deps/v8/src/full-codegen.cc

@ -913,7 +913,7 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
PrepareForBailoutForId(stmt->EntryId(), TOS_REG);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
VisitStatements(stmt->statements());
__ bind(nested_statement.break_target());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);

6
deps/v8/src/full-codegen.h

@ -531,8 +531,9 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_strict() { return function()->strict_mode(); }
StrictModeFlag strict_mode_flag() {
return function()->strict_mode() ? kStrictMode : kNonStrictMode;
return is_strict() ? kStrictMode : kNonStrictMode;
}
FunctionLiteral* function() { return info_->function(); }
Scope* scope() { return info_->scope(); }
@ -544,7 +545,8 @@ class FullCodeGenerator: public AstVisitor {
void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
// Calling an IC stub with a patch site. Passing NULL for patch_site
// indicates no inlined smi code and emits a nop after the IC call.
// or non NULL patch_site which is not activated indicates no inlined smi code
// and emits a nop after the IC call.
void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
// Set fields in the stack frame. Offsets are the frame pointer relative

121
deps/v8/src/gdb-jit.cc

@ -395,7 +395,7 @@ class ELF BASE_EMBEDDED {
void WriteHeader(Writer* w) {
ASSERT(w->position() == 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if defined(V8_TARGET_ARCH_IA32)
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
const uint8_t ident[16] =
{ 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#elif defined(V8_TARGET_ARCH_X64)
@ -413,6 +413,10 @@ class ELF BASE_EMBEDDED {
// System V ABI, AMD64 Supplement
// http://www.x86-64.org/documentation/abi.pdf
header->machine = 62;
#elif defined(V8_TARGET_ARCH_ARM)
// Set to EM_ARM, defined as 40, in "ARM ELF File Format" at
// infocenter.arm.com/help/topic/com.arm.doc.dui0101a/DUI0101A_Elf.pdf
header->machine = 40;
#else
#error Unsupported target architecture.
#endif
@ -503,8 +507,7 @@ class ELFSymbol BASE_EMBEDDED {
Binding binding() const {
return static_cast<Binding>(info >> 4);
}
#if defined(V8_TARGET_ARCH_IA32)
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_ARM)
struct SerializedLayout {
SerializedLayout(uint32_t name,
uintptr_t value,
@ -857,14 +860,20 @@ class DebugLineSection : public ELFSection {
Writer::Slot<uint32_t> total_length = w->CreateSlotHere<uint32_t>();
uintptr_t start = w->position();
// Used for special opcodes
const int8_t line_base = 1;
const uint8_t line_range = 7;
const int8_t max_line_incr = (line_base + line_range - 1);
const uint8_t opcode_base = DW_LNS_NEGATE_STMT + 1;
w->Write<uint16_t>(2); // Field version.
Writer::Slot<uint32_t> prologue_length = w->CreateSlotHere<uint32_t>();
uintptr_t prologue_start = w->position();
w->Write<uint8_t>(1); // Field minimum_instruction_length.
w->Write<uint8_t>(1); // Field default_is_stmt.
w->Write<int8_t>(0); // Field line_base.
w->Write<uint8_t>(2); // Field line_range.
w->Write<uint8_t>(DW_LNS_NEGATE_STMT + 1); // Field opcode_base.
w->Write<int8_t>(line_base); // Field line_base.
w->Write<uint8_t>(line_range); // Field line_range.
w->Write<uint8_t>(opcode_base); // Field opcode_base.
w->Write<uint8_t>(0); // DW_LNS_COPY operands count.
w->Write<uint8_t>(1); // DW_LNS_ADVANCE_PC operands count.
w->Write<uint8_t>(1); // DW_LNS_ADVANCE_LINE operands count.
@ -881,6 +890,7 @@ class DebugLineSection : public ELFSection {
WriteExtendedOpcode(w, DW_LNE_SET_ADDRESS, sizeof(intptr_t));
w->Write<intptr_t>(desc_->CodeStart());
w->Write<uint8_t>(DW_LNS_COPY);
intptr_t pc = 0;
intptr_t line = 1;
@ -888,29 +898,66 @@ class DebugLineSection : public ELFSection {
List<GDBJITLineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
pc_info->Sort(&ComparePCInfo);
for (int i = 0; i < pc_info->length(); i++) {
int pc_info_length = pc_info->length();
for (int i = 0; i < pc_info_length; i++) {
GDBJITLineInfo::PCInfo* info = &pc_info->at(i);
uintptr_t pc_diff = info->pc_ - pc;
ASSERT(info->pc_ >= pc);
if (pc_diff != 0) {
w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
w->WriteSLEB128(pc_diff);
pc += pc_diff;
}
intptr_t line_diff = desc_->GetScriptLineNumber(info->pos_) - line;
if (line_diff != 0) {
w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
w->WriteSLEB128(line_diff);
line += line_diff;
// Reduce bloating in the debug line table by removing duplicate line
// entries (per DWARF2 standard).
intptr_t new_line = desc_->GetScriptLineNumber(info->pos_);
if (new_line == line) {
continue;
}
if (is_statement != info->is_statement_) {
// Mark statement boundaries. For a better debugging experience, mark
// the last pc address in the function as a statement (e.g. "}"), so that
// a user can see the result of the last line executed in the function,
// should control reach the end.
if ((i+1) == pc_info_length) {
if (!is_statement) {
w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
}
} else if (is_statement != info->is_statement_) {
w->Write<uint8_t>(DW_LNS_NEGATE_STMT);
is_statement = !is_statement;
}
if (pc_diff != 0 || i == 0) {
// Generate special opcodes, if possible. This results in more compact
// debug line tables. See the DWARF 2.0 standard to learn more about
// special opcodes.
uintptr_t pc_diff = info->pc_ - pc;
intptr_t line_diff = new_line - line;
// Compute special opcode (see DWARF 2.0 standard)
intptr_t special_opcode = (line_diff - line_base) +
(line_range * pc_diff) + opcode_base;
// If special_opcode is less than or equal to 255, it can be used as a
// special opcode. If line_diff is larger than the max line increment
// allowed for a special opcode, or if line_diff is less than the minimum
// line that can be added to the line register (i.e. line_base), then
// special_opcode can't be used.
if ((special_opcode >= opcode_base) && (special_opcode <= 255) &&
(line_diff <= max_line_incr) && (line_diff >= line_base)) {
w->Write<uint8_t>(special_opcode);
} else {
w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
w->WriteSLEB128(pc_diff);
w->Write<uint8_t>(DW_LNS_ADVANCE_LINE);
w->WriteSLEB128(line_diff);
w->Write<uint8_t>(DW_LNS_COPY);
}
// Increment the pc and line operands.
pc += pc_diff;
line += line_diff;
}
// Advance the pc to the end of the routine, since the end sequence opcode
// requires this.
w->Write<uint8_t>(DW_LNS_ADVANCE_PC);
w->WriteSLEB128(desc_->CodeSize() - pc);
WriteExtendedOpcode(w, DW_LNE_END_SEQUENCE, 0);
total_length.set(static_cast<uint32_t>(w->position() - start));
return true;
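
With the constants fixed in the prologue above (line_base = 1, line_range = 7, and opcode_base = DW_LNS_NEGATE_STMT + 1, which works out to 7 under DWARF2's standard opcode numbering, an assumption here), a (pc_diff, line_diff) pair encodes into a single byte whenever the formula stays in range. A worked check:

#include <cstdio>

// Constants from the prologue above; DW_LNS_NEGATE_STMT is 6 in DWARF2's
// standard numbering, so opcode_base is taken to be 7 here.
static const int kLineBase = 1;
static const int kLineRange = 7;
static const int kOpcodeBase = 6 + 1;
static const int kMaxLineIncr = kLineBase + kLineRange - 1;  // 7

// Returns the one-byte special opcode, or -1 if the pair needs the
// long-form DW_LNS_ADVANCE_PC / DW_LNS_ADVANCE_LINE / DW_LNS_COPY sequence.
static int SpecialOpcode(long pc_diff, long line_diff) {
  long op = (line_diff - kLineBase) + (kLineRange * pc_diff) + kOpcodeBase;
  bool ok = op >= kOpcodeBase && op <= 255 &&
            line_diff >= kLineBase && line_diff <= kMaxLineIncr;
  return ok ? static_cast<int>(op) : -1;
}

int main() {
  // Advance the pc by 2 and the line by 3 in one byte:
  printf("%d\n", SpecialOpcode(2, 3));   // (3-1) + 7*2 + 7 = 23
  // A line jump of 40 cannot be encoded as a special opcode:
  printf("%d\n", SpecialOpcode(2, 40));  // -1
  return 0;
}
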
@ -1237,6 +1284,20 @@ static void DestroyCodeEntry(JITCodeEntry* entry) {
static void RegisterCodeEntry(JITCodeEntry* entry) {
#if defined(DEBUG) && !defined(WIN32)
static int file_num = 0;
if (FLAG_gdbjit_dump) {
static const int kMaxFileNameSize = 64;
static const char* kElfFilePrefix = "/tmp/elfdump";
static const char* kObjFileExt = ".o";
char file_name[64];
OS::SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "%s%d%s",
kElfFilePrefix, file_num++, kObjFileExt);
WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
}
#endif
entry->next_ = __jit_debug_descriptor.first_entry_;
if (entry->next_ != NULL) entry->next_->prev_ = entry;
__jit_debug_descriptor.first_entry_ =
@ -1294,7 +1355,13 @@ static bool SameCodeObjects(void* key1, void* key2) {
}
static HashMap entries(&SameCodeObjects);
static HashMap* GetEntries() {
static HashMap* entries = NULL;
if (entries == NULL) {
entries = new HashMap(&SameCodeObjects);
}
return entries;
}
static uint32_t HashForCodeObject(Code* code) {
@ -1398,7 +1465,7 @@ void GDBJITInterface::AddCode(const char* name,
if (!FLAG_gdbjit) return;
AssertNoAllocation no_gc;
HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true);
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
GDBJITLineInfo* lineinfo = UntagLineInfo(e->value);
@ -1411,7 +1478,7 @@ void GDBJITInterface::AddCode(const char* name,
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
entries.Remove(code, HashForCodeObject(code));
GetEntries()->Remove(code, HashForCodeObject(code));
return;
}
@ -1464,7 +1531,9 @@ void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), false);
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
if (e == NULL) return;
if (IsLineInfoTagged(e->value)) {
@ -1475,14 +1544,14 @@ void GDBJITInterface::RemoveCode(Code* code) {
DestroyCodeEntry(entry);
}
e->value = NULL;
entries.Remove(code, HashForCodeObject(code));
GetEntries()->Remove(code, HashForCodeObject(code));
}
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = entries.Lookup(code, HashForCodeObject(code), true);
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
e->value = TagLineInfo(line_info);
}

12
deps/v8/src/handles.cc

@ -290,6 +290,17 @@ Handle<Object> SetLocalPropertyIgnoreAttributes(
}
void SetLocalPropertyNoThrow(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
ASSERT(!Top::has_pending_exception());
CHECK(!SetLocalPropertyIgnoreAttributes(
object, key, value, attributes).is_null());
CHECK(!Top::has_pending_exception());
}
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
@ -808,6 +819,7 @@ static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
// Compile the source information to a code object.
ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
ASSERT(!Top::has_pending_exception());
bool result = Compiler::CompileLazy(info);
ASSERT(result != Top::has_pending_exception());
if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();

7
deps/v8/src/handles.h

@ -223,6 +223,13 @@ Handle<Object> SetLocalPropertyIgnoreAttributes(
Handle<Object> value,
PropertyAttributes attributes);
// Used to set local properties on an object we totally control
// and which therefore has no accessors or the like.
void SetLocalPropertyNoThrow(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes = NONE);
Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,

2
deps/v8/src/heap.cc

@ -4128,7 +4128,7 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
#ifdef DEBUG
void Heap::ZapFromSpace() {
ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
for (Address a = new_space_.FromSpaceLow();
a < new_space_.FromSpaceHigh();
a += kPointerSize) {

1
deps/v8/src/heap.h

@ -184,6 +184,7 @@ namespace internal {
V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized") \
V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized") \
V(KeyedLoadPixelArray_symbol, "KeyedLoadPixelArray") \
V(KeyedStorePixelArray_symbol, "KeyedStorePixelArray") \
V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
V(illegal_access_symbol, "illegal access") \
V(out_of_memory_symbol, "out-of-memory") \

186
deps/v8/src/hydrogen-instructions.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -281,6 +281,33 @@ void HValue::SetOperandAt(int index, HValue* value) {
}
void HLoadKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
if (index < 2) {
operands_[index] = value;
} else {
context_ = value;
}
}
void HStoreKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
if (index < 3) {
operands_[index] = value;
} else {
context_ = value;
}
}
void HStoreNamedGeneric::InternalSetOperandAt(int index, HValue* value) {
if (index < 2) {
operands_[index] = value;
} else {
context_ = value;
}
}
void HValue::ReplaceAndDelete(HValue* other) {
ReplaceValue(other);
Delete();
@ -438,9 +465,16 @@ void HInstruction::PrintTo(StringStream* stream) const {
void HInstruction::Unlink() {
ASSERT(IsLinked());
ASSERT(!IsControlInstruction()); // Must never move control instructions.
ASSERT(!IsBlockEntry()); // Doesn't make sense to delete these.
ASSERT(previous_ != NULL);
previous_->next_ = next_;
if (next_ == NULL) {
ASSERT(block()->last() == this);
block()->set_last(previous_);
} else {
next_->previous_ = previous_;
}
clear_block();
if (previous_ != NULL) previous_->next_ = next_;
if (next_ != NULL) next_->previous_ = previous_;
}
@ -527,26 +561,64 @@ void HInstruction::Verify() {
#endif
HCall::HCall(int count) : arguments_(Zone::NewArray<HValue*>(count), count) {
for (int i = 0; i < count; ++i) arguments_[i] = NULL;
set_representation(Representation::Tagged());
SetAllSideEffects();
void HCall::PrintDataTo(StringStream* stream) const {
stream->Add("#%d", argument_count());
}
void HCall::PrintDataTo(StringStream* stream) const {
stream->Add("(");
for (int i = 0; i < arguments_.length(); ++i) {
if (i != 0) stream->Add(", ");
arguments_.at(i)->PrintNameTo(stream);
void HUnaryCall::PrintDataTo(StringStream* stream) const {
value()->PrintNameTo(stream);
stream->Add(" ");
HCall::PrintDataTo(stream);
}
void HBinaryCall::PrintDataTo(StringStream* stream) const {
first()->PrintNameTo(stream);
stream->Add(" ");
second()->PrintNameTo(stream);
stream->Add(" ");
HCall::PrintDataTo(stream);
}
void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
if (IsApplyFunction()) {
stream->Add("optimized apply ");
} else {
stream->Add("%o ", function()->shared()->DebugName());
}
stream->Add(")");
HCall::PrintDataTo(stream);
}
void HCallNamed::PrintDataTo(StringStream* stream) const {
stream->Add("%o ", *name());
HUnaryCall::PrintDataTo(stream);
}
void HCallGlobal::PrintDataTo(StringStream* stream) const {
stream->Add("%o ", *name());
HUnaryCall::PrintDataTo(stream);
}
void HCallKnownGlobal::PrintDataTo(StringStream* stream) const {
stream->Add("o ", target()->shared()->DebugName());
HCall::PrintDataTo(stream);
}
void HCallRuntime::PrintDataTo(StringStream* stream) const {
stream->Add("%o ", *name());
HCall::PrintDataTo(stream);
}
void HClassOfTest::PrintDataTo(StringStream* stream) const {
stream->Add("class_of_test(");
value()->PrintTo(stream);
value()->PrintNameTo(stream);
stream->Add(", \"%o\")", *class_name());
}
@ -560,22 +632,6 @@ void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
}
void HCall::SetArgumentAt(int index, HPushArgument* push_argument) {
push_argument->set_argument_index(index);
SetOperandAt(index, push_argument);
}
void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
if (IsApplyFunction()) {
stream->Add("SPECIAL function: apply");
} else {
stream->Add("%s", *(function()->shared()->DebugName()->ToCString()));
}
HCall::PrintDataTo(stream);
}
void HControlInstruction::PrintDataTo(StringStream* stream) const {
if (FirstSuccessor() != NULL) {
int first_id = FirstSuccessor()->block_id();
@ -663,14 +719,6 @@ void HTypeofIs::PrintDataTo(StringStream* stream) const {
}
void HPushArgument::PrintDataTo(StringStream* stream) const {
HUnaryOperation::PrintDataTo(stream);
if (argument_index() != -1) {
stream->Add(" [%d]", argument_index_);
}
}
void HChange::PrintDataTo(StringStream* stream) const {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic());
@ -699,42 +747,19 @@ void HCheckFunction::PrintDataTo(StringStream* stream) const {
}
void HCallKeyed::PrintDataTo(StringStream* stream) const {
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("](");
for (int i = 1; i < arguments_.length(); ++i) {
if (i != 1) stream->Add(", ");
arguments_.at(i)->PrintNameTo(stream);
}
stream->Add(")");
}
void HCallNamed::PrintDataTo(StringStream* stream) const {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s ", *name_string);
HCall::PrintDataTo(stream);
}
void HCallGlobal::PrintDataTo(StringStream* stream) const {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s ", *name_string);
HCall::PrintDataTo(stream);
void HCallStub::PrintDataTo(StringStream* stream) const {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
HUnaryCall::PrintDataTo(stream);
}
void HCallRuntime::PrintDataTo(StringStream* stream) const {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s ", *name_string);
HCall::PrintDataTo(stream);
}
void HCallStub::PrintDataTo(StringStream* stream) const {
stream->Add("%s(%d)",
CodeStub::MajorName(major_key_, false),
argument_count_);
void HInstanceOf::PrintDataTo(StringStream* stream) const {
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
stream->Add(" ");
context()->PrintNameTo(stream);
}
@ -900,17 +925,6 @@ void HPhi::AddInput(HValue* value) {
}
bool HPhi::HasReceiverOperand() {
for (int i = 0; i < OperandCount(); i++) {
if (OperandAt(i)->IsParameter() &&
HParameter::cast(OperandAt(i))->index() == 0) {
return true;
}
}
return false;
}
HValue* HPhi::GetRedundantReplacement() const {
HValue* candidate = NULL;
int count = OperandCount();
@ -1153,6 +1167,14 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) const {
}
void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const {
external_pointer()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
}
void HStoreNamed::PrintDataTo(StringStream* stream) const {
object()->PrintNameTo(stream);
stream->Add(".");

441
deps/v8/src/hydrogen-instructions.h

@ -48,6 +48,7 @@ class LChunkBuilder;
#define HYDROGEN_ALL_INSTRUCTION_LIST(V) \
V(ArithmeticBinaryOperation) \
V(BinaryCall) \
V(BinaryOperation) \
V(BitwiseBinaryOperation) \
V(Call) \
@ -58,6 +59,7 @@ class LChunkBuilder;
V(Phi) \
V(StoreKeyed) \
V(StoreNamed) \
V(UnaryCall) \
V(UnaryControlInstruction) \
V(UnaryOperation) \
HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
@ -105,6 +107,7 @@ class LChunkBuilder;
V(EnterInlined) \
V(FixedArrayLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
@ -113,6 +116,7 @@ class LChunkBuilder;
V(IsNull) \
V(IsObject) \
V(IsSmi) \
V(IsConstructCall) \
V(HasInstanceType) \
V(HasCachedArrayIndex) \
V(JSArrayLength) \
@ -126,12 +130,15 @@ class LChunkBuilder;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadPixelArrayElement) \
V(LoadPixelArrayExternalPointer) \
V(Mod) \
V(Mul) \
V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@ -163,6 +170,7 @@ class LChunkBuilder;
V(InobjectFields) \
V(BackingStoreFields) \
V(ArrayElements) \
V(PixelArrayElements) \
V(GlobalVars) \
V(Maps) \
V(ArrayLengths) \
@ -288,6 +296,7 @@ class Representation {
kTagged,
kDouble,
kInteger32,
kExternal,
kNumRepresentations
};
@ -297,6 +306,7 @@ class Representation {
static Representation Tagged() { return Representation(kTagged); }
static Representation Integer32() { return Representation(kInteger32); }
static Representation Double() { return Representation(kDouble); }
static Representation External() { return Representation(kExternal); }
bool Equals(const Representation& other) const {
return kind_ == other.kind_;
@ -307,6 +317,7 @@ class Representation {
bool IsTagged() const { return kind_ == kTagged; }
bool IsInteger32() const { return kind_ == kInteger32; }
bool IsDouble() const { return kind_ == kDouble; }
bool IsExternal() const { return kind_ == kExternal; }
bool IsSpecialization() const {
return kind_ == kInteger32 || kind_ == kDouble;
}
@ -601,9 +612,6 @@ class HValue: public ZoneObject {
virtual HType CalculateInferredType() const;
// Helper for type conversions used by normal and phi instructions.
void InsertInputConversion(HInstruction* previous, int index, HType type);
#ifdef DEBUG
virtual void Verify() = 0;
#endif
@ -1040,27 +1048,15 @@ class HLeaveInlined: public HInstruction {
class HPushArgument: public HUnaryOperation {
public:
explicit HPushArgument(HValue* value)
: HUnaryOperation(value), argument_index_(-1) {
set_representation(Representation::Tagged());
}
explicit HPushArgument(HValue* value) : HUnaryOperation(value) { }
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream) const;
HValue* argument() const { return OperandAt(0); }
int argument_index() const { return argument_index_; }
void set_argument_index(int index) {
ASSERT(argument_index_ == -1 || index == argument_index_);
argument_index_ = index;
}
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
private:
int argument_index_;
};
@ -1123,36 +1119,80 @@ class HGlobalReceiver: public HUnaryOperation {
class HCall: public HInstruction {
public:
// Construct a call with uninitialized arguments. The argument count
// includes the receiver.
explicit HCall(int count);
// The argument count includes the receiver.
explicit HCall(int argument_count) : argument_count_(argument_count) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
virtual HType CalculateInferredType() const { return HType::Tagged(); }
// TODO(3190496): This needs a cleanup. We don't want the arguments
// to be operands of the call instruction. This results in bad code quality.
virtual int argument_count() const { return arguments_.length(); }
virtual int OperandCount() const { return argument_count(); }
virtual HValue* OperandAt(int index) const { return arguments_[index]; }
virtual HPushArgument* PushArgumentAt(int index) const {
return HPushArgument::cast(OperandAt(index));
virtual int argument_count() const { return argument_count_; }
virtual void PrintDataTo(StringStream* stream) const;
DECLARE_INSTRUCTION(Call)
private:
int argument_count_;
};
class HUnaryCall: public HCall {
public:
HUnaryCall(HValue* value, int argument_count)
: HCall(argument_count), value_(NULL) {
SetOperandAt(0, value);
}
virtual void PrintDataTo(StringStream* stream) const;
HValue* value() const { return value_; }
virtual int OperandCount() const { return 1; }
virtual HValue* OperandAt(int index) const {
ASSERT(index == 0);
return value_;
}
virtual HValue* ArgumentAt(int index) const {
return PushArgumentAt(index)->argument();
DECLARE_INSTRUCTION(UnaryCall)
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
ASSERT(index == 0);
value_ = value;
}
private:
HValue* value_;
};
class HBinaryCall: public HCall {
public:
HBinaryCall(HValue* first, HValue* second, int argument_count)
: HCall(argument_count) {
SetOperandAt(0, first);
SetOperandAt(1, second);
}
virtual void SetArgumentAt(int index, HPushArgument* push_argument);
virtual void PrintDataTo(StringStream* stream) const;
DECLARE_INSTRUCTION(Call)
HValue* first() const { return operands_[0]; }
HValue* second() const { return operands_[1]; }
virtual int OperandCount() const { return 2; }
virtual HValue* OperandAt(int index) const { return operands_[index]; }
DECLARE_INSTRUCTION(BinaryCall)
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
arguments_[index] = value;
operands_[index] = value;
}
int argument_count_;
Vector<HValue*> arguments_;
private:
HOperandVector<2> operands_;
};
@ -1162,6 +1202,7 @@ class HCallConstantFunction: public HCall {
: HCall(argument_count), function_(function) { }
Handle<JSFunction> function() const { return function_; }
bool IsApplyFunction() const {
return function_->code() == Builtins::builtin(Builtins::FunctionApply);
}
@ -1175,42 +1216,32 @@ class HCallConstantFunction: public HCall {
};
class HCallKeyed: public HCall {
class HCallKeyed: public HBinaryCall {
public:
HCallKeyed(HValue* key, int argument_count)
: HCall(argument_count + 1) {
SetOperandAt(0, key);
HCallKeyed(HValue* context, HValue* key, int argument_count)
: HBinaryCall(context, key, argument_count) {
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
// TODO(3190496): This is a hack to get an additional operand that
// is not an argument to work with the current setup. This _needs_ a cleanup.
// (see HCall)
virtual void PrintDataTo(StringStream* stream) const;
HValue* key() const { return OperandAt(0); }
virtual int argument_count() const { return arguments_.length() - 1; }
virtual int OperandCount() const { return arguments_.length(); }
virtual HValue* OperandAt(int index) const { return arguments_[index]; }
virtual HPushArgument* PushArgumentAt(int index) const {
return HPushArgument::cast(OperandAt(index + 1));
}
virtual void SetArgumentAt(int index, HPushArgument* push_argument) {
HCall::SetArgumentAt(index + 1, push_argument);
}
HValue* context() const { return first(); }
HValue* key() const { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
};
class HCallNamed: public HCall {
class HCallNamed: public HUnaryCall {
public:
HCallNamed(Handle<String> name, int argument_count)
: HCall(argument_count), name_(name) { }
HCallNamed(HValue* context, Handle<String> name, int argument_count)
: HUnaryCall(context, argument_count), name_(name) {
}
virtual void PrintDataTo(StringStream* stream) const;
HValue* context() const { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
@ -1220,21 +1251,27 @@ class HCallNamed: public HCall {
};
class HCallFunction: public HCall {
class HCallFunction: public HUnaryCall {
public:
explicit HCallFunction(int argument_count) : HCall(argument_count) { }
HCallFunction(HValue* context, int argument_count)
: HUnaryCall(context, argument_count) {
}
HValue* context() const { return value(); }
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
};
class HCallGlobal: public HCall {
class HCallGlobal: public HUnaryCall {
public:
HCallGlobal(Handle<String> name, int argument_count)
: HCall(argument_count), name_(name) { }
HCallGlobal(HValue* context, Handle<String> name, int argument_count)
: HUnaryCall(context, argument_count), name_(name) {
}
virtual void PrintDataTo(StringStream* stream) const;
HValue* context() const { return value(); }
Handle<String> name() const { return name_; }
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
@ -1246,10 +1283,11 @@ class HCallGlobal: public HCall {
class HCallKnownGlobal: public HCall {
public:
HCallKnownGlobal(Handle<JSFunction> target,
int argument_count)
HCallKnownGlobal(Handle<JSFunction> target, int argument_count)
: HCall(argument_count), target_(target) { }
virtual void PrintDataTo(StringStream* stream) const;
Handle<JSFunction> target() const { return target_; }
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
@ -1259,15 +1297,18 @@ class HCallKnownGlobal: public HCall {
};
class HCallNew: public HCall {
class HCallNew: public HBinaryCall {
public:
explicit HCallNew(int argument_count) : HCall(argument_count) { }
HCallNew(HValue* context, HValue* constructor, int argument_count)
: HBinaryCall(context, constructor, argument_count) {
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
HValue* constructor() const { return ArgumentAt(0); }
HValue* context() const { return first(); }
HValue* constructor() const { return second(); }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
};
@ -1333,6 +1374,27 @@ class HFixedArrayLength: public HUnaryOperation {
};
class HPixelArrayLength: public HUnaryOperation {
public:
explicit HPixelArrayLength(HValue* value) : HUnaryOperation(value) {
set_representation(Representation::Integer32());
// The result of this instruction is idempotent as long as its inputs don't
// change. The length of a pixel array cannot change once set, so it's not
// necessary to introduce a kDependsOnArrayLengths or any other dependency.
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel_array_length")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
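// Why kUseGVN plus an unconditional DataEquals() is enough here, in
// sketch form: two HPixelArrayLength instructions whose elements input
// is the same value are congruent, so global value numbering may fold
// them.
//
//   t1 = HPixelArrayLength(elements)
//   ...
//   t2 = HPixelArrayLength(elements)   // GVN redirects uses of t2 to t1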
class HBitNot: public HUnaryOperation {
public:
explicit HBitNot(HValue* value) : HUnaryOperation(value) {
@ -1451,6 +1513,30 @@ class HLoadElements: public HUnaryOperation {
};
class HLoadPixelArrayExternalPointer: public HUnaryOperation {
public:
explicit HLoadPixelArrayExternalPointer(HValue* value)
: HUnaryOperation(value) {
set_representation(Representation::External());
// The result of this instruction is idempotent as long as its inputs don't
// change. The external array of a pixel array elements object cannot
change once set, so it's not necessary to introduce any additional
// dependencies on top of the inputs.
SetFlag(kUseGVN);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
"load-pixel-array-external-pointer")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HCheckMap: public HUnaryOperation {
public:
HCheckMap(HValue* value, Handle<Map> map)
@ -1701,7 +1787,7 @@ class HPhi: public HValue {
HValue* GetRedundantReplacement() const;
void AddInput(HValue* value);
bool HasReceiverOperand();
bool IsReceiver() { return merged_index_ == 0; }
int merged_index() const { return merged_index_; }
@ -1860,7 +1946,6 @@ class HBinaryOperation: public HInstruction {
operands_[index] = value;
}
private:
HOperandVector<2> operands_;
};
@ -2072,7 +2157,11 @@ class HCompare: public HBinaryOperation {
}
void SetInputRepresentation(Representation r);
virtual bool EmitAtUses() const { return uses()->length() <= 1; }
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
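// The new !HasSideEffects() guard keeps the fusion sound: emitting the
// compare at its single use moves its evaluation to the branch site,
// which is only safe when the instruction itself has no side effects.
// Sketch of the shape being fused:
//
//   c = HCompare(a, b)            // pure, one use
//   Branch(c, if_true, if_false)  // compare materialized at the branch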
virtual Representation RequiredInputRepresentation(int index) const {
return input_representation_;
}
@ -2110,7 +2199,10 @@ class HCompareJSObjectEq: public HBinaryOperation {
SetFlag(kUseGVN);
}
virtual bool EmitAtUses() const { return uses()->length() <= 1; }
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@ -2129,7 +2221,11 @@ class HUnaryPredicate: public HUnaryOperation {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
virtual bool EmitAtUses() const { return uses()->length() <= 1; }
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
@ -2179,6 +2275,24 @@ class HIsSmi: public HUnaryPredicate {
};
class HIsConstructCall: public HInstruction {
public:
HIsConstructCall() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is_construct_call")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HHasInstanceType: public HUnaryPredicate {
public:
HHasInstanceType(HValue* value, InstanceType type)
@ -2218,6 +2332,17 @@ class HHasCachedArrayIndex: public HUnaryPredicate {
};
class HGetCachedArrayIndex: public HUnaryPredicate {
public:
explicit HGetCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get_cached_array_index")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HClassOfTest: public HUnaryPredicate {
public:
HClassOfTest(HValue* value, Handle<String> class_name)
@ -2261,20 +2386,42 @@ class HTypeofIs: public HUnaryPredicate {
};
class HInstanceOf: public HBinaryOperation {
class HInstanceOf: public HInstruction {
public:
HInstanceOf(HValue* left, HValue* right) : HBinaryOperation(left, right) {
HInstanceOf(HValue* context, HValue* left, HValue* right) {
SetOperandAt(0, context);
SetOperandAt(1, left);
SetOperandAt(2, right);
set_representation(Representation::Tagged());
SetAllSideEffects();
}
virtual bool EmitAtUses() const { return uses()->length() <= 1; }
HValue* context() const { return operands_[0]; }
HValue* left() const { return operands_[1]; }
HValue* right() const { return operands_[2]; }
virtual bool EmitAtUses() const {
return !HasSideEffects() && (uses()->length() <= 1);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::Tagged();
}
virtual void PrintDataTo(StringStream* stream) const;
virtual int OperandCount() const { return 3; }
virtual HValue* OperandAt(int index) const { return operands_[index]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
operands_[index] = value;
}
private:
HOperandVector<3> operands_;
};
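// Note the interaction with the override above: the constructor calls
// SetAllSideEffects(), so EmitAtUses() is now always false for the
// generic HInstanceOf -- it must be emitted at its definition point
// instead of being fused into a branch. The same !HasSideEffects()
// guard was added to the other EmitAtUses() overrides in this change.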
@ -2543,18 +2690,17 @@ class HParameter: public HInstruction {
};
class HCallStub: public HInstruction {
class HCallStub: public HUnaryCall {
public:
HCallStub(CodeStub::Major major_key, int argument_count)
: major_key_(major_key),
argument_count_(argument_count),
HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
: HUnaryCall(context, argument_count),
major_key_(major_key),
transcendental_type_(TranscendentalCache::kNumberOfCaches) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
CodeStub::Major major_key() { return major_key_; }
int argument_count() { return argument_count_; }
HValue* context() const { return value(); }
void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
transcendental_type_ = transcendental_type;
@ -2562,13 +2708,13 @@ class HCallStub: public HInstruction {
TranscendentalCache::Type transcendental_type() {
return transcendental_type_;
}
virtual void PrintDataTo(StringStream* stream) const;
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
private:
CodeStub::Major major_key_;
int argument_count_;
TranscendentalCache::Type transcendental_type_;
};
@ -2746,15 +2892,16 @@ class HLoadNamedField: public HUnaryOperation {
};
class HLoadNamedGeneric: public HUnaryOperation {
class HLoadNamedGeneric: public HBinaryOperation {
public:
HLoadNamedGeneric(HValue* object, Handle<Object> name)
: HUnaryOperation(object), name_(name) {
HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
: HBinaryOperation(context, object), name_(name) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
HValue* object() const { return OperandAt(0); }
HValue* context() const { return OperandAt(0); }
HValue* object() const { return OperandAt(1); }
Handle<Object> name() const { return name_; }
virtual Representation RequiredInputRepresentation(int index) const {
@ -2829,19 +2976,67 @@ class HLoadKeyedFastElement: public HLoadKeyed {
};
class HLoadPixelArrayElement: public HBinaryOperation {
public:
HLoadPixelArrayElement(HValue* external_elements, HValue* key)
: HBinaryOperation(external_elements, key) {
set_representation(Representation::Integer32());
SetFlag(kDependsOnPixelArrayElements);
// Native code could change the pixel array.
SetFlag(kDependsOnCalls);
SetFlag(kUseGVN);
}
virtual void PrintDataTo(StringStream* stream) const;
virtual Representation RequiredInputRepresentation(int index) const {
// The key is supposed to be Integer32, but the base pointer
// for the element load is a naked pointer.
return (index == 1) ? Representation::Integer32()
: Representation::External();
}
HValue* external_pointer() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
"load_pixel_array_element")
protected:
virtual bool DataEquals(HValue* other) const { return true; }
};
class HLoadKeyedGeneric: public HLoadKeyed {
public:
HLoadKeyedGeneric(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
HLoadKeyedGeneric(HContext* context, HValue* obj, HValue* key)
: HLoadKeyed(obj, key), context_(NULL) {
SetOperandAt(2, context);
SetAllSideEffects();
}
HValue* context() const { return context_; }
HValue* object() const { return operands_[0]; }
HValue* key() const { return operands_[1]; }
virtual int OperandCount() const { return 3; }
virtual HValue* OperandAt(int index) const {
return (index < 2) ? operands_[index] : context_;
}
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
protected:
virtual void InternalSetOperandAt(int index, HValue* value);
private:
HValue* context_;
};
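// InternalSetOperandAt is declared above but defined elsewhere; a sketch
// consistent with the OperandAt() dispatch shown here (hypothetical, not
// part of this hunk) would be:
//
//   void HLoadKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
//     if (index < 2) {
//       operands_[index] = value;  // object, key (inherited storage)
//     } else {
//       context_ = value;          // trailing context operand
//     }
//   }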
class HStoreNamed: public HBinaryOperation {
public:
HStoreNamed(HValue* obj, Handle<Object> name, HValue* val)
HStoreNamed(HValue* obj, Handle<String> name, HValue* val)
: HBinaryOperation(obj, val), name_(name) {
}
@ -2852,21 +3047,21 @@ class HStoreNamed: public HBinaryOperation {
virtual void PrintDataTo(StringStream* stream) const;
HValue* object() const { return OperandAt(0); }
Handle<Object> name() const { return name_; }
Handle<String> name() const { return name_; }
HValue* value() const { return OperandAt(1); }
void set_value(HValue* value) { SetOperandAt(1, value); }
DECLARE_INSTRUCTION(StoreNamed)
private:
Handle<Object> name_;
Handle<String> name_;
};
class HStoreNamedField: public HStoreNamed {
public:
HStoreNamedField(HValue* obj,
Handle<Object> name,
Handle<String> name,
HValue* val,
bool in_object,
int offset)
@ -2905,12 +3100,32 @@ class HStoreNamedField: public HStoreNamed {
class HStoreNamedGeneric: public HStoreNamed {
public:
HStoreNamedGeneric(HValue* obj, Handle<Object> name, HValue* val)
: HStoreNamed(obj, name, val) {
HStoreNamedGeneric(HValue* context,
HValue* object,
Handle<String> name,
HValue* value)
: HStoreNamed(object, name, value), context_(NULL) {
SetOperandAt(2, context);
SetAllSideEffects();
}
HValue* context() const { return context_; }
HValue* object() const { return operands_[0]; }
HValue* value() const { return operands_[1]; }
virtual int OperandCount() const { return 3; }
virtual HValue* OperandAt(int index) const {
return (index < 2) ? operands_[index] : context_;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
protected:
virtual void InternalSetOperandAt(int index, HValue* value);
private:
HValue* context_;
};
@ -2945,7 +3160,6 @@ class HStoreKeyed: public HInstruction {
operands_[index] = value;
}
private:
HOperandVector<3> operands_;
};
@ -2970,12 +3184,33 @@ class HStoreKeyedFastElement: public HStoreKeyed {
class HStoreKeyedGeneric: public HStoreKeyed {
public:
HStoreKeyedGeneric(HValue* obj, HValue* key, HValue* val)
: HStoreKeyed(obj, key, val) {
HStoreKeyedGeneric(HValue* context,
HValue* object,
HValue* key,
HValue* value)
: HStoreKeyed(object, key, value), context_(NULL) {
SetOperandAt(3, context);
SetAllSideEffects();
}
HValue* context() const { return context_; }
HValue* object() const { return operands_[0]; }
HValue* key() const { return operands_[1]; }
HValue* value() const { return operands_[2]; }
virtual int OperandCount() const { return 4; }
virtual HValue* OperandAt(int index) const {
return (index < 3) ? operands_[index] : context_;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
protected:
virtual void InternalSetOperandAt(int index, HValue* value);
private:
HValue* context_;
};
@ -3077,22 +3312,36 @@ class HArrayLiteral: public HMaterializedLiteral {
class HObjectLiteral: public HMaterializedLiteral {
public:
HObjectLiteral(Handle<FixedArray> constant_properties,
HObjectLiteral(HValue* context,
Handle<FixedArray> constant_properties,
bool fast_elements,
int literal_index,
int depth)
: HMaterializedLiteral(literal_index, depth),
context_(NULL),
constant_properties_(constant_properties),
fast_elements_(fast_elements) {}
fast_elements_(fast_elements) {
SetOperandAt(0, context);
}
HValue* context() const { return context_; }
Handle<FixedArray> constant_properties() const {
return constant_properties_;
}
bool fast_elements() const { return fast_elements_; }
virtual int OperandCount() const { return 1; }
virtual HValue* OperandAt(int index) const { return context_; }
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
protected:
virtual void InternalSetOperandAt(int index, HValue* value) {
context_ = value;
}
private:
HValue* context_;
Handle<FixedArray> constant_properties_;
bool fast_elements_;
};

462
deps/v8/src/hydrogen.cc

@ -65,6 +65,7 @@ HBasicBlock::HBasicBlock(HGraph* graph)
first_instruction_index_(-1),
last_instruction_index_(-1),
deleted_phis_(4),
parent_loop_header_(NULL),
is_inline_return_target_(false) {
}
@ -105,18 +106,10 @@ void HBasicBlock::AddInstruction(HInstruction* instr) {
if (first_ == NULL) {
HBlockEntry* entry = new HBlockEntry();
entry->InitializeAsFirst(this);
first_ = entry;
first_ = last_ = entry;
}
instr->InsertAfter(GetLastInstruction());
}
HInstruction* HBasicBlock::GetLastInstruction() {
if (end_ != NULL) return end_->previous();
if (first_ == NULL) return NULL;
if (last_ == NULL) last_ = first_;
while (last_->next() != NULL) last_ = last_->next();
return last_;
instr->InsertAfter(last_);
last_ = instr;
}
@ -177,7 +170,7 @@ void HBasicBlock::SetJoinId(int id) {
for (int i = 0; i < length; i++) {
HBasicBlock* predecessor = predecessors_[i];
ASSERT(predecessor->end()->IsGoto());
HSimulate* simulate = HSimulate::cast(predecessor->GetLastInstruction());
HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
// We only need to verify the ID once.
ASSERT(i != 0 ||
predecessor->last_environment()->closure()->shared()
@ -293,20 +286,6 @@ void HBasicBlock::Verify() {
// Check that every block is finished.
ASSERT(IsFinished());
ASSERT(block_id() >= 0);
// Verify that all blocks targeting a branch target have the same boolean
// value on top of their expression stack.
if (!cond().is_null()) {
ASSERT(predecessors()->length() > 0);
for (int i = 1; i < predecessors()->length(); i++) {
HBasicBlock* pred = predecessors()->at(i);
HValue* top = pred->last_environment()->Top();
ASSERT(top->IsConstant());
Object* a = *HConstant::cast(top)->handle();
Object* b = *cond();
ASSERT(a == b);
}
}
}
#endif
@ -870,13 +849,11 @@ void HGraph::EliminateRedundantPhis() {
}
uses_to_replace.Rewind(0);
block->RemovePhi(phi);
} else if (phi->HasNoUses() &&
!phi->HasReceiverOperand() &&
FLAG_eliminate_dead_phis) {
// We can't eliminate phis that have the receiver as an operand
// because in case of throwing an error we need the correct
// receiver value in the environment to construct a correct
// stack trace.
} else if (FLAG_eliminate_dead_phis && phi->HasNoUses() &&
!phi->IsReceiver()) {
// We can't eliminate phis in the receiver position in the environment
// because in case of throwing an error we need this value to
// construct a stack trace.
block->RemovePhi(phi);
block->RecordDeletedPhi(phi->merged_index());
}
@ -1815,17 +1792,15 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
bool is_truncating) {
// Insert the representation change right before its use. For phi-uses we
// insert at the end of the corresponding predecessor.
HBasicBlock* insert_block = use->block();
HInstruction* next = NULL;
if (use->IsPhi()) {
int index = 0;
while (use->OperandAt(index) != value) ++index;
insert_block = insert_block->predecessors()->at(index);
next = use->block()->predecessors()->at(index)->end();
} else {
next = HInstruction::cast(use);
}
HInstruction* next = (insert_block == use->block())
? HInstruction::cast(use)
: insert_block->end();
// For constants we try to make the representation change at compile
// time. When a representation change is not possible without loss of
// information we treat constants like normal instructions and insert the
@ -2197,10 +2172,8 @@ void HGraphBuilder::VisitForControl(Expression* expr,
}
HValue* HGraphBuilder::VisitArgument(Expression* expr) {
void HGraphBuilder::VisitArgument(Expression* expr) {
VisitForValue(expr);
if (HasStackOverflow() || !subgraph()->HasExit()) return NULL;
return environment()->Top();
}
@ -2319,29 +2292,15 @@ void HGraphBuilder::PushAndAdd(HInstruction* instr) {
}
void HGraphBuilder::PushArgumentsForStubCall(int argument_count) {
const int kMaxStubArguments = 4;
ASSERT_GE(kMaxStubArguments, argument_count);
// Push the arguments on the stack.
HValue* arguments[kMaxStubArguments];
for (int i = argument_count - 1; i >= 0; i--) {
arguments[i] = Pop();
}
for (int i = 0; i < argument_count; i++) {
AddInstruction(new HPushArgument(arguments[i]));
}
}
void HGraphBuilder::ProcessCall(HCall* call) {
for (int i = call->argument_count() - 1; i >= 0; --i) {
HValue* value = Pop();
HPushArgument* push = new HPushArgument(value);
call->SetArgumentAt(i, push);
void HGraphBuilder::PreProcessCall(HCall* call) {
int count = call->argument_count();
ZoneList<HValue*> arguments(count);
for (int i = 0; i < count; ++i) {
arguments.Add(Pop());
}
for (int i = 0; i < call->argument_count(); ++i) {
AddInstruction(call->PushArgumentAt(i));
while (!arguments.is_empty()) {
AddInstruction(new HPushArgument(arguments.RemoveLast()));
}
}
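// Worked example (sketch): if the expression stack holds [..., a1, a2, a3]
// with a3 on top, the first loop pops a3, a2, a1 into `arguments` in that
// order; RemoveLast() then yields a1, a2, a3, so the HPushArgument
// instructions are emitted in the original left-to-right call order.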
@ -2952,6 +2911,9 @@ void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
if (is_store && lookup->IsReadOnly()) {
BAILOUT("read-only global variable");
}
if (lookup->holder() != *global) {
BAILOUT("global property on prototype of global object");
}
}
@ -3021,7 +2983,10 @@ void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
HObjectLiteral* literal = (new HObjectLiteral(expr->constant_properties(),
HContext* context = new HContext;
AddInstruction(context);
HObjectLiteral* literal = (new HObjectLiteral(context,
expr->constant_properties(),
expr->fast_elements(),
expr->literal_index(),
expr->depth()));
@ -3048,7 +3013,9 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
VISIT_FOR_VALUE(value);
HValue* value = Pop();
Handle<String> name = Handle<String>::cast(key->handle());
AddInstruction(new HStoreNamedGeneric(literal, name, value));
HStoreNamedGeneric* store =
new HStoreNamedGeneric(context, literal, name, value);
AddInstruction(store);
AddSimulate(key->id());
} else {
VISIT_FOR_EFFECT(value);
@ -3111,53 +3078,47 @@ void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
}
HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps,
ZoneList<HSubgraph*>* subgraphs,
HValue* receiver,
HBasicBlock* HGraphBuilder::BuildTypeSwitch(HValue* receiver,
ZoneMapList* maps,
ZoneList<HSubgraph*>* body_graphs,
HSubgraph* default_graph,
int join_id) {
ASSERT(subgraphs->length() == (maps->length() + 1));
// Build map compare subgraphs for all but the first map.
ZoneList<HSubgraph*> map_compare_subgraphs(maps->length() - 1);
for (int i = maps->length() - 1; i > 0; --i) {
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HSubgraph* else_subgraph =
(i == (maps->length() - 1))
? subgraphs->last()
: map_compare_subgraphs.last();
HCompareMap* compare = new HCompareMap(receiver,
maps->at(i),
subgraphs->at(i)->entry_block(),
else_subgraph->entry_block());
current_subgraph_->exit_block()->Finish(compare);
map_compare_subgraphs.Add(subgraph);
}
// Generate first map check to end the current block.
ASSERT(maps->length() == body_graphs->length());
HBasicBlock* join_block = graph()->CreateBasicBlock();
AddInstruction(new HCheckNonSmi(receiver));
HSubgraph* else_subgraph =
(maps->length() == 1) ? subgraphs->at(1) : map_compare_subgraphs.last();
HCompareMap* compare = new HCompareMap(receiver,
Handle<Map>(maps->first()),
subgraphs->first()->entry_block(),
else_subgraph->entry_block());
current_subgraph_->exit_block()->Finish(compare);
// Join all the call subgraphs in a new basic block and make
// this basic block the current basic block.
HBasicBlock* join_block = graph_->CreateBasicBlock();
for (int i = 0; i < subgraphs->length(); ++i) {
HSubgraph* subgraph = subgraphs->at(i);
if (subgraph->HasExit()) {
for (int i = 0; i < maps->length(); ++i) {
// Build the branches, connect all the target subgraphs to the join
// block. Use the default as a target of the last branch.
HSubgraph* if_true = body_graphs->at(i);
HSubgraph* if_false = (i == maps->length() - 1)
? default_graph
: CreateBranchSubgraph(environment());
HCompareMap* compare =
new HCompareMap(receiver,
maps->at(i),
if_true->entry_block(),
if_false->entry_block());
subgraph()->exit_block()->Finish(compare);
if (if_true->HasExit()) {
// In an effect context the value of the type switch is not needed.
// There is no need to merge it at the join block only to discard it.
HBasicBlock* subgraph_exit = subgraph->exit_block();
if (ast_context()->IsEffect()) {
subgraph_exit->last_environment()->Drop(1);
if_true->exit_block()->last_environment()->Drop(1);
}
subgraph_exit->Goto(join_block);
if_true->exit_block()->Goto(join_block);
}
subgraph()->set_exit_block(if_false->exit_block());
}
// Connect the default if necessary.
if (subgraph()->HasExit()) {
if (ast_context()->IsEffect()) {
environment()->Drop(1);
}
subgraph()->exit_block()->Goto(join_block);
}
if (join_block->predecessors()->is_empty()) return NULL;
@ -3228,7 +3189,9 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value) {
return new HStoreNamedGeneric(object, name, value);
HContext* context = new HContext;
AddInstruction(context);
return new HStoreNamedGeneric(context, object, name, value);
}
@ -3261,7 +3224,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
Handle<String> name) {
int number_of_types = Min(types->length(), kMaxStorePolymorphism);
ZoneMapList maps(number_of_types);
ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
ZoneList<HSubgraph*> subgraphs(number_of_types);
bool needs_generic = (types->length() > kMaxStorePolymorphism);
// Build subgraphs for each of the specific maps.
@ -3273,7 +3236,6 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
Handle<Map> map = types->at(i);
LookupResult lookup;
if (ComputeStoredField(map, name, &lookup)) {
maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HInstruction* instr =
@ -3281,6 +3243,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@ -3290,7 +3253,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
// If none of the properties were named fields we generate a
// generic store.
if (maps.length() == 0) {
HInstruction* instr = new HStoreNamedGeneric(object, name, value);
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
@ -3298,22 +3261,20 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
ast_context()->ReturnValue(Pop());
} else {
// Build subgraph for generic store through IC.
{
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HSubgraph* default_graph = CreateBranchSubgraph(environment());
{ SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
subgraph->FinishExit(new HDeoptimize());
default_graph->FinishExit(new HDeoptimize());
} else {
HInstruction* instr = new HStoreNamedGeneric(object, name, value);
HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
}
subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments so there's no need to handle it here.
@ -3354,7 +3315,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
return;
} else {
instr = new HStoreNamedGeneric(object, name, value);
instr = BuildStoreNamedGeneric(object, name, value);
}
} else {
@ -3414,10 +3375,6 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
BinaryOperation* operation = expr->binary_operation();
if (var != NULL) {
if (!var->is_global() && !var->IsStackAllocated()) {
BAILOUT("non-stack/non-global in compound assignment");
}
VISIT_FOR_VALUE(operation);
if (var->is_global()) {
@ -3425,8 +3382,16 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Top(),
expr->position(),
expr->AssignmentId());
} else {
} else if (var->IsStackAllocated()) {
Bind(var, Top());
} else if (var->IsContextSlot()) {
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
} else {
BAILOUT("compound assignment to lookup slot");
}
ast_context()->ReturnValue(Pop());
@ -3474,7 +3439,6 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
bool is_fast_elements = prop->IsMonomorphic() &&
prop->GetMonomorphicReceiverType()->has_fast_elements();
HInstruction* load = is_fast_elements
? BuildLoadKeyedFastElement(obj, key, prop)
: BuildLoadKeyedGeneric(obj, key);
@ -3589,7 +3553,7 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
Handle<String> name) {
int number_of_types = Min(types->length(), kMaxLoadPolymorphism);
ZoneMapList maps(number_of_types);
ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
ZoneList<HSubgraph*> subgraphs(number_of_types);
bool needs_generic = (types->length() > kMaxLoadPolymorphism);
// Build subgraphs for each of the specific maps.
@ -3602,7 +3566,6 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
LookupResult lookup;
map->LookupInDescriptors(NULL, *name, &lookup);
if (lookup.IsProperty() && lookup.type() == FIELD) {
maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HLoadNamedField* instr =
@ -3610,6 +3573,7 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
instr->set_position(expr->position());
instr->ClearFlag(HValue::kUseGVN); // Don't do GVN on polymorphic loads.
PushAndAdd(instr);
maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@ -3624,21 +3588,19 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
ast_context()->ReturnInstruction(instr, expr->id());
} else {
// Build subgraph for generic load through IC.
{
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HSubgraph* default_graph = CreateBranchSubgraph(environment());
{ SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
subgraph->FinishExit(new HDeoptimize());
default_graph->FinishExit(new HDeoptimize());
} else {
HInstruction* instr = BuildLoadNamedGeneric(object, expr);
instr->set_position(expr->position());
PushAndAdd(instr);
}
subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
BuildTypeSwitch(object, &maps, &subgraphs, default_graph, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments so there's no need to handle it here.
@ -3677,7 +3639,9 @@ HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
Property* expr) {
ASSERT(expr->key()->IsPropertyName());
Handle<Object> name = expr->key()->AsLiteral()->handle();
return new HLoadNamedGeneric(obj, name);
HContext* context = new HContext;
AddInstruction(context);
return new HLoadNamedGeneric(context, obj, name);
}
@ -3706,7 +3670,9 @@ HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
HValue* key) {
return new HLoadKeyedGeneric(object, key);
HContext* context = new HContext;
AddInstruction(context);
return new HLoadKeyedGeneric(context, object, key);
}
@ -3734,10 +3700,34 @@ HInstruction* HGraphBuilder::BuildLoadKeyedFastElement(HValue* object,
}
HInstruction* HGraphBuilder::BuildLoadKeyedPixelArrayElement(HValue* object,
HValue* key,
Property* expr) {
ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
AddInstruction(new HCheckNonSmi(object));
Handle<Map> map = expr->GetMonomorphicReceiverType();
ASSERT(!map->has_fast_elements());
ASSERT(map->has_pixel_array_elements());
AddInstruction(new HCheckMap(object, map));
HLoadElements* elements = new HLoadElements(object);
AddInstruction(elements);
HInstruction* length = AddInstruction(new HPixelArrayLength(elements));
AddInstruction(new HBoundsCheck(key, length));
HLoadPixelArrayExternalPointer* external_elements =
new HLoadPixelArrayExternalPointer(elements);
AddInstruction(external_elements);
HLoadPixelArrayElement* pixel_array_value =
new HLoadPixelArrayElement(external_elements, key);
return pixel_array_value;
}
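// Reading the sequence above in order: non-smi and map checks on the
// receiver, a load of its elements object, a pixel-array length read
// feeding a bounds check on the key, then the external-pointer load and
// finally the element load itself. Only HLoadPixelArrayElement is
// returned; the caller adds it to the graph and sets its source position.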
HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
HValue* key,
HValue* value) {
return new HStoreKeyedGeneric(object, key, value);
HContext* context = new HContext;
AddInstruction(context);
return new HStoreKeyedGeneric(context, object, key, value);
}
@ -3841,12 +3831,20 @@ void HGraphBuilder::VisitProperty(Property* expr) {
HValue* key = Pop();
HValue* obj = Pop();
bool is_fast_elements = expr->IsMonomorphic() &&
expr->GetMonomorphicReceiverType()->has_fast_elements();
instr = is_fast_elements
? BuildLoadKeyedFastElement(obj, key, expr)
: BuildLoadKeyedGeneric(obj, key);
if (expr->IsMonomorphic()) {
Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
// An object has either fast elements or pixel array elements, but never
// both. Pixel array maps that are assigned to pixel array elements are
// always created with the fast elements flag cleared.
if (receiver_type->has_pixel_array_elements()) {
instr = BuildLoadKeyedPixelArrayElement(obj, key, expr);
} else if (receiver_type->has_fast_elements()) {
instr = BuildLoadKeyedFastElement(obj, key, expr);
}
}
if (instr == NULL) {
instr = BuildLoadKeyedGeneric(obj, key);
}
}
instr->set_position(expr->position());
ast_context()->ReturnInstruction(instr, expr->id());
@ -3879,7 +3877,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
int number_of_types = Min(types->length(), kMaxCallPolymorphism);
ZoneMapList maps(number_of_types);
ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
ZoneList<HSubgraph*> subgraphs(number_of_types);
bool needs_generic = (types->length() > kMaxCallPolymorphism);
// Build subgraphs for each of the specific maps.
@ -3890,7 +3888,6 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
for (int i = 0; i < number_of_types; ++i) {
Handle<Map> map = types->at(i);
if (expr->ComputeTarget(map, name)) {
maps.Add(map);
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
AddCheckConstantFunction(expr, receiver, map, false);
@ -3904,9 +3901,10 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
CHECK_BAILOUT;
HCall* call = new HCallConstantFunction(expr->target(), argument_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
PushAndAdd(call);
}
maps.Add(map);
subgraphs.Add(subgraph);
} else {
needs_generic = true;
@ -3916,28 +3914,30 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
// If we couldn't compute the target for any of the maps just perform an
// IC call.
if (maps.length() == 0) {
HCall* call = new HCallNamed(name, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCall* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
} else {
// Build subgraph for generic call through IC.
{
HSubgraph* subgraph = CreateBranchSubgraph(environment());
SubgraphScope scope(this, subgraph);
HSubgraph* default_graph = CreateBranchSubgraph(environment());
{ SubgraphScope scope(this, default_graph);
if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
subgraph->FinishExit(new HDeoptimize());
default_graph->FinishExit(new HDeoptimize());
} else {
HCall* call = new HCallNamed(name, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCall* call = new HCallNamed(context, name, argument_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
PushAndAdd(call);
}
subgraphs.Add(subgraph);
}
HBasicBlock* new_exit_block =
BuildTypeSwitch(&maps, &subgraphs, receiver, expr->id());
BuildTypeSwitch(receiver, &maps, &subgraphs, default_graph, expr->id());
subgraph()->set_exit_block(new_exit_block);
// In an effect context, we did not materialize the value in the
// predecessor environments so there's no need to handle it here.
@ -4004,6 +4004,9 @@ bool HGraphBuilder::TryInline(Call* expr) {
CompilationInfo inner_info(target);
if (!ParserApi::Parse(&inner_info) ||
!Scope::Analyze(&inner_info)) {
if (Top::has_pending_exception()) {
SetStackOverflow();
}
return false;
}
FunctionLiteral* function = inner_info.function();
@ -4348,9 +4351,11 @@ void HGraphBuilder::VisitCall(Call* expr) {
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
call = new HCallKeyed(key, argument_count);
HContext* context = new HContext;
AddInstruction(context);
call = new HCallKeyed(context, key, argument_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
Drop(1); // Key.
ast_context()->ReturnInstruction(call, expr->id());
return;
@ -4362,7 +4367,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
if (TryCallApply(expr)) return;
CHECK_BAILOUT;
HValue* receiver = VisitArgument(prop->obj());
VisitArgument(prop->obj());
CHECK_BAILOUT;
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
@ -4372,6 +4377,8 @@ void HGraphBuilder::VisitCall(Call* expr) {
expr->RecordTypeFeedback(oracle());
ZoneMapList* types = expr->GetReceiverTypes();
HValue* receiver =
environment()->ExpressionStackAt(expr->arguments()->length());
if (expr->IsMonomorphic()) {
Handle<Map> receiver_map =
(types == NULL) ? Handle<Map>::null() : types->first();
@ -4387,7 +4394,9 @@ void HGraphBuilder::VisitCall(Call* expr) {
// When the target has a custom call IC generator, use the IC,
// because it is likely to generate better code. Also use the
// IC when a primitive receiver check is required.
call = new HCallNamed(name, argument_count);
HContext* context = new HContext;
AddInstruction(context);
call = new HCallNamed(context, name, argument_count);
} else {
AddCheckConstantFunction(expr, receiver, receiver_map, true);
@ -4416,7 +4425,9 @@ void HGraphBuilder::VisitCall(Call* expr) {
return;
} else {
call = new HCallNamed(name, argument_count);
HContext* context = new HContext;
AddInstruction(context);
call = new HCallNamed(context, name, argument_count);
}
} else {
@ -4486,7 +4497,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
call = new HCallGlobal(var->name(), argument_count);
call = new HCallGlobal(context, var->name(), argument_count);
}
} else {
@ -4498,12 +4509,12 @@ void HGraphBuilder::VisitCall(Call* expr) {
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
call = new HCallFunction(argument_count);
call = new HCallFunction(context, argument_count);
}
}
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
@ -4516,10 +4527,16 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
VisitArgumentList(expr->arguments());
CHECK_BAILOUT;
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
HCall* call = new HCallNew(argument_count);
HContext* context = new HContext;
AddInstruction(context);
// The constructor is both an operand to the instruction and an argument
// to the construct call.
int arg_count = expr->arguments()->length() + 1; // Plus constructor.
HValue* constructor = environment()->ExpressionStackAt(arg_count - 1);
HCall* call = new HCallNew(context, constructor, arg_count);
call->set_position(expr->position());
ProcessCall(call);
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
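// Stack-layout sketch for `new f(x, y)`: after visiting the constructor
// expression and the argument list, the expression stack is [..., f, x, y]
// and arg_count == 3, so ExpressionStackAt(arg_count - 1) reaches past the
// two arguments and yields the constructor f -- making it both an operand
// of HCallNew and the first pushed argument.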
@ -4573,7 +4590,7 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(function->intrinsic_type == Runtime::RUNTIME);
HCall* call = new HCallRuntime(name, expr->function(), argument_count);
call->set_position(RelocInfo::kNoPosition);
ProcessCall(call);
PreProcessCall(call);
ast_context()->ReturnInstruction(call, expr->id());
}
}
@ -4600,12 +4617,18 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
// The subexpression does not have side effects.
ast_context()->ReturnValue(graph()->GetConstantFalse());
} else if (prop != NULL) {
VISIT_FOR_VALUE(prop->obj());
VISIT_FOR_VALUE(prop->key());
HValue* key = Pop();
HValue* obj = Pop();
ast_context()->ReturnInstruction(new HDeleteProperty(obj, key),
expr->id());
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
ast_context()->ReturnValue(graph()->GetConstantFalse());
} else {
VISIT_FOR_VALUE(prop->obj());
VISIT_FOR_VALUE(prop->key());
HValue* key = Pop();
HValue* obj = Pop();
HDeleteProperty* instr = new HDeleteProperty(obj, key);
ast_context()->ReturnInstruction(instr, expr->id());
}
} else if (var->is_global()) {
BAILOUT("delete with global variable");
} else {
@ -4685,10 +4708,6 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
bool inc = expr->op() == Token::INC;
if (var != NULL) {
if (!var->is_global() && !var->IsStackAllocated()) {
BAILOUT("non-stack/non-global variable in count operation");
}
VISIT_FOR_VALUE(target);
// Match the full code generator stack by simulating an extra stack
@ -4704,9 +4723,16 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
after,
expr->position(),
expr->AssignmentId());
} else {
ASSERT(var->IsStackAllocated());
} else if (var->IsStackAllocated()) {
Bind(var, after);
} else if (var->IsContextSlot()) {
HValue* context = BuildContextChainWalk(var);
int index = var->AsSlot()->index();
HStoreContextSlot* instr = new HStoreContextSlot(context, index, after);
AddInstruction(instr);
if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
} else {
BAILOUT("lookup variable in count operation");
}
Drop(has_extra ? 2 : 1);
ast_context()->ReturnValue(expr->is_postfix() ? before : after);
@ -4785,7 +4811,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
HInstruction* store = is_fast_elements
? BuildStoreKeyedFastElement(obj, key, after, prop)
: new HStoreKeyedGeneric(obj, key, after);
: BuildStoreKeyedGeneric(obj, key, after);
AddInstruction(store);
// Drop the key from the bailout environment. Overwrite the receiver
@ -5042,7 +5068,9 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// If the target is not null we have found a known global function that is
// assumed to stay the same for this instanceof.
if (target.is_null()) {
instr = new HInstanceOf(left, right);
HContext* context = new HContext;
AddInstruction(context);
instr = new HInstanceOf(context, left, right);
} else {
AddInstruction(new HCheckFunction(right, target));
instr = new HInstanceOfKnownGlobal(left, target);
@ -5185,9 +5213,10 @@ void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
}
// Support for construct call checks.
void HGraphBuilder::GenerateIsConstructCall(int argument_count, int ast_id) {
BAILOUT("inlined runtime function: IsConstructCall");
ASSERT(argument_count == 0);
ast_context()->ReturnInstruction(new HIsConstructCall, ast_id);
}
@ -5251,8 +5280,11 @@ void HGraphBuilder::GenerateStringCharFromCode(int argument_count,
// Fast support for string.charAt(n) and string[n].
void HGraphBuilder::GenerateStringCharAt(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::StringCharAt, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::StringCharAt, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
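// This shape -- materialize an HContext, construct the HCallStub with it,
// then PreProcessCall() to turn the stack values into HPushArgument
// instructions -- replaces the removed PushArgumentsForStubCall() helper
// and repeats for the stub-backed runtime functions that follow.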
@ -5281,8 +5313,11 @@ void HGraphBuilder::GenerateRandomHeapNumber(int argument_count, int ast_id) {
// Fast support for StringAdd.
void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::StringAdd, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::StringAdd, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5290,8 +5325,11 @@ void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) {
// Fast support for SubString.
void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) {
ASSERT_EQ(3, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::SubString, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::SubString, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5299,8 +5337,11 @@ void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) {
// Fast support for StringCompare.
void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) {
ASSERT_EQ(2, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::StringCompare, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::StringCompare, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5308,8 +5349,11 @@ void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) {
// Support for direct calls from JavaScript to native RegExp code.
void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) {
ASSERT_EQ(4, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::RegExpExec, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::RegExpExec, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5318,9 +5362,11 @@ void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) {
void HGraphBuilder::GenerateRegExpConstructResult(int argument_count,
int ast_id) {
ASSERT_EQ(3, argument_count);
PushArgumentsForStubCall(argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(CodeStub::RegExpConstructResult, argument_count);
new HCallStub(context, CodeStub::RegExpConstructResult, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5334,8 +5380,11 @@ void HGraphBuilder::GenerateGetFromCache(int argument_count, int ast_id) {
// Fast support for number to string.
void HGraphBuilder::GenerateNumberToString(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
HCallStub* result = new HCallStub(CodeStub::NumberToString, argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(context, CodeStub::NumberToString, argument_count);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5366,30 +5415,36 @@ void HGraphBuilder::GenerateMathPow(int argument_count, int ast_id) {
void HGraphBuilder::GenerateMathSin(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(CodeStub::TranscendentalCache, argument_count);
new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
result->set_transcendental_type(TranscendentalCache::SIN);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
void HGraphBuilder::GenerateMathCos(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(CodeStub::TranscendentalCache, argument_count);
new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
result->set_transcendental_type(TranscendentalCache::COS);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
void HGraphBuilder::GenerateMathLog(int argument_count, int ast_id) {
ASSERT_EQ(1, argument_count);
PushArgumentsForStubCall(argument_count);
HContext* context = new HContext;
AddInstruction(context);
HCallStub* result =
new HCallStub(CodeStub::TranscendentalCache, argument_count);
new HCallStub(context, CodeStub::TranscendentalCache, argument_count);
result->set_transcendental_type(TranscendentalCache::LOG);
PreProcessCall(result);
ast_context()->ReturnInstruction(result, ast_id);
}
@ -5408,7 +5463,10 @@ void HGraphBuilder::GenerateIsRegExpEquivalent(int argument_count,
void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count,
int ast_id) {
BAILOUT("inlined runtime function: GetCachedArrayIndex");
ASSERT(argument_count == 1);
HValue* value = Pop();
HGetCachedArrayIndex* result = new HGetCachedArrayIndex(value);
ast_context()->ReturnInstruction(result, ast_id);
}

38
deps/v8/src/hydrogen.h

@ -60,6 +60,8 @@ class HBasicBlock: public ZoneObject {
HGraph* graph() const { return graph_; }
const ZoneList<HPhi*>* phis() const { return &phis_; }
HInstruction* first() const { return first_; }
HInstruction* last() const { return last_; }
void set_last(HInstruction* instr) { last_ = instr; }
HInstruction* GetLastInstruction();
HControlInstruction* end() const { return end_; }
HLoopInformation* loop_information() const { return loop_information_; }
@ -103,16 +105,14 @@ class HBasicBlock: public ZoneObject {
void ClearEnvironment() { last_environment_ = NULL; }
bool HasEnvironment() const { return last_environment_ != NULL; }
void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
HBasicBlock* parent_loop_header() const {
if (!HasParentLoopHeader()) return NULL;
return parent_loop_header_.get();
}
HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
void set_parent_loop_header(HBasicBlock* block) {
parent_loop_header_.set(block);
ASSERT(parent_loop_header_ == NULL);
parent_loop_header_ = block;
}
bool HasParentLoopHeader() const { return parent_loop_header_.is_set(); }
bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
void SetJoinId(int id);
@ -136,9 +136,6 @@ class HBasicBlock: public ZoneObject {
bool IsInlineReturnTarget() const { return is_inline_return_target_; }
void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
Handle<Object> cond() { return cond_; }
void set_cond(Handle<Object> value) { cond_ = value; }
#ifdef DEBUG
void Verify();
#endif
@ -153,7 +150,7 @@ class HBasicBlock: public ZoneObject {
HGraph* graph_;
ZoneList<HPhi*> phis_;
HInstruction* first_;
HInstruction* last_; // Last non-control instruction of the block.
HInstruction* last_;
HControlInstruction* end_;
HLoopInformation* loop_information_;
ZoneList<HBasicBlock*> predecessors_;
@ -166,9 +163,8 @@ class HBasicBlock: public ZoneObject {
int first_instruction_index_;
int last_instruction_index_;
ZoneList<int> deleted_phis_;
SetOncePointer<HBasicBlock> parent_loop_header_;
HBasicBlock* parent_loop_header_;
bool is_inline_return_target_;
Handle<Object> cond_;
};
@ -706,19 +702,17 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* true_block,
HBasicBlock* false_block);
// Visit an argument and wrap it in a PushArgument instruction.
HValue* VisitArgument(Expression* expr);
// Visit an argument subexpression.
void VisitArgument(Expression* expr);
void VisitArgumentList(ZoneList<Expression*>* arguments);
void AddPhi(HPhi* phi);
void PushAndAdd(HInstruction* instr);
void PushArgumentsForStubCall(int argument_count);
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
void ProcessCall(HCall* call);
void PreProcessCall(HCall* call);
void AssumeRepresentation(HValue* value, Representation r);
static Representation ToRepresentation(TypeInfo info);
@ -791,6 +785,9 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildLoadKeyedFastElement(HValue* object,
HValue* key,
Property* expr);
HInstruction* BuildLoadKeyedPixelArrayElement(HValue* object,
HValue* key,
Property* expr);
HInstruction* BuildLoadKeyedGeneric(HValue* object,
HValue* key);
@ -831,9 +828,10 @@ class HGraphBuilder: public AstVisitor {
bool smi_and_map_check);
HBasicBlock* BuildTypeSwitch(ZoneMapList* maps,
ZoneList<HSubgraph*>* subgraphs,
HValue* receiver,
HBasicBlock* BuildTypeSwitch(HValue* receiver,
ZoneMapList* maps,
ZoneList<HSubgraph*>* body_graphs,
HSubgraph* default_graph,
int join_id);
TypeFeedbackOracle* oracle_;

17
deps/v8/src/ia32/assembler-ia32.cc

@ -2559,6 +2559,19 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
}
void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x66);
EMIT(0x0F);
EMIT(0x3A);
EMIT(0x22);
emit_sse_operand(dst, src);
EMIT(offset);
}
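// Encoding note: 66 0F 3A 22 /r ib is SSE4.1 PINSRD -- insert the dword
// from `src` into the lane of `dst` selected by the immediate -- which is
// why the bytes are emitted in exactly that order above.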
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
@ -2594,8 +2607,8 @@ void Assembler::RecordDebugBreakSlot() {
}
void Assembler::RecordComment(const char* msg) {
if (FLAG_code_comments) {
void Assembler::RecordComment(const char* msg, bool force) {
if (FLAG_code_comments || force) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}

53
deps/v8/src/ia32/assembler-ia32.h

@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// A light-weight IA32 Assembler.
@ -64,30 +64,14 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
static const int kNumAllocatableRegisters = 5;
static const int kNumAllocatableRegisters = 6;
static const int kNumRegisters = 8;
static int ToAllocationIndex(Register reg) {
ASSERT(reg.code() < 4 || reg.code() == 7);
return (reg.code() == 7) ? 4 : reg.code();
}
static inline const char* AllocationIndexToString(int index);
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return (index == 4) ? from_code(7) : from_code(index);
}
static inline int ToAllocationIndex(Register reg);
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"eax",
"ecx",
"edx",
"ebx",
"edi"
};
return names[index];
}
static inline Register FromAllocationIndex(int index);
static Register from_code(int code) {
Register r = { code };
@ -110,6 +94,7 @@ struct Register {
int code_;
};
const Register eax = { 0 };
const Register ecx = { 1 };
const Register edx = { 2 };
@ -121,6 +106,26 @@ const Register edi = { 7 };
const Register no_reg = { -1 };
inline const char* Register::AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
// This is the mapping of allocation indices to registers.
const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
return kNames[index];
}
inline int Register::ToAllocationIndex(Register reg) {
ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
}
inline Register Register::FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
return (index >= 4) ? from_code(index + 2) : from_code(index);
}
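// Mapping implied by the two functions above (esp and ebp, codes 4 and 5,
// are never allocatable):
//
//   index:    0    1    2    3    4    5
//   register: eax  ecx  edx  ebx  esi  edi
//   code:     0    1    2    3    6    7
//
// e.g. ToAllocationIndex(esi) == 6 - 2 == 4 and FromAllocationIndex(5)
// == from_code(7) == edi.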
struct XMMRegister {
static const int kNumAllocatableRegisters = 7;
static const int kNumRegisters = 8;
@ -928,6 +933,7 @@ class Assembler : public Malloced {
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
void movntdqa(XMMRegister src, const Operand& dst);
@ -951,8 +957,9 @@ class Assembler : public Malloced {
void RecordDebugBreakSlot();
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
// Use --code-comments to enable, or provide "force = true" flag to always
// write a comment.
void RecordComment(const char* msg, bool force = false);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.

15
deps/v8/src/ia32/builtins-ia32.cc

@ -589,6 +589,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Change context eagerly in case we need the global receiver.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Do not transform the receiver for strict mode functions.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
__ j(not_equal, &shift_arguments);
// Compute the receiver in non-strict mode.
__ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &convert_to_object);
@ -736,6 +743,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Compute the receiver.
Label call_to_object, use_global_receiver, push_receiver;
__ mov(ebx, Operand(ebp, 3 * kPointerSize));
// Do not transform the receiver for strict mode functions.
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
1 << SharedFunctionInfo::kStrictModeBitWithinByte);
__ j(not_equal, &push_receiver);
// Compute the receiver in non-strict mode.
__ test(ebx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object);
__ cmp(ebx, Factory::null_value());

223
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -3887,7 +3887,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(&Counters::regexp_entry_native, 1);
static const int kRegExpExecuteArguments = 7;
__ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
__ EnterApiExitFrame(kRegExpExecuteArguments);
// Argument 7: Indicate that this is a direct call from JavaScript.
__ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
@@ -3932,7 +3932,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Locate the code entry and call it.
__ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
__ CallCFunction(edx, kRegExpExecuteArguments);
__ call(Operand(edx));
// Drop arguments and come back to JS mode.
__ LeaveApiExitFrame();
// Check the result.
Label success;
@@ -3949,12 +3952,30 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// haven't created the exception yet. Handle that in the runtime system.
// TODO(592): Rerunning the RegExp to get the stack overflow exception.
ExternalReference pending_exception(Top::k_pending_exception_address);
__ mov(eax,
__ mov(edx,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
__ cmp(eax, Operand::StaticVariable(pending_exception));
__ mov(eax, Operand::StaticVariable(pending_exception));
__ cmp(edx, Operand(eax));
__ j(equal, &runtime);
// For exception, throw the exception again.
// Clear the pending exception variable.
__ mov(Operand::StaticVariable(pending_exception), edx);
// Special handling of termination exceptions, which are uncatchable
// by JavaScript code.
__ cmp(eax, Factory::termination_exception());
Label throw_termination_exception;
__ j(equal, &throw_termination_exception);
// Handle normal exception by following handler chain.
__ Throw(eax);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(TERMINATION, eax);
__ bind(&failure);
// For failure and exception return null.
// For failure to match, return null.
__ mov(Operand(eax), Factory::null_value());
__ ret(4 * kPointerSize);
@@ -4628,34 +4649,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// eax holds the exception.
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop the sp to the top of the handler.
ExternalReference handler_address(Top::k_handler_address);
__ mov(esp, Operand::StaticVariable(handler_address));
// Restore next handler and frame pointer, discard handler state.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(Operand::StaticVariable(handler_address));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
__ pop(ebp);
__ pop(edx); // Remove state.
// Before returning we restore the context from the frame pointer if
// not NULL. The frame pointer is NULL in the exception handler of
// a JS entry frame.
__ Set(esi, Immediate(0)); // Tentatively set context pointer to NULL.
NearLabel skip;
__ cmp(ebp, 0);
__ j(equal, &skip, not_taken);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0);
__ Throw(eax);
}
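The handler unwinding deleted above now lives behind MacroAssembler::Throw. A data-structure sketch of that unwinding, using illustrative types rather than V8's real layout (the context restore from ebp is omitted):

#include <cstddef>

struct StackHandler {       // kSize == 4 * kPointerSize
  StackHandler* next;       // kNextOffset == 0
  void* fp;                 // kFPOffset == 1 * kPointerSize
  int state;                // kStateOffset == 2 * kPointerSize
  void* pc;                 // kPCOffset == 3 * kPointerSize
};

// Drop to the top handler, relink the handler chain, restore the saved
// frame pointer, discard the state, and resume at the saved pc.
void* Unwind(StackHandler** handler_address, void** fp_out) {
  StackHandler* handler = *handler_address;  // mov esp, [handler_address]
  *handler_address = handler->next;          // pop [handler_address]
  *fp_out = handler->fp;                     // pop ebp
  (void)handler->state;                      // pop edx (state discarded)
  return handler->pc;                        // ret pops and jumps to pc
}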
@@ -4723,6 +4717,23 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ test(ecx, Immediate(kFailureTagMask));
__ j(zero, &failure_returned, not_taken);
ExternalReference pending_exception_address(Top::k_pending_exception_address);
// Check that there is no pending exception; otherwise we
// should have returned some failure value.
if (FLAG_debug_code) {
__ push(edx);
__ mov(edx, Operand::StaticVariable(
ExternalReference::the_hole_value_location()));
NearLabel okay;
__ cmp(edx, Operand::StaticVariable(pending_exception_address));
// Cannot use a check here, as it attempts to generate a call into the runtime.
__ j(equal, &okay);
__ int3();
__ bind(&okay);
__ pop(edx);
}
// Exit the JavaScript to C++ exit frame.
__ LeaveExitFrame(save_doubles_);
__ ret(0);
@@ -4741,7 +4752,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(equal, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
ExternalReference pending_exception_address(Top::k_pending_exception_address);
__ mov(eax, Operand::StaticVariable(pending_exception_address));
__ mov(edx,
Operand::StaticVariable(ExternalReference::the_hole_value_location()));
@@ -4762,52 +4772,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
// Drop sp to the top stack handler.
ExternalReference handler_address(Top::k_handler_address);
__ mov(esp, Operand::StaticVariable(handler_address));
// Unwind the handlers until the ENTRY handler is found.
NearLabel loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
const int kStateOffset = StackHandlerConstants::kStateOffset;
__ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
__ j(equal, &done);
// Fetch the next handler in the list.
const int kNextOffset = StackHandlerConstants::kNextOffset;
__ mov(esp, Operand(esp, kNextOffset));
__ jmp(&loop);
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
__ pop(Operand::StaticVariable(handler_address));
if (type == OUT_OF_MEMORY) {
// Set external caught exception to false.
ExternalReference external_caught(Top::k_external_caught_exception_address);
__ mov(eax, false);
__ mov(Operand::StaticVariable(external_caught), eax);
// Set pending exception and eax to out of memory exception.
ExternalReference pending_exception(Top::k_pending_exception_address);
__ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
__ mov(Operand::StaticVariable(pending_exception), eax);
}
// Clear the context pointer.
__ Set(esi, Immediate(0));
// Restore fp from handler and discard handler state.
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
__ pop(ebp);
__ pop(edx); // State.
STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
__ ret(0);
__ ThrowUncatchable(type, eax);
}
@@ -6543,9 +6508,19 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
__ mov(untagged_key, key);
__ SmiUntag(untagged_key);
// Verify that the receiver has pixel array elements.
__ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
__ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
// By passing NULL as not_pixel_array, callers signal that they have already
// verified that the receiver has pixel array elements.
if (not_pixel_array != NULL) {
__ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
} else {
if (FLAG_debug_code) {
// Map check should have already made sure that elements is a pixel array.
__ cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(Factory::pixel_array_map()));
__ Assert(equal, "Elements isn't a pixel array");
}
}
// Key must be in range.
__ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
@@ -6559,6 +6534,90 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
}
// Stores an indexed element into a pixel array, clamping the stored value.
void GenerateFastPixelArrayStore(MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register elements,
Register scratch1,
bool load_elements_from_receiver,
Label* key_not_smi,
Label* value_not_smi,
Label* not_pixel_array,
Label* out_of_range) {
// Register use:
// receiver - holds the receiver and is unchanged unless the
// store succeeds.
// key - holds the key (must be a smi) and is unchanged.
// value - holds the value (must be a smi) and is unchanged.
// elements - holds the elements object of the receiver on entry if
// load_elements_from_receiver is false, otherwise used
// internally to store the pixel array's elements and
// external array pointer.
//
// receiver, key and value remain unmodified until it's guaranteed that the
// store will succeed.
Register external_pointer = elements;
Register untagged_key = scratch1;
Register untagged_value = receiver; // Only set once success guaranteed.
// Fetch the receiver's elements if the caller hasn't already done so.
if (load_elements_from_receiver) {
__ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
}
// By passing NULL as not_pixel_array, callers signal that they have already
// verified that the receiver has pixel array elements.
if (not_pixel_array != NULL) {
__ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
} else {
if (FLAG_debug_code) {
// Map check should have already made sure that elements is a pixel array.
__ cmp(FieldOperand(elements, HeapObject::kMapOffset),
Immediate(Factory::pixel_array_map()));
__ Assert(equal, "Elements isn't a pixel array");
}
}
// Some callers have already verified that the key is a smi. key_not_smi is
// set to NULL as a sentinel for that case. Otherwise, an explicit check is
// generated to ensure that the key is a smi.
if (key_not_smi != NULL) {
__ JumpIfNotSmi(key, key_not_smi);
} else {
if (FLAG_debug_code) {
__ AbortIfNotSmi(key);
}
}
// Key must be a smi and it must be in range.
__ mov(untagged_key, key);
__ SmiUntag(untagged_key);
__ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
__ j(above_equal, out_of_range); // unsigned check handles negative keys.
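A single unsigned comparison covers both bounds because a negative untagged key, reinterpreted as unsigned, is larger than any valid length. A small sketch of that check:

#include <cassert>
#include <cstdint>

// Mirrors cmp(untagged_key, length) + j(above_equal, out_of_range).
bool KeyInRange(int32_t untagged_key, uint32_t length) {
  return static_cast<uint32_t>(untagged_key) < length;
}

int main() {
  assert(!KeyInRange(-1, 100));   // negative keys fail the unsigned check
  assert(KeyInRange(0, 100));
  assert(!KeyInRange(100, 100));  // one past the end is rejected
  return 0;
}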
// Value must be a smi.
__ JumpIfNotSmi(value, value_not_smi);
__ mov(untagged_value, value);
__ SmiUntag(untagged_value);
{ // Clamp the value to [0..255].
NearLabel done;
__ test(untagged_value, Immediate(0xFFFFFF00));
__ j(zero, &done);
__ setcc(negative, untagged_value); // 1 if negative, 0 if positive.
__ dec_b(untagged_value); // 0 if negative, 255 if positive.
__ bind(&done);
}
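A C++ rendering of the branchless clamp above (a sketch, not V8 code): setcc records 1 for negative inputs, and the byte decrement turns that into 0, while a positive out-of-range value wraps from 0 to 255:

#include <cassert>
#include <cstdint>

uint8_t ClampToByte(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) {            // test + j(zero, &done)
    return static_cast<uint8_t>(value);       // already in [0..255]
  }
  uint8_t negative = value < 0 ? 1 : 0;       // setcc(negative, reg)
  return static_cast<uint8_t>(negative - 1);  // dec_b: 1 -> 0, 0 -> 255
}

int main() {
  assert(ClampToByte(-5) == 0);
  assert(ClampToByte(42) == 42);
  assert(ClampToByte(300) == 255);
  return 0;
}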
__ mov(external_pointer,
FieldOperand(elements, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
__ ret(0); // Return value in eax.
}
#undef __
} } // namespace v8::internal

38
deps/v8/src/ia32/code-stubs-ia32.h

@@ -490,14 +490,14 @@ class NumberToStringStub: public CodeStub {
};
// Generate code to load an element from a pixel array. The receiver is
// assumed to not be a smi and to have elements; the caller must guarantee this
// precondition. If the receiver does not have elements that are pixel arrays,
// the generated code jumps to not_pixel_array. If key is not a smi, then the
// generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key so
// that the smi check is not generated. If key is not a valid index within the
// bounds of the pixel array, the generated code jumps to out_of_range.
// Generate code to load an element from a pixel array. The receiver is assumed
// to not be a smi and to have elements; the caller must guarantee this
// precondition. If key is not a smi, then the generated code branches to
// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
// check has already been performed on key so that the smi check is not
// generated. If key is not a valid index within the bounds of the pixel array,
// the generated code jumps to out_of_range. receiver, key and elements are
// unchanged throughout the generated code sequence.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
@@ -508,6 +508,28 @@ void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Label* key_not_smi,
Label* out_of_range);
// Generate code to store an element into a pixel array, clamping values to the
// range [0..255]. The receiver is assumed to not be a smi and to have elements;
// the caller must guarantee this precondition. If key is not a smi, then the
// generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key so
// that the smi check is not generated. If the value is not a smi, the generated
// code will branch to value_not_smi. If the receiver doesn't have pixel array
// elements, the generated code will branch to not_pixel_array, unless
// not_pixel_array is NULL, in which case the caller must ensure that the
// receiver has pixel array elements. If key is not a valid index within the
// bounds of the pixel array, the generated code jumps to out_of_range.
void GenerateFastPixelArrayStore(MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register elements,
Register scratch1,
bool load_elements_from_receiver,
Label* key_not_smi,
Label* value_not_smi,
Label* not_pixel_array,
Label* out_of_range);
} } // namespace v8::internal

27
deps/v8/src/ia32/codegen-ia32.cc

@@ -3771,14 +3771,15 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// Leave the frame and return popping the arguments and the
// receiver.
frame_->Exit();
masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, ecx);
DeleteFrame();
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Assembler::kJSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
@@ -5587,7 +5588,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->value());
if (property->emit_store()) {
Result ignored =
frame_->CallStoreIC(Handle<String>::cast(key), false);
frame_->CallStoreIC(Handle<String>::cast(key), false,
strict_mode_flag());
// A test eax instruction following the store IC call would
// indicate the presence of an inlined version of the
// store. Add a nop to indicate that there is no such
@@ -8223,19 +8225,24 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
if (property != NULL) {
Load(property->obj());
Load(property->key());
Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
frame_->Push(Smi::FromInt(strict_mode_flag()));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
}
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
frame_->Push(variable->name());
frame_->Push(Smi::FromInt(kNonStrictMode));
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 2);
CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
@@ -9670,7 +9677,7 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
Result result;
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
result = frame()->CallStoreIC(name, is_contextual);
result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// A test eax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test eax
// instruction here.
@@ -9754,7 +9761,7 @@ Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
slow.Bind(&value, &receiver);
frame()->Push(&receiver);
frame()->Push(&value);
result = frame()->CallStoreIC(name, is_contextual);
result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
// Encode the offset to the map check instruction and the offset
// to the write barrier store address computation in a test eax
// instruction.

58
deps/v8/src/ia32/deoptimizer-ia32.cc

@@ -80,6 +80,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
Address prev_address = code_start_address;
for (unsigned i = 0; i < table.length(); ++i) {
Address curr_address = code_start_address + table.GetPcOffset(i);
ASSERT_GE(curr_address, prev_address);
ZapCodeRange(prev_address, curr_address);
SafepointEntry safepoint_entry = table.GetEntry(i);
@@ -97,7 +98,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
RelocInfo::RUNTIME_ENTRY,
reinterpret_cast<intptr_t>(deopt_entry));
reloc_info_writer.Write(&rinfo);
ASSERT_GE(reloc_info_writer.pos(),
reloc_info->address() + ByteArray::kHeaderSize);
curr_address += patch_size();
}
prev_address = curr_address;
@@ -137,39 +139,39 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kPointerSize;
ASSERT(check_code->entry() ==
Assembler::target_address_at(call_target_address));
// The stack check code matches the pattern:
//
// cmp esp, <limit>
// jae ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
// cmp esp, <limit> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x90; // nop
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
Address call_target_address = pc_after - kIntSize;
ASSERT(check_code->entry() ==
Assembler::target_address_at(call_target_address));
// The stack check code matches the pattern:
//
// cmp esp, <limit>
// jae ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
// cmp esp, <limit> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x90; // nop
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
}
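The same patch, sketched on raw code bytes (illustrative only; V8 applies it through the Assembler helpers above). The three bytes before the 32-bit call operand are the jae opcode, its offset, and the call opcode; overwriting the branch with nops makes the on-stack replacement call unconditional:

#include <cstdint>

void PatchAwayBranch(uint8_t* call_target_address) {
  if (call_target_address[-3] == 0x73 &&   // jae
      call_target_address[-2] == 0x07 &&   // offset
      call_target_address[-1] == 0xE8) {   // call
    call_target_address[-3] = 0x90;        // nop
    call_target_address[-2] = 0x90;        // nop
  }
}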
void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
Code* check_code,
Code* replacement_code) {
Address call_target_address = pc_after - kPointerSize;
Address call_target_address = pc_after - kIntSize;
ASSERT(replacement_code->entry() ==
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to

12
deps/v8/src/ia32/disasm-ia32.cc

@@ -1115,10 +1115,20 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pextrd %s,%s,%d",
NameOfXMMRegister(regop),
NameOfCPURegister(regop),
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
} else if (*data == 0x22) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
AppendToBuffer("pinsrd %s,%s,%d",
NameOfXMMRegister(regop),
NameOfCPURegister(rm),
static_cast<int>(imm8));
data += 2;
} else {
UnimplementedInstruction();
}

133
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -47,8 +47,7 @@ namespace internal {
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm)
: masm_(masm) {
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
#ifdef DEBUG
info_emitted_ = false;
#endif
@@ -60,7 +59,7 @@ class JumpPatchSite BASE_EMBEDDED {
void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
__ test(reg, Immediate(kSmiTagMask));
EmitJump(not_carry, target); // Always taken before patched.
EmitJump(not_carry, target); // Always taken before patched.
}
void EmitJumpIfSmi(Register reg, NearLabel* target) {
@@ -310,12 +309,14 @@ void FullCodeGenerator::EmitReturnSequence() {
// patch with the code required by the debugger.
__ mov(esp, ebp);
__ pop(ebp);
__ ret((scope()->num_parameters() + 1) * kPointerSize);
int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
__ Ret(arguments_bytes, ecx);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Assembler::kJSReturnSequenceLength,
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
// Check that the size of the code used for returning is large enough
// for the debugger's requirements.
ASSERT(Assembler::kJSReturnSequenceLength <=
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
#endif
}
}
@@ -330,6 +331,7 @@ FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
} else if (right->IsSmiLiteral()) {
return kRightConstant;
} else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
// Don't inline shifts with constant left hand side.
return kLeftConstant;
} else {
return kNoConstants;
@@ -614,6 +616,7 @@ void FullCodeGenerator::Move(Slot* dst,
// Emit the write barrier code if the location is in the heap.
if (dst->type() == Slot::CONTEXT) {
int offset = Context::SlotOffset(dst->index());
ASSERT(!scratch1.is(esi) && !src.is(esi) && !scratch2.is(esi));
__ RecordWrite(scratch1, offset, src, scratch2);
}
}
@@ -717,18 +720,25 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
} else if (prop != NULL) {
if (function != NULL || mode == Variable::CONST) {
// We are declaring a function or constant that rewrites to a
// property. Use (keyed) IC to set the initial value.
VisitForStackValue(prop->obj());
// property. Use (keyed) IC to set the initial value. We cannot
// visit the rewrite because it's shared and we risk recording
// duplicate AST IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
if (function != NULL) {
VisitForStackValue(prop->key());
__ push(eax);
VisitForAccumulatorValue(function);
__ pop(ecx);
__ pop(edx);
} else {
VisitForAccumulatorValue(prop->key());
__ mov(ecx, result_register());
__ mov(result_register(), Factory::the_hole_value());
__ mov(edx, eax);
__ mov(eax, Factory::the_hole_value());
}
__ pop(edx);
ASSERT(prop->key()->AsLiteral() != NULL &&
prop->key()->AsLiteral()->handle()->IsSmi());
__ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -1635,6 +1645,9 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
bool left_is_constant_smi,
Smi* value) {
NearLabel call_stub, done;
// Optimistically add the smi value to the unknown object. If the result
// overflows or is not a smi, then we had either a smi overflow or added a smi
// to a tagged pointer.
__ add(Operand(eax), Immediate(value));
__ j(overflow, &call_stub);
JumpPatchSite patch_site(masm_);
@@ -1643,8 +1656,7 @@ void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
// Undo the optimistic add operation and call the shared stub.
__ bind(&call_stub);
__ sub(Operand(eax), Immediate(value));
Token::Value op = Token::ADD;
TypeRecordingBinaryOpStub stub(op, mode);
TypeRecordingBinaryOpStub stub(Token::ADD, mode);
if (left_is_constant_smi) {
__ mov(edx, Immediate(value));
} else {
@@ -1663,6 +1675,9 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
bool left_is_constant_smi,
Smi* value) {
NearLabel call_stub, done;
// Optimistically subtract the smi value from the unknown object. If the result
// overflows or is not a smi, then we had either a smi overflow or subtracted a
// smi from a tagged pointer.
if (left_is_constant_smi) {
__ mov(ecx, eax);
__ mov(eax, Immediate(value));
@@ -1683,8 +1698,7 @@ void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
__ mov(edx, eax);
__ mov(eax, Immediate(value));
}
Token::Value op = Token::SUB;
TypeRecordingBinaryOpStub stub(op, mode);
TypeRecordingBinaryOpStub stub(Token::SUB, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);
@@ -1720,7 +1734,7 @@ void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
__ shl(edx, shift_value - 1);
}
// Convert int result to smi, checking that it is in int range.
ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
STATIC_ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
__ add(edx, Operand(edx));
__ j(overflow, &call_stub);
__ mov(eax, edx); // Put result back into eax.
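The add(edx, edx) doubles the value, which is exactly the smi tag shift on ia32 (kSmiTagSize == 1, tag bit 0), and sets the overflow flag precisely when the payload needs more than 31 bits. A sketch of that tagging scheme:

#include <cassert>
#include <cstdint>

inline int32_t SmiTag(int32_t value) { return value << 1; }  // tag bit is 0
inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }
inline bool IsSmi(int32_t word) { return (word & 1) == 0; }  // kSmiTagMask

int main() {
  assert(SmiUntag(SmiTag(123)) == 123);
  assert(IsSmi(SmiTag(-7)));
  assert(!IsSmi(1));  // odd words are heap-object pointers, not smis
  return 0;
}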
@@ -1733,6 +1747,8 @@ void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
}
break;
case Token::SHR:
// SHR must return a positive value. When shifting by 0 or 1 we need to
// check that smi tagging the result will not create a negative value.
if (shift_value < 2) {
__ mov(edx, eax);
__ SmiUntag(edx);
@@ -1975,10 +1991,20 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
}
case KEYED_PROPERTY: {
__ push(eax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);
if (prop->is_synthetic()) {
ASSERT(prop->obj()->AsVariableProxy() != NULL);
ASSERT(prop->key()->AsLiteral() != NULL);
{ AccumulatorValueContext for_object(this);
EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
}
__ mov(edx, eax);
__ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(ecx, eax);
__ pop(edx);
}
__ pop(eax); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
@@ -2004,8 +2030,10 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// ecx, and the global object on the stack.
__ mov(ecx, var->name());
__ mov(edx, GlobalObjectOperand());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -3700,37 +3728,47 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
Property* prop = expr->expression()->AsProperty();
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
if (prop == NULL && var == NULL) {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
context()->Plug(true);
} else if (var != NULL &&
!var->is_global() &&
var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
} else {
// Property or variable reference. Call the delete builtin with
// object and property name as arguments.
if (prop != NULL) {
if (prop != NULL) {
if (prop->is_synthetic()) {
// Result of deleting parameters is false, even when they rewrite
// to accesses on the arguments object.
context()->Plug(false);
} else {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else if (var->is_global()) {
context()->Plug(eax);
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
if (var->is_global()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(kNonStrictMode)));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
} else if (var->AsSlot() != NULL &&
var->AsSlot()->type() != Slot::LOOKUP) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(false);
} else {
// Non-global variable. Call the runtime to delete from the
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kDeleteContextSlot, 2);
context()->Plug(eax);
}
context()->Plug(eax);
} else {
// Result of deleting non-property, non-variable reference is true.
// The subexpression may have side effects.
VisitForEffect(expr->expression());
context()->Plug(true);
}
break;
}
@@ -3949,8 +3987,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub for +1/-1.
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
TypeRecordingBinaryOpStub stub(expr->binary_op(),
NO_OVERWRITE);
TypeRecordingBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
EmitCallIC(stub.GetCode(), &patch_site);
__ bind(&done);

49
deps/v8/src/ia32/ic-ia32.cc

@@ -108,6 +108,9 @@ static void GenerateStringDictionaryProbes(MacroAssembler* masm,
Register name,
Register r0,
Register r1) {
// Assert that name contains a string.
if (FLAG_debug_code) __ AbortIfNotString(name);
// Compute the capacity mask.
const int kCapacityOffset =
StringDictionary::kHeaderSize +
@@ -806,28 +809,17 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ecx: key (a smi)
// edx: receiver
// edi: elements array
__ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
__ mov(ebx, ecx);
__ SmiUntag(ebx);
__ cmp(ebx, FieldOperand(edi, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ mov(ecx, eax); // Save the value. Key is not longer needed.
__ SmiUntag(ecx);
{ // Clamp the value to [0..255].
Label done;
__ test(ecx, Immediate(0xFFFFFF00));
__ j(zero, &done);
__ setcc(negative, ecx); // 1 if negative, 0 if positive.
__ dec_b(ecx); // 0 if negative, 255 if positive.
__ bind(&done);
}
__ mov(edi, FieldOperand(edi, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(edi, ebx, times_1, 0), ecx);
__ ret(0); // Return value in eax.
GenerateFastPixelArrayStore(masm,
edx,
ecx,
eax,
edi,
ebx,
false,
NULL,
&slow,
&slow,
&slow);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -1208,7 +1200,14 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
// Check if the name is a string.
Label miss;
__ test(ecx, Immediate(kSmiTagMask));
__ j(zero, &miss);
Condition cond = masm->IsObjectStringType(ecx, eax, eax);
__ j(NegateCondition(cond), &miss);
GenerateCallNormal(masm, argc);
__ bind(&miss);
GenerateMiss(masm, argc);
}
@@ -1488,7 +1487,8 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::ExtraICState extra_ic_state) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -1498,7 +1498,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC);
MONOMORPHIC,
extra_ic_state);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
// Cache miss: Jump to runtime.
